forked from NationTech/harmony
Compare commits
45 Commits
doc/worker
...
28476fbac4
| Author | SHA1 | Date | |
|---|---|---|---|
| 28476fbac4 | |||
| 06c9d78049 | |||
| e80ad70a4f | |||
| 45b4b082c8 | |||
| 7b542c9865 | |||
| c80ede706b | |||
| b2825ec1ef | |||
| 609d7acb5d | |||
| de761cf538 | |||
| c069207f12 | |||
|
|
7368184917 | ||
| 05205f4ac1 | |||
| 3174645c97 | |||
| 7536f4ec4b | |||
| 464347d3e5 | |||
| 7f415f5b98 | |||
| 2a520a1d7c | |||
| 987f195e2f | |||
| 14d1823d15 | |||
| 2a48d51479 | |||
| 20a227bb41 | |||
| ce91ee0168 | |||
| ed7f81aa1f | |||
| cb66b7592e | |||
| a815f6ac9c | |||
| 2d891e4463 | |||
| f66e58b9ca | |||
| ea39d93aa7 | |||
| 6989d208cf | |||
| c0d54a4466 | |||
| fc384599a1 | |||
| c0bd8007c7 | |||
| 7dff70edcf | |||
| 06a0c44c3c | |||
| 85bec66e58 | |||
| 1f3796f503 | |||
| cf576192a8 | |||
| 5f78300d78 | |||
| f7e9669009 | |||
| 2d3c32469c | |||
| f65e16df7b | |||
| 1cec398d4d | |||
| 58b6268989 | |||
| 4a500e4eb7 | |||
| f073b7e5fb |
101
Cargo.lock
generated
101
Cargo.lock
generated
@@ -429,6 +429,15 @@ dependencies = [
|
|||||||
"wait-timeout",
|
"wait-timeout",
|
||||||
]
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "assertor"
|
||||||
|
version = "0.0.4"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "4ff24d87260733dc86d38a11c60d9400ce4a74a05d0dafa2a6f5ab249cd857cb"
|
||||||
|
dependencies = [
|
||||||
|
"num-traits",
|
||||||
|
]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "async-broadcast"
|
name = "async-broadcast"
|
||||||
version = "0.7.2"
|
version = "0.7.2"
|
||||||
@@ -665,6 +674,22 @@ dependencies = [
|
|||||||
"serde_with",
|
"serde_with",
|
||||||
]
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "brocade"
|
||||||
|
version = "0.1.0"
|
||||||
|
dependencies = [
|
||||||
|
"async-trait",
|
||||||
|
"env_logger",
|
||||||
|
"harmony_secret",
|
||||||
|
"harmony_types",
|
||||||
|
"log",
|
||||||
|
"regex",
|
||||||
|
"russh",
|
||||||
|
"russh-keys",
|
||||||
|
"serde",
|
||||||
|
"tokio",
|
||||||
|
]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "brotli"
|
name = "brotli"
|
||||||
version = "8.0.2"
|
version = "8.0.2"
|
||||||
@@ -1694,6 +1719,24 @@ dependencies = [
|
|||||||
"url",
|
"url",
|
||||||
]
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "example-ha-cluster"
|
||||||
|
version = "0.1.0"
|
||||||
|
dependencies = [
|
||||||
|
"brocade",
|
||||||
|
"cidr",
|
||||||
|
"env_logger",
|
||||||
|
"harmony",
|
||||||
|
"harmony_macros",
|
||||||
|
"harmony_secret",
|
||||||
|
"harmony_tui",
|
||||||
|
"harmony_types",
|
||||||
|
"log",
|
||||||
|
"serde",
|
||||||
|
"tokio",
|
||||||
|
"url",
|
||||||
|
]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "example-kube-rs"
|
name = "example-kube-rs"
|
||||||
version = "0.1.0"
|
version = "0.1.0"
|
||||||
@@ -1755,6 +1798,7 @@ dependencies = [
|
|||||||
name = "example-nanodc"
|
name = "example-nanodc"
|
||||||
version = "0.1.0"
|
version = "0.1.0"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
|
"brocade",
|
||||||
"cidr",
|
"cidr",
|
||||||
"env_logger",
|
"env_logger",
|
||||||
"harmony",
|
"harmony",
|
||||||
@@ -1763,6 +1807,7 @@ dependencies = [
|
|||||||
"harmony_tui",
|
"harmony_tui",
|
||||||
"harmony_types",
|
"harmony_types",
|
||||||
"log",
|
"log",
|
||||||
|
"serde",
|
||||||
"tokio",
|
"tokio",
|
||||||
"url",
|
"url",
|
||||||
]
|
]
|
||||||
@@ -1781,6 +1826,7 @@ dependencies = [
|
|||||||
name = "example-okd-install"
|
name = "example-okd-install"
|
||||||
version = "0.1.0"
|
version = "0.1.0"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
|
"brocade",
|
||||||
"cidr",
|
"cidr",
|
||||||
"env_logger",
|
"env_logger",
|
||||||
"harmony",
|
"harmony",
|
||||||
@@ -1795,17 +1841,50 @@ dependencies = [
|
|||||||
"url",
|
"url",
|
||||||
]
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "example-openbao"
|
||||||
|
version = "0.1.0"
|
||||||
|
dependencies = [
|
||||||
|
"harmony",
|
||||||
|
"harmony_cli",
|
||||||
|
"harmony_macros",
|
||||||
|
"harmony_types",
|
||||||
|
"tokio",
|
||||||
|
"url",
|
||||||
|
]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "example-opnsense"
|
name = "example-opnsense"
|
||||||
version = "0.1.0"
|
version = "0.1.0"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
|
"brocade",
|
||||||
|
"cidr",
|
||||||
|
"env_logger",
|
||||||
|
"harmony",
|
||||||
|
"harmony_cli",
|
||||||
|
"harmony_macros",
|
||||||
|
"harmony_secret",
|
||||||
|
"harmony_types",
|
||||||
|
"log",
|
||||||
|
"serde",
|
||||||
|
"tokio",
|
||||||
|
"url",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "example-opnsense-2"
|
||||||
|
version = "0.1.0"
|
||||||
|
dependencies = [
|
||||||
|
"brocade",
|
||||||
"cidr",
|
"cidr",
|
||||||
"env_logger",
|
"env_logger",
|
||||||
"harmony",
|
"harmony",
|
||||||
"harmony_macros",
|
"harmony_macros",
|
||||||
|
"harmony_secret",
|
||||||
"harmony_tui",
|
"harmony_tui",
|
||||||
"harmony_types",
|
"harmony_types",
|
||||||
"log",
|
"log",
|
||||||
|
"serde",
|
||||||
"tokio",
|
"tokio",
|
||||||
"url",
|
"url",
|
||||||
]
|
]
|
||||||
@@ -1814,6 +1893,7 @@ dependencies = [
|
|||||||
name = "example-pxe"
|
name = "example-pxe"
|
||||||
version = "0.1.0"
|
version = "0.1.0"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
|
"brocade",
|
||||||
"cidr",
|
"cidr",
|
||||||
"env_logger",
|
"env_logger",
|
||||||
"harmony",
|
"harmony",
|
||||||
@@ -1828,6 +1908,15 @@ dependencies = [
|
|||||||
"url",
|
"url",
|
||||||
]
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "example-remove-rook-osd"
|
||||||
|
version = "0.1.0"
|
||||||
|
dependencies = [
|
||||||
|
"harmony",
|
||||||
|
"harmony_cli",
|
||||||
|
"tokio",
|
||||||
|
]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "example-rust"
|
name = "example-rust"
|
||||||
version = "0.1.0"
|
version = "0.1.0"
|
||||||
@@ -2305,9 +2394,11 @@ name = "harmony"
|
|||||||
version = "0.1.0"
|
version = "0.1.0"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"askama",
|
"askama",
|
||||||
|
"assertor",
|
||||||
"async-trait",
|
"async-trait",
|
||||||
"base64 0.22.1",
|
"base64 0.22.1",
|
||||||
"bollard",
|
"bollard",
|
||||||
|
"brocade",
|
||||||
"chrono",
|
"chrono",
|
||||||
"cidr",
|
"cidr",
|
||||||
"convert_case",
|
"convert_case",
|
||||||
@@ -2338,6 +2429,7 @@ dependencies = [
|
|||||||
"once_cell",
|
"once_cell",
|
||||||
"opnsense-config",
|
"opnsense-config",
|
||||||
"opnsense-config-xml",
|
"opnsense-config-xml",
|
||||||
|
"option-ext",
|
||||||
"pretty_assertions",
|
"pretty_assertions",
|
||||||
"reqwest 0.11.27",
|
"reqwest 0.11.27",
|
||||||
"russh",
|
"russh",
|
||||||
@@ -3878,6 +3970,7 @@ checksum = "d05e27ee213611ffe7d6348b942e8f942b37114c00cc03cec254295a4a17852e"
|
|||||||
name = "opnsense-config"
|
name = "opnsense-config"
|
||||||
version = "0.1.0"
|
version = "0.1.0"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
|
"assertor",
|
||||||
"async-trait",
|
"async-trait",
|
||||||
"chrono",
|
"chrono",
|
||||||
"env_logger",
|
"env_logger",
|
||||||
@@ -4537,9 +4630,9 @@ dependencies = [
|
|||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "regex"
|
name = "regex"
|
||||||
version = "1.11.2"
|
version = "1.11.3"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "23d7fd106d8c02486a8d64e778353d1cffe08ce79ac2e82f540c86d0facf6912"
|
checksum = "8b5288124840bee7b386bc413c487869b360b2b4ec421ea56425128692f2a82c"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"aho-corasick 1.1.3",
|
"aho-corasick 1.1.3",
|
||||||
"memchr",
|
"memchr",
|
||||||
@@ -4549,9 +4642,9 @@ dependencies = [
|
|||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "regex-automata"
|
name = "regex-automata"
|
||||||
version = "0.4.10"
|
version = "0.4.11"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "6b9458fa0bfeeac22b5ca447c63aaf45f28439a709ccd244698632f9aa6394d6"
|
checksum = "833eb9ce86d40ef33cb1306d8accf7bc8ec2bfea4355cbdebb3df68b40925cad"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"aho-corasick 1.1.3",
|
"aho-corasick 1.1.3",
|
||||||
"memchr",
|
"memchr",
|
||||||
|
|||||||
15
Cargo.toml
15
Cargo.toml
@@ -14,7 +14,9 @@ members = [
|
|||||||
"harmony_composer",
|
"harmony_composer",
|
||||||
"harmony_inventory_agent",
|
"harmony_inventory_agent",
|
||||||
"harmony_secret_derive",
|
"harmony_secret_derive",
|
||||||
"harmony_secret", "adr/agent_discovery/mdns",
|
"harmony_secret",
|
||||||
|
"adr/agent_discovery/mdns",
|
||||||
|
"brocade",
|
||||||
]
|
]
|
||||||
|
|
||||||
[workspace.package]
|
[workspace.package]
|
||||||
@@ -66,5 +68,12 @@ thiserror = "2.0.14"
|
|||||||
serde = { version = "1.0.209", features = ["derive", "rc"] }
|
serde = { version = "1.0.209", features = ["derive", "rc"] }
|
||||||
serde_json = "1.0.127"
|
serde_json = "1.0.127"
|
||||||
askama = "0.14"
|
askama = "0.14"
|
||||||
sqlx = { version = "0.8", features = ["runtime-tokio", "sqlite" ] }
|
sqlx = { version = "0.8", features = ["runtime-tokio", "sqlite"] }
|
||||||
reqwest = { version = "0.12", features = ["blocking", "stream", "rustls-tls", "http2", "json"], default-features = false }
|
reqwest = { version = "0.12", features = [
|
||||||
|
"blocking",
|
||||||
|
"stream",
|
||||||
|
"rustls-tls",
|
||||||
|
"http2",
|
||||||
|
"json",
|
||||||
|
], default-features = false }
|
||||||
|
assertor = "0.0.4"
|
||||||
|
|||||||
18
brocade/Cargo.toml
Normal file
18
brocade/Cargo.toml
Normal file
@@ -0,0 +1,18 @@
|
|||||||
|
[package]
|
||||||
|
name = "brocade"
|
||||||
|
edition = "2024"
|
||||||
|
version.workspace = true
|
||||||
|
readme.workspace = true
|
||||||
|
license.workspace = true
|
||||||
|
|
||||||
|
[dependencies]
|
||||||
|
async-trait.workspace = true
|
||||||
|
harmony_types = { path = "../harmony_types" }
|
||||||
|
russh.workspace = true
|
||||||
|
russh-keys.workspace = true
|
||||||
|
tokio.workspace = true
|
||||||
|
log.workspace = true
|
||||||
|
env_logger.workspace = true
|
||||||
|
regex = "1.11.3"
|
||||||
|
harmony_secret = { path = "../harmony_secret" }
|
||||||
|
serde.workspace = true
|
||||||
70
brocade/examples/main.rs
Normal file
70
brocade/examples/main.rs
Normal file
@@ -0,0 +1,70 @@
|
|||||||
|
use std::net::{IpAddr, Ipv4Addr};
|
||||||
|
|
||||||
|
use brocade::BrocadeOptions;
|
||||||
|
use harmony_secret::{Secret, SecretManager};
|
||||||
|
use harmony_types::switch::PortLocation;
|
||||||
|
use serde::{Deserialize, Serialize};
|
||||||
|
|
||||||
|
#[derive(Secret, Clone, Debug, Serialize, Deserialize)]
|
||||||
|
struct BrocadeSwitchAuth {
|
||||||
|
username: String,
|
||||||
|
password: String,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[tokio::main]
|
||||||
|
async fn main() {
|
||||||
|
env_logger::Builder::from_env(env_logger::Env::default().default_filter_or("info")).init();
|
||||||
|
|
||||||
|
// let ip = IpAddr::V4(Ipv4Addr::new(10, 0, 0, 250)); // old brocade @ ianlet
|
||||||
|
let ip = IpAddr::V4(Ipv4Addr::new(192, 168, 55, 101)); // brocade @ sto1
|
||||||
|
// let ip = IpAddr::V4(Ipv4Addr::new(192, 168, 4, 11)); // brocade @ st
|
||||||
|
let switch_addresses = vec![ip];
|
||||||
|
|
||||||
|
let config = SecretManager::get_or_prompt::<BrocadeSwitchAuth>()
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
|
||||||
|
let brocade = brocade::init(
|
||||||
|
&switch_addresses,
|
||||||
|
22,
|
||||||
|
&config.username,
|
||||||
|
&config.password,
|
||||||
|
Some(BrocadeOptions {
|
||||||
|
dry_run: true,
|
||||||
|
..Default::default()
|
||||||
|
}),
|
||||||
|
)
|
||||||
|
.await
|
||||||
|
.expect("Brocade client failed to connect");
|
||||||
|
|
||||||
|
let entries = brocade.get_stack_topology().await.unwrap();
|
||||||
|
println!("Stack topology: {entries:#?}");
|
||||||
|
|
||||||
|
let entries = brocade.get_interfaces().await.unwrap();
|
||||||
|
println!("Interfaces: {entries:#?}");
|
||||||
|
|
||||||
|
let version = brocade.version().await.unwrap();
|
||||||
|
println!("Version: {version:?}");
|
||||||
|
|
||||||
|
println!("--------------");
|
||||||
|
let mac_adddresses = brocade.get_mac_address_table().await.unwrap();
|
||||||
|
println!("VLAN\tMAC\t\t\tPORT");
|
||||||
|
for mac in mac_adddresses {
|
||||||
|
println!("{}\t{}\t{}", mac.vlan, mac.mac_address, mac.port);
|
||||||
|
}
|
||||||
|
|
||||||
|
println!("--------------");
|
||||||
|
let channel_name = "1";
|
||||||
|
brocade.clear_port_channel(channel_name).await.unwrap();
|
||||||
|
|
||||||
|
println!("--------------");
|
||||||
|
let channel_id = brocade.find_available_channel_id().await.unwrap();
|
||||||
|
|
||||||
|
println!("--------------");
|
||||||
|
let channel_name = "HARMONY_LAG";
|
||||||
|
let ports = [PortLocation(2, 0, 35)];
|
||||||
|
brocade
|
||||||
|
.create_port_channel(channel_id, channel_name, &ports)
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
}
|
||||||
212
brocade/src/fast_iron.rs
Normal file
212
brocade/src/fast_iron.rs
Normal file
@@ -0,0 +1,212 @@
|
|||||||
|
use super::BrocadeClient;
|
||||||
|
use crate::{
|
||||||
|
BrocadeInfo, Error, ExecutionMode, InterSwitchLink, InterfaceInfo, MacAddressEntry,
|
||||||
|
PortChannelId, PortOperatingMode, parse_brocade_mac_address, shell::BrocadeShell,
|
||||||
|
};
|
||||||
|
|
||||||
|
use async_trait::async_trait;
|
||||||
|
use harmony_types::switch::{PortDeclaration, PortLocation};
|
||||||
|
use log::{debug, info};
|
||||||
|
use regex::Regex;
|
||||||
|
use std::{collections::HashSet, str::FromStr};
|
||||||
|
|
||||||
|
#[derive(Debug)]
|
||||||
|
pub struct FastIronClient {
|
||||||
|
shell: BrocadeShell,
|
||||||
|
version: BrocadeInfo,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl FastIronClient {
|
||||||
|
pub fn init(mut shell: BrocadeShell, version_info: BrocadeInfo) -> Self {
|
||||||
|
shell.before_all(vec!["skip-page-display".into()]);
|
||||||
|
shell.after_all(vec!["page".into()]);
|
||||||
|
|
||||||
|
Self {
|
||||||
|
shell,
|
||||||
|
version: version_info,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn parse_mac_entry(&self, line: &str) -> Option<Result<MacAddressEntry, Error>> {
|
||||||
|
debug!("[Brocade] Parsing mac address entry: {line}");
|
||||||
|
let parts: Vec<&str> = line.split_whitespace().collect();
|
||||||
|
if parts.len() < 3 {
|
||||||
|
return None;
|
||||||
|
}
|
||||||
|
|
||||||
|
let (vlan, mac_address, port) = match parts.len() {
|
||||||
|
3 => (
|
||||||
|
u16::from_str(parts[0]).ok()?,
|
||||||
|
parse_brocade_mac_address(parts[1]).ok()?,
|
||||||
|
parts[2].to_string(),
|
||||||
|
),
|
||||||
|
_ => (
|
||||||
|
1,
|
||||||
|
parse_brocade_mac_address(parts[0]).ok()?,
|
||||||
|
parts[1].to_string(),
|
||||||
|
),
|
||||||
|
};
|
||||||
|
|
||||||
|
let port =
|
||||||
|
PortDeclaration::parse(&port).map_err(|e| Error::UnexpectedError(format!("{e}")));
|
||||||
|
|
||||||
|
match port {
|
||||||
|
Ok(p) => Some(Ok(MacAddressEntry {
|
||||||
|
vlan,
|
||||||
|
mac_address,
|
||||||
|
port: p,
|
||||||
|
})),
|
||||||
|
Err(e) => Some(Err(e)),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn parse_stack_port_entry(&self, line: &str) -> Option<Result<InterSwitchLink, Error>> {
|
||||||
|
debug!("[Brocade] Parsing stack port entry: {line}");
|
||||||
|
let parts: Vec<&str> = line.split_whitespace().collect();
|
||||||
|
if parts.len() < 10 {
|
||||||
|
return None;
|
||||||
|
}
|
||||||
|
|
||||||
|
let local_port = PortLocation::from_str(parts[0]).ok()?;
|
||||||
|
|
||||||
|
Some(Ok(InterSwitchLink {
|
||||||
|
local_port,
|
||||||
|
remote_port: None,
|
||||||
|
}))
|
||||||
|
}
|
||||||
|
|
||||||
|
fn build_port_channel_commands(
|
||||||
|
&self,
|
||||||
|
channel_id: PortChannelId,
|
||||||
|
channel_name: &str,
|
||||||
|
ports: &[PortLocation],
|
||||||
|
) -> Vec<String> {
|
||||||
|
let mut commands = vec![
|
||||||
|
"configure terminal".to_string(),
|
||||||
|
format!("lag {channel_name} static id {channel_id}"),
|
||||||
|
];
|
||||||
|
|
||||||
|
for port in ports {
|
||||||
|
commands.push(format!("ports ethernet {port}"));
|
||||||
|
}
|
||||||
|
|
||||||
|
commands.push(format!("primary-port {}", ports[0]));
|
||||||
|
commands.push("deploy".into());
|
||||||
|
commands.push("exit".into());
|
||||||
|
commands.push("write memory".into());
|
||||||
|
commands.push("exit".into());
|
||||||
|
|
||||||
|
commands
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[async_trait]
|
||||||
|
impl BrocadeClient for FastIronClient {
|
||||||
|
async fn version(&self) -> Result<BrocadeInfo, Error> {
|
||||||
|
Ok(self.version.clone())
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn get_mac_address_table(&self) -> Result<Vec<MacAddressEntry>, Error> {
|
||||||
|
info!("[Brocade] Showing MAC address table...");
|
||||||
|
|
||||||
|
let output = self
|
||||||
|
.shell
|
||||||
|
.run_command("show mac-address", ExecutionMode::Regular)
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
output
|
||||||
|
.lines()
|
||||||
|
.skip(2)
|
||||||
|
.filter_map(|line| self.parse_mac_entry(line))
|
||||||
|
.collect()
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn get_stack_topology(&self) -> Result<Vec<InterSwitchLink>, Error> {
|
||||||
|
let output = self
|
||||||
|
.shell
|
||||||
|
.run_command("show interface stack-ports", crate::ExecutionMode::Regular)
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
output
|
||||||
|
.lines()
|
||||||
|
.skip(1)
|
||||||
|
.filter_map(|line| self.parse_stack_port_entry(line))
|
||||||
|
.collect()
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn get_interfaces(&self) -> Result<Vec<InterfaceInfo>, Error> {
|
||||||
|
todo!()
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn configure_interfaces(
|
||||||
|
&self,
|
||||||
|
_interfaces: Vec<(String, PortOperatingMode)>,
|
||||||
|
) -> Result<(), Error> {
|
||||||
|
todo!()
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn find_available_channel_id(&self) -> Result<PortChannelId, Error> {
|
||||||
|
info!("[Brocade] Finding next available channel id...");
|
||||||
|
|
||||||
|
let output = self
|
||||||
|
.shell
|
||||||
|
.run_command("show lag", ExecutionMode::Regular)
|
||||||
|
.await?;
|
||||||
|
let re = Regex::new(r"=== LAG .* ID\s+(\d+)").expect("Invalid regex");
|
||||||
|
|
||||||
|
let used_ids: HashSet<u8> = output
|
||||||
|
.lines()
|
||||||
|
.filter_map(|line| {
|
||||||
|
re.captures(line)
|
||||||
|
.and_then(|c| c.get(1))
|
||||||
|
.and_then(|id_match| id_match.as_str().parse().ok())
|
||||||
|
})
|
||||||
|
.collect();
|
||||||
|
|
||||||
|
let mut next_id: u8 = 1;
|
||||||
|
loop {
|
||||||
|
if !used_ids.contains(&next_id) {
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
next_id += 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
info!("[Brocade] Found channel id: {next_id}");
|
||||||
|
Ok(next_id)
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn create_port_channel(
|
||||||
|
&self,
|
||||||
|
channel_id: PortChannelId,
|
||||||
|
channel_name: &str,
|
||||||
|
ports: &[PortLocation],
|
||||||
|
) -> Result<(), Error> {
|
||||||
|
info!(
|
||||||
|
"[Brocade] Configuring port-channel '{channel_name} {channel_id}' with ports: {ports:?}"
|
||||||
|
);
|
||||||
|
|
||||||
|
let commands = self.build_port_channel_commands(channel_id, channel_name, ports);
|
||||||
|
self.shell
|
||||||
|
.run_commands(commands, ExecutionMode::Privileged)
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
info!("[Brocade] Port-channel '{channel_name}' configured.");
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn clear_port_channel(&self, channel_name: &str) -> Result<(), Error> {
|
||||||
|
info!("[Brocade] Clearing port-channel: {channel_name}");
|
||||||
|
|
||||||
|
let commands = vec![
|
||||||
|
"configure terminal".to_string(),
|
||||||
|
format!("no lag {channel_name}"),
|
||||||
|
"write memory".to_string(),
|
||||||
|
];
|
||||||
|
self.shell
|
||||||
|
.run_commands(commands, ExecutionMode::Privileged)
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
info!("[Brocade] Port-channel '{channel_name}' cleared.");
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
}
|
||||||
338
brocade/src/lib.rs
Normal file
338
brocade/src/lib.rs
Normal file
@@ -0,0 +1,338 @@
|
|||||||
|
use std::net::IpAddr;
|
||||||
|
use std::{
|
||||||
|
fmt::{self, Display},
|
||||||
|
time::Duration,
|
||||||
|
};
|
||||||
|
|
||||||
|
use crate::network_operating_system::NetworkOperatingSystemClient;
|
||||||
|
use crate::{
|
||||||
|
fast_iron::FastIronClient,
|
||||||
|
shell::{BrocadeSession, BrocadeShell},
|
||||||
|
};
|
||||||
|
|
||||||
|
use async_trait::async_trait;
|
||||||
|
use harmony_types::net::MacAddress;
|
||||||
|
use harmony_types::switch::{PortDeclaration, PortLocation};
|
||||||
|
use regex::Regex;
|
||||||
|
|
||||||
|
mod fast_iron;
|
||||||
|
mod network_operating_system;
|
||||||
|
mod shell;
|
||||||
|
mod ssh;
|
||||||
|
|
||||||
|
#[derive(Default, Clone, Debug)]
|
||||||
|
pub struct BrocadeOptions {
|
||||||
|
pub dry_run: bool,
|
||||||
|
pub ssh: ssh::SshOptions,
|
||||||
|
pub timeouts: TimeoutConfig,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Clone, Debug)]
|
||||||
|
pub struct TimeoutConfig {
|
||||||
|
pub shell_ready: Duration,
|
||||||
|
pub command_execution: Duration,
|
||||||
|
pub command_output: Duration,
|
||||||
|
pub cleanup: Duration,
|
||||||
|
pub message_wait: Duration,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Default for TimeoutConfig {
|
||||||
|
fn default() -> Self {
|
||||||
|
Self {
|
||||||
|
shell_ready: Duration::from_secs(10),
|
||||||
|
command_execution: Duration::from_secs(60), // Commands like `deploy` (for a LAG) can take a while
|
||||||
|
command_output: Duration::from_secs(5), // Delay to start logging "waiting for command output"
|
||||||
|
cleanup: Duration::from_secs(10),
|
||||||
|
message_wait: Duration::from_millis(500),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
enum ExecutionMode {
|
||||||
|
Regular,
|
||||||
|
Privileged,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Clone, Debug)]
|
||||||
|
pub struct BrocadeInfo {
|
||||||
|
os: BrocadeOs,
|
||||||
|
version: String,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Clone, Debug)]
|
||||||
|
pub enum BrocadeOs {
|
||||||
|
NetworkOperatingSystem,
|
||||||
|
FastIron,
|
||||||
|
Unknown,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Clone)]
|
||||||
|
pub struct MacAddressEntry {
|
||||||
|
pub vlan: u16,
|
||||||
|
pub mac_address: MacAddress,
|
||||||
|
pub port: PortDeclaration,
|
||||||
|
}
|
||||||
|
|
||||||
|
pub type PortChannelId = u8;
|
||||||
|
|
||||||
|
/// Represents a single physical or logical link connecting two switches within a stack or fabric.
|
||||||
|
///
|
||||||
|
/// This structure provides a standardized view of the topology regardless of the
|
||||||
|
/// underlying Brocade OS configuration (stacking vs. fabric).
|
||||||
|
#[derive(Debug, PartialEq, Eq, Clone)]
|
||||||
|
pub struct InterSwitchLink {
|
||||||
|
/// The local port on the switch where the topology command was run.
|
||||||
|
pub local_port: PortLocation,
|
||||||
|
/// The port on the directly connected neighboring switch.
|
||||||
|
pub remote_port: Option<PortLocation>,
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Represents the key running configuration status of a single switch interface.
|
||||||
|
#[derive(Debug, PartialEq, Eq, Clone)]
|
||||||
|
pub struct InterfaceInfo {
|
||||||
|
/// The full configuration name (e.g., "TenGigabitEthernet 1/0/1", "FortyGigabitEthernet 2/0/2").
|
||||||
|
pub name: String,
|
||||||
|
/// The physical location of the interface.
|
||||||
|
pub port_location: PortLocation,
|
||||||
|
/// The parsed type and name prefix of the interface.
|
||||||
|
pub interface_type: InterfaceType,
|
||||||
|
/// The primary configuration mode defining the interface's behavior (L2, L3, Fabric).
|
||||||
|
pub operating_mode: Option<PortOperatingMode>,
|
||||||
|
/// Indicates the current state of the interface.
|
||||||
|
pub status: InterfaceStatus,
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Categorizes the functional type of a switch interface.
|
||||||
|
#[derive(Debug, PartialEq, Eq, Clone)]
|
||||||
|
pub enum InterfaceType {
|
||||||
|
/// Physical or virtual Ethernet interface (e.g., TenGigabitEthernet, FortyGigabitEthernet).
|
||||||
|
Ethernet(String),
|
||||||
|
}
|
||||||
|
|
||||||
|
impl fmt::Display for InterfaceType {
|
||||||
|
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||||
|
match self {
|
||||||
|
InterfaceType::Ethernet(name) => write!(f, "{name}"),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Defines the primary configuration mode of a switch interface, representing mutually exclusive roles.
|
||||||
|
#[derive(Debug, PartialEq, Eq, Clone)]
|
||||||
|
pub enum PortOperatingMode {
|
||||||
|
/// The interface is explicitly configured for Brocade fabric roles (ISL or Trunk enabled).
|
||||||
|
Fabric,
|
||||||
|
/// The interface is configured for standard Layer 2 switching as Trunk port (`switchport mode trunk`).
|
||||||
|
Trunk,
|
||||||
|
/// The interface is configured for standard Layer 2 switching as Access port (`switchport` without trunk mode).
|
||||||
|
Access,
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Defines the possible status of an interface.
|
||||||
|
#[derive(Debug, PartialEq, Eq, Clone)]
|
||||||
|
pub enum InterfaceStatus {
|
||||||
|
/// The interface is connected.
|
||||||
|
Connected,
|
||||||
|
/// The interface is not connected and is not expected to be.
|
||||||
|
NotConnected,
|
||||||
|
/// The interface is not connected but is expected to be (configured with `no shutdown`).
|
||||||
|
SfpAbsent,
|
||||||
|
}
|
||||||
|
|
||||||
|
pub async fn init(
|
||||||
|
ip_addresses: &[IpAddr],
|
||||||
|
port: u16,
|
||||||
|
username: &str,
|
||||||
|
password: &str,
|
||||||
|
options: Option<BrocadeOptions>,
|
||||||
|
) -> Result<Box<dyn BrocadeClient + Send + Sync>, Error> {
|
||||||
|
let shell = BrocadeShell::init(ip_addresses, port, username, password, options).await?;
|
||||||
|
|
||||||
|
let version_info = shell
|
||||||
|
.with_session(ExecutionMode::Regular, |session| {
|
||||||
|
Box::pin(get_brocade_info(session))
|
||||||
|
})
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
Ok(match version_info.os {
|
||||||
|
BrocadeOs::FastIron => Box::new(FastIronClient::init(shell, version_info)),
|
||||||
|
BrocadeOs::NetworkOperatingSystem => {
|
||||||
|
Box::new(NetworkOperatingSystemClient::init(shell, version_info))
|
||||||
|
}
|
||||||
|
BrocadeOs::Unknown => todo!(),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
#[async_trait]
|
||||||
|
pub trait BrocadeClient: std::fmt::Debug {
|
||||||
|
/// Retrieves the operating system and version details from the connected Brocade switch.
|
||||||
|
///
|
||||||
|
/// This is typically the first call made after establishing a connection to determine
|
||||||
|
/// the switch OS family (e.g., FastIron, NOS) for feature compatibility.
|
||||||
|
///
|
||||||
|
/// # Returns
|
||||||
|
///
|
||||||
|
/// A `BrocadeInfo` structure containing parsed OS type and version string.
|
||||||
|
async fn version(&self) -> Result<BrocadeInfo, Error>;
|
||||||
|
|
||||||
|
/// Retrieves the dynamically learned MAC address table from the switch.
|
||||||
|
///
|
||||||
|
/// This is crucial for discovering where specific network endpoints (MAC addresses)
|
||||||
|
/// are currently located on the physical ports.
|
||||||
|
///
|
||||||
|
/// # Returns
|
||||||
|
///
|
||||||
|
/// A vector of `MacAddressEntry`, where each entry typically contains VLAN, MAC address,
|
||||||
|
/// and the associated port name/index.
|
||||||
|
async fn get_mac_address_table(&self) -> Result<Vec<MacAddressEntry>, Error>;
|
||||||
|
|
||||||
|
/// Derives the physical connections used to link multiple switches together
|
||||||
|
/// to form a single logical entity (stack, fabric, etc.).
|
||||||
|
///
|
||||||
|
/// This abstracts the underlying configuration (e.g., stack ports, fabric ports)
|
||||||
|
/// to return a standardized view of the topology.
|
||||||
|
///
|
||||||
|
/// # Returns
|
||||||
|
///
|
||||||
|
/// A vector of `InterSwitchLink` structs detailing which ports are used for stacking/fabric.
|
||||||
|
/// If the switch is not stacked, returns an empty vector.
|
||||||
|
async fn get_stack_topology(&self) -> Result<Vec<InterSwitchLink>, Error>;
|
||||||
|
|
||||||
|
/// Retrieves the status for all interfaces
|
||||||
|
///
|
||||||
|
/// # Returns
|
||||||
|
///
|
||||||
|
/// A vector of `InterfaceInfo` structures.
|
||||||
|
async fn get_interfaces(&self) -> Result<Vec<InterfaceInfo>, Error>;
|
||||||
|
|
||||||
|
/// Configures a set of interfaces to be operated with a specified mode (access ports, ISL, etc.).
|
||||||
|
async fn configure_interfaces(
|
||||||
|
&self,
|
||||||
|
interfaces: Vec<(String, PortOperatingMode)>,
|
||||||
|
) -> Result<(), Error>;
|
||||||
|
|
||||||
|
/// Scans the existing configuration to find the next available (unused)
|
||||||
|
/// Port-Channel ID (`lag` or `trunk`) for assignment.
|
||||||
|
///
|
||||||
|
/// # Returns
|
||||||
|
///
|
||||||
|
/// The smallest, unassigned `PortChannelId` within the supported range.
|
||||||
|
async fn find_available_channel_id(&self) -> Result<PortChannelId, Error>;
|
||||||
|
|
||||||
|
/// Creates and configures a new Port-Channel (Link Aggregation Group or LAG)
|
||||||
|
/// using the specified channel ID and ports.
|
||||||
|
///
|
||||||
|
/// The resulting configuration must be persistent (saved to startup-config).
|
||||||
|
/// Assumes a static LAG configuration mode unless specified otherwise by the implementation.
|
||||||
|
///
|
||||||
|
/// # Parameters
|
||||||
|
///
|
||||||
|
/// * `channel_id`: The ID (e.g., 1-128) for the logical port channel.
|
||||||
|
/// * `channel_name`: A descriptive name for the LAG (used in configuration context).
|
||||||
|
/// * `ports`: A slice of `PortLocation` structs defining the physical member ports.
|
||||||
|
async fn create_port_channel(
|
||||||
|
&self,
|
||||||
|
channel_id: PortChannelId,
|
||||||
|
channel_name: &str,
|
||||||
|
ports: &[PortLocation],
|
||||||
|
) -> Result<(), Error>;
|
||||||
|
|
||||||
|
/// Removes all configuration associated with the specified Port-Channel name.
|
||||||
|
///
|
||||||
|
/// This operation should be idempotent; attempting to clear a non-existent
|
||||||
|
/// channel should succeed (or return a benign error).
|
||||||
|
///
|
||||||
|
/// # Parameters
|
||||||
|
///
|
||||||
|
/// * `channel_name`: The name of the Port-Channel (LAG) to delete.
|
||||||
|
///
|
||||||
|
async fn clear_port_channel(&self, channel_name: &str) -> Result<(), Error>;
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn get_brocade_info(session: &mut BrocadeSession) -> Result<BrocadeInfo, Error> {
|
||||||
|
let output = session.run_command("show version").await?;
|
||||||
|
|
||||||
|
if output.contains("Network Operating System") {
|
||||||
|
let re = Regex::new(r"Network Operating System Version:\s*(?P<version>[a-zA-Z0-9.\-]+)")
|
||||||
|
.expect("Invalid regex");
|
||||||
|
let version = re
|
||||||
|
.captures(&output)
|
||||||
|
.and_then(|cap| cap.name("version"))
|
||||||
|
.map(|m| m.as_str().to_string())
|
||||||
|
.unwrap_or_default();
|
||||||
|
|
||||||
|
return Ok(BrocadeInfo {
|
||||||
|
os: BrocadeOs::NetworkOperatingSystem,
|
||||||
|
version,
|
||||||
|
});
|
||||||
|
} else if output.contains("ICX") {
|
||||||
|
let re = Regex::new(r"(?m)^\s*SW: Version\s*(?P<version>[a-zA-Z0-9.\-]+)")
|
||||||
|
.expect("Invalid regex");
|
||||||
|
let version = re
|
||||||
|
.captures(&output)
|
||||||
|
.and_then(|cap| cap.name("version"))
|
||||||
|
.map(|m| m.as_str().to_string())
|
||||||
|
.unwrap_or_default();
|
||||||
|
|
||||||
|
return Ok(BrocadeInfo {
|
||||||
|
os: BrocadeOs::FastIron,
|
||||||
|
version,
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
Err(Error::UnexpectedError("Unknown Brocade OS version".into()))
|
||||||
|
}
|
||||||
|
|
||||||
|
fn parse_brocade_mac_address(value: &str) -> Result<MacAddress, String> {
|
||||||
|
let cleaned_mac = value.replace('.', "");
|
||||||
|
|
||||||
|
if cleaned_mac.len() != 12 {
|
||||||
|
return Err(format!("Invalid MAC address: {value}"));
|
||||||
|
}
|
||||||
|
|
||||||
|
let mut bytes = [0u8; 6];
|
||||||
|
for (i, pair) in cleaned_mac.as_bytes().chunks(2).enumerate() {
|
||||||
|
let byte_str = std::str::from_utf8(pair).map_err(|_| "Invalid UTF-8")?;
|
||||||
|
bytes[i] =
|
||||||
|
u8::from_str_radix(byte_str, 16).map_err(|_| format!("Invalid hex in MAC: {value}"))?;
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(MacAddress(bytes))
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Errors produced by the Brocade client and its SSH transport.
#[derive(Debug)]
pub enum Error {
    /// Transport-level failure (TCP connect, SSH channel I/O).
    NetworkError(String),
    /// Login or privilege-elevation (`enable`) failure.
    AuthenticationError(String),
    /// Invalid or unusable client configuration (e.g. no switch IP provided).
    ConfigurationError(String),
    /// An operation did not complete within its configured timeout.
    TimeoutError(String),
    /// Any failure that does not fit the other variants.
    UnexpectedError(String),
    /// The switch accepted a command but its output indicated an error.
    CommandError(String),
}
|
||||||
|
|
||||||
|
impl Display for Error {
|
||||||
|
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||||
|
match self {
|
||||||
|
Error::NetworkError(msg) => write!(f, "Network error: {msg}"),
|
||||||
|
Error::AuthenticationError(msg) => write!(f, "Authentication error: {msg}"),
|
||||||
|
Error::ConfigurationError(msg) => write!(f, "Configuration error: {msg}"),
|
||||||
|
Error::TimeoutError(msg) => write!(f, "Timeout error: {msg}"),
|
||||||
|
Error::UnexpectedError(msg) => write!(f, "Unexpected error: {msg}"),
|
||||||
|
Error::CommandError(msg) => write!(f, "{msg}"),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl From<Error> for String {
|
||||||
|
fn from(val: Error) -> Self {
|
||||||
|
format!("{val}")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Marker impl: `Debug` and `Display` exist above, so the default
// `std::error::Error` methods are sufficient.
impl std::error::Error for Error {}
|
||||||
|
|
||||||
|
/// Wraps any `russh` client error as a transport-level [`Error::NetworkError`],
/// letting SSH failures propagate through `?`.
impl From<russh::Error> for Error {
    fn from(value: russh::Error) -> Self {
        Error::NetworkError(format!("Russh client error: {value}"))
    }
}
|
||||||
333
brocade/src/network_operating_system.rs
Normal file
333
brocade/src/network_operating_system.rs
Normal file
@@ -0,0 +1,333 @@
|
|||||||
|
use std::str::FromStr;
|
||||||
|
|
||||||
|
use async_trait::async_trait;
|
||||||
|
use harmony_types::switch::{PortDeclaration, PortLocation};
|
||||||
|
use log::{debug, info};
|
||||||
|
use regex::Regex;
|
||||||
|
|
||||||
|
use crate::{
|
||||||
|
BrocadeClient, BrocadeInfo, Error, ExecutionMode, InterSwitchLink, InterfaceInfo,
|
||||||
|
InterfaceStatus, InterfaceType, MacAddressEntry, PortChannelId, PortOperatingMode,
|
||||||
|
parse_brocade_mac_address, shell::BrocadeShell,
|
||||||
|
};
|
||||||
|
|
||||||
|
/// `BrocadeClient` implementation for switches running Brocade Network
/// Operating System (NOS), e.g. VDX fabric switches.
#[derive(Debug)]
pub struct NetworkOperatingSystemClient {
    // SSH shell used to run CLI commands on the switch.
    shell: BrocadeShell,
    // OS family/version captured when the client was initialized.
    version: BrocadeInfo,
}
|
||||||
|
|
||||||
|
impl NetworkOperatingSystemClient {
    /// Builds a client around an established shell and pre-fetched version info.
    ///
    /// Registers `terminal length 0` to run before every session so the switch
    /// never paginates command output.
    pub fn init(mut shell: BrocadeShell, version_info: BrocadeInfo) -> Self {
        shell.before_all(vec!["terminal length 0".into()]);

        Self {
            shell,
            version: version_info,
        }
    }

    /// Parses one line of `show mac-address-table` output.
    ///
    /// Returns `None` for lines that are not MAC entries (headers, blanks,
    /// short rows) and `Some(Err(_))` when the port column cannot be parsed.
    fn parse_mac_entry(&self, line: &str) -> Option<Result<MacAddressEntry, Error>> {
        debug!("[Brocade] Parsing mac address entry: {line}");
        let parts: Vec<&str> = line.split_whitespace().collect();
        if parts.len() < 5 {
            return None;
        }

        // Column layout varies: the port is the 5th field on 5-column rows
        // and the 6th field on wider rows. VLAN and MAC positions are fixed.
        let (vlan, mac_address, port) = match parts.len() {
            5 => (
                u16::from_str(parts[0]).ok()?,
                parse_brocade_mac_address(parts[1]).ok()?,
                parts[4].to_string(),
            ),
            _ => (
                u16::from_str(parts[0]).ok()?,
                parse_brocade_mac_address(parts[1]).ok()?,
                parts[5].to_string(),
            ),
        };

        let port =
            PortDeclaration::parse(&port).map_err(|e| Error::UnexpectedError(format!("{e}")));

        match port {
            Ok(p) => Some(Ok(MacAddressEntry {
                vlan,
                mac_address,
                port: p,
            })),
            Err(e) => Some(Err(e)),
        }
    }

    /// Parses one line of `show fabric isl` output into an [`InterSwitchLink`].
    ///
    /// Expects the local port in column 3 and the remote port in column 6;
    /// shorter rows (headers/banners) yield `None`.
    /// NOTE(review): column positions assume the NOS `show fabric isl`
    /// layout — confirm against other firmware revisions.
    fn parse_inter_switch_link_entry(&self, line: &str) -> Option<Result<InterSwitchLink, Error>> {
        debug!("[Brocade] Parsing inter switch link entry: {line}");
        let parts: Vec<&str> = line.split_whitespace().collect();
        if parts.len() < 10 {
            return None;
        }

        let local_port = PortLocation::from_str(parts[2]).ok()?;
        let remote_port = PortLocation::from_str(parts[5]).ok()?;

        Some(Ok(InterSwitchLink {
            local_port,
            remote_port: Some(remote_port),
        }))
    }

    /// Parses one line of `show interface status` output.
    ///
    /// Only `Fo` (forty-gigabit) and `Te` (ten-gigabit) ethernet rows are
    /// recognized; any other row — headers, other interface kinds, unknown
    /// states or modes — yields `None` and is skipped by the caller.
    fn parse_interface_status_entry(&self, line: &str) -> Option<Result<InterfaceInfo, Error>> {
        debug!("[Brocade] Parsing interface status entry: {line}");
        let parts: Vec<&str> = line.split_whitespace().collect();
        if parts.len() < 6 {
            return None;
        }

        let interface_type = match parts[0] {
            "Fo" => InterfaceType::Ethernet("FortyGigabitEthernet".to_string()),
            "Te" => InterfaceType::Ethernet("TenGigabitEthernet".to_string()),
            _ => return None,
        };
        let port_location = PortLocation::from_str(parts[1]).ok()?;
        let status = match parts[2] {
            "connected" => InterfaceStatus::Connected,
            "notconnected" => InterfaceStatus::NotConnected,
            "sfpAbsent" => InterfaceStatus::SfpAbsent,
            _ => return None,
        };
        // "--" means the port has no configured operating mode.
        let operating_mode = match parts[3] {
            "ISL" => Some(PortOperatingMode::Fabric),
            "Trunk" => Some(PortOperatingMode::Trunk),
            "Access" => Some(PortOperatingMode::Access),
            "--" => None,
            _ => return None,
        };

        Some(Ok(InterfaceInfo {
            name: format!("{interface_type} {port_location}"),
            port_location,
            interface_type,
            operating_mode,
            status,
        }))
    }

    /// Rewrites a low-level `switchport` failure into a clearer message when
    /// the real cause is that the interface is already a port-channel (LAG)
    /// member.
    ///
    /// The interface name is recovered from the CLI prompt embedded in the
    /// error output, e.g. `(conf-if-te-1/0/5)#`. All other errors pass
    /// through unchanged.
    fn map_configure_interfaces_error(&self, err: Error) -> Error {
        debug!("[Brocade] {err}");

        if let Error::CommandError(message) = &err {
            if message.contains("switchport")
                && message.contains("Cannot configure aggregator member")
            {
                let re = Regex::new(r"\(conf-if-([a-zA-Z]+)-([\d/]+)\)#").unwrap();

                if let Some(caps) = re.captures(message) {
                    let interface_type = &caps[1];
                    let port_location = &caps[2];
                    let interface = format!("{interface_type} {port_location}");

                    return Error::CommandError(format!(
                        "Cannot configure interface '{interface}', it is a member of a port-channel (LAG)"
                    ));
                }
            }
        }

        err
    }
}
|
||||||
|
|
||||||
|
#[async_trait]
|
||||||
|
impl BrocadeClient for NetworkOperatingSystemClient {
|
||||||
|
async fn version(&self) -> Result<BrocadeInfo, Error> {
|
||||||
|
Ok(self.version.clone())
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn get_mac_address_table(&self) -> Result<Vec<MacAddressEntry>, Error> {
|
||||||
|
let output = self
|
||||||
|
.shell
|
||||||
|
.run_command("show mac-address-table", ExecutionMode::Regular)
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
output
|
||||||
|
.lines()
|
||||||
|
.skip(1)
|
||||||
|
.filter_map(|line| self.parse_mac_entry(line))
|
||||||
|
.collect()
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn get_stack_topology(&self) -> Result<Vec<InterSwitchLink>, Error> {
|
||||||
|
let output = self
|
||||||
|
.shell
|
||||||
|
.run_command("show fabric isl", ExecutionMode::Regular)
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
output
|
||||||
|
.lines()
|
||||||
|
.skip(6)
|
||||||
|
.filter_map(|line| self.parse_inter_switch_link_entry(line))
|
||||||
|
.collect()
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn get_interfaces(&self) -> Result<Vec<InterfaceInfo>, Error> {
|
||||||
|
let output = self
|
||||||
|
.shell
|
||||||
|
.run_command(
|
||||||
|
"show interface status rbridge-id all",
|
||||||
|
ExecutionMode::Regular,
|
||||||
|
)
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
output
|
||||||
|
.lines()
|
||||||
|
.skip(2)
|
||||||
|
.filter_map(|line| self.parse_interface_status_entry(line))
|
||||||
|
.collect()
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn configure_interfaces(
|
||||||
|
&self,
|
||||||
|
interfaces: Vec<(String, PortOperatingMode)>,
|
||||||
|
) -> Result<(), Error> {
|
||||||
|
info!("[Brocade] Configuring {} interface(s)...", interfaces.len());
|
||||||
|
|
||||||
|
let mut commands = vec!["configure terminal".to_string()];
|
||||||
|
|
||||||
|
for interface in interfaces {
|
||||||
|
commands.push(format!("interface {}", interface.0));
|
||||||
|
|
||||||
|
match interface.1 {
|
||||||
|
PortOperatingMode::Fabric => {
|
||||||
|
commands.push("fabric isl enable".into());
|
||||||
|
commands.push("fabric trunk enable".into());
|
||||||
|
}
|
||||||
|
PortOperatingMode::Trunk => {
|
||||||
|
commands.push("switchport".into());
|
||||||
|
commands.push("switchport mode trunk".into());
|
||||||
|
commands.push("no spanning-tree shutdown".into());
|
||||||
|
commands.push("no fabric isl enable".into());
|
||||||
|
commands.push("no fabric trunk enable".into());
|
||||||
|
}
|
||||||
|
PortOperatingMode::Access => {
|
||||||
|
commands.push("switchport".into());
|
||||||
|
commands.push("switchport mode access".into());
|
||||||
|
commands.push("switchport access vlan 1".into());
|
||||||
|
commands.push("no spanning-tree shutdown".into());
|
||||||
|
commands.push("no fabric isl enable".into());
|
||||||
|
commands.push("no fabric trunk enable".into());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
commands.push("no shutdown".into());
|
||||||
|
commands.push("exit".into());
|
||||||
|
}
|
||||||
|
|
||||||
|
self.shell
|
||||||
|
.run_commands(commands, ExecutionMode::Regular)
|
||||||
|
.await
|
||||||
|
.map_err(|err| self.map_configure_interfaces_error(err))?;
|
||||||
|
|
||||||
|
info!("[Brocade] Interfaces configured.");
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn find_available_channel_id(&self) -> Result<PortChannelId, Error> {
|
||||||
|
info!("[Brocade] Finding next available channel id...");
|
||||||
|
|
||||||
|
let output = self
|
||||||
|
.shell
|
||||||
|
.run_command("show port-channel summary", ExecutionMode::Regular)
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
let used_ids: Vec<u8> = output
|
||||||
|
.lines()
|
||||||
|
.skip(6)
|
||||||
|
.filter_map(|line| {
|
||||||
|
let parts: Vec<&str> = line.split_whitespace().collect();
|
||||||
|
if parts.len() < 8 {
|
||||||
|
return None;
|
||||||
|
}
|
||||||
|
|
||||||
|
u8::from_str(parts[0]).ok()
|
||||||
|
})
|
||||||
|
.collect();
|
||||||
|
|
||||||
|
let mut next_id: u8 = 1;
|
||||||
|
loop {
|
||||||
|
if !used_ids.contains(&next_id) {
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
next_id += 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
info!("[Brocade] Found channel id: {next_id}");
|
||||||
|
Ok(next_id)
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn create_port_channel(
|
||||||
|
&self,
|
||||||
|
channel_id: PortChannelId,
|
||||||
|
channel_name: &str,
|
||||||
|
ports: &[PortLocation],
|
||||||
|
) -> Result<(), Error> {
|
||||||
|
info!(
|
||||||
|
"[Brocade] Configuring port-channel '{channel_id} {channel_name}' with ports: {}",
|
||||||
|
ports
|
||||||
|
.iter()
|
||||||
|
.map(|p| format!("{p}"))
|
||||||
|
.collect::<Vec<String>>()
|
||||||
|
.join(", ")
|
||||||
|
);
|
||||||
|
|
||||||
|
let interfaces = self.get_interfaces().await?;
|
||||||
|
|
||||||
|
let mut commands = vec![
|
||||||
|
"configure terminal".into(),
|
||||||
|
format!("interface port-channel {}", channel_id),
|
||||||
|
"no shutdown".into(),
|
||||||
|
"exit".into(),
|
||||||
|
];
|
||||||
|
|
||||||
|
for port in ports {
|
||||||
|
let interface = interfaces.iter().find(|i| i.port_location == *port);
|
||||||
|
let Some(interface) = interface else {
|
||||||
|
continue;
|
||||||
|
};
|
||||||
|
|
||||||
|
commands.push(format!("interface {}", interface.name));
|
||||||
|
commands.push("no switchport".into());
|
||||||
|
commands.push("no ip address".into());
|
||||||
|
commands.push("no fabric isl enable".into());
|
||||||
|
commands.push("no fabric trunk enable".into());
|
||||||
|
commands.push(format!("channel-group {channel_id} mode active"));
|
||||||
|
commands.push("no shutdown".into());
|
||||||
|
commands.push("exit".into());
|
||||||
|
}
|
||||||
|
|
||||||
|
self.shell
|
||||||
|
.run_commands(commands, ExecutionMode::Regular)
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
info!("[Brocade] Port-channel '{channel_name}' configured.");
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn clear_port_channel(&self, channel_name: &str) -> Result<(), Error> {
|
||||||
|
info!("[Brocade] Clearing port-channel: {channel_name}");
|
||||||
|
|
||||||
|
let commands = vec![
|
||||||
|
"configure terminal".into(),
|
||||||
|
format!("no interface port-channel {}", channel_name),
|
||||||
|
"exit".into(),
|
||||||
|
];
|
||||||
|
|
||||||
|
self.shell
|
||||||
|
.run_commands(commands, ExecutionMode::Regular)
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
info!("[Brocade] Port-channel '{channel_name}' cleared.");
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
}
|
||||||
370
brocade/src/shell.rs
Normal file
370
brocade/src/shell.rs
Normal file
@@ -0,0 +1,370 @@
|
|||||||
|
use std::net::IpAddr;
|
||||||
|
use std::time::Duration;
|
||||||
|
use std::time::Instant;
|
||||||
|
|
||||||
|
use crate::BrocadeOptions;
|
||||||
|
use crate::Error;
|
||||||
|
use crate::ExecutionMode;
|
||||||
|
use crate::TimeoutConfig;
|
||||||
|
use crate::ssh;
|
||||||
|
|
||||||
|
use log::debug;
|
||||||
|
use log::info;
|
||||||
|
use russh::ChannelMsg;
|
||||||
|
use tokio::time::timeout;
|
||||||
|
|
||||||
|
/// Connection settings for a Brocade switch. Each command run opens a fresh
/// SSH session built from these parameters.
#[derive(Debug)]
pub struct BrocadeShell {
    ip: IpAddr,
    port: u16,
    username: String,
    password: String,
    // SSH algorithms / timeouts / dry-run flag discovered at init time.
    options: BrocadeOptions,
    // Commands run at the start of every session (e.g. "terminal length 0").
    before_all_commands: Vec<String>,
    // Commands run at the end of every session.
    after_all_commands: Vec<String>,
}
|
||||||
|
|
||||||
|
impl BrocadeShell {
    /// Creates a shell wrapper for the first switch in `ip_addresses`,
    /// probing SSH algorithm combinations until a compatible set is found.
    ///
    /// # Errors
    /// * [`Error::ConfigurationError`] when `ip_addresses` is empty.
    /// * Any connection/negotiation error from [`ssh::try_init_client`].
    pub async fn init(
        ip_addresses: &[IpAddr],
        port: u16,
        username: &str,
        password: &str,
        options: Option<BrocadeOptions>,
    ) -> Result<Self, Error> {
        // Only the first address is used — TODO confirm multi-switch intent.
        let ip = ip_addresses
            .first()
            .ok_or_else(|| Error::ConfigurationError("No IP addresses provided".to_string()))?;

        let base_options = options.unwrap_or_default();
        // Connect once to discover working SSH algorithms; the resulting
        // options are reused for every later session.
        let options = ssh::try_init_client(username, password, ip, base_options).await?;

        Ok(Self {
            ip: *ip,
            port,
            username: username.to_string(),
            password: password.to_string(),
            before_all_commands: vec![],
            after_all_commands: vec![],
            options,
        })
    }

    /// Opens a fresh SSH session (new TCP connection + shell channel).
    pub async fn open_session(&self, mode: ExecutionMode) -> Result<BrocadeSession, Error> {
        BrocadeSession::open(
            self.ip,
            self.port,
            &self.username,
            &self.password,
            self.options.clone(),
            mode,
        )
        .await
    }

    /// Runs `callback` inside a throwaway session, bracketed by the
    /// `before_all`/`after_all` command lists.
    ///
    /// Failures of the before/after hooks are deliberately ignored (`let _`);
    /// only the callback's result is returned.
    pub async fn with_session<F, R>(&self, mode: ExecutionMode, callback: F) -> Result<R, Error>
    where
        F: FnOnce(
            &mut BrocadeSession,
        ) -> std::pin::Pin<
            Box<dyn std::future::Future<Output = Result<R, Error>> + Send + '_>,
        >,
    {
        let mut session = self.open_session(mode).await?;

        let _ = session.run_commands(self.before_all_commands.clone()).await;
        let result = callback(&mut session).await;
        let _ = session.run_commands(self.after_all_commands.clone()).await;

        session.close().await?;
        result
    }

    /// Runs a single command in a dedicated session and returns its raw
    /// output. Before/after hook failures are ignored.
    pub async fn run_command(&self, command: &str, mode: ExecutionMode) -> Result<String, Error> {
        let mut session = self.open_session(mode).await?;

        let _ = session.run_commands(self.before_all_commands.clone()).await;
        let result = session.run_command(command).await;
        let _ = session.run_commands(self.after_all_commands.clone()).await;

        session.close().await?;
        result
    }

    /// Runs several commands sequentially in one dedicated session, stopping
    /// at the first failure. Before/after hook failures are ignored.
    pub async fn run_commands(
        &self,
        commands: Vec<String>,
        mode: ExecutionMode,
    ) -> Result<(), Error> {
        let mut session = self.open_session(mode).await?;

        let _ = session.run_commands(self.before_all_commands.clone()).await;
        let result = session.run_commands(commands).await;
        let _ = session.run_commands(self.after_all_commands.clone()).await;

        session.close().await?;
        result
    }

    /// Replaces the command list run at the start of every session.
    pub fn before_all(&mut self, commands: Vec<String>) {
        self.before_all_commands = commands;
    }

    /// Replaces the command list run at the end of every session.
    pub fn after_all(&mut self, commands: Vec<String>) {
        self.after_all_commands = commands;
    }
}
|
||||||
|
|
||||||
|
/// A live SSH shell session on a Brocade switch.
pub struct BrocadeSession {
    // Open russh shell channel used for all command I/O.
    pub channel: russh::Channel<russh::client::Msg>,
    // Regular or Privileged ("enable") mode; determines how many `exit`s
    // are sent when closing.
    pub mode: ExecutionMode,
    // Timeouts and dry-run settings governing this session.
    pub options: BrocadeOptions,
}
|
||||||
|
|
||||||
|
impl BrocadeSession {
    /// Connects, requests a PTY + shell, waits for the prompt, and (for
    /// [`ExecutionMode::Privileged`]) elevates via `enable`.
    ///
    /// # Errors
    /// Connection, authentication, and elevation failures are surfaced as the
    /// corresponding [`Error`] variants.
    pub async fn open(
        ip: IpAddr,
        port: u16,
        username: &str,
        password: &str,
        options: BrocadeOptions,
        mode: ExecutionMode,
    ) -> Result<Self, Error> {
        let client = ssh::create_client(ip, port, username, password, &options).await?;
        let mut channel = client.channel_open_session().await?;

        channel
            .request_pty(false, "vt100", 80, 24, 0, 0, &[])
            .await?;
        channel.request_shell(false).await?;

        wait_for_shell_ready(&mut channel, &options.timeouts).await?;

        if let ExecutionMode::Privileged = mode {
            try_elevate_session(&mut channel, username, password, &options.timeouts).await?;
        }

        Ok(Self {
            channel,
            mode,
            options,
        })
    }

    /// Sends `exit` (twice when privileged, to leave enable mode first) and
    /// drains channel messages until close or the cleanup timeout elapses.
    pub async fn close(&mut self) -> Result<(), Error> {
        debug!("[Brocade] Closing session...");

        self.channel.data(&b"exit\n"[..]).await?;
        if let ExecutionMode::Privileged = self.mode {
            self.channel.data(&b"exit\n"[..]).await?;
        }

        let start = Instant::now();
        while start.elapsed() < self.options.timeouts.cleanup {
            match timeout(self.options.timeouts.message_wait, self.channel.wait()).await {
                Ok(Some(ChannelMsg::Close)) => break,
                Ok(Some(_)) => continue,
                // Channel drained or quiet: treat as closed.
                Ok(None) | Err(_) => break,
            }
        }

        debug!("[Brocade] Session closed.");
        Ok(())
    }

    /// Sends one command and collects its output up to the next prompt.
    ///
    /// Honors dry-run skipping for `write`/`deploy` commands and scans the
    /// output for known error patterns.
    ///
    /// # Errors
    /// [`Error::TimeoutError`] when output never completes;
    /// [`Error::CommandError`] when the switch reports an error.
    pub async fn run_command(&mut self, command: &str) -> Result<String, Error> {
        if self.should_skip_command(command) {
            return Ok(String::new());
        }

        debug!("[Brocade] Running command: '{command}'...");

        self.channel
            .data(format!("{}\n", command).as_bytes())
            .await?;
        // Brief pause to let the switch start emitting output.
        tokio::time::sleep(Duration::from_millis(100)).await;

        let output = self.collect_command_output().await?;
        let output = String::from_utf8(output)
            .map_err(|_| Error::UnexpectedError("Invalid UTF-8 in command output".to_string()))?;

        self.check_for_command_errors(&output, command)?;
        Ok(output)
    }

    /// Runs commands in order, stopping at the first failure.
    pub async fn run_commands(&mut self, commands: Vec<String>) -> Result<(), Error> {
        for command in commands {
            self.run_command(&command).await?;
        }
        Ok(())
    }

    /// In dry-run mode, state-changing commands (`write…`, `deploy…`) are
    /// skipped; everything else always runs.
    fn should_skip_command(&self, command: &str) -> bool {
        if (command.starts_with("write") || command.starts_with("deploy")) && self.options.dry_run {
            info!("[Brocade] Dry-run mode enabled, skipping command: {command}");
            return true;
        }
        false
    }

    /// Accumulates channel data until a prompt character (`>` or `#`) is
    /// seen, EOF/close arrives, or `command_execution` elapses.
    ///
    /// NOTE(review): prompt detection is heuristic — a '>' or '#' inside
    /// command output would end collection early.
    async fn collect_command_output(&mut self) -> Result<Vec<u8>, Error> {
        let mut output = Vec::new();
        let start = Instant::now();
        let read_timeout = Duration::from_millis(500);
        let log_interval = Duration::from_secs(5);
        let mut last_log = Instant::now();

        loop {
            // Hard ceiling on total command time.
            if start.elapsed() > self.options.timeouts.command_execution {
                return Err(Error::TimeoutError(
                    "Timeout waiting for command completion.".into(),
                ));
            }

            // Periodic progress log once the soft output timeout has passed.
            if start.elapsed() > self.options.timeouts.command_output
                && last_log.elapsed() > log_interval
            {
                info!("[Brocade] Waiting for command output...");
                last_log = Instant::now();
            }

            match timeout(read_timeout, self.channel.wait()).await {
                Ok(Some(ChannelMsg::Data { data } | ChannelMsg::ExtendedData { data, .. })) => {
                    output.extend_from_slice(&data);
                    let current_output = String::from_utf8_lossy(&output);
                    if current_output.contains('>') || current_output.contains('#') {
                        return Ok(output);
                    }
                }
                Ok(Some(ChannelMsg::Eof | ChannelMsg::Close)) => return Ok(output),
                Ok(Some(ChannelMsg::ExitStatus { exit_status })) => {
                    debug!("[Brocade] Command exit status: {exit_status}");
                }
                Ok(Some(_)) => continue,
                // Quiet channel: either nothing arrived yet, or we already
                // have data and should re-check it for a prompt.
                Ok(None) | Err(_) => {
                    if output.is_empty() {
                        if let Ok(None) = timeout(read_timeout, self.channel.wait()).await {
                            break; // channel gone with no output
                        }
                        continue;
                    }

                    tokio::time::sleep(Duration::from_millis(100)).await;
                    let current_output = String::from_utf8_lossy(&output);
                    if current_output.contains('>') || current_output.contains('#') {
                        return Ok(output);
                    }
                }
            }
        }

        Ok(output)
    }

    /// Scans output for known CLI error phrases; also treats empty output
    /// from a non-`show` command as a failure (the echo/prompt should at
    /// least be present).
    fn check_for_command_errors(&self, output: &str, command: &str) -> Result<(), Error> {
        const ERROR_PATTERNS: &[&str] = &[
            "invalid input",
            "syntax error",
            "command not found",
            "unknown command",
            "permission denied",
            "access denied",
            "authentication failed",
            "configuration error",
            "failed to",
            "error:",
        ];

        let output_lower = output.to_lowercase();
        if ERROR_PATTERNS.iter().any(|&p| output_lower.contains(p)) {
            return Err(Error::CommandError(format!(
                "Command error: {}",
                output.trim()
            )));
        }

        if !command.starts_with("show") && output.trim().is_empty() {
            return Err(Error::CommandError(format!(
                "Command '{command}' produced no output"
            )));
        }

        Ok(())
    }
}
|
||||||
|
|
||||||
|
/// Drains channel output until a CLI prompt (`>` or `#`) appears, indicating
/// the shell is ready for input.
///
/// Returns `Ok(())` even when the deadline passes without a prompt — callers
/// proceed and rely on per-command timeouts to surface a dead shell.
async fn wait_for_shell_ready(
    channel: &mut russh::Channel<russh::client::Msg>,
    timeouts: &TimeoutConfig,
) -> Result<(), Error> {
    let mut buffer = Vec::new();
    let start = Instant::now();

    while start.elapsed() < timeouts.shell_ready {
        match timeout(timeouts.message_wait, channel.wait()).await {
            Ok(Some(ChannelMsg::Data { data })) => {
                buffer.extend_from_slice(&data);
                let output = String::from_utf8_lossy(&buffer);
                let output = output.trim();
                // Brocade prompts end in '>' (user mode) or '#' (enable mode).
                if output.ends_with('>') || output.ends_with('#') {
                    debug!("[Brocade] Shell ready");
                    return Ok(());
                }
            }
            Ok(Some(_)) => continue,
            Ok(None) => break,  // channel closed
            Err(_) => continue, // no message this interval; keep polling
        }
    }
    Ok(())
}
|
||||||
|
|
||||||
|
/// Elevates the session to privileged mode by sending `enable` and driving
/// the resulting prompt dialogue (re-authentication may be requested).
///
/// State machine over the accumulated output:
/// * ends with `#`  -> already privileged, done;
/// * "User Name:"   -> send the username and restart accumulation;
/// * "Password:"    -> send the password and restart accumulation;
/// * contains `>`   -> still unprivileged after the dialogue: auth failed.
///
/// # Errors
/// [`Error::AuthenticationError`] when elevation is rejected or the deadline
/// passes without reaching a `#` prompt.
async fn try_elevate_session(
    channel: &mut russh::Channel<russh::client::Msg>,
    username: &str,
    password: &str,
    timeouts: &TimeoutConfig,
) -> Result<(), Error> {
    channel.data(&b"enable\n"[..]).await?;
    let start = Instant::now();
    let mut buffer = Vec::new();

    while start.elapsed() < timeouts.shell_ready {
        match timeout(timeouts.message_wait, channel.wait()).await {
            Ok(Some(ChannelMsg::Data { data })) => {
                buffer.extend_from_slice(&data);
                let output = String::from_utf8_lossy(&buffer);

                if output.ends_with('#') {
                    debug!("[Brocade] Privileged mode established");
                    return Ok(());
                }

                if output.contains("User Name:") {
                    channel.data(format!("{}\n", username).as_bytes()).await?;
                    buffer.clear(); // start fresh for the next prompt
                } else if output.contains("Password:") {
                    channel.data(format!("{}\n", password).as_bytes()).await?;
                    buffer.clear();
                } else if output.contains('>') {
                    return Err(Error::AuthenticationError(
                        "Enable authentication failed".into(),
                    ));
                }
            }
            Ok(Some(_)) => continue,
            Ok(None) => break,  // channel closed
            Err(_) => continue, // no message this interval; keep polling
        }
    }

    // Deadline or channel close: decide based on whatever arrived last.
    let output = String::from_utf8_lossy(&buffer);
    if output.ends_with('#') {
        debug!("[Brocade] Privileged mode established");
        Ok(())
    } else {
        Err(Error::AuthenticationError(format!(
            "Enable failed. Output:\n{output}"
        )))
    }
}
|
||||||
113
brocade/src/ssh.rs
Normal file
113
brocade/src/ssh.rs
Normal file
@@ -0,0 +1,113 @@
|
|||||||
|
use std::borrow::Cow;
|
||||||
|
use std::sync::Arc;
|
||||||
|
|
||||||
|
use async_trait::async_trait;
|
||||||
|
use russh::client::Handler;
|
||||||
|
use russh::kex::DH_G1_SHA1;
|
||||||
|
use russh::kex::ECDH_SHA2_NISTP256;
|
||||||
|
use russh_keys::key::SSH_RSA;
|
||||||
|
|
||||||
|
use super::BrocadeOptions;
|
||||||
|
use super::Error;
|
||||||
|
|
||||||
|
/// SSH algorithm preferences tried when negotiating with a switch.
#[derive(Default, Clone, Debug)]
pub struct SshOptions {
    // Preferred key-exchange / host-key algorithm set handed to russh.
    pub preferred_algorithms: russh::Preferred,
}
|
||||||
|
|
||||||
|
impl SshOptions {
    /// ECDH (nistp256) key exchange with ssh-rsa host keys — for firmware
    /// that rejects the russh defaults but supports ECDH.
    fn ecdhsa_sha2_nistp256() -> Self {
        Self {
            preferred_algorithms: russh::Preferred {
                kex: Cow::Borrowed(&[ECDH_SHA2_NISTP256]),
                key: Cow::Borrowed(&[SSH_RSA]),
                ..Default::default()
            },
        }
    }

    /// Legacy diffie-hellman-group1-sha1 + ssh-rsa — last resort for very old
    /// firmware. These algorithms are considered weak by modern standards.
    fn legacy() -> Self {
        Self {
            preferred_algorithms: russh::Preferred {
                kex: Cow::Borrowed(&[DH_G1_SHA1]),
                key: Cow::Borrowed(&[SSH_RSA]),
                ..Default::default()
            },
        }
    }
}
|
||||||
|
|
||||||
|
/// Stateless russh client handler used for Brocade switch connections.
pub struct Client;
|
||||||
|
|
||||||
|
#[async_trait]
impl Handler for Client {
    type Error = Error;

    /// Accepts ANY server host key without verification.
    ///
    /// NOTE(review): this disables host-key checking entirely, which allows
    /// man-in-the-middle attacks on the management network. Acceptable only
    /// if that network is fully trusted — confirm before wider exposure.
    async fn check_server_key(
        &mut self,
        _server_public_key: &russh_keys::key::PublicKey,
    ) -> Result<bool, Self::Error> {
        Ok(true)
    }
}
|
||||||
|
|
||||||
|
pub async fn try_init_client(
|
||||||
|
username: &str,
|
||||||
|
password: &str,
|
||||||
|
ip: &std::net::IpAddr,
|
||||||
|
base_options: BrocadeOptions,
|
||||||
|
) -> Result<BrocadeOptions, Error> {
|
||||||
|
let ssh_options = vec![
|
||||||
|
SshOptions::default(),
|
||||||
|
SshOptions::ecdhsa_sha2_nistp256(),
|
||||||
|
SshOptions::legacy(),
|
||||||
|
];
|
||||||
|
|
||||||
|
for ssh in ssh_options {
|
||||||
|
let opts = BrocadeOptions {
|
||||||
|
ssh,
|
||||||
|
..base_options.clone()
|
||||||
|
};
|
||||||
|
let client = create_client(*ip, 22, username, password, &opts).await;
|
||||||
|
|
||||||
|
match client {
|
||||||
|
Ok(_) => {
|
||||||
|
return Ok(opts);
|
||||||
|
}
|
||||||
|
Err(e) => match e {
|
||||||
|
Error::NetworkError(e) => {
|
||||||
|
if e.contains("No common key exchange algorithm") {
|
||||||
|
continue;
|
||||||
|
} else {
|
||||||
|
return Err(Error::NetworkError(e));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
_ => return Err(e),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
Err(Error::NetworkError(
|
||||||
|
"Could not establish ssh connection: wrong key exchange algorithm)".to_string(),
|
||||||
|
))
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Opens an SSH connection to `ip:port` and authenticates with a password.
///
/// Algorithm preferences come from `options.ssh`; host keys are accepted
/// unconditionally by the [`Client`] handler.
///
/// # Errors
/// [`Error::AuthenticationError`] when the password is rejected; transport
/// failures surface as [`Error::NetworkError`] via `From<russh::Error>`.
pub async fn create_client(
    ip: std::net::IpAddr,
    port: u16,
    username: &str,
    password: &str,
    options: &BrocadeOptions,
) -> Result<russh::client::Handle<Client>, Error> {
    let config = russh::client::Config {
        preferred: options.ssh.preferred_algorithms.clone(),
        ..Default::default()
    };
    let mut client = russh::client::connect(Arc::new(config), (ip, port), Client {}).await?;
    if !client.authenticate_password(username, password).await? {
        return Err(Error::AuthenticationError(
            "ssh authentication failed".to_string(),
        ));
    }
    Ok(client)
}
|
||||||
21
examples/ha_cluster/Cargo.toml
Normal file
21
examples/ha_cluster/Cargo.toml
Normal file
@@ -0,0 +1,21 @@
|
|||||||
|
[package]
|
||||||
|
name = "example-ha-cluster"
|
||||||
|
edition = "2024"
|
||||||
|
version.workspace = true
|
||||||
|
readme.workspace = true
|
||||||
|
license.workspace = true
|
||||||
|
publish = false
|
||||||
|
|
||||||
|
[dependencies]
|
||||||
|
harmony = { path = "../../harmony" }
|
||||||
|
harmony_tui = { path = "../../harmony_tui" }
|
||||||
|
harmony_types = { path = "../../harmony_types" }
|
||||||
|
cidr = { workspace = true }
|
||||||
|
tokio = { workspace = true }
|
||||||
|
harmony_macros = { path = "../../harmony_macros" }
|
||||||
|
log = { workspace = true }
|
||||||
|
env_logger = { workspace = true }
|
||||||
|
url = { workspace = true }
|
||||||
|
harmony_secret = { path = "../../harmony_secret" }
|
||||||
|
brocade = { path = "../../brocade" }
|
||||||
|
serde = { workspace = true }
|
||||||
15
examples/ha_cluster/README.md
Normal file
15
examples/ha_cluster/README.md
Normal file
@@ -0,0 +1,15 @@
|
|||||||
|
## OPNSense demo
|
||||||
|
|
||||||
|
Download the VirtualBox snapshot from {{TODO URL}}
|
||||||
|
|
||||||
|
Start the VirtualBox image
|
||||||
|
|
||||||
|
This VirtualBox image is configured to use a bridge on the host's physical interface; make sure the bridge is up and the virtual machine can reach the internet.
|
||||||
|
|
||||||
|
Credentials are opnsense default (root/opnsense)
|
||||||
|
|
||||||
|
Run the project with the correct IP address on the command line:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
cargo run -p example-ha-cluster -- 192.168.5.229
|
||||||
|
```
|
||||||
141
examples/ha_cluster/src/main.rs
Normal file
141
examples/ha_cluster/src/main.rs
Normal file
@@ -0,0 +1,141 @@
|
|||||||
|
use std::{
|
||||||
|
net::{IpAddr, Ipv4Addr},
|
||||||
|
sync::Arc,
|
||||||
|
};
|
||||||
|
|
||||||
|
use brocade::BrocadeOptions;
|
||||||
|
use cidr::Ipv4Cidr;
|
||||||
|
use harmony::{
|
||||||
|
hardware::{HostCategory, Location, PhysicalHost, SwitchGroup},
|
||||||
|
infra::{brocade::BrocadeSwitchClient, opnsense::OPNSenseManagementInterface},
|
||||||
|
inventory::Inventory,
|
||||||
|
modules::{
|
||||||
|
dummy::{ErrorScore, PanicScore, SuccessScore},
|
||||||
|
http::StaticFilesHttpScore,
|
||||||
|
okd::{dhcp::OKDDhcpScore, dns::OKDDnsScore, load_balancer::OKDLoadBalancerScore},
|
||||||
|
opnsense::OPNsenseShellCommandScore,
|
||||||
|
tftp::TftpScore,
|
||||||
|
},
|
||||||
|
topology::{LogicalHost, UnmanagedRouter},
|
||||||
|
};
|
||||||
|
use harmony_macros::{ip, mac_address};
|
||||||
|
use harmony_secret::{Secret, SecretManager};
|
||||||
|
use harmony_types::net::Url;
|
||||||
|
use serde::{Deserialize, Serialize};
|
||||||
|
|
||||||
|
#[tokio::main]
|
||||||
|
async fn main() {
|
||||||
|
let firewall = harmony::topology::LogicalHost {
|
||||||
|
ip: ip!("192.168.5.229"),
|
||||||
|
name: String::from("opnsense-1"),
|
||||||
|
};
|
||||||
|
|
||||||
|
let switch_auth = SecretManager::get_or_prompt::<BrocadeSwitchAuth>()
|
||||||
|
.await
|
||||||
|
.expect("Failed to get credentials");
|
||||||
|
|
||||||
|
let switches: Vec<IpAddr> = vec![ip!("192.168.5.101")]; // TODO: Adjust me
|
||||||
|
let brocade_options = Some(BrocadeOptions {
|
||||||
|
dry_run: *harmony::config::DRY_RUN,
|
||||||
|
..Default::default()
|
||||||
|
});
|
||||||
|
let switch_client = BrocadeSwitchClient::init(
|
||||||
|
&switches,
|
||||||
|
&switch_auth.username,
|
||||||
|
&switch_auth.password,
|
||||||
|
brocade_options,
|
||||||
|
)
|
||||||
|
.await
|
||||||
|
.expect("Failed to connect to switch");
|
||||||
|
|
||||||
|
let switch_client = Arc::new(switch_client);
|
||||||
|
|
||||||
|
let opnsense = Arc::new(
|
||||||
|
harmony::infra::opnsense::OPNSenseFirewall::new(firewall, None, "root", "opnsense").await,
|
||||||
|
);
|
||||||
|
let lan_subnet = Ipv4Addr::new(10, 100, 8, 0);
|
||||||
|
let gateway_ipv4 = Ipv4Addr::new(10, 100, 8, 1);
|
||||||
|
let gateway_ip = IpAddr::V4(gateway_ipv4);
|
||||||
|
let topology = harmony::topology::HAClusterTopology {
|
||||||
|
kubeconfig: None,
|
||||||
|
domain_name: "demo.harmony.mcd".to_string(),
|
||||||
|
router: Arc::new(UnmanagedRouter::new(
|
||||||
|
gateway_ip,
|
||||||
|
Ipv4Cidr::new(lan_subnet, 24).unwrap(),
|
||||||
|
)),
|
||||||
|
load_balancer: opnsense.clone(),
|
||||||
|
firewall: opnsense.clone(),
|
||||||
|
tftp_server: opnsense.clone(),
|
||||||
|
http_server: opnsense.clone(),
|
||||||
|
dhcp_server: opnsense.clone(),
|
||||||
|
dns_server: opnsense.clone(),
|
||||||
|
control_plane: vec![LogicalHost {
|
||||||
|
ip: ip!("10.100.8.20"),
|
||||||
|
name: "cp0".to_string(),
|
||||||
|
}],
|
||||||
|
bootstrap_host: LogicalHost {
|
||||||
|
ip: ip!("10.100.8.20"),
|
||||||
|
name: "cp0".to_string(),
|
||||||
|
},
|
||||||
|
workers: vec![],
|
||||||
|
switch_client: switch_client.clone(),
|
||||||
|
};
|
||||||
|
|
||||||
|
let inventory = Inventory {
|
||||||
|
location: Location::new(
|
||||||
|
"232 des Éperviers, Wendake, Qc, G0A 4V0".to_string(),
|
||||||
|
"wk".to_string(),
|
||||||
|
),
|
||||||
|
switch: SwitchGroup::from([]),
|
||||||
|
firewall_mgmt: Box::new(OPNSenseManagementInterface::new()),
|
||||||
|
storage_host: vec![],
|
||||||
|
worker_host: vec![],
|
||||||
|
control_plane_host: vec![
|
||||||
|
PhysicalHost::empty(HostCategory::Server)
|
||||||
|
.mac_address(mac_address!("08:00:27:62:EC:C3")),
|
||||||
|
],
|
||||||
|
};
|
||||||
|
|
||||||
|
// TODO regroup smaller scores in a larger one such as this
|
||||||
|
// let okd_boostrap_preparation();
|
||||||
|
|
||||||
|
let dhcp_score = OKDDhcpScore::new(&topology, &inventory);
|
||||||
|
let dns_score = OKDDnsScore::new(&topology);
|
||||||
|
let load_balancer_score = OKDLoadBalancerScore::new(&topology);
|
||||||
|
|
||||||
|
let tftp_score = TftpScore::new(Url::LocalFolder("./data/watchguard/tftpboot".to_string()));
|
||||||
|
let http_score = StaticFilesHttpScore {
|
||||||
|
folder_to_serve: Some(Url::LocalFolder(
|
||||||
|
"./data/watchguard/pxe-http-files".to_string(),
|
||||||
|
)),
|
||||||
|
files: vec![],
|
||||||
|
remote_path: None,
|
||||||
|
};
|
||||||
|
|
||||||
|
harmony_tui::run(
|
||||||
|
inventory,
|
||||||
|
topology,
|
||||||
|
vec![
|
||||||
|
Box::new(dns_score),
|
||||||
|
Box::new(dhcp_score),
|
||||||
|
Box::new(load_balancer_score),
|
||||||
|
Box::new(tftp_score),
|
||||||
|
Box::new(http_score),
|
||||||
|
Box::new(OPNsenseShellCommandScore {
|
||||||
|
opnsense: opnsense.get_opnsense_config(),
|
||||||
|
command: "touch /tmp/helloharmonytouching".to_string(),
|
||||||
|
}),
|
||||||
|
Box::new(SuccessScore {}),
|
||||||
|
Box::new(ErrorScore {}),
|
||||||
|
Box::new(PanicScore {}),
|
||||||
|
],
|
||||||
|
)
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Secret, Serialize, Deserialize, Debug)]
|
||||||
|
pub struct BrocadeSwitchAuth {
|
||||||
|
pub username: String,
|
||||||
|
pub password: String,
|
||||||
|
}
|
||||||
@@ -17,3 +17,5 @@ harmony_secret = { path = "../../harmony_secret" }
|
|||||||
log = { workspace = true }
|
log = { workspace = true }
|
||||||
env_logger = { workspace = true }
|
env_logger = { workspace = true }
|
||||||
url = { workspace = true }
|
url = { workspace = true }
|
||||||
|
serde = { workspace = true }
|
||||||
|
brocade = { path = "../../brocade" }
|
||||||
|
|||||||
@@ -3,12 +3,13 @@ use std::{
|
|||||||
sync::Arc,
|
sync::Arc,
|
||||||
};
|
};
|
||||||
|
|
||||||
|
use brocade::BrocadeOptions;
|
||||||
use cidr::Ipv4Cidr;
|
use cidr::Ipv4Cidr;
|
||||||
use harmony::{
|
use harmony::{
|
||||||
config::secret::SshKeyPair,
|
config::secret::SshKeyPair,
|
||||||
data::{FileContent, FilePath},
|
data::{FileContent, FilePath},
|
||||||
hardware::{HostCategory, Location, PhysicalHost, SwitchGroup},
|
hardware::{HostCategory, Location, PhysicalHost, SwitchGroup},
|
||||||
infra::opnsense::OPNSenseManagementInterface,
|
infra::{brocade::BrocadeSwitchClient, opnsense::OPNSenseManagementInterface},
|
||||||
inventory::Inventory,
|
inventory::Inventory,
|
||||||
modules::{
|
modules::{
|
||||||
http::StaticFilesHttpScore,
|
http::StaticFilesHttpScore,
|
||||||
@@ -22,8 +23,9 @@ use harmony::{
|
|||||||
topology::{LogicalHost, UnmanagedRouter},
|
topology::{LogicalHost, UnmanagedRouter},
|
||||||
};
|
};
|
||||||
use harmony_macros::{ip, mac_address};
|
use harmony_macros::{ip, mac_address};
|
||||||
use harmony_secret::SecretManager;
|
use harmony_secret::{Secret, SecretManager};
|
||||||
use harmony_types::net::Url;
|
use harmony_types::net::Url;
|
||||||
|
use serde::{Deserialize, Serialize};
|
||||||
|
|
||||||
#[tokio::main]
|
#[tokio::main]
|
||||||
async fn main() {
|
async fn main() {
|
||||||
@@ -32,6 +34,26 @@ async fn main() {
|
|||||||
name: String::from("fw0"),
|
name: String::from("fw0"),
|
||||||
};
|
};
|
||||||
|
|
||||||
|
let switch_auth = SecretManager::get_or_prompt::<BrocadeSwitchAuth>()
|
||||||
|
.await
|
||||||
|
.expect("Failed to get credentials");
|
||||||
|
|
||||||
|
let switches: Vec<IpAddr> = vec![ip!("192.168.33.101")];
|
||||||
|
let brocade_options = Some(BrocadeOptions {
|
||||||
|
dry_run: *harmony::config::DRY_RUN,
|
||||||
|
..Default::default()
|
||||||
|
});
|
||||||
|
let switch_client = BrocadeSwitchClient::init(
|
||||||
|
&switches,
|
||||||
|
&switch_auth.username,
|
||||||
|
&switch_auth.password,
|
||||||
|
brocade_options,
|
||||||
|
)
|
||||||
|
.await
|
||||||
|
.expect("Failed to connect to switch");
|
||||||
|
|
||||||
|
let switch_client = Arc::new(switch_client);
|
||||||
|
|
||||||
let opnsense = Arc::new(
|
let opnsense = Arc::new(
|
||||||
harmony::infra::opnsense::OPNSenseFirewall::new(firewall, None, "root", "opnsense").await,
|
harmony::infra::opnsense::OPNSenseFirewall::new(firewall, None, "root", "opnsense").await,
|
||||||
);
|
);
|
||||||
@@ -39,6 +61,7 @@ async fn main() {
|
|||||||
let gateway_ipv4 = Ipv4Addr::new(192, 168, 33, 1);
|
let gateway_ipv4 = Ipv4Addr::new(192, 168, 33, 1);
|
||||||
let gateway_ip = IpAddr::V4(gateway_ipv4);
|
let gateway_ip = IpAddr::V4(gateway_ipv4);
|
||||||
let topology = harmony::topology::HAClusterTopology {
|
let topology = harmony::topology::HAClusterTopology {
|
||||||
|
kubeconfig: None,
|
||||||
domain_name: "ncd0.harmony.mcd".to_string(), // TODO this must be set manually correctly
|
domain_name: "ncd0.harmony.mcd".to_string(), // TODO this must be set manually correctly
|
||||||
// when setting up the opnsense firewall
|
// when setting up the opnsense firewall
|
||||||
router: Arc::new(UnmanagedRouter::new(
|
router: Arc::new(UnmanagedRouter::new(
|
||||||
@@ -83,7 +106,7 @@ async fn main() {
|
|||||||
name: "wk2".to_string(),
|
name: "wk2".to_string(),
|
||||||
},
|
},
|
||||||
],
|
],
|
||||||
switch: vec![],
|
switch_client: switch_client.clone(),
|
||||||
};
|
};
|
||||||
|
|
||||||
let inventory = Inventory {
|
let inventory = Inventory {
|
||||||
@@ -166,3 +189,9 @@ async fn main() {
|
|||||||
.await
|
.await
|
||||||
.unwrap();
|
.unwrap();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[derive(Secret, Serialize, Deserialize, Debug)]
|
||||||
|
pub struct BrocadeSwitchAuth {
|
||||||
|
pub username: String,
|
||||||
|
pub password: String,
|
||||||
|
}
|
||||||
|
|||||||
@@ -19,3 +19,4 @@ log = { workspace = true }
|
|||||||
env_logger = { workspace = true }
|
env_logger = { workspace = true }
|
||||||
url = { workspace = true }
|
url = { workspace = true }
|
||||||
serde.workspace = true
|
serde.workspace = true
|
||||||
|
brocade = { path = "../../brocade" }
|
||||||
|
|||||||
@@ -1,7 +1,8 @@
|
|||||||
|
use brocade::BrocadeOptions;
|
||||||
use cidr::Ipv4Cidr;
|
use cidr::Ipv4Cidr;
|
||||||
use harmony::{
|
use harmony::{
|
||||||
hardware::{FirewallGroup, HostCategory, Location, PhysicalHost, SwitchGroup},
|
hardware::{Location, SwitchGroup},
|
||||||
infra::opnsense::OPNSenseManagementInterface,
|
infra::{brocade::BrocadeSwitchClient, opnsense::OPNSenseManagementInterface},
|
||||||
inventory::Inventory,
|
inventory::Inventory,
|
||||||
topology::{HAClusterTopology, LogicalHost, UnmanagedRouter},
|
topology::{HAClusterTopology, LogicalHost, UnmanagedRouter},
|
||||||
};
|
};
|
||||||
@@ -22,6 +23,26 @@ pub async fn get_topology() -> HAClusterTopology {
|
|||||||
name: String::from("opnsense-1"),
|
name: String::from("opnsense-1"),
|
||||||
};
|
};
|
||||||
|
|
||||||
|
let switch_auth = SecretManager::get_or_prompt::<BrocadeSwitchAuth>()
|
||||||
|
.await
|
||||||
|
.expect("Failed to get credentials");
|
||||||
|
|
||||||
|
let switches: Vec<IpAddr> = vec![ip!("192.168.1.101")]; // TODO: Adjust me
|
||||||
|
let brocade_options = Some(BrocadeOptions {
|
||||||
|
dry_run: *harmony::config::DRY_RUN,
|
||||||
|
..Default::default()
|
||||||
|
});
|
||||||
|
let switch_client = BrocadeSwitchClient::init(
|
||||||
|
&switches,
|
||||||
|
&switch_auth.username,
|
||||||
|
&switch_auth.password,
|
||||||
|
brocade_options,
|
||||||
|
)
|
||||||
|
.await
|
||||||
|
.expect("Failed to connect to switch");
|
||||||
|
|
||||||
|
let switch_client = Arc::new(switch_client);
|
||||||
|
|
||||||
let config = SecretManager::get_or_prompt::<OPNSenseFirewallConfig>().await;
|
let config = SecretManager::get_or_prompt::<OPNSenseFirewallConfig>().await;
|
||||||
let config = config.unwrap();
|
let config = config.unwrap();
|
||||||
|
|
||||||
@@ -38,6 +59,7 @@ pub async fn get_topology() -> HAClusterTopology {
|
|||||||
let gateway_ipv4 = ipv4!("192.168.1.1");
|
let gateway_ipv4 = ipv4!("192.168.1.1");
|
||||||
let gateway_ip = IpAddr::V4(gateway_ipv4);
|
let gateway_ip = IpAddr::V4(gateway_ipv4);
|
||||||
harmony::topology::HAClusterTopology {
|
harmony::topology::HAClusterTopology {
|
||||||
|
kubeconfig: None,
|
||||||
domain_name: "demo.harmony.mcd".to_string(),
|
domain_name: "demo.harmony.mcd".to_string(),
|
||||||
router: Arc::new(UnmanagedRouter::new(
|
router: Arc::new(UnmanagedRouter::new(
|
||||||
gateway_ip,
|
gateway_ip,
|
||||||
@@ -58,7 +80,7 @@ pub async fn get_topology() -> HAClusterTopology {
|
|||||||
name: "bootstrap".to_string(),
|
name: "bootstrap".to_string(),
|
||||||
},
|
},
|
||||||
workers: vec![],
|
workers: vec![],
|
||||||
switch: vec![],
|
switch_client: switch_client.clone(),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -75,3 +97,9 @@ pub fn get_inventory() -> Inventory {
|
|||||||
control_plane_host: vec![],
|
control_plane_host: vec![],
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[derive(Secret, Serialize, Deserialize, Debug)]
|
||||||
|
pub struct BrocadeSwitchAuth {
|
||||||
|
pub username: String,
|
||||||
|
pub password: String,
|
||||||
|
}
|
||||||
|
|||||||
@@ -19,3 +19,4 @@ log = { workspace = true }
|
|||||||
env_logger = { workspace = true }
|
env_logger = { workspace = true }
|
||||||
url = { workspace = true }
|
url = { workspace = true }
|
||||||
serde.workspace = true
|
serde.workspace = true
|
||||||
|
brocade = { path = "../../brocade" }
|
||||||
|
|||||||
@@ -1,13 +1,15 @@
|
|||||||
|
use brocade::BrocadeOptions;
|
||||||
use cidr::Ipv4Cidr;
|
use cidr::Ipv4Cidr;
|
||||||
use harmony::{
|
use harmony::{
|
||||||
config::secret::OPNSenseFirewallCredentials,
|
config::secret::OPNSenseFirewallCredentials,
|
||||||
hardware::{Location, SwitchGroup},
|
hardware::{Location, SwitchGroup},
|
||||||
infra::opnsense::OPNSenseManagementInterface,
|
infra::{brocade::BrocadeSwitchClient, opnsense::OPNSenseManagementInterface},
|
||||||
inventory::Inventory,
|
inventory::Inventory,
|
||||||
topology::{HAClusterTopology, LogicalHost, UnmanagedRouter},
|
topology::{HAClusterTopology, LogicalHost, UnmanagedRouter},
|
||||||
};
|
};
|
||||||
use harmony_macros::{ip, ipv4};
|
use harmony_macros::{ip, ipv4};
|
||||||
use harmony_secret::SecretManager;
|
use harmony_secret::{Secret, SecretManager};
|
||||||
|
use serde::{Deserialize, Serialize};
|
||||||
use std::{net::IpAddr, sync::Arc};
|
use std::{net::IpAddr, sync::Arc};
|
||||||
|
|
||||||
pub async fn get_topology() -> HAClusterTopology {
|
pub async fn get_topology() -> HAClusterTopology {
|
||||||
@@ -16,6 +18,26 @@ pub async fn get_topology() -> HAClusterTopology {
|
|||||||
name: String::from("opnsense-1"),
|
name: String::from("opnsense-1"),
|
||||||
};
|
};
|
||||||
|
|
||||||
|
let switch_auth = SecretManager::get_or_prompt::<BrocadeSwitchAuth>()
|
||||||
|
.await
|
||||||
|
.expect("Failed to get credentials");
|
||||||
|
|
||||||
|
let switches: Vec<IpAddr> = vec![ip!("192.168.1.101")]; // TODO: Adjust me
|
||||||
|
let brocade_options = Some(BrocadeOptions {
|
||||||
|
dry_run: *harmony::config::DRY_RUN,
|
||||||
|
..Default::default()
|
||||||
|
});
|
||||||
|
let switch_client = BrocadeSwitchClient::init(
|
||||||
|
&switches,
|
||||||
|
&switch_auth.username,
|
||||||
|
&switch_auth.password,
|
||||||
|
brocade_options,
|
||||||
|
)
|
||||||
|
.await
|
||||||
|
.expect("Failed to connect to switch");
|
||||||
|
|
||||||
|
let switch_client = Arc::new(switch_client);
|
||||||
|
|
||||||
let config = SecretManager::get_or_prompt::<OPNSenseFirewallCredentials>().await;
|
let config = SecretManager::get_or_prompt::<OPNSenseFirewallCredentials>().await;
|
||||||
let config = config.unwrap();
|
let config = config.unwrap();
|
||||||
|
|
||||||
@@ -32,6 +54,7 @@ pub async fn get_topology() -> HAClusterTopology {
|
|||||||
let gateway_ipv4 = ipv4!("192.168.1.1");
|
let gateway_ipv4 = ipv4!("192.168.1.1");
|
||||||
let gateway_ip = IpAddr::V4(gateway_ipv4);
|
let gateway_ip = IpAddr::V4(gateway_ipv4);
|
||||||
harmony::topology::HAClusterTopology {
|
harmony::topology::HAClusterTopology {
|
||||||
|
kubeconfig: None,
|
||||||
domain_name: "demo.harmony.mcd".to_string(),
|
domain_name: "demo.harmony.mcd".to_string(),
|
||||||
router: Arc::new(UnmanagedRouter::new(
|
router: Arc::new(UnmanagedRouter::new(
|
||||||
gateway_ip,
|
gateway_ip,
|
||||||
@@ -52,7 +75,7 @@ pub async fn get_topology() -> HAClusterTopology {
|
|||||||
name: "cp0".to_string(),
|
name: "cp0".to_string(),
|
||||||
},
|
},
|
||||||
workers: vec![],
|
workers: vec![],
|
||||||
switch: vec![],
|
switch_client: switch_client.clone(),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -69,3 +92,9 @@ pub fn get_inventory() -> Inventory {
|
|||||||
control_plane_host: vec![],
|
control_plane_host: vec![],
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[derive(Secret, Serialize, Deserialize, Debug)]
|
||||||
|
pub struct BrocadeSwitchAuth {
|
||||||
|
pub username: String,
|
||||||
|
pub password: String,
|
||||||
|
}
|
||||||
|
|||||||
14
examples/openbao/Cargo.toml
Normal file
14
examples/openbao/Cargo.toml
Normal file
@@ -0,0 +1,14 @@
|
|||||||
|
[package]
|
||||||
|
name = "example-openbao"
|
||||||
|
edition = "2024"
|
||||||
|
version.workspace = true
|
||||||
|
readme.workspace = true
|
||||||
|
license.workspace = true
|
||||||
|
|
||||||
|
[dependencies]
|
||||||
|
harmony = { path = "../../harmony" }
|
||||||
|
harmony_cli = { path = "../../harmony_cli" }
|
||||||
|
harmony_macros = { path = "../../harmony_macros" }
|
||||||
|
harmony_types = { path = "../../harmony_types" }
|
||||||
|
tokio.workspace = true
|
||||||
|
url.workspace = true
|
||||||
7
examples/openbao/README.md
Normal file
7
examples/openbao/README.md
Normal file
@@ -0,0 +1,7 @@
|
|||||||
|
To install an openbao instance with harmony simply `cargo run -p example-openbao` .
|
||||||
|
|
||||||
|
Depending on your environement configuration, it will either install a k3d cluster locally and deploy on it, or install to a remote cluster.
|
||||||
|
|
||||||
|
Then follow the openbao documentation to initialize and unseal, this will make openbao usable.
|
||||||
|
|
||||||
|
https://openbao.org/docs/platform/k8s/helm/run/
|
||||||
67
examples/openbao/src/main.rs
Normal file
67
examples/openbao/src/main.rs
Normal file
@@ -0,0 +1,67 @@
|
|||||||
|
use std::{collections::HashMap, str::FromStr};
|
||||||
|
|
||||||
|
use harmony::{
|
||||||
|
inventory::Inventory,
|
||||||
|
modules::helm::chart::{HelmChartScore, HelmRepository, NonBlankString},
|
||||||
|
topology::K8sAnywhereTopology,
|
||||||
|
};
|
||||||
|
use harmony_macros::hurl;
|
||||||
|
|
||||||
|
#[tokio::main]
|
||||||
|
async fn main() {
|
||||||
|
let values_yaml = Some(
|
||||||
|
r#"server:
|
||||||
|
standalone:
|
||||||
|
enabled: true
|
||||||
|
config: |
|
||||||
|
listener "tcp" {
|
||||||
|
tls_disable = true
|
||||||
|
address = "[::]:8200"
|
||||||
|
cluster_address = "[::]:8201"
|
||||||
|
}
|
||||||
|
|
||||||
|
storage "file" {
|
||||||
|
path = "/openbao/data"
|
||||||
|
}
|
||||||
|
|
||||||
|
service:
|
||||||
|
enabled: true
|
||||||
|
|
||||||
|
dataStorage:
|
||||||
|
enabled: true
|
||||||
|
size: 10Gi
|
||||||
|
storageClass: null
|
||||||
|
accessMode: ReadWriteOnce
|
||||||
|
|
||||||
|
auditStorage:
|
||||||
|
enabled: true
|
||||||
|
size: 10Gi
|
||||||
|
storageClass: null
|
||||||
|
accessMode: ReadWriteOnce"#
|
||||||
|
.to_string(),
|
||||||
|
);
|
||||||
|
let openbao = HelmChartScore {
|
||||||
|
namespace: Some(NonBlankString::from_str("openbao").unwrap()),
|
||||||
|
release_name: NonBlankString::from_str("openbao").unwrap(),
|
||||||
|
chart_name: NonBlankString::from_str("openbao/openbao").unwrap(),
|
||||||
|
chart_version: None,
|
||||||
|
values_overrides: None,
|
||||||
|
values_yaml,
|
||||||
|
create_namespace: true,
|
||||||
|
install_only: true,
|
||||||
|
repository: Some(HelmRepository::new(
|
||||||
|
"openbao".to_string(),
|
||||||
|
hurl!("https://openbao.github.io/openbao-helm"),
|
||||||
|
true,
|
||||||
|
)),
|
||||||
|
};
|
||||||
|
|
||||||
|
harmony_cli::run(
|
||||||
|
Inventory::autoload(),
|
||||||
|
K8sAnywhereTopology::from_env(),
|
||||||
|
vec![Box::new(openbao)],
|
||||||
|
None,
|
||||||
|
)
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
}
|
||||||
@@ -8,7 +8,7 @@ publish = false
|
|||||||
|
|
||||||
[dependencies]
|
[dependencies]
|
||||||
harmony = { path = "../../harmony" }
|
harmony = { path = "../../harmony" }
|
||||||
harmony_tui = { path = "../../harmony_tui" }
|
harmony_cli = { path = "../../harmony_cli" }
|
||||||
harmony_types = { path = "../../harmony_types" }
|
harmony_types = { path = "../../harmony_types" }
|
||||||
cidr = { workspace = true }
|
cidr = { workspace = true }
|
||||||
tokio = { workspace = true }
|
tokio = { workspace = true }
|
||||||
@@ -16,3 +16,6 @@ harmony_macros = { path = "../../harmony_macros" }
|
|||||||
log = { workspace = true }
|
log = { workspace = true }
|
||||||
env_logger = { workspace = true }
|
env_logger = { workspace = true }
|
||||||
url = { workspace = true }
|
url = { workspace = true }
|
||||||
|
harmony_secret = { path = "../../harmony_secret" }
|
||||||
|
brocade = { path = "../../brocade" }
|
||||||
|
serde = { workspace = true }
|
||||||
|
|||||||
@@ -1,15 +1,23 @@
|
|||||||
## OPNSense demo
|
# OPNSense Demo
|
||||||
|
|
||||||
Download the virtualbox snapshot from {{TODO URL}}
|
This example demonstrate how to manage an Opnsense server with harmony.
|
||||||
|
|
||||||
Start the virtualbox image
|
todo: add more info
|
||||||
|
|
||||||
This virtualbox image is configured to use a bridge on the host's physical interface, make sure the bridge is up and the virtual machine can reach internet.
|
## Demo instructions
|
||||||
|
|
||||||
Credentials are opnsense default (root/opnsense)
|
todo: add detailed instructions
|
||||||
|
|
||||||
Run the project with the correct ip address on the command line :
|
- setup the example execution environment
|
||||||
|
- setup your system configuration
|
||||||
|
- topology
|
||||||
|
- scores
|
||||||
|
- secrets
|
||||||
|
- build
|
||||||
|
- execute
|
||||||
|
- verify/inspect
|
||||||
|
|
||||||
|
## Example execution
|
||||||
|
|
||||||
|
See [learning tool documentation](./scripts/README.md)
|
||||||
|
|
||||||
```bash
|
|
||||||
cargo run -p example-opnsense -- 192.168.5.229
|
|
||||||
```
|
|
||||||
|
|||||||
4
examples/opnsense/env.sh
Normal file
4
examples/opnsense/env.sh
Normal file
@@ -0,0 +1,4 @@
|
|||||||
|
export HARMONY_SECRET_NAMESPACE=example-opnsense
|
||||||
|
export HARMONY_SECRET_STORE=file
|
||||||
|
export HARMONY_DATABASE_URL=sqlite://harmony_vms.sqlite RUST_LOG=info
|
||||||
|
export RUST_LOG=info
|
||||||
96
examples/opnsense/scripts/README.md
Normal file
96
examples/opnsense/scripts/README.md
Normal file
@@ -0,0 +1,96 @@
|
|||||||
|
# Virtualized Execution Environment for Harmony
|
||||||
|
|
||||||
|
Scripts included in this directory have 3 purposes:
|
||||||
|
|
||||||
|
- automate initial setup of localhost or VM (nested virtualization) for this example
|
||||||
|
- prototype a solution for an 'OpensenseLocalhostTopology'
|
||||||
|
- prototype
|
||||||
|
|
||||||
|
This exprimentation aim to find an approach suitable for using harmony on virtualised execution environment such that:
|
||||||
|
|
||||||
|
- it straights forward for a user with minimal knowledge to start testing harmony
|
||||||
|
- installation and execution have **minimal impact on the user desktop**
|
||||||
|
|
||||||
|
## Usage
|
||||||
|
|
||||||
|
### Installation
|
||||||
|
|
||||||
|
1. download this directory
|
||||||
|
2. add this directory in your PATH (example `. setup`)
|
||||||
|
|
||||||
|
|
||||||
|
```
|
||||||
|
# show help page
|
||||||
|
harmony-vee
|
||||||
|
|
||||||
|
# show active configurations
|
||||||
|
harmony-vee config
|
||||||
|
|
||||||
|
# show what will be modified at installation
|
||||||
|
harmony-vee install --dry-run
|
||||||
|
|
||||||
|
# install
|
||||||
|
harmony-vee install
|
||||||
|
|
||||||
|
# show what will be modified at unistallation
|
||||||
|
harmony-vee uninstall --dry-run
|
||||||
|
|
||||||
|
```
|
||||||
|
|
||||||
|
### Create and start a new Virtual Execution Environment
|
||||||
|
|
||||||
|
|
||||||
|
```
|
||||||
|
# Create a HVEE to test opnsense_score
|
||||||
|
harmony-vee init opnsense_score
|
||||||
|
|
||||||
|
# List existing HVEE
|
||||||
|
harmony-vee list
|
||||||
|
|
||||||
|
# Show HVEE information including devices ip and vault type/location
|
||||||
|
harmony-vee show opnsense_score
|
||||||
|
|
||||||
|
# Start/Stop a HVEE
|
||||||
|
harmony-vee stop opnsense_score
|
||||||
|
|
||||||
|
|
||||||
|
# Destroy a HVEE instance
|
||||||
|
harmony-vee destroy opnsense_score
|
||||||
|
|
||||||
|
```
|
||||||
|
|
||||||
|
|
||||||
|
### Variable d'environnement
|
||||||
|
|
||||||
|
```
|
||||||
|
## directory containing harmony-ve data
|
||||||
|
# HVE_ROOT=~/.harmony-ve
|
||||||
|
|
||||||
|
## OPNSENSE SRC
|
||||||
|
# main mirror
|
||||||
|
# HVE_OPNSENSE_URL=https://pkg.opnsense.org/releases
|
||||||
|
# first alternative mirror
|
||||||
|
# HVE_OPENSENSE_URL_ALT1=https://mirror.vraphim.com/opnsense/releases
|
||||||
|
# HVE_OPNSENSE_URL_ALT2=https://mirror.winsub.kr/opnsense/releases
|
||||||
|
|
||||||
|
|
||||||
|
## Network
|
||||||
|
# HVE_NETWORK_LABEL=harmony
|
||||||
|
```
|
||||||
|
|
||||||
|
|
||||||
|
## Remarks
|
||||||
|
|
||||||
|
- A nested VM setup could be safer
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
## Architecture
|
||||||
|
|
||||||
|
### Learning environment directly on host
|
||||||
|
|
||||||
|

|
||||||
|
|
||||||
|
### Learning environment nested in a "workspace vm"
|
||||||
|
|
||||||
|

|
||||||
17
examples/opnsense/scripts/common
Normal file
17
examples/opnsense/scripts/common
Normal file
@@ -0,0 +1,17 @@
|
|||||||
|
#! /bin/bash
|
||||||
|
|
||||||
|
_warn(){ >&2 echo "WARNING: $*" ; }
|
||||||
|
|
||||||
|
_fatal(){
|
||||||
|
>&2 echo "FATAL ERROR: $*"
|
||||||
|
>&2 echo stopping...
|
||||||
|
exit 1
|
||||||
|
}
|
||||||
|
|
||||||
|
pushd () {
|
||||||
|
command pushd "$@" > /dev/null
|
||||||
|
}
|
||||||
|
|
||||||
|
popd () {
|
||||||
|
command popd "$@" > /dev/null
|
||||||
|
}
|
||||||
31
examples/opnsense/scripts/default-env-var
Normal file
31
examples/opnsense/scripts/default-env-var
Normal file
@@ -0,0 +1,31 @@
|
|||||||
|
# Conventions:
|
||||||
|
# - Namespaced with HVE, short for Harmony Virtualised Execution Environment
|
||||||
|
# - Prefixed values used internally
|
||||||
|
# - Not prefixed may be supercharged by the user
|
||||||
|
|
||||||
|
# Root of harmony data
|
||||||
|
_HVE_ROOT=${HVE_ROOT:-$HOME/harmony-ve}
|
||||||
|
[ -d "$_HVE_ROOT" ] || mkdir -p "${_HVE_ROOT}"
|
||||||
|
|
||||||
|
|
||||||
|
_HVE_SRC_IMG=${_HVE_ROOT}/src/images
|
||||||
|
[ -d "$_HVE_SRC_IMG" ] || mkdir -p "${_HVE_SRC_IMG}"
|
||||||
|
_HVE_IMG=${_HVE_ROOT}/images
|
||||||
|
[ -d "$_HVE_IMG" ] || mkdir -p "$_HVE_IMG"
|
||||||
|
|
||||||
|
# Opnsense
|
||||||
|
_HVE_OPNSENSE_URL=${HVE_OPNSENSE_URL:-https://pkg.opnsense.org/releases}
|
||||||
|
# first alternative mirror
|
||||||
|
_HVE_OPNSENSE_URL_ALT1=${HVE_OPNSENSE_URL_ALT1:-https://mirror.vraphim.com/opnsense/releases}
|
||||||
|
_HVE_OPNSENSE_URL_ALT2=${HVE_OPNSENSE_URL_ALT2:-https://mirror.winsub.kr/opnsense/releases}
|
||||||
|
|
||||||
|
_HVE_OPNSENSE_SRC_IMG=${_HVE_SRC_IMG}/opnsense
|
||||||
|
[ -d "$_HVE_OPNSENSE_SRC_IMG" ] || mkdir -p "${_HVE_OPNSENSE_SRC_IMG}"
|
||||||
|
_HVE_OPNSENSE_IMG=${_HVE_IMG}/opnsense
|
||||||
|
[ -d "$_HVE_OPNSENSE_IMG" ] || mkdir -p "${_HVE_OPNSENSE_IMG}"
|
||||||
|
|
||||||
|
# Network
|
||||||
|
_HVE_NETWORK=${HVE_NETWORK:-harmony}
|
||||||
|
_HVE_WAN_BRIDGE=${HVE_WAN_BRIDGE:-${_HVE_NETWORK}-wan-brd}
|
||||||
|
_HVE_LAN_BRIDGE=${HVE_LAN_BRIDGE:-${_HVE_NETWORK}-lann-brd}
|
||||||
|
|
||||||
48
examples/opnsense/scripts/dependencies-management
Normal file
48
examples/opnsense/scripts/dependencies-management
Normal file
@@ -0,0 +1,48 @@
|
|||||||
|
#! /bin/bash
|
||||||
|
is_string_empty(){
|
||||||
|
if [ "${*:-}" != "" ]; then
|
||||||
|
return 0
|
||||||
|
else
|
||||||
|
return 1
|
||||||
|
fi
|
||||||
|
}
|
||||||
|
|
||||||
|
is_debian_family()(
|
||||||
|
is_string_empty "$(apt --version 2> /dev/null )"
|
||||||
|
)
|
||||||
|
|
||||||
|
has_ip(){
|
||||||
|
is_string_empty "$(ip -V 2> /dev/null)"
|
||||||
|
}
|
||||||
|
|
||||||
|
has_virsh(){
|
||||||
|
is_string_empty "$(virsh --version 2> /dev/null)"
|
||||||
|
}
|
||||||
|
|
||||||
|
has_virt_customize(){
|
||||||
|
is_string_empty "$(virt-customize --version 2> /dev/null)"
|
||||||
|
}
|
||||||
|
|
||||||
|
has_curl(){
|
||||||
|
is_string_empty "$(curl --version 2> /dev/null)"
|
||||||
|
}
|
||||||
|
has_wget(){
|
||||||
|
is_string_empty "$(wget --version 2> /dev/null)"
|
||||||
|
}
|
||||||
|
|
||||||
|
install_kvm(){
|
||||||
|
sudo apt install -y --no-install-recommends qemu-system libvirt-clients libvirt-daemon-system
|
||||||
|
sudo usermod -aG libvirt "$USER"
|
||||||
|
# todo: finf how to fix image access out of /var/lib/libvirt/images
|
||||||
|
sudo setfacl -Rm u:libvirt-qemu:rx $_HVE_IMG
|
||||||
|
sudo systemctl restart libvirtd
|
||||||
|
}
|
||||||
|
|
||||||
|
install_virt_customize(){
|
||||||
|
sudo apt install -y libguestfs-tools
|
||||||
|
}
|
||||||
|
|
||||||
|
install_wget(){
|
||||||
|
sudo apt install -y wget
|
||||||
|
}
|
||||||
|
|
||||||
Binary file not shown.
|
After Width: | Height: | Size: 106 KiB |
Binary file not shown.
|
After Width: | Height: | Size: 138 KiB |
321
examples/opnsense/scripts/doc/automate-opnsense-example.drawio
Normal file
321
examples/opnsense/scripts/doc/automate-opnsense-example.drawio
Normal file
@@ -0,0 +1,321 @@
|
|||||||
|
<mxfile host="app.diagrams.net" agent="Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/142.0.0.0 Safari/537.36" version="28.2.9" pages="2">
|
||||||
|
<diagram name="localhost" id="lK0WmoXmZXwFmV5PC-RW">
|
||||||
|
<mxGraphModel dx="1111" dy="487" grid="1" gridSize="10" guides="1" tooltips="1" connect="1" arrows="1" fold="1" page="1" pageScale="1" pageWidth="827" pageHeight="1169" math="0" shadow="0">
|
||||||
|
<root>
|
||||||
|
<mxCell id="0" />
|
||||||
|
<mxCell id="1" parent="0" />
|
||||||
|
<mxCell id="1Ax8jaXdU0J25Zkiwu96-1" value="" style="rounded=1;whiteSpace=wrap;html=1;" parent="1" vertex="1">
|
||||||
|
<mxGeometry x="300" y="230" width="700" height="550" as="geometry" />
|
||||||
|
</mxCell>
|
||||||
|
<mxCell id="VVCo9gNhF-9Hs0fZa6i8-1" value="<b>localhost</b>" style="text;html=1;whiteSpace=wrap;strokeColor=none;fillColor=none;align=center;verticalAlign=middle;rounded=0;" parent="1" vertex="1">
|
||||||
|
<mxGeometry x="380" y="240" width="60" height="30" as="geometry" />
|
||||||
|
</mxCell>
|
||||||
|
<mxCell id="X0RF5wBsf5JYURxROWwA-1" value="" style="rounded=1;whiteSpace=wrap;html=1;" parent="1" vertex="1">
|
||||||
|
<mxGeometry x="350" y="283" width="230" height="67" as="geometry" />
|
||||||
|
</mxCell>
|
||||||
|
<mxCell id="X0RF5wBsf5JYURxROWwA-2" value="" style="rounded=1;whiteSpace=wrap;html=1;" parent="1" vertex="1">
|
||||||
|
<mxGeometry x="590" y="393" width="310" height="270" as="geometry" />
|
||||||
|
</mxCell>
|
||||||
|
<mxCell id="X0RF5wBsf5JYURxROWwA-3" value="<b>opnsense vm</b>" style="text;html=1;whiteSpace=wrap;strokeColor=none;fillColor=none;align=center;verticalAlign=middle;rounded=0;" parent="1" vertex="1">
|
||||||
|
<mxGeometry x="620" y="403" width="90" height="30" as="geometry" />
|
||||||
|
</mxCell>
|
||||||
|
<mxCell id="X0RF5wBsf5JYURxROWwA-4" value="" style="rounded=1;whiteSpace=wrap;html=1;" parent="1" vertex="1">
|
||||||
|
<mxGeometry x="620" y="250" width="280" height="60" as="geometry" />
|
||||||
|
</mxCell>
|
||||||
|
<mxCell id="X0RF5wBsf5JYURxROWwA-5" value="<b>localhost network</b>" style="text;html=1;whiteSpace=wrap;strokeColor=none;fillColor=none;align=center;verticalAlign=middle;rounded=0;" parent="1" vertex="1">
|
||||||
|
<mxGeometry x="610" y="250" width="150" height="30" as="geometry" />
|
||||||
|
</mxCell>
|
||||||
|
<mxCell id="X0RF5wBsf5JYURxROWwA-6" value="<b>Src repo or</b><div style=""><b>Harmony learning tool</b></div>" style="text;html=1;whiteSpace=wrap;strokeColor=none;fillColor=none;align=left;verticalAlign=middle;rounded=0;" parent="1" vertex="1">
|
||||||
|
<mxGeometry x="370" y="294" width="152" height="30" as="geometry" />
|
||||||
|
</mxCell>
|
||||||
|
<mxCell id="X0RF5wBsf5JYURxROWwA-7" value="" style="html=1;rounded=0;direction=south;rotation=90;" parent="1" vertex="1">
|
||||||
|
<mxGeometry x="760" y="290" width="30" height="30" as="geometry" />
|
||||||
|
</mxCell>
|
||||||
|
<mxCell id="X0RF5wBsf5JYURxROWwA-8" value="" style="endArrow=none;html=1;rounded=0;align=center;verticalAlign=top;endFill=0;labelBackgroundColor=none;endSize=2;" parent="1" source="X0RF5wBsf5JYURxROWwA-7" target="X0RF5wBsf5JYURxROWwA-9" edge="1">
|
||||||
|
<mxGeometry relative="1" as="geometry" />
|
||||||
|
</mxCell>
|
||||||
|
<mxCell id="X0RF5wBsf5JYURxROWwA-9" value="" style="ellipse;html=1;fontSize=11;align=center;fillColor=none;points=[];aspect=fixed;resizable=0;verticalAlign=bottom;labelPosition=center;verticalLabelPosition=top;flipH=1;" parent="1" vertex="1">
|
||||||
|
<mxGeometry x="771" y="342" width="8" height="8" as="geometry" />
|
||||||
|
</mxCell>
|
||||||
|
<mxCell id="X0RF5wBsf5JYURxROWwA-13" value="" style="html=1;rounded=0;" parent="1" vertex="1">
|
||||||
|
<mxGeometry x="810" y="290" width="30" height="30" as="geometry" />
|
||||||
|
</mxCell>
|
||||||
|
<mxCell id="X0RF5wBsf5JYURxROWwA-14" value="" style="endArrow=none;html=1;rounded=0;align=center;verticalAlign=top;endFill=0;labelBackgroundColor=none;endSize=2;" parent="1" source="X0RF5wBsf5JYURxROWwA-13" target="X0RF5wBsf5JYURxROWwA-15" edge="1">
|
||||||
|
<mxGeometry relative="1" as="geometry" />
|
||||||
|
</mxCell>
|
||||||
|
<mxCell id="X0RF5wBsf5JYURxROWwA-15" value="" style="ellipse;html=1;fontSize=11;align=center;fillColor=none;points=[];aspect=fixed;resizable=0;verticalAlign=bottom;labelPosition=center;verticalLabelPosition=top;flipH=1;" parent="1" vertex="1">
|
||||||
|
<mxGeometry x="820" y="342" width="8" height="8" as="geometry" />
|
||||||
|
</mxCell>
|
||||||
|
<mxCell id="X0RF5wBsf5JYURxROWwA-16" value="" style="html=1;rounded=0;" parent="1" vertex="1">
|
||||||
|
<mxGeometry x="760" y="383" width="30" height="30" as="geometry" />
|
||||||
|
</mxCell>
|
||||||
|
<mxCell id="X0RF5wBsf5JYURxROWwA-17" value="" style="endArrow=none;html=1;rounded=0;align=center;verticalAlign=top;endFill=0;labelBackgroundColor=none;endSize=2;" parent="1" source="X0RF5wBsf5JYURxROWwA-16" target="X0RF5wBsf5JYURxROWwA-18" edge="1">
|
||||||
|
<mxGeometry relative="1" as="geometry" />
|
||||||
|
</mxCell>
|
||||||
|
<mxCell id="X0RF5wBsf5JYURxROWwA-18" value="" style="shape=requiredInterface;html=1;fontSize=11;align=center;fillColor=none;points=[];aspect=fixed;resizable=0;verticalAlign=bottom;labelPosition=center;verticalLabelPosition=top;flipH=1;rotation=-90;" parent="1" vertex="1">
|
||||||
|
<mxGeometry x="772.5" y="353" width="5" height="10" as="geometry" />
|
||||||
|
</mxCell>
|
||||||
|
<mxCell id="X0RF5wBsf5JYURxROWwA-19" value="" style="html=1;rounded=0;" parent="1" vertex="1">
|
||||||
|
<mxGeometry x="809" y="383" width="30" height="30" as="geometry" />
|
||||||
|
</mxCell>
|
||||||
|
<mxCell id="X0RF5wBsf5JYURxROWwA-20" value="" style="endArrow=none;html=1;rounded=0;align=center;verticalAlign=top;endFill=0;labelBackgroundColor=none;endSize=2;" parent="1" source="X0RF5wBsf5JYURxROWwA-19" target="X0RF5wBsf5JYURxROWwA-21" edge="1">
|
||||||
|
<mxGeometry relative="1" as="geometry" />
|
||||||
|
</mxCell>
|
||||||
|
<mxCell id="X0RF5wBsf5JYURxROWwA-21" value="" style="shape=requiredInterface;html=1;fontSize=11;align=center;fillColor=none;points=[];aspect=fixed;resizable=0;verticalAlign=bottom;labelPosition=center;verticalLabelPosition=top;flipH=1;rotation=-90;" parent="1" vertex="1">
|
||||||
|
<mxGeometry x="821.5" y="353" width="5" height="10" as="geometry" />
|
||||||
|
</mxCell>
|
||||||
|
<mxCell id="X0RF5wBsf5JYURxROWwA-22" value="" style="rounded=1;whiteSpace=wrap;html=1;" parent="1" vertex="1">
|
||||||
|
<mxGeometry x="350" y="440" width="230" height="130" as="geometry" />
|
||||||
|
</mxCell>
|
||||||
|
<mxCell id="X0RF5wBsf5JYURxROWwA-23" value="<b>Example dependencies</b>" style="text;html=1;whiteSpace=wrap;strokeColor=none;fillColor=none;align=left;verticalAlign=middle;rounded=0;" parent="1" vertex="1">
|
||||||
|
<mxGeometry x="360" y="450" width="152" height="30" as="geometry" />
|
||||||
|
</mxCell>
|
||||||
|
<mxCell id="X0RF5wBsf5JYURxROWwA-24" value="<ul><li>kvm</li><li>virt-customize</li><li>...</li></ul>" style="text;html=1;whiteSpace=wrap;strokeColor=none;fillColor=none;align=left;verticalAlign=middle;rounded=0;" parent="1" vertex="1">
|
||||||
|
<mxGeometry x="360" y="510" width="150" height="30" as="geometry" />
|
||||||
|
</mxCell>
|
||||||
|
<mxCell id="X0RF5wBsf5JYURxROWwA-25" value="" style="rounded=1;whiteSpace=wrap;html=1;" parent="1" vertex="1">
|
||||||
|
<mxGeometry x="350" y="590" width="230" height="130" as="geometry" />
|
||||||
|
</mxCell>
|
||||||
|
<mxCell id="X0RF5wBsf5JYURxROWwA-26" value="<b>Example resources</b>" style="text;html=1;whiteSpace=wrap;strokeColor=none;fillColor=none;align=left;verticalAlign=middle;rounded=0;" parent="1" vertex="1">
|
||||||
|
<mxGeometry x="370" y="600" width="152" height="30" as="geometry" />
|
||||||
|
</mxCell>
|
||||||
|
<mxCell id="X0RF5wBsf5JYURxROWwA-27" value="<ul><li>src opnsense images</li><li>modified opnsense images</li></ul>" style="text;html=1;whiteSpace=wrap;strokeColor=none;fillColor=none;align=left;verticalAlign=middle;rounded=0;" parent="1" vertex="1">
|
||||||
|
<mxGeometry x="361" y="650" width="208" height="30" as="geometry" />
|
||||||
|
</mxCell>
|
||||||
|
<mxCell id="X0RF5wBsf5JYURxROWwA-28" value="" style="rounded=1;whiteSpace=wrap;html=1;" parent="1" vertex="1">
|
||||||
|
<mxGeometry x="350" y="353" width="230" height="77" as="geometry" />
|
||||||
|
</mxCell>
|
||||||
|
<mxCell id="X0RF5wBsf5JYURxROWwA-29" value="<b>Local workspace</b>" style="text;html=1;whiteSpace=wrap;strokeColor=none;fillColor=none;align=left;verticalAlign=middle;rounded=0;" parent="1" vertex="1">
|
||||||
|
<mxGeometry x="370" y="361.5" width="152" height="30" as="geometry" />
|
||||||
|
</mxCell>
|
||||||
|
<mxCell id="X0RF5wBsf5JYURxROWwA-30" value="<ul><li>provisioned using the learning tool</li><li>managed using harmony</li></ul>" style="text;html=1;whiteSpace=wrap;strokeColor=none;fillColor=none;align=left;verticalAlign=middle;rounded=0;" parent="1" vertex="1">
|
||||||
|
<mxGeometry x="620" y="450" width="250" height="30" as="geometry" />
|
||||||
|
</mxCell>
|
||||||
|
<mxCell id="X0RF5wBsf5JYURxROWwA-31" value="<i><font style="color: rgb(51, 51, 51);">Minimum required to learn Harmony</font></i>" style="text;html=1;whiteSpace=wrap;strokeColor=none;fillColor=none;align=left;verticalAlign=middle;rounded=0;" parent="1" vertex="1">
|
||||||
|
<mxGeometry x="370" y="323" width="200" height="30" as="geometry" />
|
||||||
|
</mxCell>
|
||||||
|
<mxCell id="X0RF5wBsf5JYURxROWwA-33" value="<font color="#333333"><i>A place to store configs and</i></font><div><font color="#333333"><i>runtime info.</i></font></div>" style="text;html=1;whiteSpace=wrap;strokeColor=none;fillColor=none;align=left;verticalAlign=middle;rounded=0;" parent="1" vertex="1">
|
||||||
|
<mxGeometry x="370" y="395" width="200" height="30" as="geometry" />
|
||||||
|
</mxCell>
|
||||||
|
<mxCell id="X0RF5wBsf5JYURxROWwA-34" value="<i><font style="color: rgb(51, 51, 51);">Modifications of localhost</font></i>" style="text;html=1;whiteSpace=wrap;strokeColor=none;fillColor=none;align=left;verticalAlign=middle;rounded=0;" parent="1" vertex="1">
|
||||||
|
<mxGeometry x="360" y="470" width="200" height="30" as="geometry" />
|
||||||
|
</mxCell>
|
||||||
|
<mxCell id="X0RF5wBsf5JYURxROWwA-35" value="<i><font style="color: rgb(51, 51, 51);">Modifications of localhost</font></i>" style="text;html=1;whiteSpace=wrap;strokeColor=none;fillColor=none;align=left;verticalAlign=middle;rounded=0;" parent="1" vertex="1">
|
||||||
|
<mxGeometry x="626.5" y="264" width="200" height="30" as="geometry" />
|
||||||
|
</mxCell>
|
||||||
|
<mxCell id="X0RF5wBsf5JYURxROWwA-36" value="<i><font style="color: rgb(51, 51, 51);">Data store (image registry, etc.)</font></i>" style="text;html=1;whiteSpace=wrap;strokeColor=none;fillColor=none;align=left;verticalAlign=middle;rounded=0;" parent="1" vertex="1">
|
||||||
|
<mxGeometry x="369" y="620" width="200" height="30" as="geometry" />
|
||||||
|
</mxCell>
|
||||||
|
<mxCell id="X0RF5wBsf5JYURxROWwA-37" value="<font color="#333333"><i>Execution environment</i></font>" style="text;html=1;whiteSpace=wrap;strokeColor=none;fillColor=none;align=left;verticalAlign=middle;rounded=0;" parent="1" vertex="1">
|
||||||
|
<mxGeometry x="626.5" y="420" width="200" height="30" as="geometry" />
|
||||||
|
</mxCell>
|
||||||
|
</root>
|
||||||
|
</mxGraphModel>
|
||||||
|
</diagram>
|
||||||
|
<diagram id="oOuscOXp9aWETXQepMaW" name="nested-virtualization">
|
||||||
|
<mxGraphModel dx="1111" dy="487" grid="1" gridSize="10" guides="1" tooltips="1" connect="1" arrows="1" fold="1" page="1" pageScale="1" pageWidth="827" pageHeight="1169" math="0" shadow="0">
|
||||||
|
<root>
|
||||||
|
<mxCell id="0" />
|
||||||
|
<mxCell id="1" parent="0" />
|
||||||
|
<mxCell id="vj3pG7w7rTpS1tnBMssI-1" value="" style="rounded=1;whiteSpace=wrap;html=1;" parent="1" vertex="1">
|
||||||
|
<mxGeometry x="190" y="60" width="1240" height="660" as="geometry" />
|
||||||
|
</mxCell>
|
||||||
|
<mxCell id="vj3pG7w7rTpS1tnBMssI-37" value="" style="rounded=1;whiteSpace=wrap;html=1;" parent="1" vertex="1">
|
||||||
|
<mxGeometry x="540" y="210" width="830" height="480" as="geometry" />
|
||||||
|
</mxCell>
|
||||||
|
<mxCell id="vj3pG7w7rTpS1tnBMssI-2" value="<b>localhost</b>" style="text;html=1;whiteSpace=wrap;strokeColor=none;fillColor=none;align=center;verticalAlign=middle;rounded=0;" parent="1" vertex="1">
|
||||||
|
<mxGeometry x="260" y="84" width="60" height="30" as="geometry" />
|
||||||
|
</mxCell>
|
||||||
|
<mxCell id="vj3pG7w7rTpS1tnBMssI-3" value="" style="rounded=1;whiteSpace=wrap;html=1;" parent="1" vertex="1">
|
||||||
|
<mxGeometry x="240" y="223" width="230" height="67" as="geometry" />
|
||||||
|
</mxCell>
|
||||||
|
<mxCell id="vj3pG7w7rTpS1tnBMssI-4" value="" style="rounded=1;whiteSpace=wrap;html=1;" parent="1" vertex="1">
|
||||||
|
<mxGeometry x="855" y="403.75" width="310" height="266.25" as="geometry" />
|
||||||
|
</mxCell>
|
||||||
|
<mxCell id="vj3pG7w7rTpS1tnBMssI-5" value="<b>opnsense vm</b>" style="text;html=1;whiteSpace=wrap;strokeColor=none;fillColor=none;align=center;verticalAlign=middle;rounded=0;" parent="1" vertex="1">
|
||||||
|
<mxGeometry x="885" y="413.75" width="90" height="30" as="geometry" />
|
||||||
|
</mxCell>
|
||||||
|
<mxCell id="vj3pG7w7rTpS1tnBMssI-6" value="" style="rounded=1;whiteSpace=wrap;html=1;" parent="1" vertex="1">
|
||||||
|
<mxGeometry x="810" y="230" width="530" height="120" as="geometry" />
|
||||||
|
</mxCell>
|
||||||
|
<mxCell id="vj3pG7w7rTpS1tnBMssI-7" value="<b>workspace VM network</b>" style="text;html=1;whiteSpace=wrap;strokeColor=none;fillColor=none;align=center;verticalAlign=middle;rounded=0;" parent="1" vertex="1">
|
||||||
|
<mxGeometry x="825" y="234" width="150" height="30" as="geometry" />
|
||||||
|
</mxCell>
|
||||||
|
<mxCell id="vj3pG7w7rTpS1tnBMssI-8" value="<b>Src repo or</b><div style=""><b>Harmony learning tool</b></div>" style="text;html=1;whiteSpace=wrap;strokeColor=none;fillColor=none;align=left;verticalAlign=middle;rounded=0;" parent="1" vertex="1">
|
||||||
|
<mxGeometry x="260" y="234" width="152" height="30" as="geometry" />
|
||||||
|
</mxCell>
|
||||||
|
<mxCell id="vj3pG7w7rTpS1tnBMssI-10" value="" style="endArrow=none;html=1;rounded=0;align=center;verticalAlign=top;endFill=0;labelBackgroundColor=none;endSize=2;" parent="1" source="vj3pG7w7rTpS1tnBMssI-9" target="vj3pG7w7rTpS1tnBMssI-11" edge="1">
|
||||||
|
<mxGeometry relative="1" as="geometry" />
|
||||||
|
</mxCell>
|
||||||
|
<mxCell id="vj3pG7w7rTpS1tnBMssI-13" value="" style="endArrow=none;html=1;rounded=0;align=center;verticalAlign=top;endFill=0;labelBackgroundColor=none;endSize=2;" parent="1" source="vj3pG7w7rTpS1tnBMssI-12" target="vj3pG7w7rTpS1tnBMssI-14" edge="1">
|
||||||
|
<mxGeometry relative="1" as="geometry" />
|
||||||
|
</mxCell>
|
||||||
|
<mxCell id="vj3pG7w7rTpS1tnBMssI-15" value="" style="html=1;rounded=0;" parent="1" vertex="1">
|
||||||
|
<mxGeometry x="1025" y="393.75" width="30" height="30" as="geometry" />
|
||||||
|
</mxCell>
|
||||||
|
<mxCell id="vj3pG7w7rTpS1tnBMssI-16" value="" style="endArrow=none;html=1;rounded=0;align=center;verticalAlign=top;endFill=0;labelBackgroundColor=none;endSize=2;" parent="1" source="vj3pG7w7rTpS1tnBMssI-15" target="vj3pG7w7rTpS1tnBMssI-17" edge="1">
|
||||||
|
<mxGeometry relative="1" as="geometry" />
|
||||||
|
</mxCell>
|
||||||
|
<mxCell id="vj3pG7w7rTpS1tnBMssI-17" value="" style="shape=requiredInterface;html=1;fontSize=11;align=center;fillColor=none;points=[];aspect=fixed;resizable=0;verticalAlign=bottom;labelPosition=center;verticalLabelPosition=top;flipH=1;rotation=-90;" parent="1" vertex="1">
|
||||||
|
<mxGeometry x="1037.5" y="363.75" width="5" height="10" as="geometry" />
|
||||||
|
</mxCell>
|
||||||
|
<mxCell id="vj3pG7w7rTpS1tnBMssI-18" value="" style="html=1;rounded=0;" parent="1" vertex="1">
|
||||||
|
<mxGeometry x="1074" y="393.75" width="30" height="30" as="geometry" />
|
||||||
|
</mxCell>
|
||||||
|
<mxCell id="vj3pG7w7rTpS1tnBMssI-19" value="" style="endArrow=none;html=1;rounded=0;align=center;verticalAlign=top;endFill=0;labelBackgroundColor=none;endSize=2;" parent="1" source="vj3pG7w7rTpS1tnBMssI-18" target="vj3pG7w7rTpS1tnBMssI-20" edge="1">
|
||||||
|
<mxGeometry relative="1" as="geometry" />
|
||||||
|
</mxCell>
|
||||||
|
<mxCell id="vj3pG7w7rTpS1tnBMssI-20" value="" style="shape=requiredInterface;html=1;fontSize=11;align=center;fillColor=none;points=[];aspect=fixed;resizable=0;verticalAlign=bottom;labelPosition=center;verticalLabelPosition=top;flipH=1;rotation=-90;" parent="1" vertex="1">
|
||||||
|
<mxGeometry x="1086.5" y="363.75" width="5" height="10" as="geometry" />
|
||||||
|
</mxCell>
|
||||||
|
<mxCell id="vj3pG7w7rTpS1tnBMssI-21" value="" style="rounded=1;whiteSpace=wrap;html=1;" parent="1" vertex="1">
|
||||||
|
<mxGeometry x="240" y="380" width="230" height="120" as="geometry" />
|
||||||
|
</mxCell>
|
||||||
|
<mxCell id="vj3pG7w7rTpS1tnBMssI-22" value="<b>Learning dependencies</b>" style="text;html=1;whiteSpace=wrap;strokeColor=none;fillColor=none;align=left;verticalAlign=middle;rounded=0;" parent="1" vertex="1">
|
||||||
|
<mxGeometry x="250" y="391" width="152" height="30" as="geometry" />
|
||||||
|
</mxCell>
|
||||||
|
<mxCell id="vj3pG7w7rTpS1tnBMssI-23" value="<ul><li>harmony</li><li>kvm</li><li>virt-customize</li><li>...</li></ul>" style="text;html=1;whiteSpace=wrap;strokeColor=none;fillColor=none;align=left;verticalAlign=middle;rounded=0;" parent="1" vertex="1">
|
||||||
|
<mxGeometry x="250" y="450" width="150" height="30" as="geometry" />
|
||||||
|
</mxCell>
|
||||||
|
<mxCell id="vj3pG7w7rTpS1tnBMssI-24" value="" style="rounded=1;whiteSpace=wrap;html=1;" parent="1" vertex="1">
|
||||||
|
<mxGeometry x="240" y="540" width="230" height="130" as="geometry" />
|
||||||
|
</mxCell>
|
||||||
|
<mxCell id="vj3pG7w7rTpS1tnBMssI-25" value="<b>Example resources</b>" style="text;html=1;whiteSpace=wrap;strokeColor=none;fillColor=none;align=left;verticalAlign=middle;rounded=0;" parent="1" vertex="1">
|
||||||
|
<mxGeometry x="260" y="550" width="152" height="30" as="geometry" />
|
||||||
|
</mxCell>
|
||||||
|
<mxCell id="vj3pG7w7rTpS1tnBMssI-26" value="<ul><li>Can be mounted locally<br>for persistence</li></ul>" style="text;html=1;whiteSpace=wrap;strokeColor=none;fillColor=none;align=left;verticalAlign=middle;rounded=0;" parent="1" vertex="1">
|
||||||
|
<mxGeometry x="251" y="600" width="189" height="30" as="geometry" />
|
||||||
|
</mxCell>
|
||||||
|
<mxCell id="vj3pG7w7rTpS1tnBMssI-27" value="" style="rounded=1;whiteSpace=wrap;html=1;" parent="1" vertex="1">
|
||||||
|
<mxGeometry x="240" y="293" width="230" height="67" as="geometry" />
|
||||||
|
</mxCell>
|
||||||
|
<mxCell id="vj3pG7w7rTpS1tnBMssI-28" value="<b>Local workspace</b>" style="text;html=1;whiteSpace=wrap;strokeColor=none;fillColor=none;align=left;verticalAlign=middle;rounded=0;" parent="1" vertex="1">
|
||||||
|
<mxGeometry x="260" y="301.5" width="152" height="30" as="geometry" />
|
||||||
|
</mxCell>
|
||||||
|
<mxCell id="vj3pG7w7rTpS1tnBMssI-29" value="<ul><li>provisioned using the learning tool</li><li>managed using harmony</li></ul>" style="text;html=1;whiteSpace=wrap;strokeColor=none;fillColor=none;align=left;verticalAlign=middle;rounded=0;" parent="1" vertex="1">
|
||||||
|
<mxGeometry x="885" y="460.75" width="250" height="30" as="geometry" />
|
||||||
|
</mxCell>
|
||||||
|
<mxCell id="vj3pG7w7rTpS1tnBMssI-30" value="<i><font style="color: rgb(51, 51, 51);">Minimum required to learn Harmony</font></i>" style="text;html=1;whiteSpace=wrap;strokeColor=none;fillColor=none;align=left;verticalAlign=middle;rounded=0;" parent="1" vertex="1">
|
||||||
|
<mxGeometry x="260" y="263" width="200" height="30" as="geometry" />
|
||||||
|
</mxCell>
|
||||||
|
<mxCell id="vj3pG7w7rTpS1tnBMssI-31" value="<font color="#333333"><i>A place to store configs</i></font>" style="text;html=1;whiteSpace=wrap;strokeColor=none;fillColor=none;align=left;verticalAlign=middle;rounded=0;" parent="1" vertex="1">
|
||||||
|
<mxGeometry x="260" y="321" width="200" height="30" as="geometry" />
|
||||||
|
</mxCell>
|
||||||
|
<mxCell id="vj3pG7w7rTpS1tnBMssI-32" value="<i><font style="color: rgb(51, 51, 51);">Modifications of localhost</font></i>" style="text;html=1;whiteSpace=wrap;strokeColor=none;fillColor=none;align=left;verticalAlign=middle;rounded=0;" parent="1" vertex="1">
|
||||||
|
<mxGeometry x="250" y="410" width="200" height="30" as="geometry" />
|
||||||
|
</mxCell>
|
||||||
|
<mxCell id="vj3pG7w7rTpS1tnBMssI-34" value="<i><font style="color: rgb(51, 51, 51);">Data store (image registry, etc.)</font></i>" style="text;html=1;whiteSpace=wrap;strokeColor=none;fillColor=none;align=left;verticalAlign=middle;rounded=0;" parent="1" vertex="1">
|
||||||
|
<mxGeometry x="259" y="570" width="200" height="30" as="geometry" />
|
||||||
|
</mxCell>
|
||||||
|
<mxCell id="vj3pG7w7rTpS1tnBMssI-35" value="<font color="#333333"><i>Execution environment</i></font>" style="text;html=1;whiteSpace=wrap;strokeColor=none;fillColor=none;align=left;verticalAlign=middle;rounded=0;" parent="1" vertex="1">
|
||||||
|
<mxGeometry x="891.5" y="430.75" width="200" height="30" as="geometry" />
|
||||||
|
</mxCell>
|
||||||
|
<mxCell id="vj3pG7w7rTpS1tnBMssI-38" value="<b>workspace VM</b>" style="text;html=1;whiteSpace=wrap;strokeColor=none;fillColor=none;align=center;verticalAlign=middle;rounded=0;" parent="1" vertex="1">
|
||||||
|
<mxGeometry x="580" y="234" width="130" height="30" as="geometry" />
|
||||||
|
</mxCell>
|
||||||
|
<mxCell id="vj3pG7w7rTpS1tnBMssI-39" value="" style="rounded=1;whiteSpace=wrap;html=1;" parent="1" vertex="1">
|
||||||
|
<mxGeometry x="570" y="305" width="230" height="215" as="geometry" />
|
||||||
|
</mxCell>
|
||||||
|
<mxCell id="vj3pG7w7rTpS1tnBMssI-40" value="<b>workspace VM dependencies</b>" style="text;html=1;whiteSpace=wrap;strokeColor=none;fillColor=none;align=left;verticalAlign=middle;rounded=0;" parent="1" vertex="1">
|
||||||
|
<mxGeometry x="580" y="316" width="200" height="30" as="geometry" />
|
||||||
|
</mxCell>
|
||||||
|
<mxCell id="vj3pG7w7rTpS1tnBMssI-41" value="<ul><li>kvm</li><li>virt-customize</li><li>...</li></ul>" style="text;html=1;whiteSpace=wrap;strokeColor=none;fillColor=none;align=left;verticalAlign=middle;rounded=0;" parent="1" vertex="1">
|
||||||
|
<mxGeometry x="580" y="375" width="150" height="30" as="geometry" />
|
||||||
|
</mxCell>
|
||||||
|
<mxCell id="vj3pG7w7rTpS1tnBMssI-44" value="" style="rounded=1;whiteSpace=wrap;html=1;" parent="1" vertex="1">
|
||||||
|
<mxGeometry x="570" y="540" width="230" height="130" as="geometry" />
|
||||||
|
</mxCell>
|
||||||
|
<mxCell id="vj3pG7w7rTpS1tnBMssI-48" value="" style="rounded=1;whiteSpace=wrap;html=1;" parent="1" vertex="1">
|
||||||
|
<mxGeometry x="830" y="277" width="235" height="45" as="geometry" />
|
||||||
|
</mxCell>
|
||||||
|
<mxCell id="vj3pG7w7rTpS1tnBMssI-45" value="<b>Example resources</b>" style="text;html=1;whiteSpace=wrap;strokeColor=none;fillColor=none;align=left;verticalAlign=middle;rounded=0;" parent="1" vertex="1">
|
||||||
|
<mxGeometry x="590" y="550" width="152" height="30" as="geometry" />
|
||||||
|
</mxCell>
|
||||||
|
<mxCell id="vj3pG7w7rTpS1tnBMssI-46" value="<ul><li>src opnsense images</li><li>modified opnsense images</li></ul>" style="text;html=1;whiteSpace=wrap;strokeColor=none;fillColor=none;align=left;verticalAlign=middle;rounded=0;" parent="1" vertex="1">
|
||||||
|
<mxGeometry x="581" y="600" width="208" height="30" as="geometry" />
|
||||||
|
</mxCell>
|
||||||
|
<mxCell id="vj3pG7w7rTpS1tnBMssI-47" value="<i><font style="color: rgb(51, 51, 51);">Data store (image registry, etc.)</font></i>" style="text;html=1;whiteSpace=wrap;strokeColor=none;fillColor=none;align=left;verticalAlign=middle;rounded=0;" parent="1" vertex="1">
|
||||||
|
<mxGeometry x="589" y="570" width="200" height="30" as="geometry" />
|
||||||
|
</mxCell>
|
||||||
|
<mxCell id="vj3pG7w7rTpS1tnBMssI-9" value="" style="html=1;rounded=0;direction=south;rotation=90;" parent="1" vertex="1">
|
||||||
|
<mxGeometry x="1025" y="300.75" width="30" height="30" as="geometry" />
|
||||||
|
</mxCell>
|
||||||
|
<mxCell id="vj3pG7w7rTpS1tnBMssI-11" value="" style="ellipse;html=1;fontSize=11;align=center;fillColor=none;points=[];aspect=fixed;resizable=0;verticalAlign=bottom;labelPosition=center;verticalLabelPosition=top;flipH=1;" parent="1" vertex="1">
|
||||||
|
<mxGeometry x="1036" y="352.75" width="8" height="8" as="geometry" />
|
||||||
|
</mxCell>
|
||||||
|
<mxCell id="vj3pG7w7rTpS1tnBMssI-49" value="" style="rounded=1;whiteSpace=wrap;html=1;" parent="1" vertex="1">
|
||||||
|
<mxGeometry x="1069" y="275" width="251" height="45" as="geometry" />
|
||||||
|
</mxCell>
|
||||||
|
<mxCell id="vj3pG7w7rTpS1tnBMssI-12" value="" style="html=1;rounded=0;" parent="1" vertex="1">
|
||||||
|
<mxGeometry x="1075" y="300.75" width="30" height="30" as="geometry" />
|
||||||
|
</mxCell>
|
||||||
|
<mxCell id="vj3pG7w7rTpS1tnBMssI-14" value="" style="ellipse;html=1;fontSize=11;align=center;fillColor=none;points=[];aspect=fixed;resizable=0;verticalAlign=bottom;labelPosition=center;verticalLabelPosition=top;flipH=1;" parent="1" vertex="1">
|
||||||
|
<mxGeometry x="1085" y="352.75" width="8" height="8" as="geometry" />
|
||||||
|
</mxCell>
|
||||||
|
<mxCell id="vj3pG7w7rTpS1tnBMssI-50" value="<b>wan</b>" style="text;html=1;whiteSpace=wrap;strokeColor=none;fillColor=none;align=center;verticalAlign=middle;rounded=0;" parent="1" vertex="1">
|
||||||
|
<mxGeometry x="821.5" y="277" width="70" height="30" as="geometry" />
|
||||||
|
</mxCell>
|
||||||
|
<mxCell id="vj3pG7w7rTpS1tnBMssI-51" value="<b>lan</b>" style="text;html=1;whiteSpace=wrap;strokeColor=none;fillColor=none;align=center;verticalAlign=middle;rounded=0;" parent="1" vertex="1">
|
||||||
|
<mxGeometry x="1055" y="275" width="70" height="30" as="geometry" />
|
||||||
|
</mxCell>
|
||||||
|
<mxCell id="vj3pG7w7rTpS1tnBMssI-52" value="" style="rounded=1;whiteSpace=wrap;html=1;" parent="1" vertex="1">
|
||||||
|
<mxGeometry x="1181" y="403.75" width="159" height="116.25" as="geometry" />
|
||||||
|
</mxCell>
|
||||||
|
<mxCell id="vj3pG7w7rTpS1tnBMssI-53" value="" style="endArrow=none;html=1;rounded=0;align=center;verticalAlign=top;endFill=0;labelBackgroundColor=none;endSize=2;" parent="1" source="vj3pG7w7rTpS1tnBMssI-54" target="vj3pG7w7rTpS1tnBMssI-55" edge="1">
|
||||||
|
<mxGeometry relative="1" as="geometry" />
|
||||||
|
</mxCell>
|
||||||
|
<mxCell id="vj3pG7w7rTpS1tnBMssI-54" value="" style="html=1;rounded=0;" parent="1" vertex="1">
|
||||||
|
<mxGeometry x="1211" y="305" width="30" height="30" as="geometry" />
|
||||||
|
</mxCell>
|
||||||
|
<mxCell id="vj3pG7w7rTpS1tnBMssI-55" value="" style="ellipse;html=1;fontSize=11;align=center;fillColor=none;points=[];aspect=fixed;resizable=0;verticalAlign=bottom;labelPosition=center;verticalLabelPosition=top;flipH=1;" parent="1" vertex="1">
|
||||||
|
<mxGeometry x="1221" y="357" width="8" height="8" as="geometry" />
|
||||||
|
</mxCell>
|
||||||
|
<mxCell id="vj3pG7w7rTpS1tnBMssI-58" value="" style="html=1;rounded=0;" parent="1" vertex="1">
|
||||||
|
<mxGeometry x="1210" y="393.75" width="30" height="30" as="geometry" />
|
||||||
|
</mxCell>
|
||||||
|
<mxCell id="vj3pG7w7rTpS1tnBMssI-59" value="" style="endArrow=none;html=1;rounded=0;align=center;verticalAlign=top;endFill=0;labelBackgroundColor=none;endSize=2;" parent="1" source="vj3pG7w7rTpS1tnBMssI-58" target="vj3pG7w7rTpS1tnBMssI-60" edge="1">
|
||||||
|
<mxGeometry relative="1" as="geometry" />
|
||||||
|
</mxCell>
|
||||||
|
<mxCell id="vj3pG7w7rTpS1tnBMssI-60" value="" style="shape=requiredInterface;html=1;fontSize=11;align=center;fillColor=none;points=[];aspect=fixed;resizable=0;verticalAlign=bottom;labelPosition=center;verticalLabelPosition=top;flipH=1;rotation=-90;" parent="1" vertex="1">
|
||||||
|
<mxGeometry x="1222.5" y="363.75" width="5" height="10" as="geometry" />
|
||||||
|
</mxCell>
|
||||||
|
<mxCell id="vj3pG7w7rTpS1tnBMssI-61" value="<b>other vm</b>" style="text;html=1;whiteSpace=wrap;strokeColor=none;fillColor=none;align=center;verticalAlign=middle;rounded=0;" parent="1" vertex="1">
|
||||||
|
<mxGeometry x="1181" y="430" width="90" height="30" as="geometry" />
|
||||||
|
</mxCell>
|
||||||
|
<mxCell id="vj3pG7w7rTpS1tnBMssI-63" value="" style="endArrow=classic;startArrow=classic;html=1;rounded=0;exitX=1;exitY=0.5;exitDx=0;exitDy=0;entryX=0;entryY=0.5;entryDx=0;entryDy=0;" parent="1" source="vj3pG7w7rTpS1tnBMssI-24" target="vj3pG7w7rTpS1tnBMssI-44" edge="1">
|
||||||
|
<mxGeometry width="50" height="50" relative="1" as="geometry">
|
||||||
|
<mxPoint x="600" y="770" as="sourcePoint" />
|
||||||
|
<mxPoint x="360" y="885" as="targetPoint" />
|
||||||
|
</mxGeometry>
|
||||||
|
</mxCell>
|
||||||
|
<mxCell id="Za91R7Nqk7Jj5VTRes6Y-1" value="" style="rounded=1;whiteSpace=wrap;html=1;" parent="1" vertex="1">
|
||||||
|
<mxGeometry x="799" y="84" width="280" height="60" as="geometry" />
|
||||||
|
</mxCell>
|
||||||
|
<mxCell id="Za91R7Nqk7Jj5VTRes6Y-2" value="<b>localhost network</b>" style="text;html=1;whiteSpace=wrap;strokeColor=none;fillColor=none;align=center;verticalAlign=middle;rounded=0;" parent="1" vertex="1">
|
||||||
|
<mxGeometry x="789" y="84" width="150" height="30" as="geometry" />
|
||||||
|
</mxCell>
|
||||||
|
<mxCell id="Za91R7Nqk7Jj5VTRes6Y-6" value="" style="html=1;rounded=0;" parent="1" vertex="1">
|
||||||
|
<mxGeometry x="989" y="124" width="30" height="30" as="geometry" />
|
||||||
|
</mxCell>
|
||||||
|
<mxCell id="Za91R7Nqk7Jj5VTRes6Y-7" value="" style="endArrow=none;html=1;rounded=0;align=center;verticalAlign=top;endFill=0;labelBackgroundColor=none;endSize=2;" parent="1" source="Za91R7Nqk7Jj5VTRes6Y-6" target="Za91R7Nqk7Jj5VTRes6Y-8" edge="1">
|
||||||
|
<mxGeometry relative="1" as="geometry" />
|
||||||
|
</mxCell>
|
||||||
|
<mxCell id="Za91R7Nqk7Jj5VTRes6Y-8" value="" style="ellipse;html=1;fontSize=11;align=center;fillColor=none;points=[];aspect=fixed;resizable=0;verticalAlign=bottom;labelPosition=center;verticalLabelPosition=top;flipH=1;" parent="1" vertex="1">
|
||||||
|
<mxGeometry x="999" y="176" width="8" height="8" as="geometry" />
|
||||||
|
</mxCell>
|
||||||
|
<mxCell id="Za91R7Nqk7Jj5VTRes6Y-12" value="" style="html=1;rounded=0;" parent="1" vertex="1">
|
||||||
|
<mxGeometry x="988" y="217" width="30" height="30" as="geometry" />
|
||||||
|
</mxCell>
|
||||||
|
<mxCell id="Za91R7Nqk7Jj5VTRes6Y-13" value="" style="endArrow=none;html=1;rounded=0;align=center;verticalAlign=top;endFill=0;labelBackgroundColor=none;endSize=2;" parent="1" source="Za91R7Nqk7Jj5VTRes6Y-12" target="Za91R7Nqk7Jj5VTRes6Y-14" edge="1">
|
||||||
|
<mxGeometry relative="1" as="geometry" />
|
||||||
|
</mxCell>
|
||||||
|
<mxCell id="Za91R7Nqk7Jj5VTRes6Y-14" value="" style="shape=requiredInterface;html=1;fontSize=11;align=center;fillColor=none;points=[];aspect=fixed;resizable=0;verticalAlign=bottom;labelPosition=center;verticalLabelPosition=top;flipH=1;rotation=-90;" parent="1" vertex="1">
|
||||||
|
<mxGeometry x="1000.5" y="187" width="5" height="10" as="geometry" />
|
||||||
|
</mxCell>
|
||||||
|
<mxCell id="Za91R7Nqk7Jj5VTRes6Y-15" value="<font color="#333333"><i>No modification required</i></font>" style="text;html=1;whiteSpace=wrap;strokeColor=none;fillColor=none;align=left;verticalAlign=middle;rounded=0;" parent="1" vertex="1">
|
||||||
|
<mxGeometry x="805.5" y="98" width="200" height="30" as="geometry" />
|
||||||
|
</mxCell>
|
||||||
|
</root>
|
||||||
|
</mxGraphModel>
|
||||||
|
</diagram>
|
||||||
|
</mxfile>
|
||||||
13
examples/opnsense/scripts/env-var
Normal file
13
examples/opnsense/scripts/env-var
Normal file
@@ -0,0 +1,13 @@
|
|||||||
|
## directory containing harmony-ve data
|
||||||
|
# HVE_ROOT=~/.harmony-ve
|
||||||
|
|
||||||
|
## OPNSENSE SRC
|
||||||
|
# main mirror
|
||||||
|
# HVE_OPNSENSE_URL=https://pkg.opnsense.org/releases
|
||||||
|
# first alternative mirror
|
||||||
|
# HVE_OPENSENSE_URL_ALT1=https://mirror.vraphim.com/opnsense/releases
|
||||||
|
# HVE_OPNSENSE_URL_ALT2=https://mirror.winsub.kr/opnsense/releases
|
||||||
|
|
||||||
|
|
||||||
|
## Network
|
||||||
|
# HVE_NETWORK_LABEL=harmony
|
||||||
105
examples/opnsense/scripts/harmony-ve
Executable file
105
examples/opnsense/scripts/harmony-ve
Executable file
@@ -0,0 +1,105 @@
|
|||||||
|
|
||||||
|
#! /bin/bash
|
||||||
|
|
||||||
|
harmony-ve()(
|
||||||
|
|
||||||
|
set -eu
|
||||||
|
|
||||||
|
[ "${1:-}" != "-d" ] || { set -x ; shift ; }
|
||||||
|
trap '[ "$?" = "0" ] || >&2 echo ABNORMAL TERMINATION' EXIT
|
||||||
|
|
||||||
|
SCRIPTS_DIR=$(readlink -f "$(dirname "${BASH_SOURCE}")")
|
||||||
|
. "${SCRIPTS_DIR}/common"
|
||||||
|
|
||||||
|
_short_help(){
|
||||||
|
|
||||||
|
cat <<-EOM
|
||||||
|
|
||||||
|
NAME
|
||||||
|
|
||||||
|
harmony-ve
|
||||||
|
|
||||||
|
DESCRIPTION
|
||||||
|
|
||||||
|
CLI management toolkit for Harmony Virtualized Execution Environment
|
||||||
|
|
||||||
|
SYNOPSYS
|
||||||
|
|
||||||
|
harmony-ve [GLOBAL_OPTIONS] COMMAND [OPTIONS]
|
||||||
|
|
||||||
|
harmony-ve dependencies # manage localhost depend
|
||||||
|
harmony-ve network # manage localhost netork
|
||||||
|
|
||||||
|
harmony-ve opnsense-img-src # manage opnsense OS source images
|
||||||
|
harmony-ve opnsense-img # manage opnsense OS images
|
||||||
|
|
||||||
|
harmony-ve vm # manage vm
|
||||||
|
|
||||||
|
EOM
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
_extra_help(){
|
||||||
|
|
||||||
|
cat <<-EOM
|
||||||
|
|
||||||
|
DESCRIPTION
|
||||||
|
|
||||||
|
Automation CLI to easily provision and manage a Virtualized Execution Environment for testing and learning Harmony.
|
||||||
|
|
||||||
|
This tool allows:
|
||||||
|
- new harmony users to start testing within 15 minutes on their development desktop
|
||||||
|
- automate virtualized test in pipeline
|
||||||
|
|
||||||
|
GLOBAL_OPTIONS
|
||||||
|
|
||||||
|
-d Debug mode.
|
||||||
|
|
||||||
|
WARNINGS
|
||||||
|
|
||||||
|
This script is experimetal. Use with caution.
|
||||||
|
EOM
|
||||||
|
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
# Implement functions
|
||||||
|
|
||||||
|
case "${1:-}" in
|
||||||
|
|
||||||
|
"")
|
||||||
|
_short_help
|
||||||
|
;;
|
||||||
|
-h|--help)
|
||||||
|
_short_help
|
||||||
|
_extra_help
|
||||||
|
;;
|
||||||
|
# Commands entrypoints
|
||||||
|
deps|dependencies)
|
||||||
|
harmony-ve-dependencies "${@:2}"
|
||||||
|
;;
|
||||||
|
net|network)
|
||||||
|
harmony-ve-network"${@:2}"
|
||||||
|
;;
|
||||||
|
img-src|opnsense-img-src)
|
||||||
|
harmony-ve-opnsense-img-src "${@:2}"
|
||||||
|
;;
|
||||||
|
img|opnsense-img)
|
||||||
|
harmony-ve-opnsense-img "${@:2}"
|
||||||
|
;;
|
||||||
|
vm)
|
||||||
|
harmony-ve-vm "${@:2}"
|
||||||
|
;;
|
||||||
|
|
||||||
|
*)
|
||||||
|
_warn "Unknown COMMAND '$1'"
|
||||||
|
exit 1
|
||||||
|
;;
|
||||||
|
esac
|
||||||
|
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
[ "$0" != "${BASH_SOURCE}" ] || harmony-ve "${@}"
|
||||||
|
|
||||||
125
examples/opnsense/scripts/harmony-ve-dependencies
Executable file
125
examples/opnsense/scripts/harmony-ve-dependencies
Executable file
@@ -0,0 +1,125 @@
|
|||||||
|
#! /bin/bash
|
||||||
|
#
|
||||||
|
# virt-install <= virtinst
|
||||||
|
# quemu-img
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
harmony-ve-dependencies()(
|
||||||
|
|
||||||
|
set -eu
|
||||||
|
|
||||||
|
[ "${1:-}" != "-d" ] || { set -x ; shift ; }
|
||||||
|
trap '[ "$?" = "0" ] || >&2 echo ABNORMAL TERMINATION' EXIT
|
||||||
|
|
||||||
|
SCRIPTS_DIR=$(readlink -f "$(dirname "${BASH_SOURCE}")")
|
||||||
|
. "${SCRIPTS_DIR}/common"
|
||||||
|
_short_help(){
|
||||||
|
|
||||||
|
cat <<-EOM
|
||||||
|
|
||||||
|
NAME
|
||||||
|
|
||||||
|
harmony-ve-dependencies
|
||||||
|
|
||||||
|
DESCRIPTION
|
||||||
|
|
||||||
|
Manage localhost dependencies needed for Harmony Virtual Execution Environment
|
||||||
|
|
||||||
|
SYNOPSYS
|
||||||
|
|
||||||
|
devops-dependencies [GLOBAL_OPTIONS] COMMAND [OPTIONS]
|
||||||
|
|
||||||
|
devops check # Check that dependencies are installed
|
||||||
|
devops install # Install missing dependencies
|
||||||
|
|
||||||
|
EOM
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
_extra_help(){
|
||||||
|
|
||||||
|
cat <<-EOM
|
||||||
|
|
||||||
|
GLOBAL_OPTIONS
|
||||||
|
|
||||||
|
-d Debug mode.
|
||||||
|
|
||||||
|
WARNINGS
|
||||||
|
|
||||||
|
This script is experimetal. Use with caution.
|
||||||
|
EOM
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
_check_dependencies(){
|
||||||
|
. "${SCRIPTS_DIR}/dependencies-management"
|
||||||
|
missing=0
|
||||||
|
NEED_IP=false
|
||||||
|
NEED_KVM=false
|
||||||
|
NEED_VIRT_CUSTOMIZE=false
|
||||||
|
NEED_WGET=false
|
||||||
|
is_debian_family || _fatal only debian based version is supported
|
||||||
|
has_ip || {
|
||||||
|
missing=$(( missing + 1));
|
||||||
|
_warn "ip command is missing";
|
||||||
|
NEED_IP=true
|
||||||
|
}
|
||||||
|
has_virsh ||{
|
||||||
|
missing=$(( missing + 1));
|
||||||
|
_warn "virsh command is missing";
|
||||||
|
NEED_KVM=true
|
||||||
|
}
|
||||||
|
has_virt_customize || {
|
||||||
|
missing=$(( missing + 1));
|
||||||
|
_warn "virt-customize command is missing";
|
||||||
|
NEED_VIRT_CUSTOMIZE=true
|
||||||
|
}
|
||||||
|
has_wget || has_curl || {
|
||||||
|
missing=$(( missing + 1));
|
||||||
|
_warn "wget and curl commands are missing"; NEED_WGET=true
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
_install_dependencies(){
|
||||||
|
|
||||||
|
[ "$NEED_KVM" != "true" ] || install_kvm
|
||||||
|
[ "$NEED_VIRT_CUSTOMIZE" != "true" ] || install_virt_customize
|
||||||
|
[ "$NEED_WGET" != "true" ] || install_wget
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
case "${1:-}" in
|
||||||
|
|
||||||
|
"")
|
||||||
|
_short_help
|
||||||
|
;;
|
||||||
|
-h|--help)
|
||||||
|
_short_help
|
||||||
|
_extra_help
|
||||||
|
;;
|
||||||
|
cdeps|check-dependencies)
|
||||||
|
_check_dependencies
|
||||||
|
if [ "$missing" -gt 0 ]; then
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
_warn No missing dependencies
|
||||||
|
;;
|
||||||
|
ideps|install-dependencies)
|
||||||
|
_check_dependencies
|
||||||
|
_install_dependencies
|
||||||
|
;;
|
||||||
|
*)
|
||||||
|
_warn "Unknown COMMAND '$1'"
|
||||||
|
exit 1
|
||||||
|
;;
|
||||||
|
esac
|
||||||
|
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
[ "$0" != "${BASH_SOURCE}" ] || harmony-ve-dependencies "${@}"
|
||||||
|
|
||||||
232
examples/opnsense/scripts/harmony-ve-network
Executable file
232
examples/opnsense/scripts/harmony-ve-network
Executable file
@@ -0,0 +1,232 @@
|
|||||||
|
|
||||||
|
#! /bin/bash
|
||||||
|
# todo: allow wan to switch from ethernet to wifi
|
||||||
|
|
||||||
|
|
||||||
|
harmony-ve-network()(
|
||||||
|
|
||||||
|
set -eu
|
||||||
|
|
||||||
|
[ "${1:-}" != "-d" ] || { set -x ; shift ; }
|
||||||
|
trap '[ "$?" = "0" ] || >&2 echo ABNORMAL TERMINATION' EXIT
|
||||||
|
|
||||||
|
SCRIPTS_DIR=$(readlink -f "$(dirname "${BASH_SOURCE}")")
|
||||||
|
. "${SCRIPTS_DIR}/common"
|
||||||
|
. "${SCRIPTS_DIR}/default-env-var"
|
||||||
|
|
||||||
|
_short_help(){
|
||||||
|
|
||||||
|
cat <<-EOM
|
||||||
|
|
||||||
|
NAME
|
||||||
|
|
||||||
|
harmony-ve-network
|
||||||
|
|
||||||
|
DESCRIPTION
|
||||||
|
|
||||||
|
Modify localhost network for Harmony Virtual Execution Environment
|
||||||
|
SYNOPSYS
|
||||||
|
|
||||||
|
harmony-ve-network [GLOBAL_OPTIONS] COMMAND [OPTIONS]
|
||||||
|
|
||||||
|
harmony-ve-network check
|
||||||
|
harmony-ve-network setup
|
||||||
|
harmony-ve-network cleanup
|
||||||
|
|
||||||
|
EOM
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
_extra_help(){
|
||||||
|
|
||||||
|
cat <<-EOM
|
||||||
|
|
||||||
|
GLOBAL_OPTIONS
|
||||||
|
|
||||||
|
-d Debug mode.
|
||||||
|
|
||||||
|
WARNINGS
|
||||||
|
|
||||||
|
This script is experimetal. Use with caution.
|
||||||
|
|
||||||
|
|
||||||
|
IMPLEMENTATION NOTES
|
||||||
|
|
||||||
|
- use the network manager present to add 2 bridges (wan + lan)
|
||||||
|
|
||||||
|
|
||||||
|
EOM
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
# dependency management
|
||||||
|
|
||||||
|
_is_service_used(){
|
||||||
|
service=$1
|
||||||
|
sudo systemctl list-unit-files $service || return 1
|
||||||
|
sudo systemctl status --no-pager $service || return 1
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
# Implement functions
|
||||||
|
|
||||||
|
_list_bridges(){
|
||||||
|
|
||||||
|
ip -o link show type bridge | awk '{print $2}' | sed 's/://g'
|
||||||
|
}
|
||||||
|
|
||||||
|
_is_a_bridge(){
|
||||||
|
bridge=$1
|
||||||
|
matched=$(_list_bridges | grep "$bridge")
|
||||||
|
[ "$matched" = "$bridge" ] || return 1
|
||||||
|
}
|
||||||
|
|
||||||
|
_bridge_is_up(){
|
||||||
|
_fatal Not implemented
|
||||||
|
}
|
||||||
|
|
||||||
|
_rename_nmcli_profile(){
|
||||||
|
device=$1
|
||||||
|
profile=$(nmcli -t -f DEVICE,UUID c show --active | grep "^$device:" | cut -d':' -f2)
|
||||||
|
[ "$profile" != "" ] || _fatal Failed to find nmcli profile
|
||||||
|
|
||||||
|
sudo nmcli con mod "$profile" con-name "$device"
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
_create_a_bridge_using_networkmanager(){
|
||||||
|
bridge=$1
|
||||||
|
|
||||||
|
profile=$(nmcli -t -f DEVICE,UUID c show --active | grep "^$PRIMARY_INTERFACE:" | cut -d':' -f2)
|
||||||
|
|
||||||
|
nmcli conn delete "$profile"
|
||||||
|
|
||||||
|
nmcli conn add type bridge ifname $bridge con-name $bridge || _fatal Fail to create a bridge using nmcli
|
||||||
|
|
||||||
|
|
||||||
|
nmcli con add type bridge-slave ifname $PRIMARY_INTERFACE master $bridge || _fatal Fail to create a slave-interface using nmcli
|
||||||
|
|
||||||
|
nmcli con up $bridge || _fatal Fail to set interface up using nmcli
|
||||||
|
|
||||||
|
sudo systemctl restart NetworkManager.service
|
||||||
|
# todo: use a check loop until connection with a timeout
|
||||||
|
#sleep 10
|
||||||
|
#ping nationtech.io | _fatal Internet connection lost
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
_delete_a_bridge_using_networkmanager(){
|
||||||
|
device=$1
|
||||||
|
|
||||||
|
nmcli conn delete bridge-slave-$PRIMARY_INTERFACE
|
||||||
|
nmcli conn delete $device
|
||||||
|
|
||||||
|
nmcli con add type ethernet ifname $PRIMARY_INTERFACE con-name $PRIMARY_INTERFACE autoconnect yes ipv4.method auto ipv6.method ignore
|
||||||
|
|
||||||
|
nmcli conn up "$PRIMARY_INTERFACE"
|
||||||
|
sudo systemctl restart NetworkManager.service
|
||||||
|
|
||||||
|
# todo: use a check loop until connection with a timeout
|
||||||
|
#sleep 10
|
||||||
|
#ping nationtech.io | _fatal Internet connection lost
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
_create_a_bridge(){
|
||||||
|
|
||||||
|
bridge=$1
|
||||||
|
|
||||||
|
[ $USE_NETWORKMANAGER = 0 ] | _fatal "Only NetworkManager is implemented"
|
||||||
|
|
||||||
|
_create_a_bridge_using_networkmanager $bridge
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
_setup_a_bridge(){
|
||||||
|
|
||||||
|
$bridge
|
||||||
|
|
||||||
|
bridge_exist=1
|
||||||
|
bridge_is_up=1
|
||||||
|
bridge_has_ip=1
|
||||||
|
bridge_has_route=1
|
||||||
|
bridge_is_working=1
|
||||||
|
|
||||||
|
_is_a_bridge $bridge && bridge_exists=0 || _create_a_bridge $bridge || _fatal Fail to create a bridge
|
||||||
|
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
_get_networkmanager_profile_from_device(){
|
||||||
|
|
||||||
|
device=$1
|
||||||
|
|
||||||
|
profile=$(nmcli -t -f DEVICE,NAME c show --active | grep "^$device:" | cut -d':' -f2)
|
||||||
|
|
||||||
|
[ "$profile" != "" ] || _fatal Fail to retreive nmcli profile
|
||||||
|
|
||||||
|
echo "$profile"
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
_find_primary_interface(){
|
||||||
|
PRIMARY_INTERFACE=$(ip route | grep '^default' | sed 's/ dev /!/g' | cut -d'!' -f 2 | awk '{ print $1 }' )
|
||||||
|
[ "$PRIMARY_INTERFACE" != "" ] || _fatal Fail to find the primary interface
|
||||||
|
}
|
||||||
|
|
||||||
|
_find_used_network_manager(){
|
||||||
|
|
||||||
|
_is_service_used NetworkManager.service && USE_NETWORKMANAGER=0 || USE_NETWORKMANAGER=1
|
||||||
|
_is_service_used systemd-networkd.service && USE_SYSTEMD_NETWORKD=0 || USE_SYSTEMD_NETWORKD=1
|
||||||
|
_is_service_used dhcpd.service && USE_DHCPD=0 || USE_DHCPD=1
|
||||||
|
|
||||||
|
USE_MANUAL=0 && [ $USE_NETWORKMANAGER = 0 ] || [ $USE_SYSTEMD_NETWORKD = 0 ] || [ $USE_DHCPD = 0 ] || USE_MANUAL=0
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
_connect(){
|
||||||
|
|
||||||
|
_find_used_network_manager
|
||||||
|
_find_primary_interface
|
||||||
|
|
||||||
|
_setup_a_bridge $_HVE_WAN_BRIDGE
|
||||||
|
_setup_a_bridge $_HVE_LAN_BRIDGE
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
case "${1:-}" in
|
||||||
|
|
||||||
|
"")
|
||||||
|
_short_help
|
||||||
|
;;
|
||||||
|
-h|--help)
|
||||||
|
_short_help
|
||||||
|
_extra_help
|
||||||
|
;;
|
||||||
|
connect)
|
||||||
|
_connect "${@:2}"
|
||||||
|
;;
|
||||||
|
disconnect)
|
||||||
|
_disconnect "${@:2}"
|
||||||
|
;;
|
||||||
|
|
||||||
|
dev)
|
||||||
|
|
||||||
|
"${@:2}"
|
||||||
|
;;
|
||||||
|
*)
|
||||||
|
_warn "Unknown COMMAND '$1'"
|
||||||
|
exit 1
|
||||||
|
;;
|
||||||
|
esac
|
||||||
|
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
[ "$0" != "${BASH_SOURCE}" ] || harmony-ve-network "${@}"
|
||||||
|
|
||||||
150
examples/opnsense/scripts/harmony-ve-opnsense-img
Executable file
150
examples/opnsense/scripts/harmony-ve-opnsense-img
Executable file
@@ -0,0 +1,150 @@
|
|||||||
|
|
||||||
|
#! /bin/bash
|
||||||
|
|
||||||
|
harmony-ve-opnsense-img()(
|
||||||
|
|
||||||
|
set -eu
|
||||||
|
|
||||||
|
[ "${1:-}" != "-d" ] || { set -x ; shift ; }
|
||||||
|
trap '[ "$?" = "0" ] || >&2 echo ABNORMAL TERMINATION' EXIT
|
||||||
|
|
||||||
|
SCRIPTS_DIR=$(readlink -f "$(dirname "${BASH_SOURCE}")")
|
||||||
|
. "${SCRIPTS_DIR}/common"
|
||||||
|
. "${SCRIPTS_DIR}/default-env-var"
|
||||||
|
|
||||||
|
export PATH=$SCRIPTS_DIR:$PATH
|
||||||
|
|
||||||
|
_short_help(){
|
||||||
|
|
||||||
|
cat <<-EOM
|
||||||
|
|
||||||
|
NAME
|
||||||
|
|
||||||
|
harmony-ve-opnsense-img
|
||||||
|
|
||||||
|
DESCRIPTION
|
||||||
|
|
||||||
|
Manage opnsense images needed by Harmony Virtual Execution Environment
|
||||||
|
SYNOPSYS
|
||||||
|
|
||||||
|
harmony-vee-opnsense-img [GLOBAL_OPTIONS] COMMAND [OPTIONS]
|
||||||
|
|
||||||
|
harmony-ve-opnsense-img list
|
||||||
|
harmony-ve-opnsense-img init NAME VERSION
|
||||||
|
harmony-ve-opnsense-img start NAME
|
||||||
|
harmony-ve-opnsense-img update NAME
|
||||||
|
harmony-ve-opnsense-img delete [NAME]
|
||||||
|
|
||||||
|
|
||||||
|
EOM
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
_extra_help(){
|
||||||
|
|
||||||
|
cat <<-EOM
|
||||||
|
|
||||||
|
GLOBAL_OPTIONS
|
||||||
|
|
||||||
|
-d Debug mode.
|
||||||
|
|
||||||
|
WARNINGS
|
||||||
|
|
||||||
|
This script is experimetal. Use with caution.
|
||||||
|
EOM
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
# assertions
|
||||||
|
|
||||||
|
_assert_image_do_not_exists(){
|
||||||
|
name=$1
|
||||||
|
[ ! -d "$_HVE_OPNSENSE_IMG/$name" ] || _fatal "An image '$name' already exists"
|
||||||
|
}
|
||||||
|
_assert_image_exists(){
|
||||||
|
name=$1
|
||||||
|
[ -d "$_HVE_OPNSENSE_IMG/$name" ] || _fatal "Image '$name' do not exists"
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
# Implement functions
|
||||||
|
_init(){
|
||||||
|
name=$1
|
||||||
|
version=${2}
|
||||||
|
|
||||||
|
_assert_image_do_not_exists $name
|
||||||
|
mkdir -p "${_HVE_OPNSENSE_IMG}/$name"
|
||||||
|
|
||||||
|
harmony-ve opnsense-img-src download $version
|
||||||
|
|
||||||
|
sudo qemu-img convert -f raw -O qcow2 "$_HVE_OPNSENSE_SRC_IMG/OPNsense-${version}-nano-amd64.img" "/var/lib/libvirt/images/opnsense-$name.qcow2"
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
cat <<-EOM > "$_HVE_OPNSENSE_IMG/$name/$name.sh"
|
||||||
|
|
||||||
|
virt-install \
|
||||||
|
--name $name \
|
||||||
|
--os-variant freebsd14.0 \
|
||||||
|
--vcpus=2,sockets=1,cores=2,threads=1 \
|
||||||
|
--memory 4096 \
|
||||||
|
--disk path="/var/lib/libvirt/images/opnsense-$name.qcow2" \
|
||||||
|
--network bridge=${_HVE_WAN_BRIDGE},model=virtio \
|
||||||
|
--network bridge=${_HVE_LAN_BRIDGE},model=virtio \
|
||||||
|
--graphics none \
|
||||||
|
--console pty,target_type=serial \
|
||||||
|
--import \
|
||||||
|
--autostart
|
||||||
|
EOM
|
||||||
|
|
||||||
|
chmod +x "$_HVE_OPNSENSE_IMG/$name/$name.sh"
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
_start(){
|
||||||
|
name=$1
|
||||||
|
_assert_image_exists $name
|
||||||
|
|
||||||
|
"$_HVE_OPNSENSE_IMG/$name/$name.sh"
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
case "${1:-}" in
|
||||||
|
|
||||||
|
"")
|
||||||
|
_short_help
|
||||||
|
;;
|
||||||
|
-h|--help)
|
||||||
|
_short_help
|
||||||
|
_extra_help
|
||||||
|
;;
|
||||||
|
# Commands entrypoints
|
||||||
|
init)
|
||||||
|
_init "${@:2}"
|
||||||
|
;;
|
||||||
|
start)
|
||||||
|
_start "${@:2}"
|
||||||
|
;;
|
||||||
|
delete)
|
||||||
|
rm -r ${_HVE_OPNSENSE_IMG}/"$2"
|
||||||
|
;;
|
||||||
|
ls|list)
|
||||||
|
ls ${_HVE_OPNSENSE_IMG} | cat
|
||||||
|
;;
|
||||||
|
show)
|
||||||
|
ls ${_HVE_OPNSENSE_IMG}/"$2" | cat
|
||||||
|
;;
|
||||||
|
*)
|
||||||
|
_warn "Unknown COMMAND '$1'"
|
||||||
|
exit 1
|
||||||
|
;;
|
||||||
|
esac
|
||||||
|
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
[ "$0" != "${BASH_SOURCE}" ] || harmony-ve-opnsense-img "${@}"
|
||||||
|
|
||||||
310
examples/opnsense/scripts/harmony-ve-opnsense-img-src
Executable file
310
examples/opnsense/scripts/harmony-ve-opnsense-img-src
Executable file
@@ -0,0 +1,310 @@
|
|||||||
|
|
||||||
|
#! /bin/bash
|
||||||
|
|
||||||
|
harmony-ve-opnsense-img-src()(
|
||||||
|
|
||||||
|
set -eu
|
||||||
|
|
||||||
|
[ "${1:-}" != "-d" ] || { set -x ; shift ; }
|
||||||
|
trap '[ "$?" = "0" ] || >&2 echo ABNORMAL TERMINATION' EXIT
|
||||||
|
|
||||||
|
SCRIPTS_DIR=$(readlink -f "$(dirname "${BASH_SOURCE}")")
|
||||||
|
. "${SCRIPTS_DIR}/common"
|
||||||
|
. "${SCRIPTS_DIR}/default-env-var"
|
||||||
|
|
||||||
|
|
||||||
|
_short_help(){
|
||||||
|
|
||||||
|
cat <<-EOM
|
||||||
|
|
||||||
|
NAME
|
||||||
|
|
||||||
|
harmony-ve-opnsense-img-src
|
||||||
|
|
||||||
|
DESCRIPTION
|
||||||
|
|
||||||
|
Manage opnsense source images needed by Harmony Virtual Execution Environment
|
||||||
|
|
||||||
|
SYNOPSYS
|
||||||
|
|
||||||
|
harmony-vee-opnsense-img-src [GLOBAL_OPTIONS] COMMAND [OPTIONS]
|
||||||
|
|
||||||
|
harmony-vee-opnsense-img-src list [--remote]
|
||||||
|
harmony-vee-opnsense-img-src download [VERSION]
|
||||||
|
harmony-vee-opnsense-img-src check [VERSION]
|
||||||
|
harmony-vee-opnsense-img-src delete [VERSION]
|
||||||
|
|
||||||
|
EOM
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
_extra_help(){
|
||||||
|
|
||||||
|
cat <<-EOM
|
||||||
|
|
||||||
|
GLOBAL_OPTIONS
|
||||||
|
|
||||||
|
-d Debug mode.
|
||||||
|
|
||||||
|
WARNINGS
|
||||||
|
|
||||||
|
This script is experimetal. Use with caution.
|
||||||
|
|
||||||
|
DETAILS
|
||||||
|
|
||||||
|
- for 'list', show local images available
|
||||||
|
- for 'list --remote', show available upstream images
|
||||||
|
- for 'download', when no VERSION is specified, use the latest
|
||||||
|
- for 'delete', when no VERSION is specified, delete all image
|
||||||
|
- use the 'nano' flavor
|
||||||
|
EOM
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
# Implement functions
|
||||||
|
_parse_version_from_image_file_string(){
|
||||||
|
#https://pkg.opnsense.org/releases/25.7/OPNsense-25.7-nano-amd64.img.bz2
|
||||||
|
echo "$1" | cut -d '/' -f 5
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
_list_local_images(){
|
||||||
|
ls "${_HVE_OPNSENSE_SRC_IMG}" | grep "OPNsense-" | grep "\-nano\-amd64\.img" | cut -d'-' -f 2 | sort -u -r
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
_list_remote_images(){
|
||||||
|
curl -L -s "${_HVE_OPNSENSE_URL}" | sed 's/</\n</g' | grep href | grep 2 | cut -d'>' -f 2 | cut -d '/' -f 1 | sort -r
|
||||||
|
}
|
||||||
|
|
||||||
|
_latest_version(){
|
||||||
|
_list_remote_images | head -n 1
|
||||||
|
}
|
||||||
|
|
||||||
|
_is_downloaded(){
|
||||||
|
version=$1
|
||||||
|
name="${_HVE_OPNSENSE_SRC_IMG}/OPNsense-${version}-nano-amd64.img"
|
||||||
|
[ -f "$name" ] && return 0 || return 1
|
||||||
|
}
|
||||||
|
|
||||||
|
_is_valid_version(){
|
||||||
|
version=${1}
|
||||||
|
matched_version=$(_list_remote_images | grep "$version")
|
||||||
|
[ "$matched_version" != "" ] && return 0 || return 1
|
||||||
|
}
|
||||||
|
|
||||||
|
_download_img(){
|
||||||
|
version=$1
|
||||||
|
|
||||||
|
_download_crypto_files $version
|
||||||
|
|
||||||
|
|
||||||
|
name="OPNsense-${version}-nano-amd64.img"
|
||||||
|
compressed_name=$name.bz2
|
||||||
|
|
||||||
|
_is_downloaded $version && {
|
||||||
|
_warn "Image '$name' is already downloaded"
|
||||||
|
} || {
|
||||||
|
url=$_HVE_OPNSENSE_URL/$version/$compressed_name
|
||||||
|
>&2 echo DOWNLOAD $url
|
||||||
|
wget -q -c "${url}"
|
||||||
|
|
||||||
|
_verify_image_checksum $version
|
||||||
|
|
||||||
|
>&2 echo DECOMPRESS $url
|
||||||
|
bzip2 -d $compressed_name
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
_compare_files_checksum(){
|
||||||
|
|
||||||
|
file1=$1
|
||||||
|
file2=$2
|
||||||
|
|
||||||
|
sha256_1=$(openssl sha256 $file1 | cut -d" " -f2)
|
||||||
|
sha256_2=$(openssl sha256 $file2 | cut -d" " -f2)
|
||||||
|
|
||||||
|
[ "$sha256_1" = "$sha256_2" ] || return 1
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
_download_crypto_files(){
|
||||||
|
# see: https://docs.opnsense.org/manual/install.html#download-and-verification
|
||||||
|
|
||||||
|
version=$1
|
||||||
|
|
||||||
|
# download multiple pubkeys from different server
|
||||||
|
pubkey="OPNsense-${version}.pub"
|
||||||
|
|
||||||
|
rm -f $pubkey $pubkey.sig $pubkey.alt1 $pubkey.alt2
|
||||||
|
|
||||||
|
url=$_HVE_OPNSENSE_URL/$version/$pubkey
|
||||||
|
wget -q -c "${url}"
|
||||||
|
|
||||||
|
# failing:
|
||||||
|
wget -q -c "${url}.sig"
|
||||||
|
rm -f /tmp/file.sig
|
||||||
|
openssl base64 -d -in $pubkey.sig -out /tmp/file.sig
|
||||||
|
openssl dgst -sha256 -verify $pubkey -signature /tmp/file.sig $pubkey || _fatal "Can't verify the signature of the public key"
|
||||||
|
|
||||||
|
url_alt1=$_HVE_OPNSENSE_URL_ALT1/$version/$pubkey
|
||||||
|
wget -q -c -O "$pubkey.alt1" "${url_alt1}"
|
||||||
|
|
||||||
|
url_alt2=$_HVE_OPNSENSE_URL_ALT2/$version/$pubkey
|
||||||
|
wget -q -c -O "$pubkey.alt2" "${url_alt2}"
|
||||||
|
|
||||||
|
_compare_files_checksum $pubkey $pubkey.alt1 || _fatal "Fail to compare pubkeys" ;
|
||||||
|
|
||||||
|
_compare_files_checksum $pubkey $pubkey.alt2 || _fatal "Fail to compare pubkeys" ;
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
img_sig="OPNsense-${version}-nano-amd64.img.sig"
|
||||||
|
|
||||||
|
sha256_name="OPNsense-${version}-checksums-amd64.sha256"
|
||||||
|
sha256_sig=$sha256_name.sig
|
||||||
|
|
||||||
|
[ ! -f "$img_sig" ] || rm $img_sig
|
||||||
|
[ ! -f "$sha256_name" ] || rm $sha256_name
|
||||||
|
[ ! -f "$sha256_sig" ] || rm $sha256_sig
|
||||||
|
|
||||||
|
|
||||||
|
for file in $img_sig $sha256_name $sha256_sig;
|
||||||
|
do
|
||||||
|
url=$_HVE_OPNSENSE_URL/$version/$file
|
||||||
|
wget -q -c "${url}"
|
||||||
|
done
|
||||||
|
|
||||||
|
|
||||||
|
rm -f /tmp/file.sig
|
||||||
|
openssl base64 -d -in $sha256_sig -out /tmp/file.sig
|
||||||
|
openssl dgst -sha256 -verify $pubkey -signature /tmp/file.sig $sha256_name || _fatal "Can't verify the signature of the checksum file"
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
_download(){
|
||||||
|
|
||||||
|
version=${1:-}
|
||||||
|
[ "${version:-}" != "" ] || _fatal "Must pass a VERSION for downloading"
|
||||||
|
|
||||||
|
_is_valid_version $version || _fatal "'$version' is not a valid version number"
|
||||||
|
|
||||||
|
|
||||||
|
_download_img ${version}
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
_verify_image_checksum(){
|
||||||
|
|
||||||
|
version=$1
|
||||||
|
name="OPNsense-${version}-nano-amd64.img.bz2"
|
||||||
|
sha256_file="OPNsense-${version}-checksums-amd64.sha256"
|
||||||
|
|
||||||
|
sha256=$(cat $sha256_file | grep "$name" | cut -d'=' -f 2 | tr -s [:space:])
|
||||||
|
|
||||||
|
echo "$sha256 $name" | sha256sum -c || _fatal "Checksum failed for '$name'"
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
_verify_image_signature(){
|
||||||
|
version=$1
|
||||||
|
|
||||||
|
# download multiple pubkeys from different server
|
||||||
|
pubkey="OPNsense-${version}.pub"
|
||||||
|
img_name="OPNsense-${version}-nano-amd64.img"
|
||||||
|
img_sig="${img_name}.sig"
|
||||||
|
|
||||||
|
|
||||||
|
rm -f /tmp/file.sig
|
||||||
|
openssl base64 -d -in $img_sig -out /tmp/file.sig
|
||||||
|
openssl dgst -sha256 -verify $pubkey -signature /tmp/file.sig $img_name || _fatal "Can't verify image signature"
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
_check(){
|
||||||
|
version=${1:-}
|
||||||
|
if [ "${version:-}" = "" ] ; then
|
||||||
|
for version in $(_list_local_images);
|
||||||
|
do
|
||||||
|
>&2 echo check $version
|
||||||
|
_download_crypto_files $version
|
||||||
|
_verify_image_signature $version
|
||||||
|
done
|
||||||
|
else
|
||||||
|
_download_crypto_files $version
|
||||||
|
_verify_image_signature $version
|
||||||
|
fi
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
_delete(){
|
||||||
|
version=${1:-}
|
||||||
|
if [ -z "${version:-1}" ]; then
|
||||||
|
_clear
|
||||||
|
rm -f *.img
|
||||||
|
else
|
||||||
|
rm -f *$version*.img
|
||||||
|
fi
|
||||||
|
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
_clear(){
|
||||||
|
rm -f *.pub *.sig *.bz2 *.alt1 *.alt2 *.sha256
|
||||||
|
}
|
||||||
|
|
||||||
|
case "${1:-}" in
|
||||||
|
|
||||||
|
"")
|
||||||
|
_short_help
|
||||||
|
;;
|
||||||
|
-h|--help)
|
||||||
|
_short_help
|
||||||
|
_extra_help
|
||||||
|
;;
|
||||||
|
ls|list)
|
||||||
|
|
||||||
|
if [ "${2:-}" == "" ]; then
|
||||||
|
_list_local_images
|
||||||
|
elif [ "${2:-}" == "--remote" ]; then
|
||||||
|
_list_remote_images
|
||||||
|
else
|
||||||
|
_warn "Unknown option '$2'"
|
||||||
|
fi
|
||||||
|
;;
|
||||||
|
download)
|
||||||
|
pushd "${_HVE_OPNSENSE_SRC_IMG}"
|
||||||
|
_download "${2:-"$(_latest_version)"}"
|
||||||
|
popd
|
||||||
|
;;
|
||||||
|
delete)
|
||||||
|
pushd "${_HVE_OPNSENSE_SRC_IMG}"
|
||||||
|
_delete "${@:2}"
|
||||||
|
popd
|
||||||
|
;;
|
||||||
|
check)
|
||||||
|
pushd "${_HVE_OPNSENSE_SRC_IMG}"
|
||||||
|
_check "${@:2}"
|
||||||
|
popd
|
||||||
|
;;
|
||||||
|
show)
|
||||||
|
ls $_HVE_OPNSENSE_SRC_IMG | cat
|
||||||
|
;;
|
||||||
|
clear)
|
||||||
|
pushd "${_HVE_OPNSENSE_SRC_IMG}"
|
||||||
|
_clear "${@:2}"
|
||||||
|
popd
|
||||||
|
;;
|
||||||
|
*)
|
||||||
|
_warn "Unknown COMMAND '$1'"
|
||||||
|
exit 1
|
||||||
|
;;
|
||||||
|
esac
|
||||||
|
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
[ "$0" != "${BASH_SOURCE}" ] || harmony-ve-opnsense-img-src "${@}"
|
||||||
|
|
||||||
77
examples/opnsense/scripts/harmony-ve-vm
Executable file
77
examples/opnsense/scripts/harmony-ve-vm
Executable file
@@ -0,0 +1,77 @@
|
|||||||
|
|
||||||
|
#! /bin/bash
|
||||||
|
|
||||||
|
harmony-ve-vm()(
|
||||||
|
|
||||||
|
set -eu
|
||||||
|
|
||||||
|
[ "${1:-}" != "-d" ] || { set -x ; shift ; }
|
||||||
|
trap '[ "$?" = "0" ] || >&2 echo ABNORMAL TERMINATION' EXIT
|
||||||
|
|
||||||
|
BASE_DIR=$(readlink -f "$(dirname "${BASH_SOURCE}")/..")
|
||||||
|
|
||||||
|
_short_help(){
|
||||||
|
|
||||||
|
cat <<-EOM
|
||||||
|
|
||||||
|
NAME
|
||||||
|
|
||||||
|
harmony-ve-mv
|
||||||
|
|
||||||
|
DESCRIPTION
|
||||||
|
|
||||||
|
Manage virtalized hosts (VM) dependencies by Harmony Virtual Execution Environment
|
||||||
|
|
||||||
|
SYNOPSYS
|
||||||
|
|
||||||
|
harmony-ve-vm [GLOBAL_OPTIONS] COMMAND [OPTIONS]
|
||||||
|
|
||||||
|
harmony-ve-vm list
|
||||||
|
harmony-ve-vm create
|
||||||
|
harmony-ve-vm start
|
||||||
|
harmony-ve-vm stop
|
||||||
|
harmony-ve-vm login
|
||||||
|
|
||||||
|
EOM
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
_extra_help(){
|
||||||
|
|
||||||
|
cat <<-EOM
|
||||||
|
|
||||||
|
GLOBAL_OPTIONS
|
||||||
|
|
||||||
|
-d Debug mode.
|
||||||
|
|
||||||
|
WARNINGS
|
||||||
|
|
||||||
|
This script is experimetal. Use with caution.
|
||||||
|
EOM
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
# Implement functions
|
||||||
|
|
||||||
|
case "${1:-}" in
|
||||||
|
|
||||||
|
"")
|
||||||
|
_short_help
|
||||||
|
;;
|
||||||
|
-h|--help)
|
||||||
|
_short_help
|
||||||
|
_extra_help
|
||||||
|
;;
|
||||||
|
# Commands entrypoints
|
||||||
|
*)
|
||||||
|
_warn "Unknown COMMAND '$1'"
|
||||||
|
exit 1
|
||||||
|
;;
|
||||||
|
esac
|
||||||
|
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
[ "$0" != "${BASH_SOURCE}" ] || harmony-ve-vm "${@}"
|
||||||
|
|
||||||
75
examples/opnsense/scripts/learn-harmony
Executable file
75
examples/opnsense/scripts/learn-harmony
Executable file
@@ -0,0 +1,75 @@
|
|||||||
|
#! /bin/bash
|
||||||
|
|
||||||
|
learn-harmony()(
|
||||||
|
|
||||||
|
set -eu
|
||||||
|
|
||||||
|
[ "${1:-}" != "-d" ] || { set -x ; shift ; }
|
||||||
|
trap '[ "$?" = "0" ] || >&2 echo ABNORMAL TERMINATION' EXIT
|
||||||
|
|
||||||
|
BASE_DIR=$(readlink -f "$(dirname "${BASH_SOURCE}")/..")
|
||||||
|
SCRIPTS_DIR=$(readlink -f "$(dirname "${BASH_SOURCE}")")
|
||||||
|
. "${SCRIPTS_DIR}/common"
|
||||||
|
export PATH=$SCRIPTS_DIR:$PATH
|
||||||
|
|
||||||
|
_short_help(){
|
||||||
|
|
||||||
|
cat <<-EOM
|
||||||
|
|
||||||
|
NAME
|
||||||
|
learn-harmony -- Harmony Learning Tool prototype
|
||||||
|
($(basename ${BASE_DIR}) example)
|
||||||
|
|
||||||
|
SYNOPSYS
|
||||||
|
|
||||||
|
learn-harmony [GLOBAL_OPTIONS] COMMAND [OPTIONS]
|
||||||
|
|
||||||
|
learn-harmony list # List learning steps
|
||||||
|
learn-harmony show STEP # Show instruction of step STEP
|
||||||
|
learn-harmony check [STEP] # Verify that your ready to begin step STEP+1
|
||||||
|
|
||||||
|
EOM
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
_extra_help(){
|
||||||
|
|
||||||
|
cat <<-EOM
|
||||||
|
|
||||||
|
GLOBAL_OPTIONS
|
||||||
|
|
||||||
|
-d Debug mode.
|
||||||
|
|
||||||
|
WARNINGS
|
||||||
|
|
||||||
|
This script is experimetal. Use with caution.
|
||||||
|
EOM
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
case "${1:-}" in
|
||||||
|
|
||||||
|
-h|--help|"")
|
||||||
|
_short_help
|
||||||
|
_extra_help
|
||||||
|
;;
|
||||||
|
ls|list)
|
||||||
|
echo "not implemented"
|
||||||
|
;;
|
||||||
|
show)
|
||||||
|
echo "not implemented"
|
||||||
|
;;
|
||||||
|
verify)
|
||||||
|
echo "not implemented"
|
||||||
|
;;
|
||||||
|
*)
|
||||||
|
_warn "Unknown COMMAND '$1'"
|
||||||
|
exit 1
|
||||||
|
;;
|
||||||
|
esac
|
||||||
|
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
[ "$0" != "${BASH_SOURCE}" ] || learn-harmony "${@}"
|
||||||
|
|
||||||
4
examples/opnsense/scripts/setup
Normal file
4
examples/opnsense/scripts/setup
Normal file
@@ -0,0 +1,4 @@
|
|||||||
|
#! /bin/bash
|
||||||
|
|
||||||
|
|
||||||
|
export PATH=$(readlink -f "$(dirname "${BASH_SOURCE}")"):"${PATH}"
|
||||||
@@ -1,111 +1,77 @@
|
|||||||
use std::{
|
|
||||||
net::{IpAddr, Ipv4Addr},
|
|
||||||
sync::Arc,
|
|
||||||
};
|
|
||||||
|
|
||||||
use cidr::Ipv4Cidr;
|
|
||||||
use harmony::{
|
use harmony::{
|
||||||
hardware::{HostCategory, Location, PhysicalHost, SwitchGroup},
|
config::secret::OPNSenseFirewallCredentials,
|
||||||
infra::opnsense::OPNSenseManagementInterface,
|
infra::opnsense::OPNSenseFirewall,
|
||||||
inventory::Inventory,
|
inventory::Inventory,
|
||||||
modules::{
|
modules::{dhcp::DhcpScore, opnsense::OPNsenseShellCommandScore},
|
||||||
dummy::{ErrorScore, PanicScore, SuccessScore},
|
topology::LogicalHost,
|
||||||
http::StaticFilesHttpScore,
|
|
||||||
okd::{dhcp::OKDDhcpScore, dns::OKDDnsScore, load_balancer::OKDLoadBalancerScore},
|
|
||||||
opnsense::OPNsenseShellCommandScore,
|
|
||||||
tftp::TftpScore,
|
|
||||||
},
|
|
||||||
topology::{LogicalHost, UnmanagedRouter},
|
|
||||||
};
|
};
|
||||||
use harmony_macros::{ip, mac_address};
|
use harmony_macros::{ip, ipv4};
|
||||||
use harmony_types::net::Url;
|
use harmony_secret::{Secret, SecretManager};
|
||||||
|
use serde::{Deserialize, Serialize};
|
||||||
|
|
||||||
#[tokio::main]
|
#[tokio::main]
|
||||||
async fn main() {
|
async fn main() {
|
||||||
let firewall = harmony::topology::LogicalHost {
|
let firewall = LogicalHost {
|
||||||
ip: ip!("192.168.5.229"),
|
ip: ip!("192.168.1.1"),
|
||||||
name: String::from("opnsense-1"),
|
name: String::from("opnsense-1"),
|
||||||
};
|
};
|
||||||
|
|
||||||
let opnsense = Arc::new(
|
let opnsense_auth = SecretManager::get_or_prompt::<OPNSenseFirewallCredentials>()
|
||||||
harmony::infra::opnsense::OPNSenseFirewall::new(firewall, None, "root", "opnsense").await,
|
.await
|
||||||
);
|
.expect("Failed to get credentials");
|
||||||
let lan_subnet = Ipv4Addr::new(10, 100, 8, 0);
|
|
||||||
let gateway_ipv4 = Ipv4Addr::new(10, 100, 8, 1);
|
|
||||||
let gateway_ip = IpAddr::V4(gateway_ipv4);
|
|
||||||
let topology = harmony::topology::HAClusterTopology {
|
|
||||||
domain_name: "demo.harmony.mcd".to_string(),
|
|
||||||
router: Arc::new(UnmanagedRouter::new(
|
|
||||||
gateway_ip,
|
|
||||||
Ipv4Cidr::new(lan_subnet, 24).unwrap(),
|
|
||||||
)),
|
|
||||||
load_balancer: opnsense.clone(),
|
|
||||||
firewall: opnsense.clone(),
|
|
||||||
tftp_server: opnsense.clone(),
|
|
||||||
http_server: opnsense.clone(),
|
|
||||||
dhcp_server: opnsense.clone(),
|
|
||||||
dns_server: opnsense.clone(),
|
|
||||||
control_plane: vec![LogicalHost {
|
|
||||||
ip: ip!("10.100.8.20"),
|
|
||||||
name: "cp0".to_string(),
|
|
||||||
}],
|
|
||||||
bootstrap_host: LogicalHost {
|
|
||||||
ip: ip!("10.100.8.20"),
|
|
||||||
name: "cp0".to_string(),
|
|
||||||
},
|
|
||||||
workers: vec![],
|
|
||||||
switch: vec![],
|
|
||||||
};
|
|
||||||
|
|
||||||
let inventory = Inventory {
|
let opnsense = OPNSenseFirewall::new(
|
||||||
location: Location::new(
|
firewall,
|
||||||
"232 des Éperviers, Wendake, Qc, G0A 4V0".to_string(),
|
None,
|
||||||
"wk".to_string(),
|
&opnsense_auth.username,
|
||||||
|
&opnsense_auth.password,
|
||||||
|
)
|
||||||
|
.await;
|
||||||
|
|
||||||
|
let dhcp_score = DhcpScore {
|
||||||
|
dhcp_range: (
|
||||||
|
ipv4!("192.168.1.100").into(),
|
||||||
|
ipv4!("192.168.1.150").into(),
|
||||||
),
|
),
|
||||||
switch: SwitchGroup::from([]),
|
host_binding: vec![],
|
||||||
firewall_mgmt: Box::new(OPNSenseManagementInterface::new()),
|
next_server: None,
|
||||||
storage_host: vec![],
|
boot_filename: None,
|
||||||
worker_host: vec![],
|
filename: None,
|
||||||
control_plane_host: vec![
|
filename64: None,
|
||||||
PhysicalHost::empty(HostCategory::Server)
|
filenameipxe: Some("filename.ipxe".to_string()),
|
||||||
.mac_address(mac_address!("08:00:27:62:EC:C3")),
|
domain: None,
|
||||||
],
|
|
||||||
};
|
};
|
||||||
|
// let dns_score = OKDDnsScore::new(&topology);
|
||||||
|
// let load_balancer_score = OKDLoadBalancerScore::new(&topology);
|
||||||
|
//
|
||||||
|
// let tftp_score = TftpScore::new(Url::LocalFolder("./data/watchguard/tftpboot".to_string()));
|
||||||
|
// let http_score = StaticFilesHttpScore {
|
||||||
|
// folder_to_serve: Some(Url::LocalFolder(
|
||||||
|
// "./data/watchguard/pxe-http-files".to_string(),
|
||||||
|
// )),
|
||||||
|
// files: vec![],
|
||||||
|
// remote_path: None,
|
||||||
|
// };
|
||||||
|
let opnsense_config = opnsense.get_opnsense_config();
|
||||||
|
|
||||||
// TODO regroup smaller scores in a larger one such as this
|
harmony_cli::run(
|
||||||
// let okd_boostrap_preparation();
|
Inventory::autoload(),
|
||||||
|
opnsense,
|
||||||
let dhcp_score = OKDDhcpScore::new(&topology, &inventory);
|
|
||||||
let dns_score = OKDDnsScore::new(&topology);
|
|
||||||
let load_balancer_score = OKDLoadBalancerScore::new(&topology);
|
|
||||||
|
|
||||||
let tftp_score = TftpScore::new(Url::LocalFolder("./data/watchguard/tftpboot".to_string()));
|
|
||||||
let http_score = StaticFilesHttpScore {
|
|
||||||
folder_to_serve: Some(Url::LocalFolder(
|
|
||||||
"./data/watchguard/pxe-http-files".to_string(),
|
|
||||||
)),
|
|
||||||
files: vec![],
|
|
||||||
remote_path: None,
|
|
||||||
};
|
|
||||||
|
|
||||||
harmony_tui::run(
|
|
||||||
inventory,
|
|
||||||
topology,
|
|
||||||
vec![
|
vec![
|
||||||
Box::new(dns_score),
|
|
||||||
Box::new(dhcp_score),
|
Box::new(dhcp_score),
|
||||||
Box::new(load_balancer_score),
|
|
||||||
Box::new(tftp_score),
|
|
||||||
Box::new(http_score),
|
|
||||||
Box::new(OPNsenseShellCommandScore {
|
Box::new(OPNsenseShellCommandScore {
|
||||||
opnsense: opnsense.get_opnsense_config(),
|
opnsense: opnsense_config,
|
||||||
command: "touch /tmp/helloharmonytouching".to_string(),
|
command: "touch /tmp/helloharmonytouching_2".to_string(),
|
||||||
}),
|
}),
|
||||||
Box::new(SuccessScore {}),
|
|
||||||
Box::new(ErrorScore {}),
|
|
||||||
Box::new(PanicScore {}),
|
|
||||||
],
|
],
|
||||||
|
None,
|
||||||
)
|
)
|
||||||
.await
|
.await
|
||||||
.unwrap();
|
.unwrap();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[derive(Secret, Serialize, Deserialize, Debug)]
|
||||||
|
pub struct BrocadeSwitchAuth {
|
||||||
|
pub username: String,
|
||||||
|
pub password: String,
|
||||||
|
}
|
||||||
|
|||||||
11
examples/remove_rook_osd/Cargo.toml
Normal file
11
examples/remove_rook_osd/Cargo.toml
Normal file
@@ -0,0 +1,11 @@
|
|||||||
|
[package]
|
||||||
|
name = "example-remove-rook-osd"
|
||||||
|
edition = "2024"
|
||||||
|
version.workspace = true
|
||||||
|
readme.workspace = true
|
||||||
|
license.workspace = true
|
||||||
|
|
||||||
|
[dependencies]
|
||||||
|
harmony = { version = "0.1.0", path = "../../harmony" }
|
||||||
|
harmony_cli = { version = "0.1.0", path = "../../harmony_cli" }
|
||||||
|
tokio.workspace = true
|
||||||
18
examples/remove_rook_osd/src/main.rs
Normal file
18
examples/remove_rook_osd/src/main.rs
Normal file
@@ -0,0 +1,18 @@
|
|||||||
|
use harmony::{
|
||||||
|
inventory::Inventory, modules::storage::ceph::ceph_remove_osd_score::CephRemoveOsd,
|
||||||
|
topology::K8sAnywhereTopology,
|
||||||
|
};
|
||||||
|
|
||||||
|
#[tokio::main]
|
||||||
|
async fn main() {
|
||||||
|
let ceph_score = CephRemoveOsd {
|
||||||
|
osd_deployment_name: "rook-ceph-osd-2".to_string(),
|
||||||
|
rook_ceph_namespace: "rook-ceph".to_string(),
|
||||||
|
};
|
||||||
|
|
||||||
|
let topology = K8sAnywhereTopology::from_env();
|
||||||
|
let inventory = Inventory::autoload();
|
||||||
|
harmony_cli::run(inventory, topology, vec![Box::new(ceph_score)], None)
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
}
|
||||||
@@ -3,7 +3,7 @@ use harmony::{
|
|||||||
modules::{
|
modules::{
|
||||||
application::{
|
application::{
|
||||||
ApplicationScore, RustWebFramework, RustWebapp,
|
ApplicationScore, RustWebFramework, RustWebapp,
|
||||||
features::{PackagingDeployment, rhob_monitoring::Monitoring},
|
features::{Monitoring, PackagingDeployment},
|
||||||
},
|
},
|
||||||
monitoring::alert_channel::discord_alert_channel::DiscordWebhook,
|
monitoring::alert_channel::discord_alert_channel::DiscordWebhook,
|
||||||
},
|
},
|
||||||
|
|||||||
@@ -77,6 +77,9 @@ harmony_secret = { path = "../harmony_secret" }
|
|||||||
askama.workspace = true
|
askama.workspace = true
|
||||||
sqlx.workspace = true
|
sqlx.workspace = true
|
||||||
inquire.workspace = true
|
inquire.workspace = true
|
||||||
|
brocade = { path = "../brocade" }
|
||||||
|
option-ext = "0.2.0"
|
||||||
|
|
||||||
[dev-dependencies]
|
[dev-dependencies]
|
||||||
pretty_assertions.workspace = true
|
pretty_assertions.workspace = true
|
||||||
|
assertor.workspace = true
|
||||||
|
|||||||
@@ -30,6 +30,7 @@ pub enum InterpretName {
|
|||||||
Lamp,
|
Lamp,
|
||||||
ApplicationMonitoring,
|
ApplicationMonitoring,
|
||||||
K8sPrometheusCrdAlerting,
|
K8sPrometheusCrdAlerting,
|
||||||
|
CephRemoveOsd,
|
||||||
DiscoverInventoryAgent,
|
DiscoverInventoryAgent,
|
||||||
CephClusterHealth,
|
CephClusterHealth,
|
||||||
Custom(&'static str),
|
Custom(&'static str),
|
||||||
@@ -61,6 +62,7 @@ impl std::fmt::Display for InterpretName {
|
|||||||
InterpretName::Lamp => f.write_str("LAMP"),
|
InterpretName::Lamp => f.write_str("LAMP"),
|
||||||
InterpretName::ApplicationMonitoring => f.write_str("ApplicationMonitoring"),
|
InterpretName::ApplicationMonitoring => f.write_str("ApplicationMonitoring"),
|
||||||
InterpretName::K8sPrometheusCrdAlerting => f.write_str("K8sPrometheusCrdAlerting"),
|
InterpretName::K8sPrometheusCrdAlerting => f.write_str("K8sPrometheusCrdAlerting"),
|
||||||
|
InterpretName::CephRemoveOsd => f.write_str("CephRemoveOsd"),
|
||||||
InterpretName::DiscoverInventoryAgent => f.write_str("DiscoverInventoryAgent"),
|
InterpretName::DiscoverInventoryAgent => f.write_str("DiscoverInventoryAgent"),
|
||||||
InterpretName::CephClusterHealth => f.write_str("CephClusterHealth"),
|
InterpretName::CephClusterHealth => f.write_str("CephClusterHealth"),
|
||||||
InterpretName::Custom(name) => f.write_str(name),
|
InterpretName::Custom(name) => f.write_str(name),
|
||||||
|
|||||||
@@ -67,16 +67,16 @@ impl<T: Topology> Maestro<T> {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn register_all(&mut self, mut scores: ScoreVec<T>) {
|
|
||||||
let mut score_mut = self.scores.write().expect("Should acquire lock");
|
|
||||||
score_mut.append(&mut scores);
|
|
||||||
}
|
|
||||||
|
|
||||||
fn is_topology_initialized(&self) -> bool {
|
fn is_topology_initialized(&self) -> bool {
|
||||||
self.topology_state.status == TopologyStatus::Success
|
self.topology_state.status == TopologyStatus::Success
|
||||||
|| self.topology_state.status == TopologyStatus::Noop
|
|| self.topology_state.status == TopologyStatus::Noop
|
||||||
}
|
}
|
||||||
|
|
||||||
|
pub fn register_all(&mut self, mut scores: ScoreVec<T>) {
|
||||||
|
let mut score_mut = self.scores.write().expect("Should acquire lock");
|
||||||
|
score_mut.append(&mut scores);
|
||||||
|
}
|
||||||
|
|
||||||
pub async fn interpret(&self, score: Box<dyn Score<T>>) -> Result<Outcome, InterpretError> {
|
pub async fn interpret(&self, score: Box<dyn Score<T>>) -> Result<Outcome, InterpretError> {
|
||||||
if !self.is_topology_initialized() {
|
if !self.is_topology_initialized() {
|
||||||
warn!(
|
warn!(
|
||||||
|
|||||||
@@ -1,33 +1,28 @@
|
|||||||
use async_trait::async_trait;
|
use async_trait::async_trait;
|
||||||
use harmony_macros::ip;
|
use harmony_macros::ip;
|
||||||
use harmony_types::net::MacAddress;
|
use harmony_types::{
|
||||||
use harmony_types::net::Url;
|
net::{MacAddress, Url},
|
||||||
|
switch::PortLocation,
|
||||||
|
};
|
||||||
|
use kube::api::ObjectMeta;
|
||||||
use log::debug;
|
use log::debug;
|
||||||
use log::info;
|
use log::info;
|
||||||
|
|
||||||
use crate::data::FileContent;
|
use crate::modules::okd::crd::nmstate::{self, NodeNetworkConfigurationPolicy};
|
||||||
use crate::executors::ExecutorError;
|
|
||||||
use crate::topology::PxeOptions;
|
use crate::topology::PxeOptions;
|
||||||
|
use crate::{data::FileContent, modules::okd::crd::nmstate::NMState};
|
||||||
|
use crate::{
|
||||||
|
executors::ExecutorError, modules::okd::crd::nmstate::NodeNetworkConfigurationPolicySpec,
|
||||||
|
};
|
||||||
|
|
||||||
use super::DHCPStaticEntry;
|
use super::{
|
||||||
use super::DhcpServer;
|
DHCPStaticEntry, DhcpServer, DnsRecord, DnsRecordType, DnsServer, Firewall, HostNetworkConfig,
|
||||||
use super::DnsRecord;
|
HttpServer, IpAddress, K8sclient, LoadBalancer, LoadBalancerService, LogicalHost,
|
||||||
use super::DnsRecordType;
|
PreparationError, PreparationOutcome, Router, Switch, SwitchClient, SwitchError, TftpServer,
|
||||||
use super::DnsServer;
|
Topology, k8s::K8sClient,
|
||||||
use super::Firewall;
|
};
|
||||||
use super::HttpServer;
|
|
||||||
use super::IpAddress;
|
|
||||||
use super::K8sclient;
|
|
||||||
use super::LoadBalancer;
|
|
||||||
use super::LoadBalancerService;
|
|
||||||
use super::LogicalHost;
|
|
||||||
use super::PreparationError;
|
|
||||||
use super::PreparationOutcome;
|
|
||||||
use super::Router;
|
|
||||||
use super::TftpServer;
|
|
||||||
|
|
||||||
use super::Topology;
|
use std::collections::BTreeMap;
|
||||||
use super::k8s::K8sClient;
|
|
||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
|
|
||||||
#[derive(Debug, Clone)]
|
#[derive(Debug, Clone)]
|
||||||
@@ -40,10 +35,11 @@ pub struct HAClusterTopology {
|
|||||||
pub tftp_server: Arc<dyn TftpServer>,
|
pub tftp_server: Arc<dyn TftpServer>,
|
||||||
pub http_server: Arc<dyn HttpServer>,
|
pub http_server: Arc<dyn HttpServer>,
|
||||||
pub dns_server: Arc<dyn DnsServer>,
|
pub dns_server: Arc<dyn DnsServer>,
|
||||||
|
pub switch_client: Arc<dyn SwitchClient>,
|
||||||
pub bootstrap_host: LogicalHost,
|
pub bootstrap_host: LogicalHost,
|
||||||
pub control_plane: Vec<LogicalHost>,
|
pub control_plane: Vec<LogicalHost>,
|
||||||
pub workers: Vec<LogicalHost>,
|
pub workers: Vec<LogicalHost>,
|
||||||
pub switch: Vec<LogicalHost>,
|
pub kubeconfig: Option<String>,
|
||||||
}
|
}
|
||||||
|
|
||||||
#[async_trait]
|
#[async_trait]
|
||||||
@@ -62,9 +58,17 @@ impl Topology for HAClusterTopology {
|
|||||||
#[async_trait]
|
#[async_trait]
|
||||||
impl K8sclient for HAClusterTopology {
|
impl K8sclient for HAClusterTopology {
|
||||||
async fn k8s_client(&self) -> Result<Arc<K8sClient>, String> {
|
async fn k8s_client(&self) -> Result<Arc<K8sClient>, String> {
|
||||||
Ok(Arc::new(
|
match &self.kubeconfig {
|
||||||
K8sClient::try_default().await.map_err(|e| e.to_string())?,
|
None => Ok(Arc::new(
|
||||||
))
|
K8sClient::try_default().await.map_err(|e| e.to_string())?,
|
||||||
|
)),
|
||||||
|
Some(kubeconfig) => {
|
||||||
|
let Some(client) = K8sClient::from_kubeconfig(&kubeconfig).await else {
|
||||||
|
return Err("Failed to create k8s client".to_string());
|
||||||
|
};
|
||||||
|
Ok(Arc::new(client))
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -89,6 +93,193 @@ impl HAClusterTopology {
|
|||||||
.to_string()
|
.to_string()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
async fn ensure_nmstate_operator_installed(&self) -> Result<(), String> {
|
||||||
|
let k8s_client = self.k8s_client().await?;
|
||||||
|
|
||||||
|
debug!("Installing NMState controller...");
|
||||||
|
k8s_client.apply_url(url::Url::parse("https://github.com/nmstate/kubernetes-nmstate/releases/download/v0.84.0/nmstate.io_nmstates.yaml
|
||||||
|
").unwrap(), Some("nmstate"))
|
||||||
|
.await
|
||||||
|
.map_err(|e| e.to_string())?;
|
||||||
|
|
||||||
|
debug!("Creating NMState namespace...");
|
||||||
|
k8s_client.apply_url(url::Url::parse("https://github.com/nmstate/kubernetes-nmstate/releases/download/v0.84.0/namespace.yaml
|
||||||
|
").unwrap(), Some("nmstate"))
|
||||||
|
.await
|
||||||
|
.map_err(|e| e.to_string())?;
|
||||||
|
|
||||||
|
debug!("Creating NMState service account...");
|
||||||
|
k8s_client.apply_url(url::Url::parse("https://github.com/nmstate/kubernetes-nmstate/releases/download/v0.84.0/service_account.yaml
|
||||||
|
").unwrap(), Some("nmstate"))
|
||||||
|
.await
|
||||||
|
.map_err(|e| e.to_string())?;
|
||||||
|
|
||||||
|
debug!("Creating NMState role...");
|
||||||
|
k8s_client.apply_url(url::Url::parse("https://github.com/nmstate/kubernetes-nmstate/releases/download/v0.84.0/role.yaml
|
||||||
|
").unwrap(), Some("nmstate"))
|
||||||
|
.await
|
||||||
|
.map_err(|e| e.to_string())?;
|
||||||
|
|
||||||
|
debug!("Creating NMState role binding...");
|
||||||
|
k8s_client.apply_url(url::Url::parse("https://github.com/nmstate/kubernetes-nmstate/releases/download/v0.84.0/role_binding.yaml
|
||||||
|
").unwrap(), Some("nmstate"))
|
||||||
|
.await
|
||||||
|
.map_err(|e| e.to_string())?;
|
||||||
|
|
||||||
|
debug!("Creating NMState operator...");
|
||||||
|
k8s_client.apply_url(url::Url::parse("https://github.com/nmstate/kubernetes-nmstate/releases/download/v0.84.0/operator.yaml
|
||||||
|
").unwrap(), Some("nmstate"))
|
||||||
|
.await
|
||||||
|
.map_err(|e| e.to_string())?;
|
||||||
|
|
||||||
|
k8s_client
|
||||||
|
.wait_until_deployment_ready("nmstate-operator", Some("nmstate"), None)
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
let nmstate = NMState {
|
||||||
|
metadata: ObjectMeta {
|
||||||
|
name: Some("nmstate".to_string()),
|
||||||
|
..Default::default()
|
||||||
|
},
|
||||||
|
..Default::default()
|
||||||
|
};
|
||||||
|
debug!("Creating NMState: {nmstate:#?}");
|
||||||
|
k8s_client
|
||||||
|
.apply(&nmstate, None)
|
||||||
|
.await
|
||||||
|
.map_err(|e| e.to_string())?;
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
fn get_next_bond_id(&self) -> u8 {
|
||||||
|
42 // FIXME: Find a better way to declare the bond id
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn configure_bond(&self, config: &HostNetworkConfig) -> Result<(), SwitchError> {
|
||||||
|
self.ensure_nmstate_operator_installed()
|
||||||
|
.await
|
||||||
|
.map_err(|e| {
|
||||||
|
SwitchError::new(format!(
|
||||||
|
"Can't configure bond, NMState operator not available: {e}"
|
||||||
|
))
|
||||||
|
})?;
|
||||||
|
|
||||||
|
let bond_config = self.create_bond_configuration(config);
|
||||||
|
debug!(
|
||||||
|
"Applying NMState bond config for host {}: {bond_config:#?}",
|
||||||
|
config.host_id
|
||||||
|
);
|
||||||
|
self.k8s_client()
|
||||||
|
.await
|
||||||
|
.unwrap()
|
||||||
|
.apply(&bond_config, None)
|
||||||
|
.await
|
||||||
|
.map_err(|e| SwitchError::new(format!("Failed to configure bond: {e}")))?;
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
fn create_bond_configuration(
|
||||||
|
&self,
|
||||||
|
config: &HostNetworkConfig,
|
||||||
|
) -> NodeNetworkConfigurationPolicy {
|
||||||
|
let host_name = &config.host_id;
|
||||||
|
let bond_id = self.get_next_bond_id();
|
||||||
|
let bond_name = format!("bond{bond_id}");
|
||||||
|
|
||||||
|
info!("Configuring bond '{bond_name}' for host '{host_name}'...");
|
||||||
|
|
||||||
|
let mut bond_mtu: Option<u32> = None;
|
||||||
|
let mut copy_mac_from: Option<String> = None;
|
||||||
|
let mut bond_ports = Vec::new();
|
||||||
|
let mut interfaces: Vec<nmstate::InterfaceSpec> = Vec::new();
|
||||||
|
|
||||||
|
for switch_port in &config.switch_ports {
|
||||||
|
let interface_name = switch_port.interface.name.clone();
|
||||||
|
|
||||||
|
interfaces.push(nmstate::InterfaceSpec {
|
||||||
|
name: interface_name.clone(),
|
||||||
|
description: Some(format!("Member of bond {bond_name}")),
|
||||||
|
r#type: "ethernet".to_string(),
|
||||||
|
state: "up".to_string(),
|
||||||
|
mtu: Some(switch_port.interface.mtu),
|
||||||
|
mac_address: Some(switch_port.interface.mac_address.to_string()),
|
||||||
|
ipv4: Some(nmstate::IpStackSpec {
|
||||||
|
enabled: Some(false),
|
||||||
|
..Default::default()
|
||||||
|
}),
|
||||||
|
ipv6: Some(nmstate::IpStackSpec {
|
||||||
|
enabled: Some(false),
|
||||||
|
..Default::default()
|
||||||
|
}),
|
||||||
|
link_aggregation: None,
|
||||||
|
..Default::default()
|
||||||
|
});
|
||||||
|
|
||||||
|
bond_ports.push(interface_name.clone());
|
||||||
|
|
||||||
|
// Use the first port's details for the bond mtu and mac address
|
||||||
|
if bond_mtu.is_none() {
|
||||||
|
bond_mtu = Some(switch_port.interface.mtu);
|
||||||
|
}
|
||||||
|
if copy_mac_from.is_none() {
|
||||||
|
copy_mac_from = Some(interface_name);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
interfaces.push(nmstate::InterfaceSpec {
|
||||||
|
name: bond_name.clone(),
|
||||||
|
description: Some(format!("Network bond for host {host_name}")),
|
||||||
|
r#type: "bond".to_string(),
|
||||||
|
state: "up".to_string(),
|
||||||
|
copy_mac_from,
|
||||||
|
ipv4: Some(nmstate::IpStackSpec {
|
||||||
|
dhcp: Some(true),
|
||||||
|
enabled: Some(true),
|
||||||
|
..Default::default()
|
||||||
|
}),
|
||||||
|
ipv6: Some(nmstate::IpStackSpec {
|
||||||
|
dhcp: Some(true),
|
||||||
|
autoconf: Some(true),
|
||||||
|
enabled: Some(true),
|
||||||
|
..Default::default()
|
||||||
|
}),
|
||||||
|
link_aggregation: Some(nmstate::BondSpec {
|
||||||
|
mode: "802.3ad".to_string(),
|
||||||
|
ports: bond_ports,
|
||||||
|
..Default::default()
|
||||||
|
}),
|
||||||
|
..Default::default()
|
||||||
|
});
|
||||||
|
|
||||||
|
NodeNetworkConfigurationPolicy {
|
||||||
|
metadata: ObjectMeta {
|
||||||
|
name: Some(format!("{host_name}-bond-config")),
|
||||||
|
..Default::default()
|
||||||
|
},
|
||||||
|
spec: NodeNetworkConfigurationPolicySpec {
|
||||||
|
node_selector: Some(BTreeMap::from([(
|
||||||
|
"kubernetes.io/hostname".to_string(),
|
||||||
|
host_name.to_string(),
|
||||||
|
)])),
|
||||||
|
desired_state: nmstate::DesiredStateSpec { interfaces },
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn configure_port_channel(&self, config: &HostNetworkConfig) -> Result<(), SwitchError> {
|
||||||
|
debug!("Configuring port channel: {config:#?}");
|
||||||
|
let switch_ports = config.switch_ports.iter().map(|s| s.port.clone()).collect();
|
||||||
|
|
||||||
|
self.switch_client
|
||||||
|
.configure_port_channel(&format!("Harmony_{}", config.host_id), switch_ports)
|
||||||
|
.await
|
||||||
|
.map_err(|e| SwitchError::new(format!("Failed to configure switch: {e}")))?;
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
pub fn autoload() -> Self {
|
pub fn autoload() -> Self {
|
||||||
let dummy_infra = Arc::new(DummyInfra {});
|
let dummy_infra = Arc::new(DummyInfra {});
|
||||||
let dummy_host = LogicalHost {
|
let dummy_host = LogicalHost {
|
||||||
@@ -97,6 +288,7 @@ impl HAClusterTopology {
|
|||||||
};
|
};
|
||||||
|
|
||||||
Self {
|
Self {
|
||||||
|
kubeconfig: None,
|
||||||
domain_name: "DummyTopology".to_string(),
|
domain_name: "DummyTopology".to_string(),
|
||||||
router: dummy_infra.clone(),
|
router: dummy_infra.clone(),
|
||||||
load_balancer: dummy_infra.clone(),
|
load_balancer: dummy_infra.clone(),
|
||||||
@@ -105,10 +297,10 @@ impl HAClusterTopology {
|
|||||||
tftp_server: dummy_infra.clone(),
|
tftp_server: dummy_infra.clone(),
|
||||||
http_server: dummy_infra.clone(),
|
http_server: dummy_infra.clone(),
|
||||||
dns_server: dummy_infra.clone(),
|
dns_server: dummy_infra.clone(),
|
||||||
|
switch_client: dummy_infra.clone(),
|
||||||
bootstrap_host: dummy_host,
|
bootstrap_host: dummy_host,
|
||||||
control_plane: vec![],
|
control_plane: vec![],
|
||||||
workers: vec![],
|
workers: vec![],
|
||||||
switch: vec![],
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -263,6 +455,27 @@ impl HttpServer for HAClusterTopology {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[async_trait]
|
||||||
|
impl Switch for HAClusterTopology {
|
||||||
|
async fn setup_switch(&self) -> Result<(), SwitchError> {
|
||||||
|
self.switch_client.setup().await?;
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn get_port_for_mac_address(
|
||||||
|
&self,
|
||||||
|
mac_address: &MacAddress,
|
||||||
|
) -> Result<Option<PortLocation>, SwitchError> {
|
||||||
|
let port = self.switch_client.find_port(mac_address).await?;
|
||||||
|
Ok(port)
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn configure_host_network(&self, config: &HostNetworkConfig) -> Result<(), SwitchError> {
|
||||||
|
self.configure_bond(config).await?;
|
||||||
|
self.configure_port_channel(config).await
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
#[derive(Debug)]
|
#[derive(Debug)]
|
||||||
pub struct DummyInfra;
|
pub struct DummyInfra;
|
||||||
|
|
||||||
@@ -332,8 +545,8 @@ impl DhcpServer for DummyInfra {
|
|||||||
}
|
}
|
||||||
async fn set_dhcp_range(
|
async fn set_dhcp_range(
|
||||||
&self,
|
&self,
|
||||||
start: &IpAddress,
|
_start: &IpAddress,
|
||||||
end: &IpAddress,
|
_end: &IpAddress,
|
||||||
) -> Result<(), ExecutorError> {
|
) -> Result<(), ExecutorError> {
|
||||||
unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA)
|
unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA)
|
||||||
}
|
}
|
||||||
@@ -449,3 +662,25 @@ impl DnsServer for DummyInfra {
|
|||||||
unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA)
|
unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[async_trait]
|
||||||
|
impl SwitchClient for DummyInfra {
|
||||||
|
async fn setup(&self) -> Result<(), SwitchError> {
|
||||||
|
unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA)
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn find_port(
|
||||||
|
&self,
|
||||||
|
_mac_address: &MacAddress,
|
||||||
|
) -> Result<Option<PortLocation>, SwitchError> {
|
||||||
|
unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA)
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn configure_port_channel(
|
||||||
|
&self,
|
||||||
|
_channel_name: &str,
|
||||||
|
_switch_ports: Vec<PortLocation>,
|
||||||
|
) -> Result<u8, SwitchError> {
|
||||||
|
unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|||||||
@@ -5,14 +5,16 @@ use k8s_openapi::{
|
|||||||
ClusterResourceScope, NamespaceResourceScope,
|
ClusterResourceScope, NamespaceResourceScope,
|
||||||
api::{
|
api::{
|
||||||
apps::v1::Deployment,
|
apps::v1::Deployment,
|
||||||
core::v1::{Pod, PodStatus},
|
core::v1::{Pod, ServiceAccount},
|
||||||
},
|
},
|
||||||
|
apimachinery::pkg::version::Info,
|
||||||
};
|
};
|
||||||
use kube::{
|
use kube::{
|
||||||
Client, Config, Error, Resource,
|
Client, Config, Discovery, Error, Resource,
|
||||||
api::{Api, AttachParams, DeleteParams, ListParams, Patch, PatchParams, ResourceExt},
|
api::{Api, AttachParams, DeleteParams, ListParams, Patch, PatchParams, ResourceExt},
|
||||||
config::{KubeConfigOptions, Kubeconfig},
|
config::{KubeConfigOptions, Kubeconfig},
|
||||||
core::ErrorResponse,
|
core::ErrorResponse,
|
||||||
|
discovery::{ApiCapabilities, Scope},
|
||||||
error::DiscoveryError,
|
error::DiscoveryError,
|
||||||
runtime::reflector::Lookup,
|
runtime::reflector::Lookup,
|
||||||
};
|
};
|
||||||
@@ -21,11 +23,12 @@ use kube::{
|
|||||||
api::{ApiResource, GroupVersionKind},
|
api::{ApiResource, GroupVersionKind},
|
||||||
runtime::wait::await_condition,
|
runtime::wait::await_condition,
|
||||||
};
|
};
|
||||||
use log::{debug, error, trace};
|
use log::{debug, error, info, trace, warn};
|
||||||
use serde::{Serialize, de::DeserializeOwned};
|
use serde::{Serialize, de::DeserializeOwned};
|
||||||
use serde_json::{Value, json};
|
use serde_json::json;
|
||||||
use similar::TextDiff;
|
use similar::TextDiff;
|
||||||
use tokio::{io::AsyncReadExt, time::sleep};
|
use tokio::{io::AsyncReadExt, time::sleep};
|
||||||
|
use url::Url;
|
||||||
|
|
||||||
#[derive(new, Clone)]
|
#[derive(new, Clone)]
|
||||||
pub struct K8sClient {
|
pub struct K8sClient {
|
||||||
@@ -59,6 +62,22 @@ impl K8sClient {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
pub async fn service_account_api(&self, namespace: &str) -> Api<ServiceAccount> {
|
||||||
|
let api: Api<ServiceAccount> = Api::namespaced(self.client.clone(), namespace);
|
||||||
|
api
|
||||||
|
}
|
||||||
|
|
||||||
|
pub async fn get_apiserver_version(&self) -> Result<Info, Error> {
|
||||||
|
let client: Client = self.client.clone();
|
||||||
|
let version_info: Info = client.apiserver_version().await?;
|
||||||
|
Ok(version_info)
|
||||||
|
}
|
||||||
|
|
||||||
|
pub async fn discovery(&self) -> Result<Discovery, Error> {
|
||||||
|
let discovery: Discovery = Discovery::new(self.client.clone()).run().await?;
|
||||||
|
Ok(discovery)
|
||||||
|
}
|
||||||
|
|
||||||
pub async fn get_resource_json_value(
|
pub async fn get_resource_json_value(
|
||||||
&self,
|
&self,
|
||||||
name: &str,
|
name: &str,
|
||||||
@@ -71,7 +90,8 @@ impl K8sClient {
|
|||||||
} else {
|
} else {
|
||||||
Api::default_namespaced_with(self.client.clone(), &gvk)
|
Api::default_namespaced_with(self.client.clone(), &gvk)
|
||||||
};
|
};
|
||||||
Ok(resource.get(name).await?)
|
|
||||||
|
resource.get(name).await
|
||||||
}
|
}
|
||||||
|
|
||||||
pub async fn get_deployment(
|
pub async fn get_deployment(
|
||||||
@@ -80,11 +100,15 @@ impl K8sClient {
|
|||||||
namespace: Option<&str>,
|
namespace: Option<&str>,
|
||||||
) -> Result<Option<Deployment>, Error> {
|
) -> Result<Option<Deployment>, Error> {
|
||||||
let deps: Api<Deployment> = if let Some(ns) = namespace {
|
let deps: Api<Deployment> = if let Some(ns) = namespace {
|
||||||
|
debug!("getting namespaced deployment");
|
||||||
Api::namespaced(self.client.clone(), ns)
|
Api::namespaced(self.client.clone(), ns)
|
||||||
} else {
|
} else {
|
||||||
|
debug!("getting default namespace deployment");
|
||||||
Api::default_namespaced(self.client.clone())
|
Api::default_namespaced(self.client.clone())
|
||||||
};
|
};
|
||||||
Ok(deps.get_opt(name).await?)
|
|
||||||
|
debug!("getting deployment {} in ns {}", name, namespace.unwrap());
|
||||||
|
deps.get_opt(name).await
|
||||||
}
|
}
|
||||||
|
|
||||||
pub async fn get_pod(&self, name: &str, namespace: Option<&str>) -> Result<Option<Pod>, Error> {
|
pub async fn get_pod(&self, name: &str, namespace: Option<&str>) -> Result<Option<Pod>, Error> {
|
||||||
@@ -93,7 +117,8 @@ impl K8sClient {
|
|||||||
} else {
|
} else {
|
||||||
Api::default_namespaced(self.client.clone())
|
Api::default_namespaced(self.client.clone())
|
||||||
};
|
};
|
||||||
Ok(pods.get_opt(name).await?)
|
|
||||||
|
pods.get_opt(name).await
|
||||||
}
|
}
|
||||||
|
|
||||||
pub async fn scale_deployment(
|
pub async fn scale_deployment(
|
||||||
@@ -114,7 +139,7 @@ impl K8sClient {
|
|||||||
}
|
}
|
||||||
});
|
});
|
||||||
let pp = PatchParams::default();
|
let pp = PatchParams::default();
|
||||||
let scale = Patch::Apply(&patch);
|
let scale = Patch::Merge(&patch);
|
||||||
deployments.patch_scale(name, &pp, &scale).await?;
|
deployments.patch_scale(name, &pp, &scale).await?;
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
@@ -136,9 +161,9 @@ impl K8sClient {
|
|||||||
|
|
||||||
pub async fn wait_until_deployment_ready(
|
pub async fn wait_until_deployment_ready(
|
||||||
&self,
|
&self,
|
||||||
name: String,
|
name: &str,
|
||||||
namespace: Option<&str>,
|
namespace: Option<&str>,
|
||||||
timeout: Option<u64>,
|
timeout: Option<Duration>,
|
||||||
) -> Result<(), String> {
|
) -> Result<(), String> {
|
||||||
let api: Api<Deployment>;
|
let api: Api<Deployment>;
|
||||||
|
|
||||||
@@ -148,9 +173,9 @@ impl K8sClient {
|
|||||||
api = Api::default_namespaced(self.client.clone());
|
api = Api::default_namespaced(self.client.clone());
|
||||||
}
|
}
|
||||||
|
|
||||||
let establish = await_condition(api, name.as_str(), conditions::is_deployment_completed());
|
let establish = await_condition(api, name, conditions::is_deployment_completed());
|
||||||
let t = timeout.unwrap_or(300);
|
let timeout = timeout.unwrap_or(Duration::from_secs(120));
|
||||||
let res = tokio::time::timeout(std::time::Duration::from_secs(t), establish).await;
|
let res = tokio::time::timeout(timeout, establish).await;
|
||||||
|
|
||||||
if res.is_ok() {
|
if res.is_ok() {
|
||||||
Ok(())
|
Ok(())
|
||||||
@@ -240,7 +265,7 @@ impl K8sClient {
|
|||||||
|
|
||||||
if let Some(s) = status.status {
|
if let Some(s) = status.status {
|
||||||
let mut stdout_buf = String::new();
|
let mut stdout_buf = String::new();
|
||||||
if let Some(mut stdout) = process.stdout().take() {
|
if let Some(mut stdout) = process.stdout() {
|
||||||
stdout
|
stdout
|
||||||
.read_to_string(&mut stdout_buf)
|
.read_to_string(&mut stdout_buf)
|
||||||
.await
|
.await
|
||||||
@@ -346,14 +371,14 @@ impl K8sClient {
|
|||||||
Ok(current) => {
|
Ok(current) => {
|
||||||
trace!("Received current value {current:#?}");
|
trace!("Received current value {current:#?}");
|
||||||
// The resource exists, so we calculate and display a diff.
|
// The resource exists, so we calculate and display a diff.
|
||||||
println!("\nPerforming dry-run for resource: '{}'", name);
|
println!("\nPerforming dry-run for resource: '{name}'");
|
||||||
let mut current_yaml = serde_yaml::to_value(¤t).unwrap_or_else(|_| {
|
let mut current_yaml = serde_yaml::to_value(¤t).unwrap_or_else(|_| {
|
||||||
panic!("Could not serialize current value : {current:#?}")
|
panic!("Could not serialize current value : {current:#?}")
|
||||||
});
|
});
|
||||||
if current_yaml.is_mapping() && current_yaml.get("status").is_some() {
|
if current_yaml.is_mapping() && current_yaml.get("status").is_some() {
|
||||||
let map = current_yaml.as_mapping_mut().unwrap();
|
let map = current_yaml.as_mapping_mut().unwrap();
|
||||||
let removed = map.remove_entry("status");
|
let removed = map.remove_entry("status");
|
||||||
trace!("Removed status {:?}", removed);
|
trace!("Removed status {removed:?}");
|
||||||
} else {
|
} else {
|
||||||
trace!(
|
trace!(
|
||||||
"Did not find status entry for current object {}/{}",
|
"Did not find status entry for current object {}/{}",
|
||||||
@@ -382,14 +407,14 @@ impl K8sClient {
|
|||||||
similar::ChangeTag::Insert => "+",
|
similar::ChangeTag::Insert => "+",
|
||||||
similar::ChangeTag::Equal => " ",
|
similar::ChangeTag::Equal => " ",
|
||||||
};
|
};
|
||||||
print!("{}{}", sign, change);
|
print!("{sign}{change}");
|
||||||
}
|
}
|
||||||
// In a dry run, we return the new resource state that would have been applied.
|
// In a dry run, we return the new resource state that would have been applied.
|
||||||
Ok(resource.clone())
|
Ok(resource.clone())
|
||||||
}
|
}
|
||||||
Err(Error::Api(ErrorResponse { code: 404, .. })) => {
|
Err(Error::Api(ErrorResponse { code: 404, .. })) => {
|
||||||
// The resource does not exist, so the "diff" is the entire new resource.
|
// The resource does not exist, so the "diff" is the entire new resource.
|
||||||
println!("\nPerforming dry-run for new resource: '{}'", name);
|
println!("\nPerforming dry-run for new resource: '{name}'");
|
||||||
println!(
|
println!(
|
||||||
"Resource does not exist. It would be created with the following content:"
|
"Resource does not exist. It would be created with the following content:"
|
||||||
);
|
);
|
||||||
@@ -398,14 +423,14 @@ impl K8sClient {
|
|||||||
|
|
||||||
// Print each line of the new resource with a '+' prefix.
|
// Print each line of the new resource with a '+' prefix.
|
||||||
for line in new_yaml.lines() {
|
for line in new_yaml.lines() {
|
||||||
println!("+{}", line);
|
println!("+{line}");
|
||||||
}
|
}
|
||||||
// In a dry run, we return the new resource state that would have been created.
|
// In a dry run, we return the new resource state that would have been created.
|
||||||
Ok(resource.clone())
|
Ok(resource.clone())
|
||||||
}
|
}
|
||||||
Err(e) => {
|
Err(e) => {
|
||||||
// Another API error occurred.
|
// Another API error occurred.
|
||||||
error!("Failed to get resource '{}': {}", name, e);
|
error!("Failed to get resource '{name}': {e}");
|
||||||
Err(e)
|
Err(e)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -420,7 +445,7 @@ impl K8sClient {
|
|||||||
where
|
where
|
||||||
K: Resource + Clone + std::fmt::Debug + DeserializeOwned + serde::Serialize,
|
K: Resource + Clone + std::fmt::Debug + DeserializeOwned + serde::Serialize,
|
||||||
<K as Resource>::Scope: ApplyStrategy<K>,
|
<K as Resource>::Scope: ApplyStrategy<K>,
|
||||||
<K as kube::Resource>::DynamicType: Default,
|
<K as Resource>::DynamicType: Default,
|
||||||
{
|
{
|
||||||
let mut result = Vec::new();
|
let mut result = Vec::new();
|
||||||
for r in resource.iter() {
|
for r in resource.iter() {
|
||||||
@@ -485,10 +510,7 @@ impl K8sClient {
|
|||||||
|
|
||||||
// 6. Apply the object to the cluster using Server-Side Apply.
|
// 6. Apply the object to the cluster using Server-Side Apply.
|
||||||
// This will create the resource if it doesn't exist, or update it if it does.
|
// This will create the resource if it doesn't exist, or update it if it does.
|
||||||
println!(
|
println!("Applying '{name}' in namespace '{namespace}'...",);
|
||||||
"Applying Argo Application '{}' in namespace '{}'...",
|
|
||||||
name, namespace
|
|
||||||
);
|
|
||||||
let patch_params = PatchParams::apply("harmony"); // Use a unique field manager name
|
let patch_params = PatchParams::apply("harmony"); // Use a unique field manager name
|
||||||
let result = api.patch(name, &patch_params, &Patch::Apply(&obj)).await?;
|
let result = api.patch(name, &patch_params, &Patch::Apply(&obj)).await?;
|
||||||
|
|
||||||
@@ -497,6 +519,51 @@ impl K8sClient {
|
|||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Apply a resource from a URL
|
||||||
|
///
|
||||||
|
/// It is the equivalent of `kubectl apply -f <url>`
|
||||||
|
pub async fn apply_url(&self, url: Url, ns: Option<&str>) -> Result<(), Error> {
|
||||||
|
let patch_params = PatchParams::apply("harmony");
|
||||||
|
let discovery = kube::Discovery::new(self.client.clone()).run().await?;
|
||||||
|
|
||||||
|
let yaml = reqwest::get(url)
|
||||||
|
.await
|
||||||
|
.expect("Could not get URL")
|
||||||
|
.text()
|
||||||
|
.await
|
||||||
|
.expect("Could not get content from URL");
|
||||||
|
|
||||||
|
for doc in multidoc_deserialize(&yaml).expect("failed to parse YAML from file") {
|
||||||
|
let obj: DynamicObject =
|
||||||
|
serde_yaml::from_value(doc).expect("cannot apply without valid YAML");
|
||||||
|
let namespace = obj.metadata.namespace.as_deref().or(ns);
|
||||||
|
let type_meta = obj
|
||||||
|
.types
|
||||||
|
.as_ref()
|
||||||
|
.expect("cannot apply object without valid TypeMeta");
|
||||||
|
let gvk = GroupVersionKind::try_from(type_meta)
|
||||||
|
.expect("cannot apply object without valid GroupVersionKind");
|
||||||
|
let name = obj.name_any();
|
||||||
|
|
||||||
|
if let Some((ar, caps)) = discovery.resolve_gvk(&gvk) {
|
||||||
|
let api = get_dynamic_api(ar, caps, self.client.clone(), namespace, false);
|
||||||
|
trace!(
|
||||||
|
"Applying {}: \n{}",
|
||||||
|
gvk.kind,
|
||||||
|
serde_yaml::to_string(&obj).expect("Failed to serialize YAML")
|
||||||
|
);
|
||||||
|
let data: serde_json::Value =
|
||||||
|
serde_json::to_value(&obj).expect("Failed to serialize JSON");
|
||||||
|
let _r = api.patch(&name, &patch_params, &Patch::Apply(data)).await?;
|
||||||
|
debug!("applied {} {}", gvk.kind, name);
|
||||||
|
} else {
|
||||||
|
warn!("Cannot apply document for unknown {gvk:?}");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
pub(crate) async fn from_kubeconfig(path: &str) -> Option<K8sClient> {
|
pub(crate) async fn from_kubeconfig(path: &str) -> Option<K8sClient> {
|
||||||
let k = match Kubeconfig::read_from(path) {
|
let k = match Kubeconfig::read_from(path) {
|
||||||
Ok(k) => k,
|
Ok(k) => k,
|
||||||
@@ -516,6 +583,31 @@ impl K8sClient {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
fn get_dynamic_api(
|
||||||
|
resource: ApiResource,
|
||||||
|
capabilities: ApiCapabilities,
|
||||||
|
client: Client,
|
||||||
|
ns: Option<&str>,
|
||||||
|
all: bool,
|
||||||
|
) -> Api<DynamicObject> {
|
||||||
|
if capabilities.scope == Scope::Cluster || all {
|
||||||
|
Api::all_with(client, &resource)
|
||||||
|
} else if let Some(namespace) = ns {
|
||||||
|
Api::namespaced_with(client, namespace, &resource)
|
||||||
|
} else {
|
||||||
|
Api::default_namespaced_with(client, &resource)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn multidoc_deserialize(data: &str) -> Result<Vec<serde_yaml::Value>, serde_yaml::Error> {
|
||||||
|
use serde::Deserialize;
|
||||||
|
let mut docs = vec![];
|
||||||
|
for de in serde_yaml::Deserializer::from_str(data) {
|
||||||
|
docs.push(serde_yaml::Value::deserialize(de)?);
|
||||||
|
}
|
||||||
|
Ok(docs)
|
||||||
|
}
|
||||||
|
|
||||||
pub trait ApplyStrategy<K: Resource> {
|
pub trait ApplyStrategy<K: Resource> {
|
||||||
fn get_api(client: &Client, ns: Option<&str>) -> Api<K>;
|
fn get_api(client: &Client, ns: Option<&str>) -> Api<K>;
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,7 +1,12 @@
|
|||||||
use std::{process::Command, sync::Arc};
|
use std::{collections::BTreeMap, process::Command, sync::Arc, time::Duration};
|
||||||
|
|
||||||
use async_trait::async_trait;
|
use async_trait::async_trait;
|
||||||
use kube::api::GroupVersionKind;
|
use base64::{Engine, engine::general_purpose};
|
||||||
|
use k8s_openapi::api::{
|
||||||
|
core::v1::Secret,
|
||||||
|
rbac::v1::{ClusterRoleBinding, RoleRef, Subject},
|
||||||
|
};
|
||||||
|
use kube::api::{DynamicObject, GroupVersionKind, ObjectMeta};
|
||||||
use log::{debug, info, warn};
|
use log::{debug, info, warn};
|
||||||
use serde::Serialize;
|
use serde::Serialize;
|
||||||
use tokio::sync::OnceCell;
|
use tokio::sync::OnceCell;
|
||||||
@@ -12,14 +17,26 @@ use crate::{
|
|||||||
inventory::Inventory,
|
inventory::Inventory,
|
||||||
modules::{
|
modules::{
|
||||||
k3d::K3DInstallationScore,
|
k3d::K3DInstallationScore,
|
||||||
monitoring::kube_prometheus::crd::{
|
k8s::ingress::{K8sIngressScore, PathType},
|
||||||
crd_alertmanager_config::CRDPrometheus,
|
monitoring::{
|
||||||
prometheus_operator::prometheus_operator_helm_chart_score,
|
grafana::{grafana::Grafana, helm::helm_grafana::grafana_helm_chart_score},
|
||||||
rhob_alertmanager_config::RHOBObservability,
|
kube_prometheus::crd::{
|
||||||
|
crd_alertmanager_config::CRDPrometheus,
|
||||||
|
crd_grafana::{
|
||||||
|
Grafana as GrafanaCRD, GrafanaCom, GrafanaDashboard,
|
||||||
|
GrafanaDashboardDatasource, GrafanaDashboardSpec, GrafanaDatasource,
|
||||||
|
GrafanaDatasourceConfig, GrafanaDatasourceJsonData,
|
||||||
|
GrafanaDatasourceSecureJsonData, GrafanaDatasourceSpec, GrafanaSpec,
|
||||||
|
},
|
||||||
|
crd_prometheuses::LabelSelector,
|
||||||
|
prometheus_operator::prometheus_operator_helm_chart_score,
|
||||||
|
rhob_alertmanager_config::RHOBObservability,
|
||||||
|
service_monitor::ServiceMonitor,
|
||||||
|
},
|
||||||
},
|
},
|
||||||
prometheus::{
|
prometheus::{
|
||||||
k8s_prometheus_alerting_score::K8sPrometheusCRDAlertingScore,
|
k8s_prometheus_alerting_score::K8sPrometheusCRDAlertingScore,
|
||||||
prometheus::PrometheusApplicationMonitoring, rhob_alerting_score::RHOBAlertingScore,
|
prometheus::PrometheusMonitoring, rhob_alerting_score::RHOBAlertingScore,
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
score::Score,
|
score::Score,
|
||||||
@@ -47,6 +64,13 @@ struct K8sState {
|
|||||||
message: String,
|
message: String,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[derive(Debug, Clone)]
|
||||||
|
pub enum KubernetesDistribution {
|
||||||
|
OpenshiftFamily,
|
||||||
|
K3sFamily,
|
||||||
|
Default,
|
||||||
|
}
|
||||||
|
|
||||||
#[derive(Debug, Clone)]
|
#[derive(Debug, Clone)]
|
||||||
enum K8sSource {
|
enum K8sSource {
|
||||||
LocalK3d,
|
LocalK3d,
|
||||||
@@ -57,6 +81,7 @@ enum K8sSource {
|
|||||||
pub struct K8sAnywhereTopology {
|
pub struct K8sAnywhereTopology {
|
||||||
k8s_state: Arc<OnceCell<Option<K8sState>>>,
|
k8s_state: Arc<OnceCell<Option<K8sState>>>,
|
||||||
tenant_manager: Arc<OnceCell<K8sTenantManager>>,
|
tenant_manager: Arc<OnceCell<K8sTenantManager>>,
|
||||||
|
k8s_distribution: Arc<OnceCell<KubernetesDistribution>>,
|
||||||
config: Arc<K8sAnywhereConfig>,
|
config: Arc<K8sAnywhereConfig>,
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -78,41 +103,172 @@ impl K8sclient for K8sAnywhereTopology {
|
|||||||
}
|
}
|
||||||
|
|
||||||
#[async_trait]
|
#[async_trait]
|
||||||
impl PrometheusApplicationMonitoring<CRDPrometheus> for K8sAnywhereTopology {
|
impl Grafana for K8sAnywhereTopology {
|
||||||
|
async fn ensure_grafana_operator(
|
||||||
|
&self,
|
||||||
|
inventory: &Inventory,
|
||||||
|
) -> Result<PreparationOutcome, PreparationError> {
|
||||||
|
debug!("ensure grafana operator");
|
||||||
|
let client = self.k8s_client().await.unwrap();
|
||||||
|
let grafana_gvk = GroupVersionKind {
|
||||||
|
group: "grafana.integreatly.org".to_string(),
|
||||||
|
version: "v1beta1".to_string(),
|
||||||
|
kind: "Grafana".to_string(),
|
||||||
|
};
|
||||||
|
let name = "grafanas.grafana.integreatly.org";
|
||||||
|
let ns = "grafana";
|
||||||
|
|
||||||
|
let grafana_crd = client
|
||||||
|
.get_resource_json_value(name, Some(ns), &grafana_gvk)
|
||||||
|
.await;
|
||||||
|
match grafana_crd {
|
||||||
|
Ok(_) => {
|
||||||
|
return Ok(PreparationOutcome::Success {
|
||||||
|
details: "Found grafana CRDs in cluster".to_string(),
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
Err(_) => {
|
||||||
|
return self
|
||||||
|
.install_grafana_operator(inventory, Some("grafana"))
|
||||||
|
.await;
|
||||||
|
}
|
||||||
|
};
|
||||||
|
}
|
||||||
|
async fn install_grafana(&self) -> Result<PreparationOutcome, PreparationError> {
|
||||||
|
let ns = "grafana";
|
||||||
|
|
||||||
|
let mut label = BTreeMap::new();
|
||||||
|
|
||||||
|
label.insert("dashboards".to_string(), "grafana".to_string());
|
||||||
|
|
||||||
|
let label_selector = LabelSelector {
|
||||||
|
match_labels: label.clone(),
|
||||||
|
match_expressions: vec![],
|
||||||
|
};
|
||||||
|
|
||||||
|
let client = self.k8s_client().await?;
|
||||||
|
|
||||||
|
let grafana = self.build_grafana(ns, &label);
|
||||||
|
|
||||||
|
client.apply(&grafana, Some(ns)).await?;
|
||||||
|
//TODO change this to a ensure ready or something better than just a timeout
|
||||||
|
client
|
||||||
|
.wait_until_deployment_ready(
|
||||||
|
"grafana-grafana-deployment",
|
||||||
|
Some("grafana"),
|
||||||
|
Some(Duration::from_secs(30)),
|
||||||
|
)
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
let sa_name = "grafana-grafana-sa";
|
||||||
|
let token_secret_name = "grafana-sa-token-secret";
|
||||||
|
|
||||||
|
let sa_token_secret = self.build_sa_token_secret(token_secret_name, sa_name, ns);
|
||||||
|
|
||||||
|
client.apply(&sa_token_secret, Some(ns)).await?;
|
||||||
|
let secret_gvk = GroupVersionKind {
|
||||||
|
group: "".to_string(),
|
||||||
|
version: "v1".to_string(),
|
||||||
|
kind: "Secret".to_string(),
|
||||||
|
};
|
||||||
|
|
||||||
|
let secret = client
|
||||||
|
.get_resource_json_value(token_secret_name, Some(ns), &secret_gvk)
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
let token = format!(
|
||||||
|
"Bearer {}",
|
||||||
|
self.extract_and_normalize_token(&secret).unwrap()
|
||||||
|
);
|
||||||
|
|
||||||
|
debug!("creating grafana clusterrole binding");
|
||||||
|
|
||||||
|
let clusterrolebinding =
|
||||||
|
self.build_cluster_rolebinding(sa_name, "cluster-monitoring-view", ns);
|
||||||
|
|
||||||
|
client.apply(&clusterrolebinding, Some(ns)).await?;
|
||||||
|
|
||||||
|
debug!("creating grafana datasource crd");
|
||||||
|
|
||||||
|
let thanos_url = format!(
|
||||||
|
"https://{}",
|
||||||
|
self.get_domain("thanos-querier-openshift-monitoring")
|
||||||
|
.await
|
||||||
|
.unwrap()
|
||||||
|
);
|
||||||
|
|
||||||
|
let thanos_openshift_datasource = self.build_grafana_datasource(
|
||||||
|
"thanos-openshift-monitoring",
|
||||||
|
ns,
|
||||||
|
&label_selector,
|
||||||
|
&thanos_url,
|
||||||
|
&token,
|
||||||
|
);
|
||||||
|
|
||||||
|
client.apply(&thanos_openshift_datasource, Some(ns)).await?;
|
||||||
|
|
||||||
|
debug!("creating grafana dashboard crd");
|
||||||
|
let dashboard = self.build_grafana_dashboard(ns, &label_selector);
|
||||||
|
|
||||||
|
client.apply(&dashboard, Some(ns)).await?;
|
||||||
|
debug!("creating grafana ingress");
|
||||||
|
let grafana_ingress = self.build_grafana_ingress(ns).await;
|
||||||
|
|
||||||
|
grafana_ingress
|
||||||
|
.interpret(&Inventory::empty(), self)
|
||||||
|
.await
|
||||||
|
.map_err(|e| PreparationError::new(e.to_string()))?;
|
||||||
|
|
||||||
|
Ok(PreparationOutcome::Success {
|
||||||
|
details: "Installed grafana composants".to_string(),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[async_trait]
|
||||||
|
impl PrometheusMonitoring<CRDPrometheus> for K8sAnywhereTopology {
|
||||||
async fn install_prometheus(
|
async fn install_prometheus(
|
||||||
&self,
|
&self,
|
||||||
sender: &CRDPrometheus,
|
sender: &CRDPrometheus,
|
||||||
inventory: &Inventory,
|
_inventory: &Inventory,
|
||||||
receivers: Option<Vec<Box<dyn AlertReceiver<CRDPrometheus>>>>,
|
_receivers: Option<Vec<Box<dyn AlertReceiver<CRDPrometheus>>>>,
|
||||||
|
) -> Result<PreparationOutcome, PreparationError> {
|
||||||
|
let client = self.k8s_client().await?;
|
||||||
|
|
||||||
|
for monitor in sender.service_monitor.iter() {
|
||||||
|
client
|
||||||
|
.apply(monitor, Some(&sender.namespace))
|
||||||
|
.await
|
||||||
|
.map_err(|e| PreparationError::new(e.to_string()))?;
|
||||||
|
}
|
||||||
|
Ok(PreparationOutcome::Success {
|
||||||
|
details: "successfuly installed prometheus components".to_string(),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn ensure_prometheus_operator(
|
||||||
|
&self,
|
||||||
|
sender: &CRDPrometheus,
|
||||||
|
_inventory: &Inventory,
|
||||||
) -> Result<PreparationOutcome, PreparationError> {
|
) -> Result<PreparationOutcome, PreparationError> {
|
||||||
let po_result = self.ensure_prometheus_operator(sender).await?;
|
let po_result = self.ensure_prometheus_operator(sender).await?;
|
||||||
|
|
||||||
if po_result == PreparationOutcome::Noop {
|
match po_result {
|
||||||
debug!("Skipping Prometheus CR installation due to missing operator.");
|
PreparationOutcome::Success { details: _ } => {
|
||||||
return Ok(po_result);
|
debug!("Detected prometheus crds operator present in cluster.");
|
||||||
}
|
return Ok(po_result);
|
||||||
|
}
|
||||||
let result = self
|
PreparationOutcome::Noop => {
|
||||||
.get_k8s_prometheus_application_score(sender.clone(), receivers)
|
debug!("Skipping Prometheus CR installation due to missing operator.");
|
||||||
.await
|
return Ok(po_result);
|
||||||
.interpret(inventory, self)
|
}
|
||||||
.await;
|
|
||||||
|
|
||||||
match result {
|
|
||||||
Ok(outcome) => match outcome.status {
|
|
||||||
InterpretStatus::SUCCESS => Ok(PreparationOutcome::Success {
|
|
||||||
details: outcome.message,
|
|
||||||
}),
|
|
||||||
InterpretStatus::NOOP => Ok(PreparationOutcome::Noop),
|
|
||||||
_ => Err(PreparationError::new(outcome.message)),
|
|
||||||
},
|
|
||||||
Err(err) => Err(PreparationError::new(err.to_string())),
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
#[async_trait]
|
#[async_trait]
|
||||||
impl PrometheusApplicationMonitoring<RHOBObservability> for K8sAnywhereTopology {
|
impl PrometheusMonitoring<RHOBObservability> for K8sAnywhereTopology {
|
||||||
async fn install_prometheus(
|
async fn install_prometheus(
|
||||||
&self,
|
&self,
|
||||||
sender: &RHOBObservability,
|
sender: &RHOBObservability,
|
||||||
@@ -146,6 +302,14 @@ impl PrometheusApplicationMonitoring<RHOBObservability> for K8sAnywhereTopology
|
|||||||
Err(err) => Err(PreparationError::new(err.to_string())),
|
Err(err) => Err(PreparationError::new(err.to_string())),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
async fn ensure_prometheus_operator(
|
||||||
|
&self,
|
||||||
|
sender: &RHOBObservability,
|
||||||
|
inventory: &Inventory,
|
||||||
|
) -> Result<PreparationOutcome, PreparationError> {
|
||||||
|
todo!()
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl Serialize for K8sAnywhereTopology {
|
impl Serialize for K8sAnywhereTopology {
|
||||||
@@ -162,6 +326,7 @@ impl K8sAnywhereTopology {
|
|||||||
Self {
|
Self {
|
||||||
k8s_state: Arc::new(OnceCell::new()),
|
k8s_state: Arc::new(OnceCell::new()),
|
||||||
tenant_manager: Arc::new(OnceCell::new()),
|
tenant_manager: Arc::new(OnceCell::new()),
|
||||||
|
k8s_distribution: Arc::new(OnceCell::new()),
|
||||||
config: Arc::new(K8sAnywhereConfig::from_env()),
|
config: Arc::new(K8sAnywhereConfig::from_env()),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -170,10 +335,216 @@ impl K8sAnywhereTopology {
|
|||||||
Self {
|
Self {
|
||||||
k8s_state: Arc::new(OnceCell::new()),
|
k8s_state: Arc::new(OnceCell::new()),
|
||||||
tenant_manager: Arc::new(OnceCell::new()),
|
tenant_manager: Arc::new(OnceCell::new()),
|
||||||
|
k8s_distribution: Arc::new(OnceCell::new()),
|
||||||
config: Arc::new(config),
|
config: Arc::new(config),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
pub async fn get_k8s_distribution(&self) -> Result<&KubernetesDistribution, PreparationError> {
|
||||||
|
self.k8s_distribution
|
||||||
|
.get_or_try_init(async || {
|
||||||
|
let client = self.k8s_client().await.unwrap();
|
||||||
|
|
||||||
|
let discovery = client.discovery().await.map_err(|e| {
|
||||||
|
PreparationError::new(format!("Could not discover API groups: {}", e))
|
||||||
|
})?;
|
||||||
|
|
||||||
|
let version = client.get_apiserver_version().await.map_err(|e| {
|
||||||
|
PreparationError::new(format!("Could not get server version: {}", e))
|
||||||
|
})?;
|
||||||
|
|
||||||
|
// OpenShift / OKD
|
||||||
|
if discovery
|
||||||
|
.groups()
|
||||||
|
.any(|g| g.name() == "project.openshift.io")
|
||||||
|
{
|
||||||
|
return Ok(KubernetesDistribution::OpenshiftFamily);
|
||||||
|
}
|
||||||
|
|
||||||
|
// K3d / K3s
|
||||||
|
if version.git_version.contains("k3s") {
|
||||||
|
return Ok(KubernetesDistribution::K3sFamily);
|
||||||
|
}
|
||||||
|
|
||||||
|
return Ok(KubernetesDistribution::Default);
|
||||||
|
})
|
||||||
|
.await
|
||||||
|
}
|
||||||
|
|
||||||
|
fn extract_and_normalize_token(&self, secret: &DynamicObject) -> Option<String> {
|
||||||
|
let token_b64 = secret
|
||||||
|
.data
|
||||||
|
.get("token")
|
||||||
|
.or_else(|| secret.data.get("data").and_then(|d| d.get("token")))
|
||||||
|
.and_then(|v| v.as_str())?;
|
||||||
|
|
||||||
|
let bytes = general_purpose::STANDARD.decode(token_b64).ok()?;
|
||||||
|
|
||||||
|
let s = String::from_utf8(bytes).ok()?;
|
||||||
|
|
||||||
|
let cleaned = s
|
||||||
|
.trim_matches(|c: char| c.is_whitespace() || c == '\0')
|
||||||
|
.to_string();
|
||||||
|
Some(cleaned)
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn build_cluster_rolebinding(
|
||||||
|
&self,
|
||||||
|
service_account_name: &str,
|
||||||
|
clusterrole_name: &str,
|
||||||
|
ns: &str,
|
||||||
|
) -> ClusterRoleBinding {
|
||||||
|
ClusterRoleBinding {
|
||||||
|
metadata: ObjectMeta {
|
||||||
|
name: Some(format!("{}-view-binding", service_account_name)),
|
||||||
|
..Default::default()
|
||||||
|
},
|
||||||
|
role_ref: RoleRef {
|
||||||
|
api_group: "rbac.authorization.k8s.io".into(),
|
||||||
|
kind: "ClusterRole".into(),
|
||||||
|
name: clusterrole_name.into(),
|
||||||
|
},
|
||||||
|
subjects: Some(vec![Subject {
|
||||||
|
kind: "ServiceAccount".into(),
|
||||||
|
name: service_account_name.into(),
|
||||||
|
namespace: Some(ns.into()),
|
||||||
|
..Default::default()
|
||||||
|
}]),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn build_sa_token_secret(
|
||||||
|
&self,
|
||||||
|
secret_name: &str,
|
||||||
|
service_account_name: &str,
|
||||||
|
ns: &str,
|
||||||
|
) -> Secret {
|
||||||
|
let mut annotations = BTreeMap::new();
|
||||||
|
annotations.insert(
|
||||||
|
"kubernetes.io/service-account.name".to_string(),
|
||||||
|
service_account_name.to_string(),
|
||||||
|
);
|
||||||
|
|
||||||
|
Secret {
|
||||||
|
metadata: ObjectMeta {
|
||||||
|
name: Some(secret_name.into()),
|
||||||
|
namespace: Some(ns.into()),
|
||||||
|
annotations: Some(annotations),
|
||||||
|
..Default::default()
|
||||||
|
},
|
||||||
|
type_: Some("kubernetes.io/service-account-token".to_string()),
|
||||||
|
..Default::default()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn build_grafana_datasource(
|
||||||
|
&self,
|
||||||
|
name: &str,
|
||||||
|
ns: &str,
|
||||||
|
label_selector: &LabelSelector,
|
||||||
|
url: &str,
|
||||||
|
token: &str,
|
||||||
|
) -> GrafanaDatasource {
|
||||||
|
let mut json_data = BTreeMap::new();
|
||||||
|
json_data.insert("timeInterval".to_string(), "5s".to_string());
|
||||||
|
|
||||||
|
GrafanaDatasource {
|
||||||
|
metadata: ObjectMeta {
|
||||||
|
name: Some(name.to_string()),
|
||||||
|
namespace: Some(ns.to_string()),
|
||||||
|
..Default::default()
|
||||||
|
},
|
||||||
|
spec: GrafanaDatasourceSpec {
|
||||||
|
instance_selector: label_selector.clone(),
|
||||||
|
allow_cross_namespace_import: Some(true),
|
||||||
|
values_from: None,
|
||||||
|
datasource: GrafanaDatasourceConfig {
|
||||||
|
access: "proxy".to_string(),
|
||||||
|
name: name.to_string(),
|
||||||
|
r#type: "prometheus".to_string(),
|
||||||
|
url: url.to_string(),
|
||||||
|
database: None,
|
||||||
|
json_data: Some(GrafanaDatasourceJsonData {
|
||||||
|
time_interval: Some("60s".to_string()),
|
||||||
|
http_header_name1: Some("Authorization".to_string()),
|
||||||
|
tls_skip_verify: Some(true),
|
||||||
|
oauth_pass_thru: Some(true),
|
||||||
|
}),
|
||||||
|
secure_json_data: Some(GrafanaDatasourceSecureJsonData {
|
||||||
|
http_header_value1: Some(format!("Bearer {token}")),
|
||||||
|
}),
|
||||||
|
is_default: Some(false),
|
||||||
|
editable: Some(true),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn build_grafana_dashboard(
|
||||||
|
&self,
|
||||||
|
ns: &str,
|
||||||
|
label_selector: &LabelSelector,
|
||||||
|
) -> GrafanaDashboard {
|
||||||
|
let graf_dashboard = GrafanaDashboard {
|
||||||
|
metadata: ObjectMeta {
|
||||||
|
name: Some(format!("grafana-dashboard-{}", ns)),
|
||||||
|
namespace: Some(ns.to_string()),
|
||||||
|
..Default::default()
|
||||||
|
},
|
||||||
|
spec: GrafanaDashboardSpec {
|
||||||
|
resync_period: Some("30s".to_string()),
|
||||||
|
instance_selector: label_selector.clone(),
|
||||||
|
datasources: Some(vec![GrafanaDashboardDatasource {
|
||||||
|
input_name: "DS_PROMETHEUS".to_string(),
|
||||||
|
datasource_name: "thanos-openshift-monitoring".to_string(),
|
||||||
|
}]),
|
||||||
|
json: None,
|
||||||
|
grafana_com: Some(GrafanaCom {
|
||||||
|
id: 17406,
|
||||||
|
revision: None,
|
||||||
|
}),
|
||||||
|
},
|
||||||
|
};
|
||||||
|
graf_dashboard
|
||||||
|
}
|
||||||
|
|
||||||
|
fn build_grafana(&self, ns: &str, labels: &BTreeMap<String, String>) -> GrafanaCRD {
|
||||||
|
let grafana = GrafanaCRD {
|
||||||
|
metadata: ObjectMeta {
|
||||||
|
name: Some(format!("grafana-{}", ns)),
|
||||||
|
namespace: Some(ns.to_string()),
|
||||||
|
labels: Some(labels.clone()),
|
||||||
|
..Default::default()
|
||||||
|
},
|
||||||
|
spec: GrafanaSpec {
|
||||||
|
config: None,
|
||||||
|
admin_user: None,
|
||||||
|
admin_password: None,
|
||||||
|
ingress: None,
|
||||||
|
persistence: None,
|
||||||
|
resources: None,
|
||||||
|
},
|
||||||
|
};
|
||||||
|
grafana
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn build_grafana_ingress(&self, ns: &str) -> K8sIngressScore {
|
||||||
|
let domain = self.get_domain(&format!("grafana-{}", ns)).await.unwrap();
|
||||||
|
let name = format!("{}-grafana", ns);
|
||||||
|
let backend_service = format!("grafana-{}-service", ns);
|
||||||
|
|
||||||
|
K8sIngressScore {
|
||||||
|
name: fqdn::fqdn!(&name),
|
||||||
|
host: fqdn::fqdn!(&domain),
|
||||||
|
backend_service: fqdn::fqdn!(&backend_service),
|
||||||
|
port: 3000,
|
||||||
|
path: Some("/".to_string()),
|
||||||
|
path_type: Some(PathType::Prefix),
|
||||||
|
namespace: Some(fqdn::fqdn!(&ns)),
|
||||||
|
ingress_class_name: Some("openshift-default".to_string()),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
async fn get_cluster_observability_operator_prometheus_application_score(
|
async fn get_cluster_observability_operator_prometheus_application_score(
|
||||||
&self,
|
&self,
|
||||||
sender: RHOBObservability,
|
sender: RHOBObservability,
|
||||||
@@ -191,13 +562,14 @@ impl K8sAnywhereTopology {
|
|||||||
&self,
|
&self,
|
||||||
sender: CRDPrometheus,
|
sender: CRDPrometheus,
|
||||||
receivers: Option<Vec<Box<dyn AlertReceiver<CRDPrometheus>>>>,
|
receivers: Option<Vec<Box<dyn AlertReceiver<CRDPrometheus>>>>,
|
||||||
|
service_monitors: Option<Vec<ServiceMonitor>>,
|
||||||
) -> K8sPrometheusCRDAlertingScore {
|
) -> K8sPrometheusCRDAlertingScore {
|
||||||
K8sPrometheusCRDAlertingScore {
|
return K8sPrometheusCRDAlertingScore {
|
||||||
sender,
|
sender,
|
||||||
receivers: receivers.unwrap_or_default(),
|
receivers: receivers.unwrap_or_default(),
|
||||||
service_monitors: vec![],
|
service_monitors: service_monitors.unwrap_or_default(),
|
||||||
prometheus_rules: vec![],
|
prometheus_rules: vec![],
|
||||||
}
|
};
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn openshift_ingress_operator_available(&self) -> Result<(), PreparationError> {
|
async fn openshift_ingress_operator_available(&self) -> Result<(), PreparationError> {
|
||||||
@@ -465,6 +837,30 @@ impl K8sAnywhereTopology {
|
|||||||
details: "prometheus operator present in cluster".into(),
|
details: "prometheus operator present in cluster".into(),
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
async fn install_grafana_operator(
|
||||||
|
&self,
|
||||||
|
inventory: &Inventory,
|
||||||
|
ns: Option<&str>,
|
||||||
|
) -> Result<PreparationOutcome, PreparationError> {
|
||||||
|
let namespace = ns.unwrap_or("grafana");
|
||||||
|
info!("installing grafana operator in ns {namespace}");
|
||||||
|
let tenant = self.get_k8s_tenant_manager()?.get_tenant_config().await;
|
||||||
|
let mut namespace_scope = false;
|
||||||
|
if tenant.is_some() {
|
||||||
|
namespace_scope = true;
|
||||||
|
}
|
||||||
|
let _grafana_operator_score = grafana_helm_chart_score(namespace, namespace_scope)
|
||||||
|
.interpret(inventory, self)
|
||||||
|
.await
|
||||||
|
.map_err(|e| PreparationError::new(e.to_string()));
|
||||||
|
Ok(PreparationOutcome::Success {
|
||||||
|
details: format!(
|
||||||
|
"Successfully installed grafana operator in ns {}",
|
||||||
|
ns.unwrap()
|
||||||
|
),
|
||||||
|
})
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Clone, Debug)]
|
#[derive(Clone, Debug)]
|
||||||
|
|||||||
@@ -28,13 +28,7 @@ pub trait LoadBalancer: Send + Sync {
|
|||||||
&self,
|
&self,
|
||||||
service: &LoadBalancerService,
|
service: &LoadBalancerService,
|
||||||
) -> Result<(), ExecutorError> {
|
) -> Result<(), ExecutorError> {
|
||||||
debug!(
|
self.add_service(service).await?;
|
||||||
"Listing LoadBalancer services {:?}",
|
|
||||||
self.list_services().await
|
|
||||||
);
|
|
||||||
if !self.list_services().await.contains(service) {
|
|
||||||
self.add_service(service).await?;
|
|
||||||
}
|
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,5 +1,6 @@
|
|||||||
mod ha_cluster;
|
mod ha_cluster;
|
||||||
pub mod ingress;
|
pub mod ingress;
|
||||||
|
pub mod opnsense;
|
||||||
use harmony_types::net::IpAddress;
|
use harmony_types::net::IpAddress;
|
||||||
mod host_binding;
|
mod host_binding;
|
||||||
mod http;
|
mod http;
|
||||||
|
|||||||
@@ -1,10 +1,21 @@
|
|||||||
use std::{net::Ipv4Addr, str::FromStr, sync::Arc};
|
use std::{
|
||||||
|
error::Error,
|
||||||
|
fmt::{self, Debug},
|
||||||
|
net::Ipv4Addr,
|
||||||
|
str::FromStr,
|
||||||
|
sync::Arc,
|
||||||
|
};
|
||||||
|
|
||||||
use async_trait::async_trait;
|
use async_trait::async_trait;
|
||||||
use harmony_types::net::{IpAddress, MacAddress};
|
use derive_new::new;
|
||||||
|
use harmony_types::{
|
||||||
|
id::Id,
|
||||||
|
net::{IpAddress, MacAddress},
|
||||||
|
switch::PortLocation,
|
||||||
|
};
|
||||||
use serde::Serialize;
|
use serde::Serialize;
|
||||||
|
|
||||||
use crate::executors::ExecutorError;
|
use crate::{executors::ExecutorError, hardware::PhysicalHost};
|
||||||
|
|
||||||
use super::{LogicalHost, k8s::K8sClient};
|
use super::{LogicalHost, k8s::K8sClient};
|
||||||
|
|
||||||
@@ -15,8 +26,8 @@ pub struct DHCPStaticEntry {
|
|||||||
pub ip: Ipv4Addr,
|
pub ip: Ipv4Addr,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl std::fmt::Display for DHCPStaticEntry {
|
impl fmt::Display for DHCPStaticEntry {
|
||||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||||
let mac = self
|
let mac = self
|
||||||
.mac
|
.mac
|
||||||
.iter()
|
.iter()
|
||||||
@@ -38,8 +49,8 @@ pub trait Firewall: Send + Sync {
|
|||||||
fn get_host(&self) -> LogicalHost;
|
fn get_host(&self) -> LogicalHost;
|
||||||
}
|
}
|
||||||
|
|
||||||
impl std::fmt::Debug for dyn Firewall {
|
impl Debug for dyn Firewall {
|
||||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||||
f.write_fmt(format_args!("Firewall {}", self.get_ip()))
|
f.write_fmt(format_args!("Firewall {}", self.get_ip()))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -61,7 +72,7 @@ pub struct PxeOptions {
|
|||||||
}
|
}
|
||||||
|
|
||||||
#[async_trait]
|
#[async_trait]
|
||||||
pub trait DhcpServer: Send + Sync + std::fmt::Debug {
|
pub trait DhcpServer: Send + Sync + Debug {
|
||||||
async fn add_static_mapping(&self, entry: &DHCPStaticEntry) -> Result<(), ExecutorError>;
|
async fn add_static_mapping(&self, entry: &DHCPStaticEntry) -> Result<(), ExecutorError>;
|
||||||
async fn remove_static_mapping(&self, mac: &MacAddress) -> Result<(), ExecutorError>;
|
async fn remove_static_mapping(&self, mac: &MacAddress) -> Result<(), ExecutorError>;
|
||||||
async fn list_static_mappings(&self) -> Vec<(MacAddress, IpAddress)>;
|
async fn list_static_mappings(&self) -> Vec<(MacAddress, IpAddress)>;
|
||||||
@@ -100,8 +111,8 @@ pub trait DnsServer: Send + Sync {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl std::fmt::Debug for dyn DnsServer {
|
impl Debug for dyn DnsServer {
|
||||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||||
f.write_fmt(format_args!("DnsServer {}", self.get_ip()))
|
f.write_fmt(format_args!("DnsServer {}", self.get_ip()))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -137,8 +148,8 @@ pub enum DnsRecordType {
|
|||||||
TXT,
|
TXT,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl std::fmt::Display for DnsRecordType {
|
impl fmt::Display for DnsRecordType {
|
||||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||||
match self {
|
match self {
|
||||||
DnsRecordType::A => write!(f, "A"),
|
DnsRecordType::A => write!(f, "A"),
|
||||||
DnsRecordType::AAAA => write!(f, "AAAA"),
|
DnsRecordType::AAAA => write!(f, "AAAA"),
|
||||||
@@ -172,6 +183,77 @@ impl FromStr for DnsRecordType {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[async_trait]
|
||||||
|
pub trait Switch: Send + Sync {
|
||||||
|
async fn setup_switch(&self) -> Result<(), SwitchError>;
|
||||||
|
|
||||||
|
async fn get_port_for_mac_address(
|
||||||
|
&self,
|
||||||
|
mac_address: &MacAddress,
|
||||||
|
) -> Result<Option<PortLocation>, SwitchError>;
|
||||||
|
|
||||||
|
async fn configure_host_network(&self, config: &HostNetworkConfig) -> Result<(), SwitchError>;
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Clone, Debug, PartialEq)]
|
||||||
|
pub struct HostNetworkConfig {
|
||||||
|
pub host_id: Id,
|
||||||
|
pub switch_ports: Vec<SwitchPort>,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Clone, Debug, PartialEq)]
|
||||||
|
pub struct SwitchPort {
|
||||||
|
pub interface: NetworkInterface,
|
||||||
|
pub port: PortLocation,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Clone, Debug, PartialEq)]
|
||||||
|
pub struct NetworkInterface {
|
||||||
|
pub name: String,
|
||||||
|
pub mac_address: MacAddress,
|
||||||
|
pub speed_mbps: Option<u32>,
|
||||||
|
pub mtu: u32,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Debug, Clone, new)]
|
||||||
|
pub struct SwitchError {
|
||||||
|
msg: String,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl fmt::Display for SwitchError {
|
||||||
|
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||||
|
f.write_str(&self.msg)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Error for SwitchError {}
|
||||||
|
|
||||||
|
#[async_trait]
|
||||||
|
pub trait SwitchClient: Debug + Send + Sync {
|
||||||
|
/// Executes essential, idempotent, one-time initial configuration steps.
|
||||||
|
///
|
||||||
|
/// This is an opiniated procedure that setups a switch to provide high availability
|
||||||
|
/// capabilities as decided by the NationTech team.
|
||||||
|
///
|
||||||
|
/// This includes tasks like enabling switchport for all interfaces
|
||||||
|
/// except the ones intended for Fabric Networking, etc.
|
||||||
|
///
|
||||||
|
/// The implementation must ensure the operation is **idempotent** (safe to run multiple times)
|
||||||
|
/// and that it doesn't break existing configurations.
|
||||||
|
async fn setup(&self) -> Result<(), SwitchError>;
|
||||||
|
|
||||||
|
async fn find_port(
|
||||||
|
&self,
|
||||||
|
mac_address: &MacAddress,
|
||||||
|
) -> Result<Option<PortLocation>, SwitchError>;
|
||||||
|
|
||||||
|
async fn configure_port_channel(
|
||||||
|
&self,
|
||||||
|
channel_name: &str,
|
||||||
|
switch_ports: Vec<PortLocation>,
|
||||||
|
) -> Result<u8, SwitchError>;
|
||||||
|
}
|
||||||
|
|
||||||
#[cfg(test)]
|
#[cfg(test)]
|
||||||
mod test {
|
mod test {
|
||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
|
|||||||
@@ -21,6 +21,7 @@ pub struct AlertingInterpret<S: AlertSender> {
|
|||||||
pub sender: S,
|
pub sender: S,
|
||||||
pub receivers: Vec<Box<dyn AlertReceiver<S>>>,
|
pub receivers: Vec<Box<dyn AlertReceiver<S>>>,
|
||||||
pub rules: Vec<Box<dyn AlertRule<S>>>,
|
pub rules: Vec<Box<dyn AlertRule<S>>>,
|
||||||
|
pub scrape_targets: Option<Vec<Box<dyn ScrapeTarget<S>>>>,
|
||||||
}
|
}
|
||||||
|
|
||||||
#[async_trait]
|
#[async_trait]
|
||||||
@@ -30,6 +31,7 @@ impl<S: AlertSender + Installable<T>, T: Topology> Interpret<T> for AlertingInte
|
|||||||
inventory: &Inventory,
|
inventory: &Inventory,
|
||||||
topology: &T,
|
topology: &T,
|
||||||
) -> Result<Outcome, InterpretError> {
|
) -> Result<Outcome, InterpretError> {
|
||||||
|
debug!("hit sender configure for AlertingInterpret");
|
||||||
self.sender.configure(inventory, topology).await?;
|
self.sender.configure(inventory, topology).await?;
|
||||||
for receiver in self.receivers.iter() {
|
for receiver in self.receivers.iter() {
|
||||||
receiver.install(&self.sender).await?;
|
receiver.install(&self.sender).await?;
|
||||||
@@ -38,6 +40,12 @@ impl<S: AlertSender + Installable<T>, T: Topology> Interpret<T> for AlertingInte
|
|||||||
debug!("installing rule: {:#?}", rule);
|
debug!("installing rule: {:#?}", rule);
|
||||||
rule.install(&self.sender).await?;
|
rule.install(&self.sender).await?;
|
||||||
}
|
}
|
||||||
|
if let Some(targets) = &self.scrape_targets {
|
||||||
|
for target in targets.iter() {
|
||||||
|
debug!("installing scrape_target: {:#?}", target);
|
||||||
|
target.install(&self.sender).await?;
|
||||||
|
}
|
||||||
|
}
|
||||||
self.sender.ensure_installed(inventory, topology).await?;
|
self.sender.ensure_installed(inventory, topology).await?;
|
||||||
Ok(Outcome::success(format!(
|
Ok(Outcome::success(format!(
|
||||||
"successfully installed alert sender {}",
|
"successfully installed alert sender {}",
|
||||||
@@ -77,6 +85,7 @@ pub trait AlertRule<S: AlertSender>: std::fmt::Debug + Send + Sync {
|
|||||||
}
|
}
|
||||||
|
|
||||||
#[async_trait]
|
#[async_trait]
|
||||||
pub trait ScrapeTarget<S: AlertSender> {
|
pub trait ScrapeTarget<S: AlertSender>: std::fmt::Debug + Send + Sync {
|
||||||
async fn install(&self, sender: &S) -> Result<(), InterpretError>;
|
async fn install(&self, sender: &S) -> Result<Outcome, InterpretError>;
|
||||||
|
fn clone_box(&self) -> Box<dyn ScrapeTarget<S>>;
|
||||||
}
|
}
|
||||||
|
|||||||
23
harmony/src/domain/topology/opnsense.rs
Normal file
23
harmony/src/domain/topology/opnsense.rs
Normal file
@@ -0,0 +1,23 @@
|
|||||||
|
use async_trait::async_trait;
|
||||||
|
use log::info;
|
||||||
|
|
||||||
|
use crate::{
|
||||||
|
infra::opnsense::OPNSenseFirewall,
|
||||||
|
topology::{PreparationError, PreparationOutcome, Topology},
|
||||||
|
};
|
||||||
|
|
||||||
|
#[async_trait]
|
||||||
|
impl Topology for OPNSenseFirewall {
|
||||||
|
async fn ensure_ready(&self) -> Result<PreparationOutcome, PreparationError> {
|
||||||
|
// FIXME we should be initializing the opnsense config here instead of
|
||||||
|
// OPNSenseFirewall::new as this causes the config to be loaded too early in
|
||||||
|
// harmony initialization process
|
||||||
|
let details = "OPNSenseFirewall topology is ready".to_string();
|
||||||
|
info!("{}", details);
|
||||||
|
Ok(PreparationOutcome::Success { details })
|
||||||
|
}
|
||||||
|
|
||||||
|
fn name(&self) -> &str {
|
||||||
|
"OPNSenseFirewall"
|
||||||
|
}
|
||||||
|
}
|
||||||
378
harmony/src/infra/brocade.rs
Normal file
378
harmony/src/infra/brocade.rs
Normal file
@@ -0,0 +1,378 @@
|
|||||||
|
use async_trait::async_trait;
|
||||||
|
use brocade::{BrocadeClient, BrocadeOptions, InterSwitchLink, InterfaceStatus, PortOperatingMode};
|
||||||
|
use harmony_types::{
|
||||||
|
net::{IpAddress, MacAddress},
|
||||||
|
switch::{PortDeclaration, PortLocation},
|
||||||
|
};
|
||||||
|
use option_ext::OptionExt;
|
||||||
|
|
||||||
|
use crate::topology::{SwitchClient, SwitchError};
|
||||||
|
|
||||||
|
#[derive(Debug)]
|
||||||
|
pub struct BrocadeSwitchClient {
|
||||||
|
brocade: Box<dyn BrocadeClient + Send + Sync>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl BrocadeSwitchClient {
|
||||||
|
pub async fn init(
|
||||||
|
ip_addresses: &[IpAddress],
|
||||||
|
username: &str,
|
||||||
|
password: &str,
|
||||||
|
options: Option<BrocadeOptions>,
|
||||||
|
) -> Result<Self, brocade::Error> {
|
||||||
|
let brocade = brocade::init(ip_addresses, 22, username, password, options).await?;
|
||||||
|
Ok(Self { brocade })
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[async_trait]
|
||||||
|
impl SwitchClient for BrocadeSwitchClient {
|
||||||
|
async fn setup(&self) -> Result<(), SwitchError> {
|
||||||
|
let stack_topology = self
|
||||||
|
.brocade
|
||||||
|
.get_stack_topology()
|
||||||
|
.await
|
||||||
|
.map_err(|e| SwitchError::new(e.to_string()))?;
|
||||||
|
|
||||||
|
let interfaces = self
|
||||||
|
.brocade
|
||||||
|
.get_interfaces()
|
||||||
|
.await
|
||||||
|
.map_err(|e| SwitchError::new(e.to_string()))?;
|
||||||
|
|
||||||
|
let interfaces: Vec<(String, PortOperatingMode)> = interfaces
|
||||||
|
.into_iter()
|
||||||
|
.filter(|interface| {
|
||||||
|
interface.operating_mode.is_none() && interface.status == InterfaceStatus::Connected
|
||||||
|
})
|
||||||
|
.filter(|interface| {
|
||||||
|
!stack_topology.iter().any(|link: &InterSwitchLink| {
|
||||||
|
link.local_port == interface.port_location
|
||||||
|
|| link.remote_port.contains(&interface.port_location)
|
||||||
|
})
|
||||||
|
})
|
||||||
|
.map(|interface| (interface.name.clone(), PortOperatingMode::Access))
|
||||||
|
.collect();
|
||||||
|
|
||||||
|
if interfaces.is_empty() {
|
||||||
|
return Ok(());
|
||||||
|
}
|
||||||
|
|
||||||
|
self.brocade
|
||||||
|
.configure_interfaces(interfaces)
|
||||||
|
.await
|
||||||
|
.map_err(|e| SwitchError::new(e.to_string()))?;
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn find_port(
|
||||||
|
&self,
|
||||||
|
mac_address: &MacAddress,
|
||||||
|
) -> Result<Option<PortLocation>, SwitchError> {
|
||||||
|
let table = self
|
||||||
|
.brocade
|
||||||
|
.get_mac_address_table()
|
||||||
|
.await
|
||||||
|
.map_err(|e| SwitchError::new(format!("{e}")))?;
|
||||||
|
|
||||||
|
let port = table
|
||||||
|
.iter()
|
||||||
|
.find(|entry| entry.mac_address == *mac_address)
|
||||||
|
.map(|entry| match &entry.port {
|
||||||
|
PortDeclaration::Single(port_location) => Ok(port_location.clone()),
|
||||||
|
_ => Err(SwitchError::new(
|
||||||
|
"Multiple ports found for MAC address".into(),
|
||||||
|
)),
|
||||||
|
});
|
||||||
|
|
||||||
|
match port {
|
||||||
|
Some(Ok(p)) => Ok(Some(p)),
|
||||||
|
Some(Err(e)) => Err(e),
|
||||||
|
None => Ok(None),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn configure_port_channel(
|
||||||
|
&self,
|
||||||
|
channel_name: &str,
|
||||||
|
switch_ports: Vec<PortLocation>,
|
||||||
|
) -> Result<u8, SwitchError> {
|
||||||
|
let channel_id = self
|
||||||
|
.brocade
|
||||||
|
.find_available_channel_id()
|
||||||
|
.await
|
||||||
|
.map_err(|e| SwitchError::new(format!("{e}")))?;
|
||||||
|
|
||||||
|
self.brocade
|
||||||
|
.create_port_channel(channel_id, channel_name, &switch_ports)
|
||||||
|
.await
|
||||||
|
.map_err(|e| SwitchError::new(format!("{e}")))?;
|
||||||
|
|
||||||
|
Ok(channel_id)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(test)]
|
||||||
|
mod tests {
|
||||||
|
use std::sync::{Arc, Mutex};
|
||||||
|
|
||||||
|
use assertor::*;
|
||||||
|
use async_trait::async_trait;
|
||||||
|
use brocade::{
|
||||||
|
BrocadeClient, BrocadeInfo, Error, InterSwitchLink, InterfaceInfo, InterfaceStatus,
|
||||||
|
InterfaceType, MacAddressEntry, PortChannelId, PortOperatingMode,
|
||||||
|
};
|
||||||
|
use harmony_types::switch::PortLocation;
|
||||||
|
|
||||||
|
use crate::{infra::brocade::BrocadeSwitchClient, topology::SwitchClient};
|
||||||
|
|
||||||
|
#[tokio::test]
|
||||||
|
async fn setup_should_configure_ethernet_interfaces_as_access_ports() {
|
||||||
|
let first_interface = given_interface()
|
||||||
|
.with_port_location(PortLocation(1, 0, 1))
|
||||||
|
.build();
|
||||||
|
let second_interface = given_interface()
|
||||||
|
.with_port_location(PortLocation(1, 0, 4))
|
||||||
|
.build();
|
||||||
|
let brocade = Box::new(FakeBrocadeClient::new(
|
||||||
|
vec![],
|
||||||
|
vec![first_interface.clone(), second_interface.clone()],
|
||||||
|
));
|
||||||
|
let client = BrocadeSwitchClient {
|
||||||
|
brocade: brocade.clone(),
|
||||||
|
};
|
||||||
|
|
||||||
|
client.setup().await.unwrap();
|
||||||
|
|
||||||
|
let configured_interfaces = brocade.configured_interfaces.lock().unwrap();
|
||||||
|
assert_that!(*configured_interfaces).contains_exactly(vec![
|
||||||
|
(first_interface.name.clone(), PortOperatingMode::Access),
|
||||||
|
(second_interface.name.clone(), PortOperatingMode::Access),
|
||||||
|
]);
|
||||||
|
}
|
||||||
|
|
||||||
|
#[tokio::test]
|
||||||
|
async fn setup_with_an_already_configured_interface_should_skip_configuration() {
|
||||||
|
let brocade = Box::new(FakeBrocadeClient::new(
|
||||||
|
vec![],
|
||||||
|
vec![
|
||||||
|
given_interface()
|
||||||
|
.with_operating_mode(Some(PortOperatingMode::Access))
|
||||||
|
.build(),
|
||||||
|
],
|
||||||
|
));
|
||||||
|
let client = BrocadeSwitchClient {
|
||||||
|
brocade: brocade.clone(),
|
||||||
|
};
|
||||||
|
|
||||||
|
client.setup().await.unwrap();
|
||||||
|
|
||||||
|
let configured_interfaces = brocade.configured_interfaces.lock().unwrap();
|
||||||
|
assert_that!(*configured_interfaces).is_empty();
|
||||||
|
}
|
||||||
|
|
||||||
|
#[tokio::test]
|
||||||
|
async fn setup_with_a_disconnected_interface_should_skip_configuration() {
|
||||||
|
let brocade = Box::new(FakeBrocadeClient::new(
|
||||||
|
vec![],
|
||||||
|
vec![
|
||||||
|
given_interface()
|
||||||
|
.with_status(InterfaceStatus::SfpAbsent)
|
||||||
|
.build(),
|
||||||
|
given_interface()
|
||||||
|
.with_status(InterfaceStatus::NotConnected)
|
||||||
|
.build(),
|
||||||
|
],
|
||||||
|
));
|
||||||
|
let client = BrocadeSwitchClient {
|
||||||
|
brocade: brocade.clone(),
|
||||||
|
};
|
||||||
|
|
||||||
|
client.setup().await.unwrap();
|
||||||
|
|
||||||
|
let configured_interfaces = brocade.configured_interfaces.lock().unwrap();
|
||||||
|
assert_that!(*configured_interfaces).is_empty();
|
||||||
|
}
|
||||||
|
|
||||||
|
#[tokio::test]
|
||||||
|
async fn setup_with_inter_switch_links_should_not_configure_interfaces_used_to_form_stack() {
|
||||||
|
let brocade = Box::new(FakeBrocadeClient::new(
|
||||||
|
vec![
|
||||||
|
given_inter_switch_link()
|
||||||
|
.between(PortLocation(1, 0, 1), PortLocation(2, 0, 1))
|
||||||
|
.build(),
|
||||||
|
given_inter_switch_link()
|
||||||
|
.between(PortLocation(2, 0, 2), PortLocation(3, 0, 1))
|
||||||
|
.build(),
|
||||||
|
],
|
||||||
|
vec![
|
||||||
|
given_interface()
|
||||||
|
.with_port_location(PortLocation(1, 0, 1))
|
||||||
|
.build(),
|
||||||
|
given_interface()
|
||||||
|
.with_port_location(PortLocation(2, 0, 1))
|
||||||
|
.build(),
|
||||||
|
given_interface()
|
||||||
|
.with_port_location(PortLocation(3, 0, 1))
|
||||||
|
.build(),
|
||||||
|
],
|
||||||
|
));
|
||||||
|
let client = BrocadeSwitchClient {
|
||||||
|
brocade: brocade.clone(),
|
||||||
|
};
|
||||||
|
|
||||||
|
client.setup().await.unwrap();
|
||||||
|
|
||||||
|
let configured_interfaces = brocade.configured_interfaces.lock().unwrap();
|
||||||
|
assert_that!(*configured_interfaces).is_empty();
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Debug, Clone)]
|
||||||
|
struct FakeBrocadeClient {
|
||||||
|
stack_topology: Vec<InterSwitchLink>,
|
||||||
|
interfaces: Vec<InterfaceInfo>,
|
||||||
|
configured_interfaces: Arc<Mutex<Vec<(String, PortOperatingMode)>>>,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[async_trait]
|
||||||
|
impl BrocadeClient for FakeBrocadeClient {
|
||||||
|
async fn version(&self) -> Result<BrocadeInfo, Error> {
|
||||||
|
todo!()
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn get_mac_address_table(&self) -> Result<Vec<MacAddressEntry>, Error> {
|
||||||
|
todo!()
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn get_stack_topology(&self) -> Result<Vec<InterSwitchLink>, Error> {
|
||||||
|
Ok(self.stack_topology.clone())
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn get_interfaces(&self) -> Result<Vec<InterfaceInfo>, Error> {
|
||||||
|
Ok(self.interfaces.clone())
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn configure_interfaces(
|
||||||
|
&self,
|
||||||
|
interfaces: Vec<(String, PortOperatingMode)>,
|
||||||
|
) -> Result<(), Error> {
|
||||||
|
let mut configured_interfaces = self.configured_interfaces.lock().unwrap();
|
||||||
|
*configured_interfaces = interfaces;
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn find_available_channel_id(&self) -> Result<PortChannelId, Error> {
|
||||||
|
todo!()
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn create_port_channel(
|
||||||
|
&self,
|
||||||
|
_channel_id: PortChannelId,
|
||||||
|
_channel_name: &str,
|
||||||
|
_ports: &[PortLocation],
|
||||||
|
) -> Result<(), Error> {
|
||||||
|
todo!()
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn clear_port_channel(&self, _channel_name: &str) -> Result<(), Error> {
|
||||||
|
todo!()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl FakeBrocadeClient {
|
||||||
|
fn new(stack_topology: Vec<InterSwitchLink>, interfaces: Vec<InterfaceInfo>) -> Self {
|
||||||
|
Self {
|
||||||
|
stack_topology,
|
||||||
|
interfaces,
|
||||||
|
configured_interfaces: Arc::new(Mutex::new(vec![])),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
struct InterfaceInfoBuilder {
|
||||||
|
port_location: Option<PortLocation>,
|
||||||
|
interface_type: Option<InterfaceType>,
|
||||||
|
operating_mode: Option<PortOperatingMode>,
|
||||||
|
status: Option<InterfaceStatus>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl InterfaceInfoBuilder {
|
||||||
|
fn build(&self) -> InterfaceInfo {
|
||||||
|
let interface_type = self
|
||||||
|
.interface_type
|
||||||
|
.clone()
|
||||||
|
.unwrap_or(InterfaceType::Ethernet("TenGigabitEthernet".into()));
|
||||||
|
let port_location = self.port_location.clone().unwrap_or(PortLocation(1, 0, 1));
|
||||||
|
let name = format!("{interface_type} {port_location}");
|
||||||
|
let status = self.status.clone().unwrap_or(InterfaceStatus::Connected);
|
||||||
|
|
||||||
|
InterfaceInfo {
|
||||||
|
name,
|
||||||
|
port_location,
|
||||||
|
interface_type,
|
||||||
|
operating_mode: self.operating_mode.clone(),
|
||||||
|
status,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn with_port_location(self, port_location: PortLocation) -> Self {
|
||||||
|
Self {
|
||||||
|
port_location: Some(port_location),
|
||||||
|
..self
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn with_operating_mode(self, operating_mode: Option<PortOperatingMode>) -> Self {
|
||||||
|
Self {
|
||||||
|
operating_mode,
|
||||||
|
..self
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn with_status(self, status: InterfaceStatus) -> Self {
|
||||||
|
Self {
|
||||||
|
status: Some(status),
|
||||||
|
..self
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
struct InterSwitchLinkBuilder {
|
||||||
|
link: Option<(PortLocation, PortLocation)>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl InterSwitchLinkBuilder {
|
||||||
|
fn build(&self) -> InterSwitchLink {
|
||||||
|
let link = self
|
||||||
|
.link
|
||||||
|
.clone()
|
||||||
|
.unwrap_or((PortLocation(1, 0, 1), PortLocation(2, 0, 1)));
|
||||||
|
|
||||||
|
InterSwitchLink {
|
||||||
|
local_port: link.0,
|
||||||
|
remote_port: Some(link.1),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn between(self, local_port: PortLocation, remote_port: PortLocation) -> Self {
|
||||||
|
Self {
|
||||||
|
link: Some((local_port, remote_port)),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn given_interface() -> InterfaceInfoBuilder {
|
||||||
|
InterfaceInfoBuilder {
|
||||||
|
port_location: None,
|
||||||
|
interface_type: None,
|
||||||
|
operating_mode: None,
|
||||||
|
status: None,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn given_inter_switch_link() -> InterSwitchLinkBuilder {
|
||||||
|
InterSwitchLinkBuilder { link: None }
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -11,7 +11,7 @@ pub struct InventoryRepositoryFactory;
|
|||||||
impl InventoryRepositoryFactory {
|
impl InventoryRepositoryFactory {
|
||||||
pub async fn build() -> Result<Box<dyn InventoryRepository>, RepoError> {
|
pub async fn build() -> Result<Box<dyn InventoryRepository>, RepoError> {
|
||||||
Ok(Box::new(
|
Ok(Box::new(
|
||||||
SqliteInventoryRepository::new(&(*DATABASE_URL)).await?,
|
SqliteInventoryRepository::new(&DATABASE_URL).await?,
|
||||||
))
|
))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,3 +1,4 @@
|
|||||||
|
pub mod brocade;
|
||||||
pub mod executors;
|
pub mod executors;
|
||||||
pub mod hp_ilo;
|
pub mod hp_ilo;
|
||||||
pub mod intel_amt;
|
pub mod intel_amt;
|
||||||
|
|||||||
@@ -26,19 +26,13 @@ impl LoadBalancer for OPNSenseFirewall {
|
|||||||
}
|
}
|
||||||
|
|
||||||
async fn add_service(&self, service: &LoadBalancerService) -> Result<(), ExecutorError> {
|
async fn add_service(&self, service: &LoadBalancerService) -> Result<(), ExecutorError> {
|
||||||
warn!(
|
|
||||||
"TODO : the current implementation does not check / cleanup / merge with existing haproxy services properly. Make sure to manually verify that the configuration is correct after executing any operation here"
|
|
||||||
);
|
|
||||||
let mut config = self.opnsense_config.write().await;
|
let mut config = self.opnsense_config.write().await;
|
||||||
|
let mut load_balancer = config.load_balancer();
|
||||||
|
|
||||||
let (frontend, backend, servers, healthcheck) =
|
let (frontend, backend, servers, healthcheck) =
|
||||||
harmony_load_balancer_service_to_haproxy_xml(service);
|
harmony_load_balancer_service_to_haproxy_xml(service);
|
||||||
let mut load_balancer = config.load_balancer();
|
|
||||||
load_balancer.add_backend(backend);
|
load_balancer.configure_service(frontend, backend, servers, healthcheck);
|
||||||
load_balancer.add_frontend(frontend);
|
|
||||||
load_balancer.add_servers(servers);
|
|
||||||
if let Some(healthcheck) = healthcheck {
|
|
||||||
load_balancer.add_healthcheck(healthcheck);
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
@@ -106,7 +100,7 @@ pub(crate) fn haproxy_xml_config_to_harmony_loadbalancer(
|
|||||||
.backends
|
.backends
|
||||||
.backends
|
.backends
|
||||||
.iter()
|
.iter()
|
||||||
.find(|b| b.uuid == frontend.default_backend);
|
.find(|b| Some(b.uuid.clone()) == frontend.default_backend);
|
||||||
|
|
||||||
let mut health_check = None;
|
let mut health_check = None;
|
||||||
match matching_backend {
|
match matching_backend {
|
||||||
@@ -116,8 +110,7 @@ pub(crate) fn haproxy_xml_config_to_harmony_loadbalancer(
|
|||||||
}
|
}
|
||||||
None => {
|
None => {
|
||||||
warn!(
|
warn!(
|
||||||
"HAProxy config could not find a matching backend for frontend {:?}",
|
"HAProxy config could not find a matching backend for frontend {frontend:?}"
|
||||||
frontend
|
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -152,11 +145,11 @@ pub(crate) fn get_servers_for_backend(
|
|||||||
.servers
|
.servers
|
||||||
.iter()
|
.iter()
|
||||||
.filter_map(|server| {
|
.filter_map(|server| {
|
||||||
|
let address = server.address.clone()?;
|
||||||
|
let port = server.port?;
|
||||||
|
|
||||||
if backend_servers.contains(&server.uuid.as_str()) {
|
if backend_servers.contains(&server.uuid.as_str()) {
|
||||||
return Some(BackendServer {
|
return Some(BackendServer { address, port });
|
||||||
address: server.address.clone(),
|
|
||||||
port: server.port,
|
|
||||||
});
|
|
||||||
}
|
}
|
||||||
None
|
None
|
||||||
})
|
})
|
||||||
@@ -347,7 +340,7 @@ pub(crate) fn harmony_load_balancer_service_to_haproxy_xml(
|
|||||||
name: format!("frontend_{}", service.listening_port),
|
name: format!("frontend_{}", service.listening_port),
|
||||||
bind: service.listening_port.to_string(),
|
bind: service.listening_port.to_string(),
|
||||||
mode: "tcp".to_string(), // TODO do not depend on health check here
|
mode: "tcp".to_string(), // TODO do not depend on health check here
|
||||||
default_backend: backend.uuid.clone(),
|
default_backend: Some(backend.uuid.clone()),
|
||||||
..Default::default()
|
..Default::default()
|
||||||
};
|
};
|
||||||
info!("HAPRoxy frontend and backend mode currently hardcoded to tcp");
|
info!("HAPRoxy frontend and backend mode currently hardcoded to tcp");
|
||||||
@@ -361,8 +354,8 @@ fn server_to_haproxy_server(server: &BackendServer) -> HAProxyServer {
|
|||||||
uuid: Uuid::new_v4().to_string(),
|
uuid: Uuid::new_v4().to_string(),
|
||||||
name: format!("{}_{}", &server.address, &server.port),
|
name: format!("{}_{}", &server.address, &server.port),
|
||||||
enabled: 1,
|
enabled: 1,
|
||||||
address: server.address.clone(),
|
address: Some(server.address.clone()),
|
||||||
port: server.port,
|
port: Some(server.port),
|
||||||
mode: "active".to_string(),
|
mode: "active".to_string(),
|
||||||
server_type: "static".to_string(),
|
server_type: "static".to_string(),
|
||||||
..Default::default()
|
..Default::default()
|
||||||
@@ -385,8 +378,8 @@ mod tests {
|
|||||||
let mut haproxy = HAProxy::default();
|
let mut haproxy = HAProxy::default();
|
||||||
let server = HAProxyServer {
|
let server = HAProxyServer {
|
||||||
uuid: "server1".to_string(),
|
uuid: "server1".to_string(),
|
||||||
address: "192.168.1.1".to_string(),
|
address: Some("192.168.1.1".to_string()),
|
||||||
port: 80,
|
port: Some(80),
|
||||||
..Default::default()
|
..Default::default()
|
||||||
};
|
};
|
||||||
haproxy.servers.servers.push(server);
|
haproxy.servers.servers.push(server);
|
||||||
@@ -411,8 +404,8 @@ mod tests {
|
|||||||
let mut haproxy = HAProxy::default();
|
let mut haproxy = HAProxy::default();
|
||||||
let server = HAProxyServer {
|
let server = HAProxyServer {
|
||||||
uuid: "server1".to_string(),
|
uuid: "server1".to_string(),
|
||||||
address: "192.168.1.1".to_string(),
|
address: Some("192.168.1.1".to_string()),
|
||||||
port: 80,
|
port: Some(80),
|
||||||
..Default::default()
|
..Default::default()
|
||||||
};
|
};
|
||||||
haproxy.servers.servers.push(server);
|
haproxy.servers.servers.push(server);
|
||||||
@@ -431,8 +424,8 @@ mod tests {
|
|||||||
let mut haproxy = HAProxy::default();
|
let mut haproxy = HAProxy::default();
|
||||||
let server = HAProxyServer {
|
let server = HAProxyServer {
|
||||||
uuid: "server1".to_string(),
|
uuid: "server1".to_string(),
|
||||||
address: "192.168.1.1".to_string(),
|
address: Some("192.168.1.1".to_string()),
|
||||||
port: 80,
|
port: Some(80),
|
||||||
..Default::default()
|
..Default::default()
|
||||||
};
|
};
|
||||||
haproxy.servers.servers.push(server);
|
haproxy.servers.servers.push(server);
|
||||||
@@ -453,16 +446,16 @@ mod tests {
|
|||||||
let mut haproxy = HAProxy::default();
|
let mut haproxy = HAProxy::default();
|
||||||
let server = HAProxyServer {
|
let server = HAProxyServer {
|
||||||
uuid: "server1".to_string(),
|
uuid: "server1".to_string(),
|
||||||
address: "some-hostname.test.mcd".to_string(),
|
address: Some("some-hostname.test.mcd".to_string()),
|
||||||
port: 80,
|
port: Some(80),
|
||||||
..Default::default()
|
..Default::default()
|
||||||
};
|
};
|
||||||
haproxy.servers.servers.push(server);
|
haproxy.servers.servers.push(server);
|
||||||
|
|
||||||
let server = HAProxyServer {
|
let server = HAProxyServer {
|
||||||
uuid: "server2".to_string(),
|
uuid: "server2".to_string(),
|
||||||
address: "192.168.1.2".to_string(),
|
address: Some("192.168.1.2".to_string()),
|
||||||
port: 8080,
|
port: Some(8080),
|
||||||
..Default::default()
|
..Default::default()
|
||||||
};
|
};
|
||||||
haproxy.servers.servers.push(server);
|
haproxy.servers.servers.push(server);
|
||||||
|
|||||||
@@ -25,6 +25,8 @@ impl OPNSenseFirewall {
|
|||||||
self.host.ip
|
self.host.ip
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// panics : if the opnsense config file cannot be loaded by the underlying opnsense_config
|
||||||
|
/// crate
|
||||||
pub async fn new(host: LogicalHost, port: Option<u16>, username: &str, password: &str) -> Self {
|
pub async fn new(host: LogicalHost, port: Option<u16>, username: &str, password: &str) -> Self {
|
||||||
Self {
|
Self {
|
||||||
opnsense_config: Arc::new(RwLock::new(
|
opnsense_config: Arc::new(RwLock::new(
|
||||||
|
|||||||
@@ -1,4 +1,5 @@
|
|||||||
use async_trait::async_trait;
|
use async_trait::async_trait;
|
||||||
|
use harmony_macros::hurl;
|
||||||
use kube::{Api, api::GroupVersionKind};
|
use kube::{Api, api::GroupVersionKind};
|
||||||
use log::{debug, warn};
|
use log::{debug, warn};
|
||||||
use non_blank_string_rs::NonBlankString;
|
use non_blank_string_rs::NonBlankString;
|
||||||
@@ -1051,7 +1052,7 @@ commitServer:
|
|||||||
install_only: false,
|
install_only: false,
|
||||||
repository: Some(HelmRepository::new(
|
repository: Some(HelmRepository::new(
|
||||||
"argo".to_string(),
|
"argo".to_string(),
|
||||||
url::Url::parse("https://argoproj.github.io/argo-helm").unwrap(),
|
hurl!("https://argoproj.github.io/argo-helm"),
|
||||||
true,
|
true,
|
||||||
)),
|
)),
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -2,7 +2,11 @@ use crate::modules::application::{
|
|||||||
Application, ApplicationFeature, InstallationError, InstallationOutcome,
|
Application, ApplicationFeature, InstallationError, InstallationOutcome,
|
||||||
};
|
};
|
||||||
use crate::modules::monitoring::application_monitoring::application_monitoring_score::ApplicationMonitoringScore;
|
use crate::modules::monitoring::application_monitoring::application_monitoring_score::ApplicationMonitoringScore;
|
||||||
|
use crate::modules::monitoring::grafana::grafana::Grafana;
|
||||||
use crate::modules::monitoring::kube_prometheus::crd::crd_alertmanager_config::CRDPrometheus;
|
use crate::modules::monitoring::kube_prometheus::crd::crd_alertmanager_config::CRDPrometheus;
|
||||||
|
use crate::modules::monitoring::kube_prometheus::crd::service_monitor::{
|
||||||
|
ServiceMonitor, ServiceMonitorSpec,
|
||||||
|
};
|
||||||
use crate::topology::MultiTargetTopology;
|
use crate::topology::MultiTargetTopology;
|
||||||
use crate::topology::ingress::Ingress;
|
use crate::topology::ingress::Ingress;
|
||||||
use crate::{
|
use crate::{
|
||||||
@@ -14,7 +18,7 @@ use crate::{
|
|||||||
topology::{HelmCommand, K8sclient, Topology, tenant::TenantManager},
|
topology::{HelmCommand, K8sclient, Topology, tenant::TenantManager},
|
||||||
};
|
};
|
||||||
use crate::{
|
use crate::{
|
||||||
modules::prometheus::prometheus::PrometheusApplicationMonitoring,
|
modules::prometheus::prometheus::PrometheusMonitoring,
|
||||||
topology::oberservability::monitoring::AlertReceiver,
|
topology::oberservability::monitoring::AlertReceiver,
|
||||||
};
|
};
|
||||||
use async_trait::async_trait;
|
use async_trait::async_trait;
|
||||||
@@ -22,6 +26,7 @@ use base64::{Engine as _, engine::general_purpose};
|
|||||||
use harmony_secret::SecretManager;
|
use harmony_secret::SecretManager;
|
||||||
use harmony_secret_derive::Secret;
|
use harmony_secret_derive::Secret;
|
||||||
use harmony_types::net::Url;
|
use harmony_types::net::Url;
|
||||||
|
use kube::api::ObjectMeta;
|
||||||
use log::{debug, info};
|
use log::{debug, info};
|
||||||
use serde::{Deserialize, Serialize};
|
use serde::{Deserialize, Serialize};
|
||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
@@ -40,7 +45,8 @@ impl<
|
|||||||
+ TenantManager
|
+ TenantManager
|
||||||
+ K8sclient
|
+ K8sclient
|
||||||
+ MultiTargetTopology
|
+ MultiTargetTopology
|
||||||
+ PrometheusApplicationMonitoring<CRDPrometheus>
|
+ PrometheusMonitoring<CRDPrometheus>
|
||||||
|
+ Grafana
|
||||||
+ Ingress
|
+ Ingress
|
||||||
+ std::fmt::Debug,
|
+ std::fmt::Debug,
|
||||||
> ApplicationFeature<T> for Monitoring
|
> ApplicationFeature<T> for Monitoring
|
||||||
@@ -57,10 +63,20 @@ impl<
|
|||||||
.unwrap_or_else(|| self.application.name());
|
.unwrap_or_else(|| self.application.name());
|
||||||
let domain = topology.get_domain("ntfy").await.unwrap();
|
let domain = topology.get_domain("ntfy").await.unwrap();
|
||||||
|
|
||||||
|
let app_service_monitor = ServiceMonitor {
|
||||||
|
metadata: ObjectMeta {
|
||||||
|
name: Some(self.application.name()),
|
||||||
|
namespace: Some(namespace.clone()),
|
||||||
|
..Default::default()
|
||||||
|
},
|
||||||
|
spec: ServiceMonitorSpec::default(),
|
||||||
|
};
|
||||||
|
|
||||||
let mut alerting_score = ApplicationMonitoringScore {
|
let mut alerting_score = ApplicationMonitoringScore {
|
||||||
sender: CRDPrometheus {
|
sender: CRDPrometheus {
|
||||||
namespace: namespace.clone(),
|
namespace: namespace.clone(),
|
||||||
client: topology.k8s_client().await.unwrap(),
|
client: topology.k8s_client().await.unwrap(),
|
||||||
|
service_monitor: vec![app_service_monitor],
|
||||||
},
|
},
|
||||||
application: self.application.clone(),
|
application: self.application.clone(),
|
||||||
receivers: self.alert_receiver.clone(),
|
receivers: self.alert_receiver.clone(),
|
||||||
|
|||||||
@@ -18,7 +18,7 @@ use crate::{
|
|||||||
topology::{HelmCommand, K8sclient, Topology, tenant::TenantManager},
|
topology::{HelmCommand, K8sclient, Topology, tenant::TenantManager},
|
||||||
};
|
};
|
||||||
use crate::{
|
use crate::{
|
||||||
modules::prometheus::prometheus::PrometheusApplicationMonitoring,
|
modules::prometheus::prometheus::PrometheusMonitoring,
|
||||||
topology::oberservability::monitoring::AlertReceiver,
|
topology::oberservability::monitoring::AlertReceiver,
|
||||||
};
|
};
|
||||||
use async_trait::async_trait;
|
use async_trait::async_trait;
|
||||||
@@ -42,7 +42,7 @@ impl<
|
|||||||
+ MultiTargetTopology
|
+ MultiTargetTopology
|
||||||
+ Ingress
|
+ Ingress
|
||||||
+ std::fmt::Debug
|
+ std::fmt::Debug
|
||||||
+ PrometheusApplicationMonitoring<RHOBObservability>,
|
+ PrometheusMonitoring<RHOBObservability>,
|
||||||
> ApplicationFeature<T> for Monitoring
|
> ApplicationFeature<T> for Monitoring
|
||||||
{
|
{
|
||||||
async fn ensure_installed(
|
async fn ensure_installed(
|
||||||
|
|||||||
209
harmony/src/modules/cert_manager/cluster_issuer.rs
Normal file
209
harmony/src/modules/cert_manager/cluster_issuer.rs
Normal file
@@ -0,0 +1,209 @@
|
|||||||
|
use std::sync::Arc;
|
||||||
|
|
||||||
|
use async_trait::async_trait;
|
||||||
|
use harmony_types::id::Id;
|
||||||
|
use kube::{CustomResource, api::ObjectMeta};
|
||||||
|
use schemars::JsonSchema;
|
||||||
|
use serde::{Deserialize, Serialize};
|
||||||
|
|
||||||
|
use crate::{
|
||||||
|
data::Version,
|
||||||
|
interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
|
||||||
|
inventory::Inventory,
|
||||||
|
score::Score,
|
||||||
|
topology::{K8sclient, Topology, k8s::K8sClient},
|
||||||
|
};
|
||||||
|
|
||||||
|
#[derive(Clone, Debug, Serialize)]
|
||||||
|
pub struct ClusterIssuerScore {
|
||||||
|
email: String,
|
||||||
|
server: String,
|
||||||
|
issuer_name: String,
|
||||||
|
namespace: String,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<T: Topology + K8sclient> Score<T> for ClusterIssuerScore {
|
||||||
|
fn name(&self) -> String {
|
||||||
|
"ClusterIssuerScore".to_string()
|
||||||
|
}
|
||||||
|
|
||||||
|
#[doc(hidden)]
|
||||||
|
fn create_interpret(&self) -> Box<dyn Interpret<T>> {
|
||||||
|
Box::new(ClusterIssuerInterpret {
|
||||||
|
score: self.clone(),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Debug, Clone)]
|
||||||
|
pub struct ClusterIssuerInterpret {
|
||||||
|
score: ClusterIssuerScore,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[async_trait]
|
||||||
|
impl<T: Topology + K8sclient> Interpret<T> for ClusterIssuerInterpret {
|
||||||
|
async fn execute(
|
||||||
|
&self,
|
||||||
|
_inventory: &Inventory,
|
||||||
|
topology: &T,
|
||||||
|
) -> Result<Outcome, InterpretError> {
|
||||||
|
self.apply_cluster_issuer(topology.k8s_client().await.unwrap())
|
||||||
|
.await
|
||||||
|
}
|
||||||
|
|
||||||
|
fn get_name(&self) -> InterpretName {
|
||||||
|
InterpretName::Custom("ClusterIssuer")
|
||||||
|
}
|
||||||
|
|
||||||
|
fn get_version(&self) -> Version {
|
||||||
|
todo!()
|
||||||
|
}
|
||||||
|
|
||||||
|
fn get_status(&self) -> InterpretStatus {
|
||||||
|
todo!()
|
||||||
|
}
|
||||||
|
|
||||||
|
fn get_children(&self) -> Vec<Id> {
|
||||||
|
todo!()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl ClusterIssuerInterpret {
|
||||||
|
async fn validate_cert_manager(
|
||||||
|
&self,
|
||||||
|
client: &Arc<K8sClient>,
|
||||||
|
) -> Result<Outcome, InterpretError> {
|
||||||
|
let cert_manager = "cert-manager".to_string();
|
||||||
|
let operator_namespace = "openshift-operators".to_string();
|
||||||
|
match client
|
||||||
|
.get_deployment(&cert_manager, Some(&operator_namespace))
|
||||||
|
.await
|
||||||
|
{
|
||||||
|
Ok(Some(deployment)) => {
|
||||||
|
if let Some(status) = deployment.status {
|
||||||
|
let ready_count = status.ready_replicas.unwrap_or(0);
|
||||||
|
if ready_count >= 1 {
|
||||||
|
return Ok(Outcome::success(format!(
|
||||||
|
"'{}' is ready with {} replica(s).",
|
||||||
|
&cert_manager, ready_count
|
||||||
|
)));
|
||||||
|
} else {
|
||||||
|
return Err(InterpretError::new(
|
||||||
|
"cert-manager operator not ready in cluster".to_string(),
|
||||||
|
));
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
Err(InterpretError::new(format!(
|
||||||
|
"failed to get deployment status {} in ns {}",
|
||||||
|
&cert_manager, &operator_namespace
|
||||||
|
)))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
Ok(None) => Err(InterpretError::new(format!(
|
||||||
|
"Deployment '{}' not found in namespace '{}'.",
|
||||||
|
&cert_manager, &operator_namespace
|
||||||
|
))),
|
||||||
|
Err(e) => Err(InterpretError::new(format!(
|
||||||
|
"Failed to query for deployment '{}': {}",
|
||||||
|
&cert_manager, e
|
||||||
|
))),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn build_cluster_issuer(&self) -> Result<ClusterIssuer, InterpretError> {
|
||||||
|
let issuer_name = &self.score.issuer_name;
|
||||||
|
let email = &self.score.email;
|
||||||
|
let server = &self.score.server;
|
||||||
|
let namespace = &self.score.namespace;
|
||||||
|
let cluster_issuer = ClusterIssuer {
|
||||||
|
metadata: ObjectMeta {
|
||||||
|
name: Some(issuer_name.to_string()),
|
||||||
|
namespace: Some(namespace.to_string()),
|
||||||
|
..Default::default()
|
||||||
|
},
|
||||||
|
spec: ClusterIssuerSpec {
|
||||||
|
acme: AcmeSpec {
|
||||||
|
email: email.to_string(),
|
||||||
|
private_key_secret_ref: PrivateKeySecretRef {
|
||||||
|
name: issuer_name.to_string(),
|
||||||
|
},
|
||||||
|
server: server.to_string(),
|
||||||
|
solvers: vec![SolverSpec {
|
||||||
|
http01: Some(Http01Solver {
|
||||||
|
ingress: Http01Ingress {
|
||||||
|
class: "nginx".to_string(),
|
||||||
|
},
|
||||||
|
}),
|
||||||
|
}],
|
||||||
|
},
|
||||||
|
},
|
||||||
|
};
|
||||||
|
|
||||||
|
Ok(cluster_issuer)
|
||||||
|
}
|
||||||
|
|
||||||
|
pub async fn apply_cluster_issuer(
|
||||||
|
&self,
|
||||||
|
client: Arc<K8sClient>,
|
||||||
|
) -> Result<Outcome, InterpretError> {
|
||||||
|
let namespace = self.score.namespace.clone();
|
||||||
|
self.validate_cert_manager(&client).await?;
|
||||||
|
let cluster_issuer = self.build_cluster_issuer().unwrap();
|
||||||
|
client
|
||||||
|
.apply_yaml(
|
||||||
|
&serde_yaml::to_value(cluster_issuer).unwrap(),
|
||||||
|
Some(&namespace),
|
||||||
|
)
|
||||||
|
.await?;
|
||||||
|
Ok(Outcome::success(format!(
|
||||||
|
"successfully deployed cluster operator: {} in namespace: {}",
|
||||||
|
self.score.issuer_name, self.score.namespace
|
||||||
|
)))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(CustomResource, Deserialize, Serialize, Clone, Debug, JsonSchema)]
|
||||||
|
#[kube(
|
||||||
|
group = "cert-manager.io",
|
||||||
|
version = "v1",
|
||||||
|
kind = "ClusterIssuer",
|
||||||
|
plural = "clusterissuers"
|
||||||
|
)]
|
||||||
|
#[serde(rename_all = "camelCase")]
|
||||||
|
pub struct ClusterIssuerSpec {
|
||||||
|
pub acme: AcmeSpec,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Deserialize, Serialize, Clone, Debug, JsonSchema)]
|
||||||
|
#[serde(rename_all = "camelCase")]
|
||||||
|
pub struct AcmeSpec {
|
||||||
|
pub email: String,
|
||||||
|
pub private_key_secret_ref: PrivateKeySecretRef,
|
||||||
|
pub server: String,
|
||||||
|
pub solvers: Vec<SolverSpec>,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Deserialize, Serialize, Clone, Debug, JsonSchema)]
|
||||||
|
#[serde(rename_all = "camelCase")]
|
||||||
|
pub struct PrivateKeySecretRef {
|
||||||
|
pub name: String,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Deserialize, Serialize, Clone, Debug, JsonSchema)]
|
||||||
|
#[serde(rename_all = "camelCase")]
|
||||||
|
pub struct SolverSpec {
|
||||||
|
pub http01: Option<Http01Solver>,
|
||||||
|
// Other solver types (e.g., dns01) would go here as Options
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Deserialize, Serialize, Clone, Debug, JsonSchema)]
|
||||||
|
#[serde(rename_all = "camelCase")]
|
||||||
|
pub struct Http01Solver {
|
||||||
|
pub ingress: Http01Ingress,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Deserialize, Serialize, Clone, Debug, JsonSchema)]
|
||||||
|
#[serde(rename_all = "camelCase")]
|
||||||
|
pub struct Http01Ingress {
|
||||||
|
pub class: String,
|
||||||
|
}
|
||||||
@@ -1,5 +1,6 @@
|
|||||||
use std::{collections::HashMap, str::FromStr};
|
use std::{collections::HashMap, str::FromStr};
|
||||||
|
|
||||||
|
use harmony_macros::hurl;
|
||||||
use non_blank_string_rs::NonBlankString;
|
use non_blank_string_rs::NonBlankString;
|
||||||
use serde::Serialize;
|
use serde::Serialize;
|
||||||
use url::Url;
|
use url::Url;
|
||||||
@@ -33,7 +34,7 @@ impl<T: Topology + HelmCommand> Score<T> for CertManagerHelmScore {
|
|||||||
install_only: true,
|
install_only: true,
|
||||||
repository: Some(HelmRepository::new(
|
repository: Some(HelmRepository::new(
|
||||||
"jetstack".to_string(),
|
"jetstack".to_string(),
|
||||||
Url::parse("https://charts.jetstack.io").unwrap(),
|
hurl!("https://charts.jetstack.io"),
|
||||||
true,
|
true,
|
||||||
)),
|
)),
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,2 +1,3 @@
|
|||||||
|
pub mod cluster_issuer;
|
||||||
mod helm;
|
mod helm;
|
||||||
pub use helm::*;
|
pub use helm::*;
|
||||||
|
|||||||
@@ -5,6 +5,7 @@ use crate::score::Score;
|
|||||||
use crate::topology::{HelmCommand, Topology};
|
use crate::topology::{HelmCommand, Topology};
|
||||||
use async_trait::async_trait;
|
use async_trait::async_trait;
|
||||||
use harmony_types::id::Id;
|
use harmony_types::id::Id;
|
||||||
|
use harmony_types::net::Url;
|
||||||
use helm_wrapper_rs;
|
use helm_wrapper_rs;
|
||||||
use helm_wrapper_rs::blocking::{DefaultHelmExecutor, HelmExecutor};
|
use helm_wrapper_rs::blocking::{DefaultHelmExecutor, HelmExecutor};
|
||||||
use log::{debug, info, warn};
|
use log::{debug, info, warn};
|
||||||
@@ -15,7 +16,6 @@ use std::path::Path;
|
|||||||
use std::process::{Command, Output, Stdio};
|
use std::process::{Command, Output, Stdio};
|
||||||
use std::str::FromStr;
|
use std::str::FromStr;
|
||||||
use temp_file::TempFile;
|
use temp_file::TempFile;
|
||||||
use url::Url;
|
|
||||||
|
|
||||||
#[derive(Debug, Clone, Serialize)]
|
#[derive(Debug, Clone, Serialize)]
|
||||||
pub struct HelmRepository {
|
pub struct HelmRepository {
|
||||||
@@ -78,7 +78,8 @@ impl HelmChartInterpret {
|
|||||||
repo.name, repo.url, repo.force_update
|
repo.name, repo.url, repo.force_update
|
||||||
);
|
);
|
||||||
|
|
||||||
let mut add_args = vec!["repo", "add", &repo.name, repo.url.as_str()];
|
let repo_url = repo.url.to_string();
|
||||||
|
let mut add_args = vec!["repo", "add", &repo.name, &repo_url];
|
||||||
if repo.force_update {
|
if repo.force_update {
|
||||||
add_args.push("--force-update");
|
add_args.push("--force-update");
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,364 +0,0 @@
|
|||||||
use async_trait::async_trait;
|
|
||||||
use log::debug;
|
|
||||||
use serde::Serialize;
|
|
||||||
use std::collections::HashMap;
|
|
||||||
use std::io::ErrorKind;
|
|
||||||
use std::path::PathBuf;
|
|
||||||
use std::process::{Command, Output};
|
|
||||||
use temp_dir::{self, TempDir};
|
|
||||||
use temp_file::TempFile;
|
|
||||||
|
|
||||||
use crate::data::Version;
|
|
||||||
use crate::interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome};
|
|
||||||
use crate::inventory::Inventory;
|
|
||||||
use crate::score::Score;
|
|
||||||
use crate::topology::{HelmCommand, K8sclient, Topology};
|
|
||||||
use harmony_types::id::Id;
|
|
||||||
|
|
||||||
#[derive(Clone)]
|
|
||||||
pub struct HelmCommandExecutor {
|
|
||||||
pub env: HashMap<String, String>,
|
|
||||||
pub path: Option<PathBuf>,
|
|
||||||
pub args: Vec<String>,
|
|
||||||
pub api_versions: Option<Vec<String>>,
|
|
||||||
pub kube_version: String,
|
|
||||||
pub debug: Option<bool>,
|
|
||||||
pub globals: HelmGlobals,
|
|
||||||
pub chart: HelmChart,
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Clone)]
|
|
||||||
pub struct HelmGlobals {
|
|
||||||
pub chart_home: Option<PathBuf>,
|
|
||||||
pub config_home: Option<PathBuf>,
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Debug, Clone, Serialize)]
|
|
||||||
pub struct HelmChart {
|
|
||||||
pub name: String,
|
|
||||||
pub version: Option<String>,
|
|
||||||
pub repo: Option<String>,
|
|
||||||
pub release_name: Option<String>,
|
|
||||||
pub namespace: Option<String>,
|
|
||||||
pub additional_values_files: Vec<PathBuf>,
|
|
||||||
pub values_file: Option<PathBuf>,
|
|
||||||
pub values_inline: Option<String>,
|
|
||||||
pub include_crds: Option<bool>,
|
|
||||||
pub skip_hooks: Option<bool>,
|
|
||||||
pub api_versions: Option<Vec<String>>,
|
|
||||||
pub kube_version: Option<String>,
|
|
||||||
pub name_template: String,
|
|
||||||
pub skip_tests: Option<bool>,
|
|
||||||
pub debug: Option<bool>,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl HelmCommandExecutor {
|
|
||||||
pub fn generate(mut self) -> Result<String, std::io::Error> {
|
|
||||||
if self.globals.chart_home.is_none() {
|
|
||||||
self.globals.chart_home = Some(PathBuf::from("charts"));
|
|
||||||
}
|
|
||||||
|
|
||||||
if self
|
|
||||||
.clone()
|
|
||||||
.chart
|
|
||||||
.clone()
|
|
||||||
.chart_exists_locally(self.clone().globals.chart_home.unwrap())
|
|
||||||
.is_none()
|
|
||||||
{
|
|
||||||
if self.chart.repo.is_none() {
|
|
||||||
return Err(std::io::Error::new(
|
|
||||||
ErrorKind::Other,
|
|
||||||
"Chart doesn't exist locally and no repo specified",
|
|
||||||
));
|
|
||||||
}
|
|
||||||
self.clone().run_command(
|
|
||||||
self.chart
|
|
||||||
.clone()
|
|
||||||
.pull_command(self.globals.chart_home.clone().unwrap()),
|
|
||||||
)?;
|
|
||||||
}
|
|
||||||
|
|
||||||
let out = self.clone().run_command(
|
|
||||||
self.chart
|
|
||||||
.clone()
|
|
||||||
.helm_args(self.globals.chart_home.clone().unwrap()),
|
|
||||||
)?;
|
|
||||||
|
|
||||||
// TODO: don't use unwrap here
|
|
||||||
let s = String::from_utf8(out.stdout).unwrap();
|
|
||||||
debug!("helm stderr: {}", String::from_utf8(out.stderr).unwrap());
|
|
||||||
debug!("helm status: {}", out.status);
|
|
||||||
debug!("helm output: {s}");
|
|
||||||
|
|
||||||
let clean = s.split_once("---").unwrap().1;
|
|
||||||
|
|
||||||
Ok(clean.to_string())
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn version(self) -> Result<String, std::io::Error> {
|
|
||||||
let out = self.run_command(vec![
|
|
||||||
"version".to_string(),
|
|
||||||
"-c".to_string(),
|
|
||||||
"--short".to_string(),
|
|
||||||
])?;
|
|
||||||
|
|
||||||
// TODO: don't use unwrap
|
|
||||||
Ok(String::from_utf8(out.stdout).unwrap())
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn run_command(mut self, mut args: Vec<String>) -> Result<Output, std::io::Error> {
|
|
||||||
if let Some(d) = self.debug {
|
|
||||||
if d {
|
|
||||||
args.push("--debug".to_string());
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
let path = if let Some(p) = self.path {
|
|
||||||
p
|
|
||||||
} else {
|
|
||||||
PathBuf::from("helm")
|
|
||||||
};
|
|
||||||
|
|
||||||
let config_home = match self.globals.config_home {
|
|
||||||
Some(p) => p,
|
|
||||||
None => PathBuf::from(TempDir::new()?.path()),
|
|
||||||
};
|
|
||||||
|
|
||||||
if let Some(yaml_str) = self.chart.values_inline {
|
|
||||||
let tf: TempFile = temp_file::with_contents(yaml_str.as_bytes());
|
|
||||||
self.chart
|
|
||||||
.additional_values_files
|
|
||||||
.push(PathBuf::from(tf.path()));
|
|
||||||
};
|
|
||||||
|
|
||||||
self.env.insert(
|
|
||||||
"HELM_CONFIG_HOME".to_string(),
|
|
||||||
config_home.to_str().unwrap().to_string(),
|
|
||||||
);
|
|
||||||
self.env.insert(
|
|
||||||
"HELM_CACHE_HOME".to_string(),
|
|
||||||
config_home.to_str().unwrap().to_string(),
|
|
||||||
);
|
|
||||||
self.env.insert(
|
|
||||||
"HELM_DATA_HOME".to_string(),
|
|
||||||
config_home.to_str().unwrap().to_string(),
|
|
||||||
);
|
|
||||||
|
|
||||||
Command::new(path).envs(self.env).args(args).output()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl HelmChart {
|
|
||||||
pub fn chart_exists_locally(self, chart_home: PathBuf) -> Option<PathBuf> {
|
|
||||||
let chart_path =
|
|
||||||
PathBuf::from(chart_home.to_str().unwrap().to_string() + "/" + &self.name.to_string());
|
|
||||||
|
|
||||||
if chart_path.exists() {
|
|
||||||
Some(chart_path)
|
|
||||||
} else {
|
|
||||||
None
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn pull_command(self, chart_home: PathBuf) -> Vec<String> {
|
|
||||||
let mut args = vec![
|
|
||||||
"pull".to_string(),
|
|
||||||
"--untar".to_string(),
|
|
||||||
"--untardir".to_string(),
|
|
||||||
chart_home.to_str().unwrap().to_string(),
|
|
||||||
];
|
|
||||||
|
|
||||||
match self.repo {
|
|
||||||
Some(r) => {
|
|
||||||
if r.starts_with("oci://") {
|
|
||||||
args.push(
|
|
||||||
r.trim_end_matches("/").to_string() + "/" + self.name.clone().as_str(),
|
|
||||||
);
|
|
||||||
} else {
|
|
||||||
args.push("--repo".to_string());
|
|
||||||
args.push(r.to_string());
|
|
||||||
|
|
||||||
args.push(self.name);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
None => args.push(self.name),
|
|
||||||
};
|
|
||||||
|
|
||||||
if let Some(v) = self.version {
|
|
||||||
args.push("--version".to_string());
|
|
||||||
args.push(v.to_string());
|
|
||||||
}
|
|
||||||
|
|
||||||
args
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn helm_args(self, chart_home: PathBuf) -> Vec<String> {
|
|
||||||
let mut args: Vec<String> = vec!["template".to_string()];
|
|
||||||
|
|
||||||
match self.release_name {
|
|
||||||
Some(rn) => args.push(rn.to_string()),
|
|
||||||
None => args.push("--generate-name".to_string()),
|
|
||||||
}
|
|
||||||
|
|
||||||
args.push(
|
|
||||||
PathBuf::from(chart_home.to_str().unwrap().to_string() + "/" + self.name.as_str())
|
|
||||||
.to_str()
|
|
||||||
.unwrap()
|
|
||||||
.to_string(),
|
|
||||||
);
|
|
||||||
|
|
||||||
if let Some(n) = self.namespace {
|
|
||||||
args.push("--namespace".to_string());
|
|
||||||
args.push(n.to_string());
|
|
||||||
}
|
|
||||||
|
|
||||||
if let Some(f) = self.values_file {
|
|
||||||
args.push("-f".to_string());
|
|
||||||
args.push(f.to_str().unwrap().to_string());
|
|
||||||
}
|
|
||||||
|
|
||||||
for f in self.additional_values_files {
|
|
||||||
args.push("-f".to_string());
|
|
||||||
args.push(f.to_str().unwrap().to_string());
|
|
||||||
}
|
|
||||||
|
|
||||||
if let Some(vv) = self.api_versions {
|
|
||||||
for v in vv {
|
|
||||||
args.push("--api-versions".to_string());
|
|
||||||
args.push(v);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if let Some(kv) = self.kube_version {
|
|
||||||
args.push("--kube-version".to_string());
|
|
||||||
args.push(kv);
|
|
||||||
}
|
|
||||||
|
|
||||||
if let Some(crd) = self.include_crds {
|
|
||||||
if crd {
|
|
||||||
args.push("--include-crds".to_string());
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if let Some(st) = self.skip_tests {
|
|
||||||
if st {
|
|
||||||
args.push("--skip-tests".to_string());
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if let Some(sh) = self.skip_hooks {
|
|
||||||
if sh {
|
|
||||||
args.push("--no-hooks".to_string());
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if let Some(d) = self.debug {
|
|
||||||
if d {
|
|
||||||
args.push("--debug".to_string());
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
args
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Debug, Clone, Serialize)]
|
|
||||||
pub struct HelmChartScoreV2 {
|
|
||||||
pub chart: HelmChart,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<T: Topology + K8sclient + HelmCommand> Score<T> for HelmChartScoreV2 {
|
|
||||||
fn create_interpret(&self) -> Box<dyn Interpret<T>> {
|
|
||||||
Box::new(HelmChartInterpretV2 {
|
|
||||||
score: self.clone(),
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
fn name(&self) -> String {
|
|
||||||
format!(
|
|
||||||
"{} {} HelmChartScoreV2",
|
|
||||||
self.chart
|
|
||||||
.release_name
|
|
||||||
.clone()
|
|
||||||
.unwrap_or("Unknown".to_string()),
|
|
||||||
self.chart.name
|
|
||||||
)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Debug, Serialize)]
|
|
||||||
pub struct HelmChartInterpretV2 {
|
|
||||||
pub score: HelmChartScoreV2,
|
|
||||||
}
|
|
||||||
impl HelmChartInterpretV2 {}
|
|
||||||
|
|
||||||
#[async_trait]
|
|
||||||
impl<T: Topology + K8sclient + HelmCommand> Interpret<T> for HelmChartInterpretV2 {
|
|
||||||
async fn execute(
|
|
||||||
&self,
|
|
||||||
_inventory: &Inventory,
|
|
||||||
_topology: &T,
|
|
||||||
) -> Result<Outcome, InterpretError> {
|
|
||||||
let _ns = self
|
|
||||||
.score
|
|
||||||
.chart
|
|
||||||
.namespace
|
|
||||||
.as_ref()
|
|
||||||
.unwrap_or_else(|| todo!("Get namespace from active kubernetes cluster"));
|
|
||||||
|
|
||||||
let helm_executor = HelmCommandExecutor {
|
|
||||||
env: HashMap::new(),
|
|
||||||
path: None,
|
|
||||||
args: vec![],
|
|
||||||
api_versions: None,
|
|
||||||
kube_version: "v1.33.0".to_string(),
|
|
||||||
debug: Some(false),
|
|
||||||
globals: HelmGlobals {
|
|
||||||
chart_home: None,
|
|
||||||
config_home: None,
|
|
||||||
},
|
|
||||||
chart: self.score.chart.clone(),
|
|
||||||
};
|
|
||||||
|
|
||||||
// let mut helm_options = Vec::new();
|
|
||||||
// if self.score.create_namespace {
|
|
||||||
// helm_options.push(NonBlankString::from_str("--create-namespace").unwrap());
|
|
||||||
// }
|
|
||||||
|
|
||||||
let res = helm_executor.generate();
|
|
||||||
|
|
||||||
let _output = match res {
|
|
||||||
Ok(output) => output,
|
|
||||||
Err(err) => return Err(InterpretError::new(err.to_string())),
|
|
||||||
};
|
|
||||||
|
|
||||||
// TODO: implement actually applying the YAML from the templating in the generate function to a k8s cluster, having trouble passing in straight YAML into the k8s client
|
|
||||||
|
|
||||||
// let k8s_resource = k8s_openapi::serde_json::from_str(output.as_str()).unwrap();
|
|
||||||
|
|
||||||
// let client = topology
|
|
||||||
// .k8s_client()
|
|
||||||
// .await
|
|
||||||
// .expect("Environment should provide enough information to instanciate a client")
|
|
||||||
// .apply_namespaced(&vec![output], Some(ns.to_string().as_str()));
|
|
||||||
// match client.apply_yaml(output) {
|
|
||||||
// Ok(_) => return Ok(Outcome::success("Helm chart deployed".to_string())),
|
|
||||||
// Err(e) => return Err(InterpretError::new(e)),
|
|
||||||
// }
|
|
||||||
|
|
||||||
Ok(Outcome::success("Helm chart deployed".to_string()))
|
|
||||||
}
|
|
||||||
|
|
||||||
fn get_name(&self) -> InterpretName {
|
|
||||||
InterpretName::HelmCommand
|
|
||||||
}
|
|
||||||
fn get_version(&self) -> Version {
|
|
||||||
todo!()
|
|
||||||
}
|
|
||||||
fn get_status(&self) -> InterpretStatus {
|
|
||||||
todo!()
|
|
||||||
}
|
|
||||||
fn get_children(&self) -> Vec<Id> {
|
|
||||||
todo!()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -1,2 +1 @@
|
|||||||
pub mod chart;
|
pub mod chart;
|
||||||
pub mod command;
|
|
||||||
|
|||||||
@@ -38,13 +38,15 @@ impl<
|
|||||||
+ 'static
|
+ 'static
|
||||||
+ Send
|
+ Send
|
||||||
+ Clone,
|
+ Clone,
|
||||||
T: Topology,
|
T: Topology + K8sclient,
|
||||||
> Score<T> for K8sResourceScore<K>
|
> Score<T> for K8sResourceScore<K>
|
||||||
where
|
where
|
||||||
<K as kube::Resource>::DynamicType: Default,
|
<K as kube::Resource>::DynamicType: Default,
|
||||||
{
|
{
|
||||||
fn create_interpret(&self) -> Box<dyn Interpret<T>> {
|
fn create_interpret(&self) -> Box<dyn Interpret<T>> {
|
||||||
todo!()
|
Box::new(K8sResourceInterpret {
|
||||||
|
score: self.clone(),
|
||||||
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
fn name(&self) -> String {
|
fn name(&self) -> String {
|
||||||
|
|||||||
@@ -1,21 +1,23 @@
|
|||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
|
|
||||||
use async_trait::async_trait;
|
use log::debug;
|
||||||
use serde::Serialize;
|
use serde::Serialize;
|
||||||
|
|
||||||
use crate::{
|
use crate::{
|
||||||
data::Version,
|
interpret::Interpret,
|
||||||
interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
|
|
||||||
inventory::Inventory,
|
|
||||||
modules::{
|
modules::{
|
||||||
application::Application,
|
application::Application,
|
||||||
monitoring::kube_prometheus::crd::crd_alertmanager_config::CRDPrometheus,
|
monitoring::{
|
||||||
prometheus::prometheus::PrometheusApplicationMonitoring,
|
grafana::grafana::Grafana, kube_prometheus::crd::crd_alertmanager_config::CRDPrometheus,
|
||||||
|
},
|
||||||
|
prometheus::prometheus::PrometheusMonitoring,
|
||||||
},
|
},
|
||||||
score::Score,
|
score::Score,
|
||||||
topology::{PreparationOutcome, Topology, oberservability::monitoring::AlertReceiver},
|
topology::{
|
||||||
|
K8sclient, Topology,
|
||||||
|
oberservability::monitoring::{AlertReceiver, AlertingInterpret, ScrapeTarget},
|
||||||
|
},
|
||||||
};
|
};
|
||||||
use harmony_types::id::Id;
|
|
||||||
|
|
||||||
#[derive(Debug, Clone, Serialize)]
|
#[derive(Debug, Clone, Serialize)]
|
||||||
pub struct ApplicationMonitoringScore {
|
pub struct ApplicationMonitoringScore {
|
||||||
@@ -24,12 +26,16 @@ pub struct ApplicationMonitoringScore {
|
|||||||
pub receivers: Vec<Box<dyn AlertReceiver<CRDPrometheus>>>,
|
pub receivers: Vec<Box<dyn AlertReceiver<CRDPrometheus>>>,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<T: Topology + PrometheusApplicationMonitoring<CRDPrometheus>> Score<T>
|
impl<T: Topology + PrometheusMonitoring<CRDPrometheus> + K8sclient + Grafana> Score<T>
|
||||||
for ApplicationMonitoringScore
|
for ApplicationMonitoringScore
|
||||||
{
|
{
|
||||||
fn create_interpret(&self) -> Box<dyn Interpret<T>> {
|
fn create_interpret(&self) -> Box<dyn Interpret<T>> {
|
||||||
Box::new(ApplicationMonitoringInterpret {
|
debug!("creating alerting interpret");
|
||||||
score: self.clone(),
|
Box::new(AlertingInterpret {
|
||||||
|
sender: self.sender.clone(),
|
||||||
|
receivers: self.receivers.clone(),
|
||||||
|
rules: vec![],
|
||||||
|
scrape_targets: None,
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -40,55 +46,3 @@ impl<T: Topology + PrometheusApplicationMonitoring<CRDPrometheus>> Score<T>
|
|||||||
)
|
)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Debug)]
|
|
||||||
pub struct ApplicationMonitoringInterpret {
|
|
||||||
score: ApplicationMonitoringScore,
|
|
||||||
}
|
|
||||||
|
|
||||||
#[async_trait]
|
|
||||||
impl<T: Topology + PrometheusApplicationMonitoring<CRDPrometheus>> Interpret<T>
|
|
||||||
for ApplicationMonitoringInterpret
|
|
||||||
{
|
|
||||||
async fn execute(
|
|
||||||
&self,
|
|
||||||
inventory: &Inventory,
|
|
||||||
topology: &T,
|
|
||||||
) -> Result<Outcome, InterpretError> {
|
|
||||||
let result = topology
|
|
||||||
.install_prometheus(
|
|
||||||
&self.score.sender,
|
|
||||||
inventory,
|
|
||||||
Some(self.score.receivers.clone()),
|
|
||||||
)
|
|
||||||
.await;
|
|
||||||
|
|
||||||
match result {
|
|
||||||
Ok(outcome) => match outcome {
|
|
||||||
PreparationOutcome::Success { details: _ } => {
|
|
||||||
Ok(Outcome::success("Prometheus installed".into()))
|
|
||||||
}
|
|
||||||
PreparationOutcome::Noop => {
|
|
||||||
Ok(Outcome::noop("Prometheus installation skipped".into()))
|
|
||||||
}
|
|
||||||
},
|
|
||||||
Err(err) => Err(InterpretError::from(err)),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fn get_name(&self) -> InterpretName {
|
|
||||||
InterpretName::ApplicationMonitoring
|
|
||||||
}
|
|
||||||
|
|
||||||
fn get_version(&self) -> Version {
|
|
||||||
todo!()
|
|
||||||
}
|
|
||||||
|
|
||||||
fn get_status(&self) -> InterpretStatus {
|
|
||||||
todo!()
|
|
||||||
}
|
|
||||||
|
|
||||||
fn get_children(&self) -> Vec<Id> {
|
|
||||||
todo!()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|||||||
@@ -12,7 +12,7 @@ use crate::{
|
|||||||
monitoring::kube_prometheus::crd::{
|
monitoring::kube_prometheus::crd::{
|
||||||
crd_alertmanager_config::CRDPrometheus, rhob_alertmanager_config::RHOBObservability,
|
crd_alertmanager_config::CRDPrometheus, rhob_alertmanager_config::RHOBObservability,
|
||||||
},
|
},
|
||||||
prometheus::prometheus::PrometheusApplicationMonitoring,
|
prometheus::prometheus::PrometheusMonitoring,
|
||||||
},
|
},
|
||||||
score::Score,
|
score::Score,
|
||||||
topology::{PreparationOutcome, Topology, oberservability::monitoring::AlertReceiver},
|
topology::{PreparationOutcome, Topology, oberservability::monitoring::AlertReceiver},
|
||||||
@@ -26,7 +26,7 @@ pub struct ApplicationRHOBMonitoringScore {
|
|||||||
pub receivers: Vec<Box<dyn AlertReceiver<RHOBObservability>>>,
|
pub receivers: Vec<Box<dyn AlertReceiver<RHOBObservability>>>,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<T: Topology + PrometheusApplicationMonitoring<RHOBObservability>> Score<T>
|
impl<T: Topology + PrometheusMonitoring<RHOBObservability>> Score<T>
|
||||||
for ApplicationRHOBMonitoringScore
|
for ApplicationRHOBMonitoringScore
|
||||||
{
|
{
|
||||||
fn create_interpret(&self) -> Box<dyn Interpret<T>> {
|
fn create_interpret(&self) -> Box<dyn Interpret<T>> {
|
||||||
@@ -49,7 +49,7 @@ pub struct ApplicationRHOBMonitoringInterpret {
|
|||||||
}
|
}
|
||||||
|
|
||||||
#[async_trait]
|
#[async_trait]
|
||||||
impl<T: Topology + PrometheusApplicationMonitoring<RHOBObservability>> Interpret<T>
|
impl<T: Topology + PrometheusMonitoring<RHOBObservability>> Interpret<T>
|
||||||
for ApplicationRHOBMonitoringInterpret
|
for ApplicationRHOBMonitoringInterpret
|
||||||
{
|
{
|
||||||
async fn execute(
|
async fn execute(
|
||||||
|
|||||||
17
harmony/src/modules/monitoring/grafana/grafana.rs
Normal file
17
harmony/src/modules/monitoring/grafana/grafana.rs
Normal file
@@ -0,0 +1,17 @@
|
|||||||
|
use async_trait::async_trait;
|
||||||
|
use k8s_openapi::Resource;
|
||||||
|
|
||||||
|
use crate::{
|
||||||
|
inventory::Inventory,
|
||||||
|
topology::{PreparationError, PreparationOutcome},
|
||||||
|
};
|
||||||
|
|
||||||
|
#[async_trait]
|
||||||
|
pub trait Grafana {
|
||||||
|
async fn ensure_grafana_operator(
|
||||||
|
&self,
|
||||||
|
inventory: &Inventory,
|
||||||
|
) -> Result<PreparationOutcome, PreparationError>;
|
||||||
|
|
||||||
|
async fn install_grafana(&self) -> Result<PreparationOutcome, PreparationError>;
|
||||||
|
}
|
||||||
@@ -1,27 +1,28 @@
|
|||||||
|
use harmony_macros::hurl;
|
||||||
use non_blank_string_rs::NonBlankString;
|
use non_blank_string_rs::NonBlankString;
|
||||||
use std::str::FromStr;
|
use std::{collections::HashMap, str::FromStr};
|
||||||
|
|
||||||
use crate::modules::helm::chart::HelmChartScore;
|
use crate::modules::helm::chart::{HelmChartScore, HelmRepository};
|
||||||
|
|
||||||
pub fn grafana_helm_chart_score(ns: &str) -> HelmChartScore {
|
|
||||||
let values = r#"
|
|
||||||
rbac:
|
|
||||||
namespaced: true
|
|
||||||
sidecar:
|
|
||||||
dashboards:
|
|
||||||
enabled: true
|
|
||||||
"#
|
|
||||||
.to_string();
|
|
||||||
|
|
||||||
|
pub fn grafana_helm_chart_score(ns: &str, namespace_scope: bool) -> HelmChartScore {
|
||||||
|
let mut values_overrides = HashMap::new();
|
||||||
|
values_overrides.insert(
|
||||||
|
NonBlankString::from_str("namespaceScope").unwrap(),
|
||||||
|
namespace_scope.to_string(),
|
||||||
|
);
|
||||||
HelmChartScore {
|
HelmChartScore {
|
||||||
namespace: Some(NonBlankString::from_str(ns).unwrap()),
|
namespace: Some(NonBlankString::from_str(ns).unwrap()),
|
||||||
release_name: NonBlankString::from_str("grafana").unwrap(),
|
release_name: NonBlankString::from_str("grafana-operator").unwrap(),
|
||||||
chart_name: NonBlankString::from_str("oci://ghcr.io/grafana/helm-charts/grafana").unwrap(),
|
chart_name: NonBlankString::from_str("grafana/grafana-operator").unwrap(),
|
||||||
chart_version: None,
|
chart_version: None,
|
||||||
values_overrides: None,
|
values_overrides: Some(values_overrides),
|
||||||
values_yaml: Some(values.to_string()),
|
values_yaml: None,
|
||||||
create_namespace: true,
|
create_namespace: true,
|
||||||
install_only: true,
|
install_only: true,
|
||||||
repository: None,
|
repository: Some(HelmRepository::new(
|
||||||
|
"grafana".to_string(),
|
||||||
|
hurl!("https://grafana.github.io/helm-charts"),
|
||||||
|
true,
|
||||||
|
)),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1 +1,2 @@
|
|||||||
|
pub mod grafana;
|
||||||
pub mod helm;
|
pub mod helm;
|
||||||
|
|||||||
@@ -1,12 +1,25 @@
|
|||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
|
|
||||||
|
use async_trait::async_trait;
|
||||||
use kube::CustomResource;
|
use kube::CustomResource;
|
||||||
use schemars::JsonSchema;
|
use schemars::JsonSchema;
|
||||||
use serde::{Deserialize, Serialize};
|
use serde::{Deserialize, Serialize};
|
||||||
|
|
||||||
use crate::topology::{
|
use crate::{
|
||||||
k8s::K8sClient,
|
interpret::{InterpretError, Outcome},
|
||||||
oberservability::monitoring::{AlertReceiver, AlertSender},
|
inventory::Inventory,
|
||||||
|
modules::{
|
||||||
|
monitoring::{
|
||||||
|
grafana::grafana::Grafana, kube_prometheus::crd::service_monitor::ServiceMonitor,
|
||||||
|
},
|
||||||
|
prometheus::prometheus::PrometheusMonitoring,
|
||||||
|
},
|
||||||
|
topology::{
|
||||||
|
K8sclient, Topology,
|
||||||
|
installable::Installable,
|
||||||
|
k8s::K8sClient,
|
||||||
|
oberservability::monitoring::{AlertReceiver, AlertSender, ScrapeTarget},
|
||||||
|
},
|
||||||
};
|
};
|
||||||
|
|
||||||
#[derive(CustomResource, Serialize, Deserialize, Debug, Clone, JsonSchema)]
|
#[derive(CustomResource, Serialize, Deserialize, Debug, Clone, JsonSchema)]
|
||||||
@@ -26,6 +39,7 @@ pub struct AlertmanagerConfigSpec {
|
|||||||
pub struct CRDPrometheus {
|
pub struct CRDPrometheus {
|
||||||
pub namespace: String,
|
pub namespace: String,
|
||||||
pub client: Arc<K8sClient>,
|
pub client: Arc<K8sClient>,
|
||||||
|
pub service_monitor: Vec<ServiceMonitor>,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl AlertSender for CRDPrometheus {
|
impl AlertSender for CRDPrometheus {
|
||||||
@@ -40,6 +54,12 @@ impl Clone for Box<dyn AlertReceiver<CRDPrometheus>> {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
impl Clone for Box<dyn ScrapeTarget<CRDPrometheus>> {
|
||||||
|
fn clone(&self) -> Self {
|
||||||
|
self.clone_box()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
impl Serialize for Box<dyn AlertReceiver<CRDPrometheus>> {
|
impl Serialize for Box<dyn AlertReceiver<CRDPrometheus>> {
|
||||||
fn serialize<S>(&self, _serializer: S) -> Result<S::Ok, S::Error>
|
fn serialize<S>(&self, _serializer: S) -> Result<S::Ok, S::Error>
|
||||||
where
|
where
|
||||||
@@ -48,3 +68,24 @@ impl Serialize for Box<dyn AlertReceiver<CRDPrometheus>> {
|
|||||||
todo!()
|
todo!()
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[async_trait]
|
||||||
|
impl<T: Topology + K8sclient + PrometheusMonitoring<CRDPrometheus> + Grafana> Installable<T>
|
||||||
|
for CRDPrometheus
|
||||||
|
{
|
||||||
|
async fn configure(&self, inventory: &Inventory, topology: &T) -> Result<(), InterpretError> {
|
||||||
|
topology.ensure_grafana_operator(inventory).await?;
|
||||||
|
topology.ensure_prometheus_operator(self, inventory).await?;
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn ensure_installed(
|
||||||
|
&self,
|
||||||
|
inventory: &Inventory,
|
||||||
|
topology: &T,
|
||||||
|
) -> Result<(), InterpretError> {
|
||||||
|
topology.install_grafana().await?;
|
||||||
|
topology.install_prometheus(&self, inventory, None).await?;
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|||||||
@@ -103,9 +103,34 @@ pub struct GrafanaDashboardSpec {
|
|||||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||||
pub resync_period: Option<String>,
|
pub resync_period: Option<String>,
|
||||||
|
|
||||||
|
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||||
|
pub datasources: Option<Vec<GrafanaDashboardDatasource>>,
|
||||||
|
|
||||||
pub instance_selector: LabelSelector,
|
pub instance_selector: LabelSelector,
|
||||||
|
|
||||||
pub json: String,
|
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||||
|
pub json: Option<String>,
|
||||||
|
|
||||||
|
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||||
|
pub grafana_com: Option<GrafanaCom>,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Serialize, Deserialize, Debug, Clone, JsonSchema)]
|
||||||
|
#[serde(rename_all = "camelCase")]
|
||||||
|
pub struct GrafanaDashboardDatasource {
|
||||||
|
pub input_name: String,
|
||||||
|
pub datasource_name: String,
|
||||||
|
}
|
||||||
|
|
||||||
|
// ------------------------------------------------------------------------------------------------
|
||||||
|
|
||||||
|
#[derive(Serialize, Deserialize, Debug, Clone, JsonSchema)]
|
||||||
|
#[serde(rename_all = "camelCase")]
|
||||||
|
pub struct GrafanaCom {
|
||||||
|
pub id: u32,
|
||||||
|
|
||||||
|
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||||
|
pub revision: Option<u32>,
|
||||||
}
|
}
|
||||||
|
|
||||||
// ------------------------------------------------------------------------------------------------
|
// ------------------------------------------------------------------------------------------------
|
||||||
@@ -126,20 +151,79 @@ pub struct GrafanaDatasourceSpec {
|
|||||||
pub allow_cross_namespace_import: Option<bool>,
|
pub allow_cross_namespace_import: Option<bool>,
|
||||||
|
|
||||||
pub datasource: GrafanaDatasourceConfig,
|
pub datasource: GrafanaDatasourceConfig,
|
||||||
|
|
||||||
|
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||||
|
pub values_from: Option<Vec<GrafanaValueFrom>>,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Serialize, Deserialize, Debug, Clone, JsonSchema)]
|
||||||
|
#[serde(rename_all = "camelCase")]
|
||||||
|
pub struct GrafanaValueFrom {
|
||||||
|
pub target_path: String,
|
||||||
|
pub value_from: GrafanaValueSource,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Serialize, Deserialize, Debug, Clone, JsonSchema)]
|
||||||
|
#[serde(rename_all = "camelCase")]
|
||||||
|
pub struct GrafanaValueSource {
|
||||||
|
pub secret_key_ref: GrafanaSecretKeyRef,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Serialize, Deserialize, Debug, Clone, JsonSchema)]
|
||||||
|
#[serde(rename_all = "camelCase")]
|
||||||
|
pub struct GrafanaSecretKeyRef {
|
||||||
|
pub name: String,
|
||||||
|
pub key: String,
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Serialize, Deserialize, Debug, Clone, JsonSchema)]
|
#[derive(Serialize, Deserialize, Debug, Clone, JsonSchema)]
|
||||||
#[serde(rename_all = "camelCase")]
|
#[serde(rename_all = "camelCase")]
|
||||||
pub struct GrafanaDatasourceConfig {
|
pub struct GrafanaDatasourceConfig {
|
||||||
pub access: String,
|
pub access: String,
|
||||||
pub database: Option<String>,
|
|
||||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||||
pub json_data: Option<BTreeMap<String, String>>,
|
pub database: Option<String>,
|
||||||
pub name: String,
|
pub name: String,
|
||||||
pub r#type: String,
|
pub r#type: String,
|
||||||
pub url: String,
|
pub url: String,
|
||||||
|
/// Represents jsonData in the GrafanaDatasource spec
|
||||||
|
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||||
|
pub json_data: Option<GrafanaDatasourceJsonData>,
|
||||||
|
|
||||||
|
/// Represents secureJsonData (secrets)
|
||||||
|
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||||
|
pub secure_json_data: Option<GrafanaDatasourceSecureJsonData>,
|
||||||
|
|
||||||
|
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||||
|
pub is_default: Option<bool>,
|
||||||
|
|
||||||
|
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||||
|
pub editable: Option<bool>,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[derive(Serialize, Deserialize, Debug, Clone, JsonSchema)]
|
||||||
|
#[serde(rename_all = "camelCase")]
|
||||||
|
pub struct GrafanaDatasourceJsonData {
|
||||||
|
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||||
|
pub time_interval: Option<String>,
|
||||||
|
|
||||||
|
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||||
|
pub http_header_name1: Option<String>,
|
||||||
|
|
||||||
|
/// Disable TLS skip verification (false = verify)
|
||||||
|
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||||
|
pub tls_skip_verify: Option<bool>,
|
||||||
|
|
||||||
|
/// Auth type - set to "forward" for OpenShift OAuth identity
|
||||||
|
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||||
|
pub oauth_pass_thru: Option<bool>,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Serialize, Deserialize, Debug, Clone, JsonSchema)]
|
||||||
|
#[serde(rename_all = "camelCase")]
|
||||||
|
pub struct GrafanaDatasourceSecureJsonData {
|
||||||
|
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||||
|
pub http_header_value1: Option<String>,
|
||||||
|
}
|
||||||
// ------------------------------------------------------------------------------------------------
|
// ------------------------------------------------------------------------------------------------
|
||||||
|
|
||||||
#[derive(Serialize, Deserialize, Debug, Clone, JsonSchema, Default)]
|
#[derive(Serialize, Deserialize, Debug, Clone, JsonSchema, Default)]
|
||||||
|
|||||||
@@ -0,0 +1,187 @@
|
|||||||
|
use std::net::IpAddr;
|
||||||
|
|
||||||
|
use async_trait::async_trait;
|
||||||
|
use kube::CustomResource;
|
||||||
|
use schemars::JsonSchema;
|
||||||
|
use serde::{Deserialize, Serialize};
|
||||||
|
|
||||||
|
use crate::{
|
||||||
|
modules::monitoring::kube_prometheus::crd::{
|
||||||
|
crd_alertmanager_config::CRDPrometheus, crd_prometheuses::LabelSelector,
|
||||||
|
},
|
||||||
|
topology::oberservability::monitoring::ScrapeTarget,
|
||||||
|
};
|
||||||
|
|
||||||
|
#[derive(CustomResource, Serialize, Deserialize, Debug, Clone, JsonSchema)]
|
||||||
|
#[kube(
|
||||||
|
group = "monitoring.coreos.com",
|
||||||
|
version = "v1alpha1",
|
||||||
|
kind = "ScrapeConfig",
|
||||||
|
plural = "scrapeconfigs",
|
||||||
|
namespaced
|
||||||
|
)]
|
||||||
|
#[serde(rename_all = "camelCase")]
|
||||||
|
pub struct ScrapeConfigSpec {
|
||||||
|
/// List of static configurations.
|
||||||
|
pub static_configs: Option<Vec<StaticConfig>>,
|
||||||
|
|
||||||
|
/// Kubernetes service discovery.
|
||||||
|
pub kubernetes_sd_configs: Option<Vec<KubernetesSDConfig>>,
|
||||||
|
|
||||||
|
/// HTTP-based service discovery.
|
||||||
|
pub http_sd_configs: Option<Vec<HttpSDConfig>>,
|
||||||
|
|
||||||
|
/// File-based service discovery.
|
||||||
|
pub file_sd_configs: Option<Vec<FileSDConfig>>,
|
||||||
|
|
||||||
|
/// DNS-based service discovery.
|
||||||
|
pub dns_sd_configs: Option<Vec<DnsSDConfig>>,
|
||||||
|
|
||||||
|
/// Consul service discovery.
|
||||||
|
pub consul_sd_configs: Option<Vec<ConsulSDConfig>>,
|
||||||
|
|
||||||
|
/// Relabeling configuration applied to discovered targets.
|
||||||
|
pub relabel_configs: Option<Vec<RelabelConfig>>,
|
||||||
|
|
||||||
|
/// Metric relabeling configuration applied to scraped samples.
|
||||||
|
pub metric_relabel_configs: Option<Vec<RelabelConfig>>,
|
||||||
|
|
||||||
|
/// Path to scrape metrics from (defaults to `/metrics`).
|
||||||
|
pub metrics_path: Option<String>,
|
||||||
|
|
||||||
|
/// Interval at which Prometheus scrapes targets (e.g., "30s").
|
||||||
|
pub scrape_interval: Option<String>,
|
||||||
|
|
||||||
|
/// Timeout for scraping (e.g., "10s").
|
||||||
|
pub scrape_timeout: Option<String>,
|
||||||
|
|
||||||
|
/// Optional job name override.
|
||||||
|
pub job_name: Option<String>,
|
||||||
|
|
||||||
|
/// Optional scheme (http or https).
|
||||||
|
pub scheme: Option<String>,
|
||||||
|
|
||||||
|
/// Authorization paramaters for snmp walk
|
||||||
|
pub params: Option<Params>,
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Static configuration section of a ScrapeConfig.
|
||||||
|
#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)]
|
||||||
|
#[serde(rename_all = "camelCase")]
|
||||||
|
pub struct StaticConfig {
|
||||||
|
pub targets: Vec<String>,
|
||||||
|
|
||||||
|
pub labels: Option<LabelSelector>,
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Relabeling configuration for target or metric relabeling.
|
||||||
|
#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)]
|
||||||
|
#[serde(rename_all = "camelCase")]
|
||||||
|
pub struct RelabelConfig {
|
||||||
|
pub source_labels: Option<Vec<String>>,
|
||||||
|
pub separator: Option<String>,
|
||||||
|
pub target_label: Option<String>,
|
||||||
|
pub regex: Option<String>,
|
||||||
|
pub modulus: Option<u64>,
|
||||||
|
pub replacement: Option<String>,
|
||||||
|
pub action: Option<String>,
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Kubernetes service discovery configuration.
|
||||||
|
#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)]
|
||||||
|
#[serde(rename_all = "camelCase")]
|
||||||
|
pub struct KubernetesSDConfig {
|
||||||
|
///"pod", "service", "endpoints"pub role: String,
|
||||||
|
pub namespaces: Option<NamespaceSelector>,
|
||||||
|
pub selectors: Option<Vec<LabelSelector>>,
|
||||||
|
pub api_server: Option<String>,
|
||||||
|
pub bearer_token_file: Option<String>,
|
||||||
|
pub tls_config: Option<TLSConfig>,
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Namespace selector for Kubernetes service discovery.
|
||||||
|
#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)]
|
||||||
|
#[serde(rename_all = "camelCase")]
|
||||||
|
pub struct NamespaceSelector {
|
||||||
|
pub any: Option<bool>,
|
||||||
|
pub match_names: Option<Vec<String>>,
|
||||||
|
}
|
||||||
|
|
||||||
|
/// HTTP-based service discovery configuration.
|
||||||
|
#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)]
|
||||||
|
#[serde(rename_all = "camelCase")]
|
||||||
|
pub struct HttpSDConfig {
|
||||||
|
pub url: String,
|
||||||
|
pub refresh_interval: Option<String>,
|
||||||
|
pub basic_auth: Option<BasicAuth>,
|
||||||
|
pub authorization: Option<Authorization>,
|
||||||
|
pub tls_config: Option<TLSConfig>,
|
||||||
|
}
|
||||||
|
|
||||||
|
/// File-based service discovery configuration.
|
||||||
|
#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)]
|
||||||
|
#[serde(rename_all = "camelCase")]
|
||||||
|
pub struct FileSDConfig {
|
||||||
|
pub files: Vec<String>,
|
||||||
|
pub refresh_interval: Option<String>,
|
||||||
|
}
|
||||||
|
|
||||||
|
/// DNS-based service discovery configuration.
|
||||||
|
#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)]
|
||||||
|
#[serde(rename_all = "camelCase")]
|
||||||
|
pub struct DnsSDConfig {
|
||||||
|
pub names: Vec<String>,
|
||||||
|
pub refresh_interval: Option<String>,
|
||||||
|
pub type_: Option<String>, // SRV, A, AAAA
|
||||||
|
pub port: Option<u16>,
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Consul service discovery configuration.
|
||||||
|
#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)]
|
||||||
|
#[serde(rename_all = "camelCase")]
|
||||||
|
pub struct ConsulSDConfig {
|
||||||
|
pub server: String,
|
||||||
|
pub services: Option<Vec<String>>,
|
||||||
|
pub scheme: Option<String>,
|
||||||
|
pub datacenter: Option<String>,
|
||||||
|
pub tag_separator: Option<String>,
|
||||||
|
pub refresh_interval: Option<String>,
|
||||||
|
pub tls_config: Option<TLSConfig>,
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Basic authentication credentials.
|
||||||
|
#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)]
|
||||||
|
#[serde(rename_all = "camelCase")]
|
||||||
|
pub struct BasicAuth {
|
||||||
|
pub username: String,
|
||||||
|
pub password: Option<String>,
|
||||||
|
pub password_file: Option<String>,
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Bearer token or other auth mechanisms.
|
||||||
|
#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)]
|
||||||
|
#[serde(rename_all = "camelCase")]
|
||||||
|
pub struct Authorization {
|
||||||
|
pub credentials: Option<String>,
|
||||||
|
pub credentials_file: Option<String>,
|
||||||
|
pub type_: Option<String>,
|
||||||
|
}
|
||||||
|
|
||||||
|
/// TLS configuration for secure scraping.
|
||||||
|
#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)]
|
||||||
|
#[serde(rename_all = "camelCase")]
|
||||||
|
pub struct TLSConfig {
|
||||||
|
pub ca_file: Option<String>,
|
||||||
|
pub cert_file: Option<String>,
|
||||||
|
pub key_file: Option<String>,
|
||||||
|
pub server_name: Option<String>,
|
||||||
|
pub insecure_skip_verify: Option<bool>,
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Authorization parameters for SNMP walk.
|
||||||
|
#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)]
|
||||||
|
#[serde(rename_all = "camelCase")]
|
||||||
|
pub struct Params {
|
||||||
|
pub auth: Option<Vec<String>>,
|
||||||
|
pub module: Option<Vec<String>>,
|
||||||
|
}
|
||||||
@@ -4,6 +4,7 @@ pub mod crd_default_rules;
|
|||||||
pub mod crd_grafana;
|
pub mod crd_grafana;
|
||||||
pub mod crd_prometheus_rules;
|
pub mod crd_prometheus_rules;
|
||||||
pub mod crd_prometheuses;
|
pub mod crd_prometheuses;
|
||||||
|
pub mod crd_scrape_config;
|
||||||
pub mod grafana_default_dashboard;
|
pub mod grafana_default_dashboard;
|
||||||
pub mod grafana_operator;
|
pub mod grafana_operator;
|
||||||
pub mod prometheus_operator;
|
pub mod prometheus_operator;
|
||||||
|
|||||||
@@ -31,6 +31,7 @@ impl<T: Topology + HelmCommand + TenantManager> Score<T> for HelmPrometheusAlert
|
|||||||
sender: KubePrometheus { config },
|
sender: KubePrometheus { config },
|
||||||
receivers: self.receivers.clone(),
|
receivers: self.receivers.clone(),
|
||||||
rules: self.rules.clone(),
|
rules: self.rules.clone(),
|
||||||
|
scrape_targets: None,
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
fn name(&self) -> String {
|
fn name(&self) -> String {
|
||||||
|
|||||||
@@ -6,3 +6,4 @@ pub mod kube_prometheus;
|
|||||||
pub mod ntfy;
|
pub mod ntfy;
|
||||||
pub mod okd;
|
pub mod okd;
|
||||||
pub mod prometheus;
|
pub mod prometheus;
|
||||||
|
pub mod scrape_target;
|
||||||
|
|||||||
@@ -100,11 +100,7 @@ impl<T: Topology + HelmCommand + K8sclient + MultiTargetTopology> Interpret<T> f
|
|||||||
|
|
||||||
info!("deploying ntfy...");
|
info!("deploying ntfy...");
|
||||||
client
|
client
|
||||||
.wait_until_deployment_ready(
|
.wait_until_deployment_ready("ntfy", Some(self.score.namespace.as_str()), None)
|
||||||
"ntfy".to_string(),
|
|
||||||
Some(self.score.namespace.as_str()),
|
|
||||||
None,
|
|
||||||
)
|
|
||||||
.await?;
|
.await?;
|
||||||
info!("ntfy deployed");
|
info!("ntfy deployed");
|
||||||
|
|
||||||
|
|||||||
@@ -114,7 +114,7 @@ impl Prometheus {
|
|||||||
};
|
};
|
||||||
|
|
||||||
if let Some(ns) = namespace.as_deref() {
|
if let Some(ns) = namespace.as_deref() {
|
||||||
grafana_helm_chart_score(ns)
|
grafana_helm_chart_score(ns, false)
|
||||||
.interpret(inventory, topology)
|
.interpret(inventory, topology)
|
||||||
.await
|
.await
|
||||||
} else {
|
} else {
|
||||||
|
|||||||
1
harmony/src/modules/monitoring/scrape_target/mod.rs
Normal file
1
harmony/src/modules/monitoring/scrape_target/mod.rs
Normal file
@@ -0,0 +1 @@
|
|||||||
|
pub mod server;
|
||||||
80
harmony/src/modules/monitoring/scrape_target/server.rs
Normal file
80
harmony/src/modules/monitoring/scrape_target/server.rs
Normal file
@@ -0,0 +1,80 @@
|
|||||||
|
use std::net::IpAddr;
|
||||||
|
|
||||||
|
use async_trait::async_trait;
|
||||||
|
use kube::api::ObjectMeta;
|
||||||
|
use serde::Serialize;
|
||||||
|
|
||||||
|
use crate::{
|
||||||
|
interpret::{InterpretError, Outcome},
|
||||||
|
modules::monitoring::kube_prometheus::crd::{
|
||||||
|
crd_alertmanager_config::CRDPrometheus,
|
||||||
|
crd_scrape_config::{Params, RelabelConfig, ScrapeConfig, ScrapeConfigSpec, StaticConfig},
|
||||||
|
},
|
||||||
|
topology::oberservability::monitoring::ScrapeTarget,
|
||||||
|
};
|
||||||
|
|
||||||
|
#[derive(Debug, Clone, Serialize)]
|
||||||
|
pub struct Server {
|
||||||
|
pub name: String,
|
||||||
|
pub ip: IpAddr,
|
||||||
|
pub auth: String,
|
||||||
|
pub module: String,
|
||||||
|
pub domain: String,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[async_trait]
|
||||||
|
impl ScrapeTarget<CRDPrometheus> for Server {
|
||||||
|
async fn install(&self, sender: &CRDPrometheus) -> Result<Outcome, InterpretError> {
|
||||||
|
let scrape_config_spec = ScrapeConfigSpec {
|
||||||
|
static_configs: Some(vec![StaticConfig {
|
||||||
|
targets: vec![self.ip.to_string()],
|
||||||
|
labels: None,
|
||||||
|
}]),
|
||||||
|
scrape_interval: Some("2m".to_string()),
|
||||||
|
kubernetes_sd_configs: None,
|
||||||
|
http_sd_configs: None,
|
||||||
|
file_sd_configs: None,
|
||||||
|
dns_sd_configs: None,
|
||||||
|
params: Some(Params {
|
||||||
|
auth: Some(vec![self.auth.clone()]),
|
||||||
|
module: Some(vec![self.module.clone()]),
|
||||||
|
}),
|
||||||
|
consul_sd_configs: None,
|
||||||
|
relabel_configs: Some(vec![RelabelConfig {
|
||||||
|
action: None,
|
||||||
|
source_labels: Some(vec!["__address__".to_string()]),
|
||||||
|
separator: None,
|
||||||
|
target_label: Some("__param_target".to_string()),
|
||||||
|
regex: None,
|
||||||
|
replacement: Some(format!("snmp.{}:31080", self.domain.clone())),
|
||||||
|
modulus: None,
|
||||||
|
}]),
|
||||||
|
metric_relabel_configs: None,
|
||||||
|
metrics_path: Some("/snmp".to_string()),
|
||||||
|
scrape_timeout: Some("2m".to_string()),
|
||||||
|
job_name: Some(format!("snmp_exporter/cloud/{}", self.name.clone())),
|
||||||
|
scheme: None,
|
||||||
|
};
|
||||||
|
|
||||||
|
let scrape_config = ScrapeConfig {
|
||||||
|
metadata: ObjectMeta {
|
||||||
|
name: Some(self.name.clone()),
|
||||||
|
namespace: Some(sender.namespace.clone()),
|
||||||
|
..Default::default()
|
||||||
|
},
|
||||||
|
spec: scrape_config_spec,
|
||||||
|
};
|
||||||
|
sender
|
||||||
|
.client
|
||||||
|
.apply(&scrape_config, Some(&sender.namespace.clone()))
|
||||||
|
.await?;
|
||||||
|
Ok(Outcome::success(format!(
|
||||||
|
"installed scrape target {}",
|
||||||
|
self.name.clone()
|
||||||
|
)))
|
||||||
|
}
|
||||||
|
|
||||||
|
fn clone_box(&self) -> Box<dyn ScrapeTarget<CRDPrometheus>> {
|
||||||
|
Box::new(self.clone())
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -28,7 +28,7 @@ pub struct OKDSetup03ControlPlaneScore {}
|
|||||||
|
|
||||||
impl Score<HAClusterTopology> for OKDSetup03ControlPlaneScore {
|
impl Score<HAClusterTopology> for OKDSetup03ControlPlaneScore {
|
||||||
fn create_interpret(&self) -> Box<dyn Interpret<HAClusterTopology>> {
|
fn create_interpret(&self) -> Box<dyn Interpret<HAClusterTopology>> {
|
||||||
Box::new(OKDSetup03ControlPlaneInterpret::new(self.clone()))
|
Box::new(OKDSetup03ControlPlaneInterpret::new())
|
||||||
}
|
}
|
||||||
|
|
||||||
fn name(&self) -> String {
|
fn name(&self) -> String {
|
||||||
@@ -38,17 +38,15 @@ impl Score<HAClusterTopology> for OKDSetup03ControlPlaneScore {
|
|||||||
|
|
||||||
#[derive(Debug, Clone)]
|
#[derive(Debug, Clone)]
|
||||||
pub struct OKDSetup03ControlPlaneInterpret {
|
pub struct OKDSetup03ControlPlaneInterpret {
|
||||||
score: OKDSetup03ControlPlaneScore,
|
|
||||||
version: Version,
|
version: Version,
|
||||||
status: InterpretStatus,
|
status: InterpretStatus,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl OKDSetup03ControlPlaneInterpret {
|
impl OKDSetup03ControlPlaneInterpret {
|
||||||
pub fn new(score: OKDSetup03ControlPlaneScore) -> Self {
|
pub fn new() -> Self {
|
||||||
let version = Version::from("1.0.0").unwrap();
|
let version = Version::from("1.0.0").unwrap();
|
||||||
Self {
|
Self {
|
||||||
version,
|
version,
|
||||||
score,
|
|
||||||
status: InterpretStatus::QUEUED,
|
status: InterpretStatus::QUEUED,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -159,7 +157,7 @@ impl OKDSetup03ControlPlaneInterpret {
|
|||||||
}
|
}
|
||||||
.to_string();
|
.to_string();
|
||||||
|
|
||||||
debug!("[ControlPlane] iPXE content template:\n{}", content);
|
debug!("[ControlPlane] iPXE content template:\n{content}");
|
||||||
|
|
||||||
// Create and apply an iPXE boot file for each node.
|
// Create and apply an iPXE boot file for each node.
|
||||||
for node in nodes {
|
for node in nodes {
|
||||||
@@ -189,16 +187,13 @@ impl OKDSetup03ControlPlaneInterpret {
|
|||||||
/// Prompts the user to reboot the target control plane nodes.
|
/// Prompts the user to reboot the target control plane nodes.
|
||||||
async fn reboot_targets(&self, nodes: &Vec<PhysicalHost>) -> Result<(), InterpretError> {
|
async fn reboot_targets(&self, nodes: &Vec<PhysicalHost>) -> Result<(), InterpretError> {
|
||||||
let node_ids: Vec<String> = nodes.iter().map(|n| n.id.to_string()).collect();
|
let node_ids: Vec<String> = nodes.iter().map(|n| n.id.to_string()).collect();
|
||||||
info!(
|
info!("[ControlPlane] Requesting reboot for control plane nodes: {node_ids:?}",);
|
||||||
"[ControlPlane] Requesting reboot for control plane nodes: {:?}",
|
|
||||||
node_ids
|
|
||||||
);
|
|
||||||
|
|
||||||
let confirmation = inquire::Confirm::new(
|
let confirmation = inquire::Confirm::new(
|
||||||
&format!("Please reboot the {} control plane nodes ({}) to apply their PXE configuration. Press enter when ready.", nodes.len(), node_ids.join(", ")),
|
&format!("Please reboot the {} control plane nodes ({}) to apply their PXE configuration. Press enter when ready.", nodes.len(), node_ids.join(", ")),
|
||||||
)
|
)
|
||||||
.prompt()
|
.prompt()
|
||||||
.map_err(|e| InterpretError::new(format!("User prompt failed: {}", e)))?;
|
.map_err(|e| InterpretError::new(format!("User prompt failed: {e}")))?;
|
||||||
|
|
||||||
if !confirmation {
|
if !confirmation {
|
||||||
return Err(InterpretError::new(
|
return Err(InterpretError::new(
|
||||||
@@ -208,19 +203,6 @@ impl OKDSetup03ControlPlaneInterpret {
|
|||||||
|
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Placeholder for automating network bonding configuration.
|
|
||||||
async fn persist_network_bond(&self) -> Result<(), InterpretError> {
|
|
||||||
// Generate MC or NNCP from inventory NIC data; apply via ignition or post-join.
|
|
||||||
info!("[ControlPlane] Ensuring persistent bonding via MachineConfig/NNCP");
|
|
||||||
inquire::Confirm::new(
|
|
||||||
"Network configuration for control plane nodes is not automated yet. Configure it manually if needed.",
|
|
||||||
)
|
|
||||||
.prompt()
|
|
||||||
.map_err(|e| InterpretError::new(format!("User prompt failed: {}", e)))?;
|
|
||||||
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
#[async_trait]
|
#[async_trait]
|
||||||
@@ -259,9 +241,6 @@ impl Interpret<HAClusterTopology> for OKDSetup03ControlPlaneInterpret {
|
|||||||
// 4. Reboot the nodes to start the OS installation.
|
// 4. Reboot the nodes to start the OS installation.
|
||||||
self.reboot_targets(&nodes).await?;
|
self.reboot_targets(&nodes).await?;
|
||||||
|
|
||||||
// 5. Placeholder for post-boot network configuration (e.g., bonding).
|
|
||||||
self.persist_network_bond().await?;
|
|
||||||
|
|
||||||
// TODO: Implement a step to wait for the control plane nodes to join the cluster
|
// TODO: Implement a step to wait for the control plane nodes to join the cluster
|
||||||
// and for the cluster operators to become available. This would be similar to
|
// and for the cluster operators to become available. This would be similar to
|
||||||
// the `wait-for bootstrap-complete` command.
|
// the `wait-for bootstrap-complete` command.
|
||||||
|
|||||||
@@ -77,6 +77,8 @@ impl OKDBootstrapLoadBalancerScore {
|
|||||||
address: topology.bootstrap_host.ip.to_string(),
|
address: topology.bootstrap_host.ip.to_string(),
|
||||||
port,
|
port,
|
||||||
});
|
});
|
||||||
|
|
||||||
|
backend.dedup();
|
||||||
backend
|
backend
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
130
harmony/src/modules/okd/bootstrap_persist_network_bond.rs
Normal file
130
harmony/src/modules/okd/bootstrap_persist_network_bond.rs
Normal file
@@ -0,0 +1,130 @@
|
|||||||
|
use crate::{
|
||||||
|
data::Version,
|
||||||
|
hardware::PhysicalHost,
|
||||||
|
infra::inventory::InventoryRepositoryFactory,
|
||||||
|
interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
|
||||||
|
inventory::{HostRole, Inventory},
|
||||||
|
modules::okd::host_network::HostNetworkConfigurationScore,
|
||||||
|
score::Score,
|
||||||
|
topology::HAClusterTopology,
|
||||||
|
};
|
||||||
|
use async_trait::async_trait;
|
||||||
|
use derive_new::new;
|
||||||
|
use harmony_types::id::Id;
|
||||||
|
use log::info;
|
||||||
|
use serde::Serialize;
|
||||||
|
|
||||||
|
// -------------------------------------------------------------------------------------------------
|
||||||
|
// Persist Network Bond
|
||||||
|
// - Persist bonding via NMState
|
||||||
|
// - Persist port channels on the Switch
|
||||||
|
// -------------------------------------------------------------------------------------------------
|
||||||
|
|
||||||
|
#[derive(Debug, Clone, Serialize, new)]
|
||||||
|
pub struct OKDSetupPersistNetworkBondScore {}
|
||||||
|
|
||||||
|
impl Score<HAClusterTopology> for OKDSetupPersistNetworkBondScore {
|
||||||
|
fn create_interpret(&self) -> Box<dyn Interpret<HAClusterTopology>> {
|
||||||
|
Box::new(OKDSetupPersistNetworkBondInterpet::new())
|
||||||
|
}
|
||||||
|
|
||||||
|
fn name(&self) -> String {
|
||||||
|
"OKDSetupPersistNetworkBondScore".to_string()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Debug, Clone)]
|
||||||
|
pub struct OKDSetupPersistNetworkBondInterpet {
|
||||||
|
version: Version,
|
||||||
|
status: InterpretStatus,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl OKDSetupPersistNetworkBondInterpet {
|
||||||
|
pub fn new() -> Self {
|
||||||
|
let version = Version::from("1.0.0").unwrap();
|
||||||
|
Self {
|
||||||
|
version,
|
||||||
|
status: InterpretStatus::QUEUED,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Ensures that three physical hosts are discovered and available for the ControlPlane role.
|
||||||
|
/// It will trigger discovery if not enough hosts are found.
|
||||||
|
async fn get_nodes(
|
||||||
|
&self,
|
||||||
|
_inventory: &Inventory,
|
||||||
|
_topology: &HAClusterTopology,
|
||||||
|
) -> Result<Vec<PhysicalHost>, InterpretError> {
|
||||||
|
const REQUIRED_HOSTS: usize = 3;
|
||||||
|
let repo = InventoryRepositoryFactory::build().await?;
|
||||||
|
let control_plane_hosts = repo.get_host_for_role(&HostRole::ControlPlane).await?;
|
||||||
|
|
||||||
|
if control_plane_hosts.len() < REQUIRED_HOSTS {
|
||||||
|
Err(InterpretError::new(format!(
|
||||||
|
"OKD Requires at least {} control plane hosts, but only found {}. Cannot proceed.",
|
||||||
|
REQUIRED_HOSTS,
|
||||||
|
control_plane_hosts.len()
|
||||||
|
)))
|
||||||
|
} else {
|
||||||
|
// Take exactly the number of required hosts to ensure consistency.
|
||||||
|
Ok(control_plane_hosts
|
||||||
|
.into_iter()
|
||||||
|
.take(REQUIRED_HOSTS)
|
||||||
|
.collect())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn persist_network_bond(
|
||||||
|
&self,
|
||||||
|
inventory: &Inventory,
|
||||||
|
topology: &HAClusterTopology,
|
||||||
|
hosts: &Vec<PhysicalHost>,
|
||||||
|
) -> Result<(), InterpretError> {
|
||||||
|
info!("Ensuring persistent bonding");
|
||||||
|
|
||||||
|
let score = HostNetworkConfigurationScore {
|
||||||
|
hosts: hosts.clone(),
|
||||||
|
};
|
||||||
|
score.interpret(inventory, topology).await?;
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[async_trait]
|
||||||
|
impl Interpret<HAClusterTopology> for OKDSetupPersistNetworkBondInterpet {
|
||||||
|
fn get_name(&self) -> InterpretName {
|
||||||
|
InterpretName::Custom("OKDSetupPersistNetworkBondInterpet")
|
||||||
|
}
|
||||||
|
|
||||||
|
fn get_version(&self) -> Version {
|
||||||
|
self.version.clone()
|
||||||
|
}
|
||||||
|
|
||||||
|
fn get_status(&self) -> InterpretStatus {
|
||||||
|
self.status.clone()
|
||||||
|
}
|
||||||
|
|
||||||
|
fn get_children(&self) -> Vec<Id> {
|
||||||
|
vec![]
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn execute(
|
||||||
|
&self,
|
||||||
|
inventory: &Inventory,
|
||||||
|
topology: &HAClusterTopology,
|
||||||
|
) -> Result<Outcome, InterpretError> {
|
||||||
|
let nodes = self.get_nodes(inventory, topology).await?;
|
||||||
|
|
||||||
|
let res = self.persist_network_bond(inventory, topology, &nodes).await;
|
||||||
|
|
||||||
|
match res {
|
||||||
|
Ok(_) => Ok(Outcome::success(
|
||||||
|
"Network bond successfully persisted".into(),
|
||||||
|
)),
|
||||||
|
Err(_) => Err(InterpretError::new(
|
||||||
|
"Failed to persist network bond".to_string(),
|
||||||
|
)),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
1
harmony/src/modules/okd/crd/mod.rs
Normal file
1
harmony/src/modules/okd/crd/mod.rs
Normal file
@@ -0,0 +1 @@
|
|||||||
|
pub mod nmstate;
|
||||||
322
harmony/src/modules/okd/crd/nmstate.rs
Normal file
322
harmony/src/modules/okd/crd/nmstate.rs
Normal file
@@ -0,0 +1,322 @@
|
|||||||
|
use std::collections::BTreeMap;
|
||||||
|
|
||||||
|
use kube::CustomResource;
|
||||||
|
use schemars::JsonSchema;
|
||||||
|
use serde::{Deserialize, Serialize};
|
||||||
|
use serde_json::Value;
|
||||||
|
|
||||||
|
/// Spec of the cluster-scoped `NMState` custom resource
/// (`nmstates.nmstate.io`, API version `v1`), as declared by the `#[kube]`
/// attribute below.
#[derive(CustomResource, Deserialize, Serialize, Clone, Debug, JsonSchema)]
#[kube(
    group = "nmstate.io",
    version = "v1",
    kind = "NMState",
    plural = "nmstates",
    namespaced = false
)]
#[serde(rename_all = "camelCase")]
pub struct NMStateSpec {
    /// Optional probe settings (serialized as `probeConfiguration`); omitted
    /// from the manifest when `None`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub probe_configuration: Option<ProbeConfig>,
}
|
||||||
|
|
||||||
|
impl Default for NMState {
|
||||||
|
fn default() -> Self {
|
||||||
|
Self {
|
||||||
|
metadata: Default::default(),
|
||||||
|
spec: NMStateSpec {
|
||||||
|
probe_configuration: None,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Probe settings embedded in [`NMStateSpec`].
#[derive(Deserialize, Serialize, Clone, Debug, JsonSchema)]
#[serde(rename_all = "camelCase")]
pub struct ProbeConfig {
    /// DNS probe configuration.
    pub dns: ProbeDns,
}

/// DNS probe target.
#[derive(Deserialize, Serialize, Clone, Debug, JsonSchema)]
#[serde(rename_all = "camelCase")]
pub struct ProbeDns {
    /// Hostname used by the probe.
    /// NOTE(review): semantics inferred from the field name — confirm against
    /// the NMState operator documentation.
    pub host: String,
}

/// Spec of the `NodeNetworkConfigurationPolicy` custom resource
/// (`nmstate.io/v1`, namespaced per the `#[kube]` attribute here): selects
/// nodes and declares the network state they should converge to.
#[derive(CustomResource, Deserialize, Serialize, Clone, Debug, JsonSchema)]
#[kube(
    group = "nmstate.io",
    version = "v1",
    kind = "NodeNetworkConfigurationPolicy",
    namespaced
)]
#[serde(rename_all = "camelCase")]
pub struct NodeNetworkConfigurationPolicySpec {
    /// Label selector restricting which nodes the policy applies to;
    /// omitted when `None`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub node_selector: Option<BTreeMap<String, String>>,
    /// Desired network state (serialized as `desiredState`).
    pub desired_state: DesiredStateSpec,
}
|
||||||
|
|
||||||
|
/// Desired network state of a node: a list of interface definitions.
/// Serialized with kebab-case keys, matching the NMState YAML API.
#[derive(Deserialize, Serialize, Clone, Debug, JsonSchema)]
#[serde(rename_all = "kebab-case")]
pub struct DesiredStateSpec {
    pub interfaces: Vec<InterfaceSpec>,
}

/// One interface entry in the desired state. Only `name`, `type` and `state`
/// are mandatory; every optional field is omitted from the manifest when
/// `None`. The mutually-exclusive sub-specs (`ethernet`, `link_aggregation`,
/// `vlan`, ...) mirror the interface-type-specific sections of the NMState
/// YAML API; which combinations are valid is enforced by NMState, not here.
#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
#[serde(rename_all = "kebab-case")]
pub struct InterfaceSpec {
    /// Interface name (e.g. device or bond name).
    pub name: String,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub description: Option<String>,
    /// Interface type string (raw identifier since `type` is a keyword).
    pub r#type: String,
    /// Desired state string (e.g. up/down — values defined by NMState).
    pub state: String,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub mac_address: Option<String>,
    /// Name of an interface to copy the MAC address from.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub copy_mac_from: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub mtu: Option<u32>,
    /// Name of the controller (e.g. bond or bridge) this interface belongs to.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub controller: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub ipv4: Option<IpStackSpec>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub ipv6: Option<IpStackSpec>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub ethernet: Option<EthernetSpec>,
    /// Bonding configuration (serialized as `link-aggregation`).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub link_aggregation: Option<BondSpec>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub vlan: Option<VlanSpec>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub vxlan: Option<VxlanSpec>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub mac_vtap: Option<MacVtapSpec>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub mac_vlan: Option<MacVlanSpec>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub infiniband: Option<InfinibandSpec>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub linux_bridge: Option<LinuxBridgeSpec>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub ovs_bridge: Option<OvsBridgeSpec>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub ethtool: Option<EthtoolSpec>,
}
|
||||||
|
|
||||||
|
/// Per-family IP configuration (used for both `ipv4` and `ipv6`).
/// All fields are optional and omitted from the manifest when `None`.
#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
#[serde(rename_all = "kebab-case")]
pub struct IpStackSpec {
    #[serde(skip_serializing_if = "Option::is_none")]
    pub enabled: Option<bool>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub dhcp: Option<bool>,
    /// IPv6 stateless autoconfiguration flag.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub autoconf: Option<bool>,
    /// Static addresses; relevant when dhcp/autoconf are not in use.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub address: Option<Vec<IpAddressSpec>>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub auto_dns: Option<bool>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub auto_gateway: Option<bool>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub auto_routes: Option<bool>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub dhcp_client_id: Option<String>,
    /// DHCPv6 unique identifier.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub dhcp_duid: Option<String>,
}

/// A single static IP address with its prefix length (e.g. 24 for a /24).
#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
#[serde(rename_all = "kebab-case")]
pub struct IpAddressSpec {
    pub ip: String,
    pub prefix_length: u8,
}

/// Ethernet-specific tuning for an interface.
#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
#[serde(rename_all = "kebab-case")]
pub struct EthernetSpec {
    #[serde(skip_serializing_if = "Option::is_none")]
    pub speed: Option<u32>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub duplex: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub auto_negotiation: Option<bool>,
}

/// Link-aggregation (bond) settings: bonding mode, member ports, and
/// free-form driver options (values are arbitrary JSON).
#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
#[serde(rename_all = "kebab-case")]
pub struct BondSpec {
    pub mode: String,
    pub ports: Vec<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub options: Option<BTreeMap<String, Value>>,
}
|
||||||
|
|
||||||
|
/// VLAN sub-interface: tag `id` on top of `base_iface`.
#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
#[serde(rename_all = "kebab-case")]
pub struct VlanSpec {
    pub base_iface: String,
    pub id: u16,
    /// VLAN protocol identifier; omitted when `None`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub protocol: Option<String>,
}

/// VXLAN tunnel interface on top of `base_iface`.
#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
#[serde(rename_all = "kebab-case")]
pub struct VxlanSpec {
    pub base_iface: String,
    /// VXLAN network identifier (VNI).
    pub id: u32,
    /// Remote endpoint address.
    pub remote: String,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub local: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub learning: Option<bool>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub destination_port: Option<u16>,
}

/// MACvtap interface on top of `base_iface`.
#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
#[serde(rename_all = "kebab-case")]
pub struct MacVtapSpec {
    pub base_iface: String,
    pub mode: String,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub promiscuous: Option<bool>,
}

/// MACvlan interface on top of `base_iface`.
#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
#[serde(rename_all = "kebab-case")]
pub struct MacVlanSpec {
    pub base_iface: String,
    pub mode: String,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub promiscuous: Option<bool>,
}

/// InfiniBand partition interface.
#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
#[serde(rename_all = "kebab-case")]
pub struct InfinibandSpec {
    pub base_iface: String,
    /// Partition key.
    pub pkey: String,
    pub mode: String,
}
|
||||||
|
|
||||||
|
/// Linux bridge configuration: bridge-level options plus attached ports.
#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
#[serde(rename_all = "kebab-case")]
pub struct LinuxBridgeSpec {
    #[serde(skip_serializing_if = "Option::is_none")]
    pub options: Option<LinuxBridgeOptions>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub ports: Option<Vec<LinuxBridgePort>>,
}

/// Bridge-level tunables for a Linux bridge.
#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
#[serde(rename_all = "kebab-case")]
pub struct LinuxBridgeOptions {
    #[serde(skip_serializing_if = "Option::is_none")]
    pub mac_ageing_time: Option<u32>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub multicast_snooping: Option<bool>,
    /// Spanning-tree protocol settings.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub stp: Option<StpOptions>,
}

/// Spanning-tree protocol parameters.
#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
#[serde(rename_all = "kebab-case")]
pub struct StpOptions {
    #[serde(skip_serializing_if = "Option::is_none")]
    pub enabled: Option<bool>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub forward_delay: Option<u16>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub hello_time: Option<u16>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub max_age: Option<u16>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub priority: Option<u16>,
}

/// One port attached to a Linux bridge.
#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
#[serde(rename_all = "kebab-case")]
pub struct LinuxBridgePort {
    pub name: String,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub vlan: Option<LinuxBridgePortVlan>,
}

/// Per-port VLAN filtering configuration (also reused by [`OvsPortSpec`]).
#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
#[serde(rename_all = "kebab-case")]
pub struct LinuxBridgePortVlan {
    /// VLAN mode string (values defined by NMState, e.g. access/trunk).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub mode: Option<String>,
    /// VLAN tags carried when trunking.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub trunk_tags: Option<Vec<VlanTag>>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub tag: Option<u16>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub enable_native: Option<bool>,
}

/// A VLAN tag, either a single `id` or an `id_range`.
#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
#[serde(rename_all = "kebab-case")]
pub struct VlanTag {
    pub id: u16,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub id_range: Option<VlanIdRange>,
}

/// Inclusive range of VLAN ids.
#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
#[serde(rename_all = "kebab-case")]
pub struct VlanIdRange {
    pub min: u16,
    pub max: u16,
}
|
||||||
|
|
||||||
|
/// Open vSwitch bridge configuration: bridge-level options plus ports.
#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
#[serde(rename_all = "kebab-case")]
pub struct OvsBridgeSpec {
    #[serde(skip_serializing_if = "Option::is_none")]
    pub options: Option<OvsBridgeOptions>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub ports: Option<Vec<OvsPortSpec>>,
}

/// Bridge-level tunables for an OVS bridge.
#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
#[serde(rename_all = "kebab-case")]
pub struct OvsBridgeOptions {
    #[serde(skip_serializing_if = "Option::is_none")]
    pub stp: Option<bool>,
    /// Rapid spanning tree.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub rstp: Option<bool>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub mcast_snooping_enable: Option<bool>,
}

/// One port attached to an OVS bridge; may itself be a bond.
#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
#[serde(rename_all = "kebab-case")]
pub struct OvsPortSpec {
    pub name: String,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub link_aggregation: Option<BondSpec>,
    /// VLAN filtering; shares the Linux-bridge VLAN shape.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub vlan: Option<LinuxBridgePortVlan>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub r#type: Option<String>,
}

/// Placeholder for ethtool settings; currently serializes as an empty map.
#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
#[serde(rename_all = "kebab-case")]
pub struct EthtoolSpec {
    // TODO: Properly describe this spec (https://nmstate.io/devel/yaml_api.html#ethtool)
}

/// Forward-error-correction settings under ethtool.
/// NOTE(review): declared but not referenced by [`EthtoolSpec`] yet.
#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
#[serde(rename_all = "kebab-case")]
pub struct EthtoolFecSpec {
    #[serde(skip_serializing_if = "Option::is_none")]
    pub auto: Option<bool>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub mode: Option<String>,
}
|
||||||
489
harmony/src/modules/okd/host_network.rs
Normal file
489
harmony/src/modules/okd/host_network.rs
Normal file
@@ -0,0 +1,489 @@
|
|||||||
|
use async_trait::async_trait;
|
||||||
|
use harmony_types::id::Id;
|
||||||
|
use log::{debug, info};
|
||||||
|
use serde::Serialize;
|
||||||
|
|
||||||
|
use crate::{
|
||||||
|
data::Version,
|
||||||
|
hardware::PhysicalHost,
|
||||||
|
interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
|
||||||
|
inventory::Inventory,
|
||||||
|
score::Score,
|
||||||
|
topology::{HostNetworkConfig, NetworkInterface, Switch, SwitchPort, Topology},
|
||||||
|
};
|
||||||
|
|
||||||
|
/// Score that configures the switch-side network (port lookup per MAC and
/// host network configuration) for a set of physical hosts.
#[derive(Debug, Clone, Serialize)]
pub struct HostNetworkConfigurationScore {
    /// Hosts whose interfaces should be matched to switch ports and configured.
    pub hosts: Vec<PhysicalHost>,
}
|
||||||
|
|
||||||
|
impl<T: Topology + Switch> Score<T> for HostNetworkConfigurationScore {
|
||||||
|
fn name(&self) -> String {
|
||||||
|
"HostNetworkConfigurationScore".into()
|
||||||
|
}
|
||||||
|
|
||||||
|
fn create_interpret(&self) -> Box<dyn Interpret<T>> {
|
||||||
|
Box::new(HostNetworkConfigurationInterpret {
|
||||||
|
score: self.clone(),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Interpret backing [`HostNetworkConfigurationScore`].
#[derive(Debug)]
pub struct HostNetworkConfigurationInterpret {
    // The originating score, kept whole so the host list is available when
    // executing and when formatting the final report.
    score: HostNetworkConfigurationScore,
}
|
||||||
|
|
||||||
|
impl HostNetworkConfigurationInterpret {
|
||||||
|
async fn configure_network_for_host<T: Topology + Switch>(
|
||||||
|
&self,
|
||||||
|
topology: &T,
|
||||||
|
host: &PhysicalHost,
|
||||||
|
current_host: &usize,
|
||||||
|
total_hosts: &usize,
|
||||||
|
) -> Result<HostNetworkConfig, InterpretError> {
|
||||||
|
if host.network.is_empty() {
|
||||||
|
info!("[Host {current_host}/{total_hosts}] No interfaces to configure, skipping");
|
||||||
|
return Ok(HostNetworkConfig {
|
||||||
|
host_id: host.id.clone(),
|
||||||
|
switch_ports: vec![],
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
let switch_ports = self
|
||||||
|
.collect_switch_ports_for_host(topology, host, current_host, total_hosts)
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
let config = HostNetworkConfig {
|
||||||
|
host_id: host.id.clone(),
|
||||||
|
switch_ports,
|
||||||
|
};
|
||||||
|
|
||||||
|
if !config.switch_ports.is_empty() {
|
||||||
|
info!(
|
||||||
|
"[Host {current_host}/{total_hosts}] Found {} ports for {} interfaces",
|
||||||
|
config.switch_ports.len(),
|
||||||
|
host.network.len()
|
||||||
|
);
|
||||||
|
|
||||||
|
info!("[Host {current_host}/{total_hosts}] Configuring host network...");
|
||||||
|
topology
|
||||||
|
.configure_host_network(&config)
|
||||||
|
.await
|
||||||
|
.map_err(|e| InterpretError::new(format!("Failed to configure host: {e}")))?;
|
||||||
|
} else {
|
||||||
|
info!(
|
||||||
|
"[Host {current_host}/{total_hosts}] No ports found for {} interfaces, skipping",
|
||||||
|
host.network.len()
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(config)
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn collect_switch_ports_for_host<T: Topology + Switch>(
|
||||||
|
&self,
|
||||||
|
topology: &T,
|
||||||
|
host: &PhysicalHost,
|
||||||
|
current_host: &usize,
|
||||||
|
total_hosts: &usize,
|
||||||
|
) -> Result<Vec<SwitchPort>, InterpretError> {
|
||||||
|
let mut switch_ports = vec![];
|
||||||
|
|
||||||
|
if host.network.is_empty() {
|
||||||
|
return Ok(switch_ports);
|
||||||
|
}
|
||||||
|
|
||||||
|
info!("[Host {current_host}/{total_hosts}] Collecting ports on switch...");
|
||||||
|
for network_interface in &host.network {
|
||||||
|
let mac_address = network_interface.mac_address;
|
||||||
|
|
||||||
|
match topology.get_port_for_mac_address(&mac_address).await {
|
||||||
|
Ok(Some(port)) => {
|
||||||
|
info!(
|
||||||
|
"[Host {current_host}/{total_hosts}] Found port '{port}' for '{mac_address}'"
|
||||||
|
);
|
||||||
|
switch_ports.push(SwitchPort {
|
||||||
|
interface: NetworkInterface {
|
||||||
|
name: network_interface.name.clone(),
|
||||||
|
mac_address,
|
||||||
|
speed_mbps: network_interface.speed_mbps,
|
||||||
|
mtu: network_interface.mtu,
|
||||||
|
},
|
||||||
|
port,
|
||||||
|
});
|
||||||
|
}
|
||||||
|
Ok(None) => debug!("No port found for '{mac_address}', skipping"),
|
||||||
|
Err(e) => {
|
||||||
|
return Err(InterpretError::new(format!(
|
||||||
|
"Failed to get port for host '{}': {}",
|
||||||
|
host.id, e
|
||||||
|
)));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(switch_ports)
|
||||||
|
}
|
||||||
|
|
||||||
|
fn format_host_configuration(&self, configs: Vec<HostNetworkConfig>) -> Vec<String> {
|
||||||
|
let mut report = vec![
|
||||||
|
"Network Configuration Report".to_string(),
|
||||||
|
"------------------------------------------------------------------".to_string(),
|
||||||
|
];
|
||||||
|
|
||||||
|
for config in configs {
|
||||||
|
let host = self
|
||||||
|
.score
|
||||||
|
.hosts
|
||||||
|
.iter()
|
||||||
|
.find(|h| h.id == config.host_id)
|
||||||
|
.unwrap();
|
||||||
|
|
||||||
|
println!("[Host] {host}");
|
||||||
|
|
||||||
|
if config.switch_ports.is_empty() {
|
||||||
|
report.push(format!(
|
||||||
|
"⏭️ Host {}: SKIPPED (No matching switch ports found)",
|
||||||
|
config.host_id
|
||||||
|
));
|
||||||
|
} else {
|
||||||
|
let mappings: Vec<String> = config
|
||||||
|
.switch_ports
|
||||||
|
.iter()
|
||||||
|
.map(|p| format!("[{} -> {}]", p.interface.name, p.port))
|
||||||
|
.collect();
|
||||||
|
|
||||||
|
report.push(format!(
|
||||||
|
"✅ Host {}: Bonded {} port(s) {}",
|
||||||
|
config.host_id,
|
||||||
|
config.switch_ports.len(),
|
||||||
|
mappings.join(", ")
|
||||||
|
));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
report
|
||||||
|
.push("------------------------------------------------------------------".to_string());
|
||||||
|
report
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[async_trait]
|
||||||
|
impl<T: Topology + Switch> Interpret<T> for HostNetworkConfigurationInterpret {
|
||||||
|
fn get_name(&self) -> InterpretName {
|
||||||
|
InterpretName::Custom("HostNetworkConfigurationInterpret")
|
||||||
|
}
|
||||||
|
|
||||||
|
fn get_version(&self) -> Version {
|
||||||
|
todo!()
|
||||||
|
}
|
||||||
|
|
||||||
|
fn get_status(&self) -> InterpretStatus {
|
||||||
|
todo!()
|
||||||
|
}
|
||||||
|
|
||||||
|
fn get_children(&self) -> Vec<Id> {
|
||||||
|
vec![]
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn execute(
|
||||||
|
&self,
|
||||||
|
_inventory: &Inventory,
|
||||||
|
topology: &T,
|
||||||
|
) -> Result<Outcome, InterpretError> {
|
||||||
|
if self.score.hosts.is_empty() {
|
||||||
|
return Ok(Outcome::noop("No hosts to configure".into()));
|
||||||
|
}
|
||||||
|
|
||||||
|
let host_count = self.score.hosts.len();
|
||||||
|
info!("Started network configuration for {host_count} host(s)...",);
|
||||||
|
|
||||||
|
info!("Setting up switch with sane defaults...");
|
||||||
|
topology
|
||||||
|
.setup_switch()
|
||||||
|
.await
|
||||||
|
.map_err(|e| InterpretError::new(format!("Switch setup failed: {e}")))?;
|
||||||
|
info!("Switch ready");
|
||||||
|
|
||||||
|
let mut current_host = 1;
|
||||||
|
let mut host_configurations = vec![];
|
||||||
|
|
||||||
|
for host in &self.score.hosts {
|
||||||
|
let host_configuration = self
|
||||||
|
.configure_network_for_host(topology, host, ¤t_host, &host_count)
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
host_configurations.push(host_configuration);
|
||||||
|
current_host += 1;
|
||||||
|
}
|
||||||
|
if current_host > 1 {
|
||||||
|
let details = self.format_host_configuration(host_configurations);
|
||||||
|
|
||||||
|
Ok(Outcome::success_with_details(
|
||||||
|
format!(
|
||||||
|
"Configured {}/{} host(s)",
|
||||||
|
current_host - 1,
|
||||||
|
self.score.hosts.len()
|
||||||
|
),
|
||||||
|
details,
|
||||||
|
))
|
||||||
|
} else {
|
||||||
|
Ok(Outcome::noop("No hosts configured".into()))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Unit tests for the host network configuration flow, using an in-memory
/// `TopologyWithSwitch` mock that records switch setup and per-host
/// configuration calls.
#[cfg(test)]
mod tests {
    use assertor::*;
    use harmony_types::{net::MacAddress, switch::PortLocation};
    use lazy_static::lazy_static;

    use crate::{
        hardware::HostCategory,
        topology::{
            HostNetworkConfig, PreparationError, PreparationOutcome, SwitchError, SwitchPort,
        },
    };
    use std::{
        str::FromStr,
        sync::{Arc, Mutex},
    };

    use super::*;

    // Shared fixtures: two known interfaces that map to PORT / ANOTHER_PORT
    // on the mock switch, and one interface with no matching port.
    lazy_static! {
        pub static ref HOST_ID: Id = Id::from_str("host-1").unwrap();
        pub static ref ANOTHER_HOST_ID: Id = Id::from_str("host-2").unwrap();
        pub static ref EXISTING_INTERFACE: NetworkInterface = NetworkInterface {
            mac_address: MacAddress::try_from("AA:BB:CC:DD:EE:F1".to_string()).unwrap(),
            name: "interface-1".into(),
            speed_mbps: None,
            mtu: 1,
        };
        pub static ref ANOTHER_EXISTING_INTERFACE: NetworkInterface = NetworkInterface {
            mac_address: MacAddress::try_from("AA:BB:CC:DD:EE:F2".to_string()).unwrap(),
            name: "interface-2".into(),
            speed_mbps: None,
            mtu: 1,
        };
        pub static ref UNKNOWN_INTERFACE: NetworkInterface = NetworkInterface {
            mac_address: MacAddress::try_from("11:22:33:44:55:61".to_string()).unwrap(),
            name: "unknown-interface".into(),
            speed_mbps: None,
            mtu: 1,
        };
        pub static ref PORT: PortLocation = PortLocation(1, 0, 42);
        pub static ref ANOTHER_PORT: PortLocation = PortLocation(2, 0, 42);
    }

    #[tokio::test]
    async fn should_setup_switch() {
        let host = given_host(&HOST_ID, vec![EXISTING_INTERFACE.clone()]);
        let score = given_score(vec![host]);
        let topology = TopologyWithSwitch::new();

        let _ = score.interpret(&Inventory::empty(), &topology).await;

        // The interpret must configure the switch before touching hosts.
        let switch_setup = topology.switch_setup.lock().unwrap();
        assert_that!(*switch_setup).is_true();
    }

    #[tokio::test]
    async fn host_with_one_mac_address_should_create_bond_with_one_interface() {
        let host = given_host(&HOST_ID, vec![EXISTING_INTERFACE.clone()]);
        let score = given_score(vec![host]);
        let topology = TopologyWithSwitch::new();

        let _ = score.interpret(&Inventory::empty(), &topology).await;

        let configured_host_networks = topology.configured_host_networks.lock().unwrap();
        assert_that!(*configured_host_networks).contains_exactly(vec![(
            HOST_ID.clone(),
            HostNetworkConfig {
                host_id: HOST_ID.clone(),
                switch_ports: vec![SwitchPort {
                    interface: EXISTING_INTERFACE.clone(),
                    port: PORT.clone(),
                }],
            },
        )]);
    }

    #[tokio::test]
    async fn host_with_multiple_mac_addresses_should_create_one_bond_with_all_interfaces() {
        let score = given_score(vec![given_host(
            &HOST_ID,
            vec![
                EXISTING_INTERFACE.clone(),
                ANOTHER_EXISTING_INTERFACE.clone(),
            ],
        )]);
        let topology = TopologyWithSwitch::new();

        let _ = score.interpret(&Inventory::empty(), &topology).await;

        // Both interfaces end up in a single config for the same host.
        let configured_host_networks = topology.configured_host_networks.lock().unwrap();
        assert_that!(*configured_host_networks).contains_exactly(vec![(
            HOST_ID.clone(),
            HostNetworkConfig {
                host_id: HOST_ID.clone(),
                switch_ports: vec![
                    SwitchPort {
                        interface: EXISTING_INTERFACE.clone(),
                        port: PORT.clone(),
                    },
                    SwitchPort {
                        interface: ANOTHER_EXISTING_INTERFACE.clone(),
                        port: ANOTHER_PORT.clone(),
                    },
                ],
            },
        )]);
    }

    #[tokio::test]
    async fn multiple_hosts_should_create_one_bond_per_host() {
        let score = given_score(vec![
            given_host(&HOST_ID, vec![EXISTING_INTERFACE.clone()]),
            given_host(&ANOTHER_HOST_ID, vec![ANOTHER_EXISTING_INTERFACE.clone()]),
        ]);
        let topology = TopologyWithSwitch::new();

        let _ = score.interpret(&Inventory::empty(), &topology).await;

        let configured_host_networks = topology.configured_host_networks.lock().unwrap();
        assert_that!(*configured_host_networks).contains_exactly(vec![
            (
                HOST_ID.clone(),
                HostNetworkConfig {
                    host_id: HOST_ID.clone(),
                    switch_ports: vec![SwitchPort {
                        interface: EXISTING_INTERFACE.clone(),
                        port: PORT.clone(),
                    }],
                },
            ),
            (
                ANOTHER_HOST_ID.clone(),
                HostNetworkConfig {
                    host_id: ANOTHER_HOST_ID.clone(),
                    switch_ports: vec![SwitchPort {
                        interface: ANOTHER_EXISTING_INTERFACE.clone(),
                        port: ANOTHER_PORT.clone(),
                    }],
                },
            ),
        ]);
    }

    #[tokio::test]
    async fn port_not_found_for_mac_address_should_not_configure_interface() {
        let score = given_score(vec![given_host(&HOST_ID, vec![UNKNOWN_INTERFACE.clone()])]);
        let topology = TopologyWithSwitch::new_port_not_found();

        let _ = score.interpret(&Inventory::empty(), &topology).await;

        // No matching port on the switch -> the host is skipped entirely.
        let configured_host_networks = topology.configured_host_networks.lock().unwrap();
        assert_that!(*configured_host_networks).is_empty();
    }

    // Builds a score over the given hosts.
    fn given_score(hosts: Vec<PhysicalHost>) -> HostNetworkConfigurationScore {
        HostNetworkConfigurationScore { hosts }
    }

    // Builds a minimal PhysicalHost with only its network interfaces populated.
    fn given_host(id: &Id, network_interfaces: Vec<NetworkInterface>) -> PhysicalHost {
        let network = network_interfaces.iter().map(given_interface).collect();

        PhysicalHost {
            id: id.clone(),
            category: HostCategory::Server,
            network,
            storage: vec![],
            labels: vec![],
            memory_modules: vec![],
            cpus: vec![],
        }
    }

    // Adapts the topology-level NetworkInterface fixture into the
    // inventory-agent representation stored on PhysicalHost.
    fn given_interface(
        interface: &NetworkInterface,
    ) -> harmony_inventory_agent::hwinfo::NetworkInterface {
        harmony_inventory_agent::hwinfo::NetworkInterface {
            name: interface.name.clone(),
            mac_address: interface.mac_address,
            speed_mbps: interface.speed_mbps,
            is_up: true,
            mtu: interface.mtu,
            ipv4_addresses: vec![],
            ipv6_addresses: vec![],
            driver: "driver".into(),
            firmware_version: None,
        }
    }

    // Mock topology + switch. Interior mutability via Mutex so the
    // `&self` trait methods can record calls.
    struct TopologyWithSwitch {
        // Ports handed out (in order) by get_port_for_mac_address.
        available_ports: Arc<Mutex<Vec<PortLocation>>>,
        // Every (host id, config) passed to configure_host_network.
        configured_host_networks: Arc<Mutex<Vec<(Id, HostNetworkConfig)>>>,
        // Whether setup_switch was called.
        switch_setup: Arc<Mutex<bool>>,
    }

    impl TopologyWithSwitch {
        fn new() -> Self {
            Self {
                available_ports: Arc::new(Mutex::new(vec![PORT.clone(), ANOTHER_PORT.clone()])),
                configured_host_networks: Arc::new(Mutex::new(vec![])),
                switch_setup: Arc::new(Mutex::new(false)),
            }
        }

        // Variant whose switch has no ports, so every MAC lookup misses.
        fn new_port_not_found() -> Self {
            Self {
                available_ports: Arc::new(Mutex::new(vec![])),
                configured_host_networks: Arc::new(Mutex::new(vec![])),
                switch_setup: Arc::new(Mutex::new(false)),
            }
        }
    }

    #[async_trait]
    impl Topology for TopologyWithSwitch {
        fn name(&self) -> &str {
            "SwitchWithPortTopology"
        }

        async fn ensure_ready(&self) -> Result<PreparationOutcome, PreparationError> {
            Ok(PreparationOutcome::Success { details: "".into() })
        }
    }

    #[async_trait]
    impl Switch for TopologyWithSwitch {
        async fn setup_switch(&self) -> Result<(), SwitchError> {
            let mut switch_configured = self.switch_setup.lock().unwrap();
            *switch_configured = true;
            Ok(())
        }

        // Pops the next available port regardless of the MAC asked for.
        async fn get_port_for_mac_address(
            &self,
            _mac_address: &MacAddress,
        ) -> Result<Option<PortLocation>, SwitchError> {
            let mut ports = self.available_ports.lock().unwrap();
            if ports.is_empty() {
                return Ok(None);
            }
            Ok(Some(ports.remove(0)))
        }

        async fn configure_host_network(
            &self,
            config: &HostNetworkConfig,
        ) -> Result<(), SwitchError> {
            let mut configured_host_networks = self.configured_host_networks.lock().unwrap();
            configured_host_networks.push((config.host_id.clone(), config.clone()));

            Ok(())
        }
    }
}
|
||||||
@@ -50,7 +50,7 @@
|
|||||||
use crate::{
|
use crate::{
|
||||||
modules::okd::{
|
modules::okd::{
|
||||||
OKDSetup01InventoryScore, OKDSetup02BootstrapScore, OKDSetup03ControlPlaneScore,
|
OKDSetup01InventoryScore, OKDSetup02BootstrapScore, OKDSetup03ControlPlaneScore,
|
||||||
OKDSetup04WorkersScore, OKDSetup05SanityCheckScore,
|
OKDSetup04WorkersScore, OKDSetup05SanityCheckScore, OKDSetupPersistNetworkBondScore,
|
||||||
bootstrap_06_installation_report::OKDSetup06InstallationReportScore,
|
bootstrap_06_installation_report::OKDSetup06InstallationReportScore,
|
||||||
},
|
},
|
||||||
score::Score,
|
score::Score,
|
||||||
@@ -65,6 +65,7 @@ impl OKDInstallationPipeline {
|
|||||||
Box::new(OKDSetup01InventoryScore::new()),
|
Box::new(OKDSetup01InventoryScore::new()),
|
||||||
Box::new(OKDSetup02BootstrapScore::new()),
|
Box::new(OKDSetup02BootstrapScore::new()),
|
||||||
Box::new(OKDSetup03ControlPlaneScore::new()),
|
Box::new(OKDSetup03ControlPlaneScore::new()),
|
||||||
|
Box::new(OKDSetupPersistNetworkBondScore::new()),
|
||||||
Box::new(OKDSetup04WorkersScore::new()),
|
Box::new(OKDSetup04WorkersScore::new()),
|
||||||
Box::new(OKDSetup05SanityCheckScore::new()),
|
Box::new(OKDSetup05SanityCheckScore::new()),
|
||||||
Box::new(OKDSetup06InstallationReportScore::new()),
|
Box::new(OKDSetup06InstallationReportScore::new()),
|
||||||
|
|||||||
@@ -6,6 +6,7 @@ mod bootstrap_05_sanity_check;
|
|||||||
mod bootstrap_06_installation_report;
|
mod bootstrap_06_installation_report;
|
||||||
pub mod bootstrap_dhcp;
|
pub mod bootstrap_dhcp;
|
||||||
pub mod bootstrap_load_balancer;
|
pub mod bootstrap_load_balancer;
|
||||||
|
mod bootstrap_persist_network_bond;
|
||||||
pub mod dhcp;
|
pub mod dhcp;
|
||||||
pub mod dns;
|
pub mod dns;
|
||||||
pub mod installation;
|
pub mod installation;
|
||||||
@@ -19,3 +20,6 @@ pub use bootstrap_03_control_plane::*;
|
|||||||
pub use bootstrap_04_workers::*;
|
pub use bootstrap_04_workers::*;
|
||||||
pub use bootstrap_05_sanity_check::*;
|
pub use bootstrap_05_sanity_check::*;
|
||||||
pub use bootstrap_06_installation_report::*;
|
pub use bootstrap_06_installation_report::*;
|
||||||
|
pub use bootstrap_persist_network_bond::*;
|
||||||
|
pub mod crd;
|
||||||
|
pub mod host_network;
|
||||||
|
|||||||
@@ -12,7 +12,8 @@ use crate::modules::monitoring::kube_prometheus::crd::crd_alertmanager_config::C
|
|||||||
use crate::modules::monitoring::kube_prometheus::crd::crd_default_rules::build_default_application_rules;
|
use crate::modules::monitoring::kube_prometheus::crd::crd_default_rules::build_default_application_rules;
|
||||||
use crate::modules::monitoring::kube_prometheus::crd::crd_grafana::{
|
use crate::modules::monitoring::kube_prometheus::crd::crd_grafana::{
|
||||||
Grafana, GrafanaDashboard, GrafanaDashboardSpec, GrafanaDatasource, GrafanaDatasourceConfig,
|
Grafana, GrafanaDashboard, GrafanaDashboardSpec, GrafanaDatasource, GrafanaDatasourceConfig,
|
||||||
GrafanaDatasourceSpec, GrafanaSpec,
|
GrafanaDatasourceJsonData, GrafanaDatasourceSpec, GrafanaSecretKeyRef, GrafanaSpec,
|
||||||
|
GrafanaValueFrom, GrafanaValueSource,
|
||||||
};
|
};
|
||||||
use crate::modules::monitoring::kube_prometheus::crd::crd_prometheus_rules::{
|
use crate::modules::monitoring::kube_prometheus::crd::crd_prometheus_rules::{
|
||||||
PrometheusRule, PrometheusRuleSpec, RuleGroup,
|
PrometheusRule, PrometheusRuleSpec, RuleGroup,
|
||||||
@@ -39,7 +40,7 @@ use crate::{
|
|||||||
};
|
};
|
||||||
use harmony_types::id::Id;
|
use harmony_types::id::Id;
|
||||||
|
|
||||||
use super::prometheus::PrometheusApplicationMonitoring;
|
use super::prometheus::PrometheusMonitoring;
|
||||||
|
|
||||||
#[derive(Clone, Debug, Serialize)]
|
#[derive(Clone, Debug, Serialize)]
|
||||||
pub struct K8sPrometheusCRDAlertingScore {
|
pub struct K8sPrometheusCRDAlertingScore {
|
||||||
@@ -49,7 +50,7 @@ pub struct K8sPrometheusCRDAlertingScore {
|
|||||||
pub prometheus_rules: Vec<RuleGroup>,
|
pub prometheus_rules: Vec<RuleGroup>,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<T: Topology + K8sclient + PrometheusApplicationMonitoring<CRDPrometheus>> Score<T>
|
impl<T: Topology + K8sclient + PrometheusMonitoring<CRDPrometheus>> Score<T>
|
||||||
for K8sPrometheusCRDAlertingScore
|
for K8sPrometheusCRDAlertingScore
|
||||||
{
|
{
|
||||||
fn create_interpret(&self) -> Box<dyn crate::interpret::Interpret<T>> {
|
fn create_interpret(&self) -> Box<dyn crate::interpret::Interpret<T>> {
|
||||||
@@ -75,7 +76,7 @@ pub struct K8sPrometheusCRDAlertingInterpret {
|
|||||||
}
|
}
|
||||||
|
|
||||||
#[async_trait]
|
#[async_trait]
|
||||||
impl<T: Topology + K8sclient + PrometheusApplicationMonitoring<CRDPrometheus>> Interpret<T>
|
impl<T: Topology + K8sclient + PrometheusMonitoring<CRDPrometheus>> Interpret<T>
|
||||||
for K8sPrometheusCRDAlertingInterpret
|
for K8sPrometheusCRDAlertingInterpret
|
||||||
{
|
{
|
||||||
async fn execute(
|
async fn execute(
|
||||||
@@ -466,10 +467,13 @@ impl K8sPrometheusCRDAlertingInterpret {
|
|||||||
match_labels: label.clone(),
|
match_labels: label.clone(),
|
||||||
match_expressions: vec![],
|
match_expressions: vec![],
|
||||||
};
|
};
|
||||||
let mut json_data = BTreeMap::new();
|
|
||||||
json_data.insert("timeInterval".to_string(), "5s".to_string());
|
|
||||||
let namespace = self.sender.namespace.clone();
|
let namespace = self.sender.namespace.clone();
|
||||||
|
let json_data = GrafanaDatasourceJsonData {
|
||||||
|
time_interval: Some("5s".to_string()),
|
||||||
|
http_header_name1: None,
|
||||||
|
tls_skip_verify: Some(true),
|
||||||
|
oauth_pass_thru: Some(true),
|
||||||
|
};
|
||||||
let json = build_default_dashboard(&namespace);
|
let json = build_default_dashboard(&namespace);
|
||||||
|
|
||||||
let graf_data_source = GrafanaDatasource {
|
let graf_data_source = GrafanaDatasource {
|
||||||
@@ -495,7 +499,11 @@ impl K8sPrometheusCRDAlertingInterpret {
|
|||||||
"http://prometheus-operated.{}.svc.cluster.local:9090",
|
"http://prometheus-operated.{}.svc.cluster.local:9090",
|
||||||
self.sender.namespace.clone()
|
self.sender.namespace.clone()
|
||||||
),
|
),
|
||||||
|
secure_json_data: None,
|
||||||
|
is_default: None,
|
||||||
|
editable: None,
|
||||||
},
|
},
|
||||||
|
values_from: None,
|
||||||
},
|
},
|
||||||
};
|
};
|
||||||
|
|
||||||
@@ -516,7 +524,9 @@ impl K8sPrometheusCRDAlertingInterpret {
|
|||||||
spec: GrafanaDashboardSpec {
|
spec: GrafanaDashboardSpec {
|
||||||
resync_period: Some("30s".to_string()),
|
resync_period: Some("30s".to_string()),
|
||||||
instance_selector: labels.clone(),
|
instance_selector: labels.clone(),
|
||||||
json,
|
json: Some(json),
|
||||||
|
grafana_com: None,
|
||||||
|
datasources: None,
|
||||||
},
|
},
|
||||||
};
|
};
|
||||||
|
|
||||||
|
|||||||
@@ -9,11 +9,17 @@ use crate::{
|
|||||||
};
|
};
|
||||||
|
|
||||||
#[async_trait]
|
#[async_trait]
|
||||||
pub trait PrometheusApplicationMonitoring<S: AlertSender> {
|
pub trait PrometheusMonitoring<S: AlertSender> {
|
||||||
async fn install_prometheus(
|
async fn install_prometheus(
|
||||||
&self,
|
&self,
|
||||||
sender: &S,
|
sender: &S,
|
||||||
inventory: &Inventory,
|
inventory: &Inventory,
|
||||||
receivers: Option<Vec<Box<dyn AlertReceiver<S>>>>,
|
receivers: Option<Vec<Box<dyn AlertReceiver<S>>>>,
|
||||||
) -> Result<PreparationOutcome, PreparationError>;
|
) -> Result<PreparationOutcome, PreparationError>;
|
||||||
|
|
||||||
|
async fn ensure_prometheus_operator(
|
||||||
|
&self,
|
||||||
|
sender: &S,
|
||||||
|
inventory: &Inventory,
|
||||||
|
) -> Result<PreparationOutcome, PreparationError>;
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -38,7 +38,7 @@ use crate::{
|
|||||||
};
|
};
|
||||||
use harmony_types::id::Id;
|
use harmony_types::id::Id;
|
||||||
|
|
||||||
use super::prometheus::PrometheusApplicationMonitoring;
|
use super::prometheus::PrometheusMonitoring;
|
||||||
|
|
||||||
#[derive(Clone, Debug, Serialize)]
|
#[derive(Clone, Debug, Serialize)]
|
||||||
pub struct RHOBAlertingScore {
|
pub struct RHOBAlertingScore {
|
||||||
@@ -48,8 +48,8 @@ pub struct RHOBAlertingScore {
|
|||||||
pub prometheus_rules: Vec<RuleGroup>,
|
pub prometheus_rules: Vec<RuleGroup>,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<T: Topology + K8sclient + Ingress + PrometheusApplicationMonitoring<RHOBObservability>>
|
impl<T: Topology + K8sclient + Ingress + PrometheusMonitoring<RHOBObservability>> Score<T>
|
||||||
Score<T> for RHOBAlertingScore
|
for RHOBAlertingScore
|
||||||
{
|
{
|
||||||
fn create_interpret(&self) -> Box<dyn crate::interpret::Interpret<T>> {
|
fn create_interpret(&self) -> Box<dyn crate::interpret::Interpret<T>> {
|
||||||
Box::new(RHOBAlertingInterpret {
|
Box::new(RHOBAlertingInterpret {
|
||||||
@@ -74,8 +74,8 @@ pub struct RHOBAlertingInterpret {
|
|||||||
}
|
}
|
||||||
|
|
||||||
#[async_trait]
|
#[async_trait]
|
||||||
impl<T: Topology + K8sclient + Ingress + PrometheusApplicationMonitoring<RHOBObservability>>
|
impl<T: Topology + K8sclient + Ingress + PrometheusMonitoring<RHOBObservability>> Interpret<T>
|
||||||
Interpret<T> for RHOBAlertingInterpret
|
for RHOBAlertingInterpret
|
||||||
{
|
{
|
||||||
async fn execute(
|
async fn execute(
|
||||||
&self,
|
&self,
|
||||||
|
|||||||
@@ -4,7 +4,7 @@ use std::{
|
|||||||
};
|
};
|
||||||
|
|
||||||
use async_trait::async_trait;
|
use async_trait::async_trait;
|
||||||
use log::{info, warn};
|
use log::{debug, warn};
|
||||||
use serde::{Deserialize, Serialize};
|
use serde::{Deserialize, Serialize};
|
||||||
use tokio::time::sleep;
|
use tokio::time::sleep;
|
||||||
|
|
||||||
@@ -19,8 +19,8 @@ use harmony_types::id::Id;
|
|||||||
|
|
||||||
#[derive(Debug, Clone, Serialize)]
|
#[derive(Debug, Clone, Serialize)]
|
||||||
pub struct CephRemoveOsd {
|
pub struct CephRemoveOsd {
|
||||||
osd_deployment_name: String,
|
pub osd_deployment_name: String,
|
||||||
rook_ceph_namespace: String,
|
pub rook_ceph_namespace: String,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<T: Topology + K8sclient> Score<T> for CephRemoveOsd {
|
impl<T: Topology + K8sclient> Score<T> for CephRemoveOsd {
|
||||||
@@ -54,18 +54,17 @@ impl<T: Topology + K8sclient> Interpret<T> for CephRemoveOsdInterpret {
|
|||||||
self.verify_deployment_scaled(client.clone()).await?;
|
self.verify_deployment_scaled(client.clone()).await?;
|
||||||
self.delete_deployment(client.clone()).await?;
|
self.delete_deployment(client.clone()).await?;
|
||||||
self.verify_deployment_deleted(client.clone()).await?;
|
self.verify_deployment_deleted(client.clone()).await?;
|
||||||
let osd_id_full = self.get_ceph_osd_id().unwrap();
|
self.purge_ceph_osd(client.clone()).await?;
|
||||||
self.purge_ceph_osd(client.clone(), &osd_id_full).await?;
|
self.verify_ceph_osd_removal(client.clone()).await?;
|
||||||
self.verify_ceph_osd_removal(client.clone(), &osd_id_full)
|
|
||||||
.await?;
|
|
||||||
|
|
||||||
|
let osd_id_full = self.get_ceph_osd_id().unwrap();
|
||||||
Ok(Outcome::success(format!(
|
Ok(Outcome::success(format!(
|
||||||
"Successfully removed OSD {} from rook-ceph cluster by deleting deployment {}",
|
"Successfully removed OSD {} from rook-ceph cluster by deleting deployment {}",
|
||||||
osd_id_full, self.score.osd_deployment_name
|
osd_id_full, self.score.osd_deployment_name
|
||||||
)))
|
)))
|
||||||
}
|
}
|
||||||
fn get_name(&self) -> InterpretName {
|
fn get_name(&self) -> InterpretName {
|
||||||
todo!()
|
InterpretName::CephRemoveOsd
|
||||||
}
|
}
|
||||||
|
|
||||||
fn get_version(&self) -> Version {
|
fn get_version(&self) -> Version {
|
||||||
@@ -82,7 +81,7 @@ impl<T: Topology + K8sclient> Interpret<T> for CephRemoveOsdInterpret {
|
|||||||
}
|
}
|
||||||
|
|
||||||
impl CephRemoveOsdInterpret {
|
impl CephRemoveOsdInterpret {
|
||||||
pub fn get_ceph_osd_id(&self) -> Result<String, InterpretError> {
|
pub fn get_ceph_osd_id_numeric(&self) -> Result<String, InterpretError> {
|
||||||
let osd_id_numeric = self
|
let osd_id_numeric = self
|
||||||
.score
|
.score
|
||||||
.osd_deployment_name
|
.osd_deployment_name
|
||||||
@@ -94,9 +93,14 @@ impl CephRemoveOsdInterpret {
|
|||||||
self.score.osd_deployment_name
|
self.score.osd_deployment_name
|
||||||
))
|
))
|
||||||
})?;
|
})?;
|
||||||
|
Ok(osd_id_numeric.to_string())
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn get_ceph_osd_id(&self) -> Result<String, InterpretError> {
|
||||||
|
let osd_id_numeric = self.get_ceph_osd_id_numeric().unwrap();
|
||||||
let osd_id_full = format!("osd.{}", osd_id_numeric);
|
let osd_id_full = format!("osd.{}", osd_id_numeric);
|
||||||
|
|
||||||
info!(
|
debug!(
|
||||||
"Targeting Ceph OSD: {} (parsed from deployment {})",
|
"Targeting Ceph OSD: {} (parsed from deployment {})",
|
||||||
osd_id_full, self.score.osd_deployment_name
|
osd_id_full, self.score.osd_deployment_name
|
||||||
);
|
);
|
||||||
@@ -108,6 +112,7 @@ impl CephRemoveOsdInterpret {
|
|||||||
&self,
|
&self,
|
||||||
client: Arc<K8sClient>,
|
client: Arc<K8sClient>,
|
||||||
) -> Result<Outcome, InterpretError> {
|
) -> Result<Outcome, InterpretError> {
|
||||||
|
debug!("verifying toolbox exists");
|
||||||
let toolbox_dep = "rook-ceph-tools".to_string();
|
let toolbox_dep = "rook-ceph-tools".to_string();
|
||||||
|
|
||||||
match client
|
match client
|
||||||
@@ -149,7 +154,7 @@ impl CephRemoveOsdInterpret {
|
|||||||
&self,
|
&self,
|
||||||
client: Arc<K8sClient>,
|
client: Arc<K8sClient>,
|
||||||
) -> Result<Outcome, InterpretError> {
|
) -> Result<Outcome, InterpretError> {
|
||||||
info!(
|
debug!(
|
||||||
"Scaling down OSD deployment: {}",
|
"Scaling down OSD deployment: {}",
|
||||||
self.score.osd_deployment_name
|
self.score.osd_deployment_name
|
||||||
);
|
);
|
||||||
@@ -172,7 +177,7 @@ impl CephRemoveOsdInterpret {
|
|||||||
) -> Result<Outcome, InterpretError> {
|
) -> Result<Outcome, InterpretError> {
|
||||||
let (timeout, interval, start) = self.build_timer();
|
let (timeout, interval, start) = self.build_timer();
|
||||||
|
|
||||||
info!("Waiting for OSD deployment to scale down to 0 replicas");
|
debug!("Waiting for OSD deployment to scale down to 0 replicas");
|
||||||
loop {
|
loop {
|
||||||
let dep = client
|
let dep = client
|
||||||
.get_deployment(
|
.get_deployment(
|
||||||
@@ -180,11 +185,9 @@ impl CephRemoveOsdInterpret {
|
|||||||
Some(&self.score.rook_ceph_namespace),
|
Some(&self.score.rook_ceph_namespace),
|
||||||
)
|
)
|
||||||
.await?;
|
.await?;
|
||||||
|
|
||||||
if let Some(deployment) = dep {
|
if let Some(deployment) = dep {
|
||||||
if let Some(status) = deployment.status {
|
if let Some(status) = deployment.status {
|
||||||
if status.replicas.unwrap_or(1) == 0 && status.ready_replicas.unwrap_or(1) == 0
|
if status.replicas == None && status.ready_replicas == None {
|
||||||
{
|
|
||||||
return Ok(Outcome::success(
|
return Ok(Outcome::success(
|
||||||
"Deployment successfully scaled down.".to_string(),
|
"Deployment successfully scaled down.".to_string(),
|
||||||
));
|
));
|
||||||
@@ -212,7 +215,7 @@ impl CephRemoveOsdInterpret {
|
|||||||
&self,
|
&self,
|
||||||
client: Arc<K8sClient>,
|
client: Arc<K8sClient>,
|
||||||
) -> Result<Outcome, InterpretError> {
|
) -> Result<Outcome, InterpretError> {
|
||||||
info!(
|
debug!(
|
||||||
"Deleting OSD deployment: {}",
|
"Deleting OSD deployment: {}",
|
||||||
self.score.osd_deployment_name
|
self.score.osd_deployment_name
|
||||||
);
|
);
|
||||||
@@ -234,7 +237,7 @@ impl CephRemoveOsdInterpret {
|
|||||||
) -> Result<Outcome, InterpretError> {
|
) -> Result<Outcome, InterpretError> {
|
||||||
let (timeout, interval, start) = self.build_timer();
|
let (timeout, interval, start) = self.build_timer();
|
||||||
|
|
||||||
info!("Waiting for OSD deployment to scale down to 0 replicas");
|
debug!("Verifying OSD deployment deleted");
|
||||||
loop {
|
loop {
|
||||||
let dep = client
|
let dep = client
|
||||||
.get_deployment(
|
.get_deployment(
|
||||||
@@ -244,7 +247,7 @@ impl CephRemoveOsdInterpret {
|
|||||||
.await?;
|
.await?;
|
||||||
|
|
||||||
if dep.is_none() {
|
if dep.is_none() {
|
||||||
info!(
|
debug!(
|
||||||
"Deployment {} successfully deleted.",
|
"Deployment {} successfully deleted.",
|
||||||
self.score.osd_deployment_name
|
self.score.osd_deployment_name
|
||||||
);
|
);
|
||||||
@@ -276,12 +279,10 @@ impl CephRemoveOsdInterpret {
|
|||||||
Ok(tree)
|
Ok(tree)
|
||||||
}
|
}
|
||||||
|
|
||||||
pub async fn purge_ceph_osd(
|
pub async fn purge_ceph_osd(&self, client: Arc<K8sClient>) -> Result<Outcome, InterpretError> {
|
||||||
&self,
|
let osd_id_numeric = self.get_ceph_osd_id_numeric().unwrap();
|
||||||
client: Arc<K8sClient>,
|
let osd_id_full = self.get_ceph_osd_id().unwrap();
|
||||||
osd_id_full: &str,
|
debug!(
|
||||||
) -> Result<Outcome, InterpretError> {
|
|
||||||
info!(
|
|
||||||
"Purging OSD {} from Ceph cluster and removing its auth key",
|
"Purging OSD {} from Ceph cluster and removing its auth key",
|
||||||
osd_id_full
|
osd_id_full
|
||||||
);
|
);
|
||||||
@@ -291,8 +292,9 @@ impl CephRemoveOsdInterpret {
|
|||||||
"app".to_string(),
|
"app".to_string(),
|
||||||
Some(&self.score.rook_ceph_namespace),
|
Some(&self.score.rook_ceph_namespace),
|
||||||
vec![
|
vec![
|
||||||
format!("ceph osd purge {osd_id_full} --yes-i-really-mean-it").as_str(),
|
"sh",
|
||||||
format!("ceph auth del osd.{osd_id_full}").as_str(),
|
"-c",
|
||||||
|
format!("ceph osd purge {osd_id_numeric} --yes-i-really-mean-it && ceph auth del {osd_id_full}").as_str(),
|
||||||
],
|
],
|
||||||
)
|
)
|
||||||
.await?;
|
.await?;
|
||||||
@@ -305,10 +307,10 @@ impl CephRemoveOsdInterpret {
|
|||||||
pub async fn verify_ceph_osd_removal(
|
pub async fn verify_ceph_osd_removal(
|
||||||
&self,
|
&self,
|
||||||
client: Arc<K8sClient>,
|
client: Arc<K8sClient>,
|
||||||
osd_id_full: &str,
|
|
||||||
) -> Result<Outcome, InterpretError> {
|
) -> Result<Outcome, InterpretError> {
|
||||||
let (timeout, interval, start) = self.build_timer();
|
let (timeout, interval, start) = self.build_timer();
|
||||||
info!(
|
let osd_id_full = self.get_ceph_osd_id().unwrap();
|
||||||
|
debug!(
|
||||||
"Verifying OSD {} has been removed from the Ceph tree...",
|
"Verifying OSD {} has been removed from the Ceph tree...",
|
||||||
osd_id_full
|
osd_id_full
|
||||||
);
|
);
|
||||||
@@ -318,7 +320,7 @@ impl CephRemoveOsdInterpret {
|
|||||||
"rook-ceph-tools".to_string(),
|
"rook-ceph-tools".to_string(),
|
||||||
"app".to_string(),
|
"app".to_string(),
|
||||||
Some(&self.score.rook_ceph_namespace),
|
Some(&self.score.rook_ceph_namespace),
|
||||||
vec!["ceph osd tree -f json"],
|
vec!["sh", "-c", "ceph osd tree -f json"],
|
||||||
)
|
)
|
||||||
.await?;
|
.await?;
|
||||||
let tree =
|
let tree =
|
||||||
@@ -1,2 +1,2 @@
|
|||||||
pub mod ceph_osd_replacement_score;
|
pub mod ceph_remove_osd_score;
|
||||||
pub mod ceph_validate_health_score;
|
pub mod ceph_validate_health_score;
|
||||||
|
|||||||
@@ -40,7 +40,7 @@ pub fn init() {
|
|||||||
HarmonyEvent::HarmonyFinished => {
|
HarmonyEvent::HarmonyFinished => {
|
||||||
if !details.is_empty() {
|
if !details.is_empty() {
|
||||||
println!(
|
println!(
|
||||||
"\n{} All done! Here's what's next for you:",
|
"\n{} All done! Here's a few info for you:",
|
||||||
theme::EMOJI_SUMMARY
|
theme::EMOJI_SUMMARY
|
||||||
);
|
);
|
||||||
for detail in details.iter() {
|
for detail in details.iter() {
|
||||||
|
|||||||
@@ -54,6 +54,9 @@ struct DeployArgs {
|
|||||||
|
|
||||||
#[arg(long = "profile", short = 'p', default_value = "dev")]
|
#[arg(long = "profile", short = 'p', default_value = "dev")]
|
||||||
harmony_profile: HarmonyProfile,
|
harmony_profile: HarmonyProfile,
|
||||||
|
|
||||||
|
#[arg(long = "dry-run", short = 'd', default_value = "false")]
|
||||||
|
dry_run: bool,
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Args, Clone, Debug)]
|
#[derive(Args, Clone, Debug)]
|
||||||
@@ -178,6 +181,7 @@ async fn main() {
|
|||||||
command
|
command
|
||||||
.env("HARMONY_USE_LOCAL_K3D", format!("{use_local_k3d}"))
|
.env("HARMONY_USE_LOCAL_K3D", format!("{use_local_k3d}"))
|
||||||
.env("HARMONY_PROFILE", format!("{}", args.harmony_profile))
|
.env("HARMONY_PROFILE", format!("{}", args.harmony_profile))
|
||||||
|
.env("HARMONY_DRY_RUN", format!("{}", args.dry_run))
|
||||||
.arg("-y")
|
.arg("-y")
|
||||||
.arg("-a");
|
.arg("-a");
|
||||||
|
|
||||||
|
|||||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user