Compare commits
24 Commits
feat/webap
...
feat/rebui
| Author | SHA1 | Date | |
|---|---|---|---|
| 8ee3f8a4ad | |||
| d3634a6313 | |||
| a0a8d5277c | |||
| 43b04edbae | |||
| 755a4b7749 | |||
| 66d346a10c | |||
| 06a004a65d | |||
| 9d4e6acac0 | |||
| 4ff57062ae | |||
| 50ce54ea66 | |||
|
|
827a49e56b | ||
| 95cfc03518 | |||
| c80ede706b | |||
| b2825ec1ef | |||
| 609d7acb5d | |||
| de761cf538 | |||
| ce91ee0168 | |||
| c0d54a4466 | |||
| fc384599a1 | |||
| 7dff70edcf | |||
| 06a0c44c3c | |||
| 85bec66e58 | |||
| 1f3796f503 | |||
| 58b6268989 |
31
Cargo.lock
generated
31
Cargo.lock
generated
@@ -690,6 +690,23 @@ dependencies = [
|
||||
"tokio",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "brocade-switch"
|
||||
version = "0.1.0"
|
||||
dependencies = [
|
||||
"async-trait",
|
||||
"brocade",
|
||||
"env_logger",
|
||||
"harmony",
|
||||
"harmony_cli",
|
||||
"harmony_macros",
|
||||
"harmony_types",
|
||||
"log",
|
||||
"serde",
|
||||
"tokio",
|
||||
"url",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "brotli"
|
||||
version = "8.0.2"
|
||||
@@ -2479,6 +2496,19 @@ dependencies = [
|
||||
"tokio",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "harmony_inventory_builder"
|
||||
version = "0.1.0"
|
||||
dependencies = [
|
||||
"cidr",
|
||||
"harmony",
|
||||
"harmony_cli",
|
||||
"harmony_macros",
|
||||
"harmony_types",
|
||||
"tokio",
|
||||
"url",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "harmony_macros"
|
||||
version = "0.1.0"
|
||||
@@ -2544,6 +2574,7 @@ dependencies = [
|
||||
name = "harmony_types"
|
||||
version = "0.1.0"
|
||||
dependencies = [
|
||||
"log",
|
||||
"rand 0.9.2",
|
||||
"serde",
|
||||
"url",
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
use std::net::{IpAddr, Ipv4Addr};
|
||||
|
||||
use brocade::BrocadeOptions;
|
||||
use brocade::{BrocadeOptions, ssh};
|
||||
use harmony_secret::{Secret, SecretManager};
|
||||
use harmony_types::switch::PortLocation;
|
||||
use serde::{Deserialize, Serialize};
|
||||
@@ -16,23 +16,28 @@ async fn main() {
|
||||
env_logger::Builder::from_env(env_logger::Env::default().default_filter_or("info")).init();
|
||||
|
||||
// let ip = IpAddr::V4(Ipv4Addr::new(10, 0, 0, 250)); // old brocade @ ianlet
|
||||
let ip = IpAddr::V4(Ipv4Addr::new(192, 168, 55, 101)); // brocade @ sto1
|
||||
let ip = IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)); // brocade @ sto1
|
||||
// let ip = IpAddr::V4(Ipv4Addr::new(192, 168, 4, 11)); // brocade @ st
|
||||
let switch_addresses = vec![ip];
|
||||
|
||||
let config = SecretManager::get_or_prompt::<BrocadeSwitchAuth>()
|
||||
.await
|
||||
.unwrap();
|
||||
// let config = SecretManager::get_or_prompt::<BrocadeSwitchAuth>()
|
||||
// .await
|
||||
// .unwrap();
|
||||
|
||||
let brocade = brocade::init(
|
||||
&switch_addresses,
|
||||
22,
|
||||
&config.username,
|
||||
&config.password,
|
||||
Some(BrocadeOptions {
|
||||
// &config.username,
|
||||
// &config.password,
|
||||
"admin",
|
||||
"password",
|
||||
BrocadeOptions {
|
||||
dry_run: true,
|
||||
ssh: ssh::SshOptions {
|
||||
port: 2222,
|
||||
..Default::default()
|
||||
},
|
||||
..Default::default()
|
||||
}),
|
||||
},
|
||||
)
|
||||
.await
|
||||
.expect("Brocade client failed to connect");
|
||||
@@ -54,6 +59,7 @@ async fn main() {
|
||||
}
|
||||
|
||||
println!("--------------");
|
||||
todo!();
|
||||
let channel_name = "1";
|
||||
brocade.clear_port_channel(channel_name).await.unwrap();
|
||||
|
||||
|
||||
@@ -140,7 +140,7 @@ impl BrocadeClient for FastIronClient {
|
||||
|
||||
async fn configure_interfaces(
|
||||
&self,
|
||||
_interfaces: Vec<(String, PortOperatingMode)>,
|
||||
_interfaces: &Vec<(String, PortOperatingMode)>,
|
||||
) -> Result<(), Error> {
|
||||
todo!()
|
||||
}
|
||||
|
||||
@@ -14,11 +14,12 @@ use async_trait::async_trait;
|
||||
use harmony_types::net::MacAddress;
|
||||
use harmony_types::switch::{PortDeclaration, PortLocation};
|
||||
use regex::Regex;
|
||||
use serde::Serialize;
|
||||
|
||||
mod fast_iron;
|
||||
mod network_operating_system;
|
||||
mod shell;
|
||||
mod ssh;
|
||||
pub mod ssh;
|
||||
|
||||
#[derive(Default, Clone, Debug)]
|
||||
pub struct BrocadeOptions {
|
||||
@@ -31,6 +32,7 @@ pub struct BrocadeOptions {
|
||||
pub struct TimeoutConfig {
|
||||
pub shell_ready: Duration,
|
||||
pub command_execution: Duration,
|
||||
pub command_output: Duration,
|
||||
pub cleanup: Duration,
|
||||
pub message_wait: Duration,
|
||||
}
|
||||
@@ -40,6 +42,7 @@ impl Default for TimeoutConfig {
|
||||
Self {
|
||||
shell_ready: Duration::from_secs(10),
|
||||
command_execution: Duration::from_secs(60), // Commands like `deploy` (for a LAG) can take a while
|
||||
command_output: Duration::from_secs(5), // Delay to start logging "waiting for command output"
|
||||
cleanup: Duration::from_secs(10),
|
||||
message_wait: Duration::from_millis(500),
|
||||
}
|
||||
@@ -116,7 +119,7 @@ impl fmt::Display for InterfaceType {
|
||||
}
|
||||
|
||||
/// Defines the primary configuration mode of a switch interface, representing mutually exclusive roles.
|
||||
#[derive(Debug, PartialEq, Eq, Clone)]
|
||||
#[derive(Debug, PartialEq, Eq, Clone, Serialize)]
|
||||
pub enum PortOperatingMode {
|
||||
/// The interface is explicitly configured for Brocade fabric roles (ISL or Trunk enabled).
|
||||
Fabric,
|
||||
@@ -139,12 +142,11 @@ pub enum InterfaceStatus {
|
||||
|
||||
pub async fn init(
|
||||
ip_addresses: &[IpAddr],
|
||||
port: u16,
|
||||
username: &str,
|
||||
password: &str,
|
||||
options: Option<BrocadeOptions>,
|
||||
options: BrocadeOptions,
|
||||
) -> Result<Box<dyn BrocadeClient + Send + Sync>, Error> {
|
||||
let shell = BrocadeShell::init(ip_addresses, port, username, password, options).await?;
|
||||
let shell = BrocadeShell::init(ip_addresses, username, password, options).await?;
|
||||
|
||||
let version_info = shell
|
||||
.with_session(ExecutionMode::Regular, |session| {
|
||||
@@ -206,7 +208,7 @@ pub trait BrocadeClient: std::fmt::Debug {
|
||||
/// Configures a set of interfaces to be operated with a specified mode (access ports, ISL, etc.).
|
||||
async fn configure_interfaces(
|
||||
&self,
|
||||
interfaces: Vec<(String, PortOperatingMode)>,
|
||||
interfaces: &Vec<(String, PortOperatingMode)>,
|
||||
) -> Result<(), Error>;
|
||||
|
||||
/// Scans the existing configuration to find the next available (unused)
|
||||
|
||||
@@ -3,6 +3,7 @@ use std::str::FromStr;
|
||||
use async_trait::async_trait;
|
||||
use harmony_types::switch::{PortDeclaration, PortLocation};
|
||||
use log::{debug, info};
|
||||
use regex::Regex;
|
||||
|
||||
use crate::{
|
||||
BrocadeClient, BrocadeInfo, Error, ExecutionMode, InterSwitchLink, InterfaceInfo,
|
||||
@@ -103,13 +104,37 @@ impl NetworkOperatingSystemClient {
|
||||
};
|
||||
|
||||
Some(Ok(InterfaceInfo {
|
||||
name: format!("{} {}", interface_type, port_location),
|
||||
name: format!("{interface_type} {port_location}"),
|
||||
port_location,
|
||||
interface_type,
|
||||
operating_mode,
|
||||
status,
|
||||
}))
|
||||
}
|
||||
|
||||
fn map_configure_interfaces_error(&self, err: Error) -> Error {
|
||||
debug!("[Brocade] {err}");
|
||||
|
||||
if let Error::CommandError(message) = &err {
|
||||
if message.contains("switchport")
|
||||
&& message.contains("Cannot configure aggregator member")
|
||||
{
|
||||
let re = Regex::new(r"\(conf-if-([a-zA-Z]+)-([\d/]+)\)#").unwrap();
|
||||
|
||||
if let Some(caps) = re.captures(message) {
|
||||
let interface_type = &caps[1];
|
||||
let port_location = &caps[2];
|
||||
let interface = format!("{interface_type} {port_location}");
|
||||
|
||||
return Error::CommandError(format!(
|
||||
"Cannot configure interface '{interface}', it is a member of a port-channel (LAG)"
|
||||
));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
err
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
@@ -162,7 +187,7 @@ impl BrocadeClient for NetworkOperatingSystemClient {
|
||||
|
||||
async fn configure_interfaces(
|
||||
&self,
|
||||
interfaces: Vec<(String, PortOperatingMode)>,
|
||||
interfaces: &Vec<(String, PortOperatingMode)>,
|
||||
) -> Result<(), Error> {
|
||||
info!("[Brocade] Configuring {} interface(s)...", interfaces.len());
|
||||
|
||||
@@ -179,9 +204,12 @@ impl BrocadeClient for NetworkOperatingSystemClient {
|
||||
PortOperatingMode::Trunk => {
|
||||
commands.push("switchport".into());
|
||||
commands.push("switchport mode trunk".into());
|
||||
commands.push("no spanning-tree shutdown".into());
|
||||
commands.push("switchport trunk allowed vlan all".into());
|
||||
commands.push("no switchport trunk tag native-vlan".into());
|
||||
commands.push("spanning-tree shutdown".into());
|
||||
commands.push("no fabric isl enable".into());
|
||||
commands.push("no fabric trunk enable".into());
|
||||
commands.push("no shutdown".into());
|
||||
}
|
||||
PortOperatingMode::Access => {
|
||||
commands.push("switchport".into());
|
||||
@@ -197,11 +225,10 @@ impl BrocadeClient for NetworkOperatingSystemClient {
|
||||
commands.push("exit".into());
|
||||
}
|
||||
|
||||
commands.push("write memory".into());
|
||||
|
||||
self.shell
|
||||
.run_commands(commands, ExecutionMode::Regular)
|
||||
.await?;
|
||||
.await
|
||||
.map_err(|err| self.map_configure_interfaces_error(err))?;
|
||||
|
||||
info!("[Brocade] Interfaces configured.");
|
||||
|
||||
@@ -213,7 +240,7 @@ impl BrocadeClient for NetworkOperatingSystemClient {
|
||||
|
||||
let output = self
|
||||
.shell
|
||||
.run_command("show port-channel", ExecutionMode::Regular)
|
||||
.run_command("show port-channel summary", ExecutionMode::Regular)
|
||||
.await?;
|
||||
|
||||
let used_ids: Vec<u8> = output
|
||||
@@ -248,7 +275,12 @@ impl BrocadeClient for NetworkOperatingSystemClient {
|
||||
ports: &[PortLocation],
|
||||
) -> Result<(), Error> {
|
||||
info!(
|
||||
"[Brocade] Configuring port-channel '{channel_name} {channel_id}' with ports: {ports:?}"
|
||||
"[Brocade] Configuring port-channel '{channel_id} {channel_name}' with ports: {}",
|
||||
ports
|
||||
.iter()
|
||||
.map(|p| format!("{p}"))
|
||||
.collect::<Vec<String>>()
|
||||
.join(", ")
|
||||
);
|
||||
|
||||
let interfaces = self.get_interfaces().await?;
|
||||
@@ -276,8 +308,6 @@ impl BrocadeClient for NetworkOperatingSystemClient {
|
||||
commands.push("exit".into());
|
||||
}
|
||||
|
||||
commands.push("write memory".into());
|
||||
|
||||
self.shell
|
||||
.run_commands(commands, ExecutionMode::Regular)
|
||||
.await?;
|
||||
@@ -294,7 +324,6 @@ impl BrocadeClient for NetworkOperatingSystemClient {
|
||||
"configure terminal".into(),
|
||||
format!("no interface port-channel {}", channel_name),
|
||||
"exit".into(),
|
||||
"write memory".into(),
|
||||
];
|
||||
|
||||
self.shell
|
||||
|
||||
@@ -16,7 +16,6 @@ use tokio::time::timeout;
|
||||
#[derive(Debug)]
|
||||
pub struct BrocadeShell {
|
||||
ip: IpAddr,
|
||||
port: u16,
|
||||
username: String,
|
||||
password: String,
|
||||
options: BrocadeOptions,
|
||||
@@ -27,33 +26,31 @@ pub struct BrocadeShell {
|
||||
impl BrocadeShell {
|
||||
pub async fn init(
|
||||
ip_addresses: &[IpAddr],
|
||||
port: u16,
|
||||
username: &str,
|
||||
password: &str,
|
||||
options: Option<BrocadeOptions>,
|
||||
options: BrocadeOptions,
|
||||
) -> Result<Self, Error> {
|
||||
let ip = ip_addresses
|
||||
.first()
|
||||
.ok_or_else(|| Error::ConfigurationError("No IP addresses provided".to_string()))?;
|
||||
|
||||
let base_options = options.unwrap_or_default();
|
||||
let options = ssh::try_init_client(username, password, ip, base_options).await?;
|
||||
let brocade_ssh_client_options =
|
||||
ssh::try_init_client(username, password, ip, options).await?;
|
||||
|
||||
Ok(Self {
|
||||
ip: *ip,
|
||||
port,
|
||||
username: username.to_string(),
|
||||
password: password.to_string(),
|
||||
before_all_commands: vec![],
|
||||
after_all_commands: vec![],
|
||||
options,
|
||||
options: brocade_ssh_client_options,
|
||||
})
|
||||
}
|
||||
|
||||
pub async fn open_session(&self, mode: ExecutionMode) -> Result<BrocadeSession, Error> {
|
||||
BrocadeSession::open(
|
||||
self.ip,
|
||||
self.port,
|
||||
self.options.ssh.port,
|
||||
&self.username,
|
||||
&self.password,
|
||||
self.options.clone(),
|
||||
@@ -211,7 +208,7 @@ impl BrocadeSession {
|
||||
let mut output = Vec::new();
|
||||
let start = Instant::now();
|
||||
let read_timeout = Duration::from_millis(500);
|
||||
let log_interval = Duration::from_secs(3);
|
||||
let log_interval = Duration::from_secs(5);
|
||||
let mut last_log = Instant::now();
|
||||
|
||||
loop {
|
||||
@@ -221,7 +218,9 @@ impl BrocadeSession {
|
||||
));
|
||||
}
|
||||
|
||||
if start.elapsed() > Duration::from_secs(5) && last_log.elapsed() > log_interval {
|
||||
if start.elapsed() > self.options.timeouts.command_output
|
||||
&& last_log.elapsed() > log_interval
|
||||
{
|
||||
info!("[Brocade] Waiting for command output...");
|
||||
last_log = Instant::now();
|
||||
}
|
||||
@@ -276,7 +275,7 @@ impl BrocadeSession {
|
||||
let output_lower = output.to_lowercase();
|
||||
if ERROR_PATTERNS.iter().any(|&p| output_lower.contains(p)) {
|
||||
return Err(Error::CommandError(format!(
|
||||
"Command '{command}' failed: {}",
|
||||
"Command error: {}",
|
||||
output.trim()
|
||||
)));
|
||||
}
|
||||
|
||||
@@ -2,6 +2,7 @@ use std::borrow::Cow;
|
||||
use std::sync::Arc;
|
||||
|
||||
use async_trait::async_trait;
|
||||
use log::debug;
|
||||
use russh::client::Handler;
|
||||
use russh::kex::DH_G1_SHA1;
|
||||
use russh::kex::ECDH_SHA2_NISTP256;
|
||||
@@ -10,29 +11,43 @@ use russh_keys::key::SSH_RSA;
|
||||
use super::BrocadeOptions;
|
||||
use super::Error;
|
||||
|
||||
#[derive(Default, Clone, Debug)]
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct SshOptions {
|
||||
pub preferred_algorithms: russh::Preferred,
|
||||
pub port: u16,
|
||||
}
|
||||
|
||||
impl Default for SshOptions {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
preferred_algorithms: Default::default(),
|
||||
port: 22,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl SshOptions {
|
||||
fn ecdhsa_sha2_nistp256() -> Self {
|
||||
fn ecdhsa_sha2_nistp256(port: u16) -> Self {
|
||||
Self {
|
||||
preferred_algorithms: russh::Preferred {
|
||||
kex: Cow::Borrowed(&[ECDH_SHA2_NISTP256]),
|
||||
key: Cow::Borrowed(&[SSH_RSA]),
|
||||
..Default::default()
|
||||
},
|
||||
port,
|
||||
..Default::default()
|
||||
}
|
||||
}
|
||||
|
||||
fn legacy() -> Self {
|
||||
fn legacy(port: u16) -> Self {
|
||||
Self {
|
||||
preferred_algorithms: russh::Preferred {
|
||||
kex: Cow::Borrowed(&[DH_G1_SHA1]),
|
||||
key: Cow::Borrowed(&[SSH_RSA]),
|
||||
..Default::default()
|
||||
},
|
||||
port,
|
||||
..Default::default()
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -57,18 +72,21 @@ pub async fn try_init_client(
|
||||
ip: &std::net::IpAddr,
|
||||
base_options: BrocadeOptions,
|
||||
) -> Result<BrocadeOptions, Error> {
|
||||
let mut default = SshOptions::default();
|
||||
default.port = base_options.ssh.port;
|
||||
let ssh_options = vec![
|
||||
SshOptions::default(),
|
||||
SshOptions::ecdhsa_sha2_nistp256(),
|
||||
SshOptions::legacy(),
|
||||
default,
|
||||
SshOptions::ecdhsa_sha2_nistp256(base_options.ssh.port),
|
||||
SshOptions::legacy(base_options.ssh.port),
|
||||
];
|
||||
|
||||
for ssh in ssh_options {
|
||||
let opts = BrocadeOptions {
|
||||
ssh,
|
||||
ssh: ssh.clone(),
|
||||
..base_options.clone()
|
||||
};
|
||||
let client = create_client(*ip, 22, username, password, &opts).await;
|
||||
debug!("Creating client {ip}:{} {username}", ssh.port);
|
||||
let client = create_client(*ip, ssh.port, username, password, &opts).await;
|
||||
|
||||
match client {
|
||||
Ok(_) => {
|
||||
|
||||
BIN
data/pxe/okd/http_files/harmony_inventory_agent
(Stored with Git LFS)
BIN
data/pxe/okd/http_files/harmony_inventory_agent
(Stored with Git LFS)
Binary file not shown.
BIN
empty_database.sqlite
Normal file
BIN
empty_database.sqlite
Normal file
Binary file not shown.
@@ -27,7 +27,6 @@ async fn main() {
|
||||
};
|
||||
let application = Arc::new(RustWebapp {
|
||||
name: "example-monitoring".to_string(),
|
||||
dns: "example-monitoring.harmony.mcd".to_string(),
|
||||
project_root: PathBuf::from("./examples/rust/webapp"),
|
||||
framework: Some(RustWebFramework::Leptos),
|
||||
service_port: 3000,
|
||||
|
||||
19
examples/brocade_switch/Cargo.toml
Normal file
19
examples/brocade_switch/Cargo.toml
Normal file
@@ -0,0 +1,19 @@
|
||||
[package]
|
||||
name = "brocade-switch"
|
||||
edition = "2024"
|
||||
version.workspace = true
|
||||
readme.workspace = true
|
||||
license.workspace = true
|
||||
|
||||
[dependencies]
|
||||
harmony = { path = "../../harmony" }
|
||||
harmony_cli = { path = "../../harmony_cli" }
|
||||
harmony_macros = { path = "../../harmony_macros" }
|
||||
harmony_types = { path = "../../harmony_types" }
|
||||
tokio.workspace = true
|
||||
url.workspace = true
|
||||
async-trait.workspace = true
|
||||
serde.workspace = true
|
||||
log.workspace = true
|
||||
env_logger.workspace = true
|
||||
brocade = { path = "../../brocade" }
|
||||
157
examples/brocade_switch/src/main.rs
Normal file
157
examples/brocade_switch/src/main.rs
Normal file
@@ -0,0 +1,157 @@
|
||||
use std::str::FromStr;
|
||||
|
||||
use async_trait::async_trait;
|
||||
use brocade::{BrocadeOptions, PortOperatingMode};
|
||||
use harmony::{
|
||||
data::Version,
|
||||
infra::brocade::BrocadeSwitchClient,
|
||||
interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
|
||||
inventory::Inventory,
|
||||
score::Score,
|
||||
topology::{
|
||||
HostNetworkConfig, PortConfig, PreparationError, PreparationOutcome, Switch, SwitchClient,
|
||||
SwitchError, Topology,
|
||||
},
|
||||
};
|
||||
use harmony_macros::ip;
|
||||
use harmony_types::{id::Id, net::MacAddress, switch::PortLocation};
|
||||
use log::{debug, info};
|
||||
use serde::Serialize;
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() {
|
||||
let switch_score = BrocadeSwitchScore {
|
||||
port_channels_to_clear: vec![
|
||||
Id::from_str("17").unwrap(),
|
||||
Id::from_str("19").unwrap(),
|
||||
Id::from_str("18").unwrap(),
|
||||
],
|
||||
ports_to_configure: vec![
|
||||
(PortLocation(2, 0, 17), PortOperatingMode::Trunk),
|
||||
(PortLocation(2, 0, 19), PortOperatingMode::Trunk),
|
||||
(PortLocation(1, 0, 18), PortOperatingMode::Trunk),
|
||||
],
|
||||
};
|
||||
harmony_cli::run(
|
||||
Inventory::autoload(),
|
||||
SwitchTopology::new().await,
|
||||
vec![Box::new(switch_score)],
|
||||
None,
|
||||
)
|
||||
.await
|
||||
.unwrap();
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, Serialize)]
|
||||
struct BrocadeSwitchScore {
|
||||
port_channels_to_clear: Vec<Id>,
|
||||
ports_to_configure: Vec<PortConfig>,
|
||||
}
|
||||
|
||||
impl<T: Topology + Switch> Score<T> for BrocadeSwitchScore {
|
||||
fn name(&self) -> String {
|
||||
"BrocadeSwitchScore".to_string()
|
||||
}
|
||||
|
||||
#[doc(hidden)]
|
||||
fn create_interpret(&self) -> Box<dyn Interpret<T>> {
|
||||
Box::new(BrocadeSwitchInterpret {
|
||||
score: self.clone(),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
struct BrocadeSwitchInterpret {
|
||||
score: BrocadeSwitchScore,
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl<T: Topology + Switch> Interpret<T> for BrocadeSwitchInterpret {
|
||||
async fn execute(
|
||||
&self,
|
||||
_inventory: &Inventory,
|
||||
topology: &T,
|
||||
) -> Result<Outcome, InterpretError> {
|
||||
info!("Applying switch configuration {:?}", self.score);
|
||||
debug!(
|
||||
"Clearing port channel {:?}",
|
||||
self.score.port_channels_to_clear
|
||||
);
|
||||
topology
|
||||
.clear_port_channel(&self.score.port_channels_to_clear)
|
||||
.await
|
||||
.map_err(|e| InterpretError::new(e.to_string()))?;
|
||||
debug!("Configuring interfaces {:?}", self.score.ports_to_configure);
|
||||
topology
|
||||
.configure_interface(&self.score.ports_to_configure)
|
||||
.await
|
||||
.map_err(|e| InterpretError::new(e.to_string()))?;
|
||||
Ok(Outcome::success("switch configured".to_string()))
|
||||
}
|
||||
fn get_name(&self) -> InterpretName {
|
||||
InterpretName::Custom("BrocadeSwitchInterpret")
|
||||
}
|
||||
fn get_version(&self) -> Version {
|
||||
todo!()
|
||||
}
|
||||
fn get_status(&self) -> InterpretStatus {
|
||||
todo!()
|
||||
}
|
||||
fn get_children(&self) -> Vec<Id> {
|
||||
todo!()
|
||||
}
|
||||
}
|
||||
|
||||
struct SwitchTopology {
|
||||
client: Box<dyn SwitchClient>,
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl Topology for SwitchTopology {
|
||||
fn name(&self) -> &str {
|
||||
"SwitchTopology"
|
||||
}
|
||||
|
||||
async fn ensure_ready(&self) -> Result<PreparationOutcome, PreparationError> {
|
||||
Ok(PreparationOutcome::Noop)
|
||||
}
|
||||
}
|
||||
|
||||
impl SwitchTopology {
|
||||
async fn new() -> Self {
|
||||
let mut options = BrocadeOptions::default();
|
||||
options.ssh.port = 2222;
|
||||
let client =
|
||||
BrocadeSwitchClient::init(&vec![ip!("127.0.0.1")], &"admin", &"password", options)
|
||||
.await
|
||||
.expect("Failed to connect to switch");
|
||||
|
||||
let client = Box::new(client);
|
||||
Self { client }
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl Switch for SwitchTopology {
|
||||
async fn setup_switch(&self) -> Result<(), SwitchError> {
|
||||
todo!()
|
||||
}
|
||||
|
||||
async fn get_port_for_mac_address(
|
||||
&self,
|
||||
_mac_address: &MacAddress,
|
||||
) -> Result<Option<PortLocation>, SwitchError> {
|
||||
todo!()
|
||||
}
|
||||
|
||||
async fn configure_port_channel(&self, _config: &HostNetworkConfig) -> Result<(), SwitchError> {
|
||||
todo!()
|
||||
}
|
||||
async fn clear_port_channel(&self, ids: &Vec<Id>) -> Result<(), SwitchError> {
|
||||
self.client.clear_port_channel(ids).await
|
||||
}
|
||||
async fn configure_interface(&self, ports: &Vec<PortConfig>) -> Result<(), SwitchError> {
|
||||
self.client.configure_interface(ports).await
|
||||
}
|
||||
}
|
||||
@@ -2,7 +2,7 @@ use harmony::{
|
||||
inventory::Inventory,
|
||||
modules::{
|
||||
dummy::{ErrorScore, PanicScore, SuccessScore},
|
||||
inventory::LaunchDiscoverInventoryAgentScore,
|
||||
inventory::{HarmonyDiscoveryStrategy, LaunchDiscoverInventoryAgentScore},
|
||||
},
|
||||
topology::LocalhostTopology,
|
||||
};
|
||||
@@ -18,6 +18,7 @@ async fn main() {
|
||||
Box::new(PanicScore {}),
|
||||
Box::new(LaunchDiscoverInventoryAgentScore {
|
||||
discovery_timeout: Some(10),
|
||||
discovery_strategy: HarmonyDiscoveryStrategy::MDNS,
|
||||
}),
|
||||
],
|
||||
None,
|
||||
|
||||
15
examples/harmony_inventory_builder/Cargo.toml
Normal file
15
examples/harmony_inventory_builder/Cargo.toml
Normal file
@@ -0,0 +1,15 @@
|
||||
[package]
|
||||
name = "harmony_inventory_builder"
|
||||
edition = "2024"
|
||||
version.workspace = true
|
||||
readme.workspace = true
|
||||
license.workspace = true
|
||||
|
||||
[dependencies]
|
||||
harmony = { path = "../../harmony" }
|
||||
harmony_cli = { path = "../../harmony_cli" }
|
||||
harmony_macros = { path = "../../harmony_macros" }
|
||||
harmony_types = { path = "../../harmony_types" }
|
||||
tokio.workspace = true
|
||||
url.workspace = true
|
||||
cidr.workspace = true
|
||||
11
examples/harmony_inventory_builder/build_docker.sh
Executable file
11
examples/harmony_inventory_builder/build_docker.sh
Executable file
@@ -0,0 +1,11 @@
|
||||
cargo build -p harmony_inventory_builder --release --target x86_64-unknown-linux-musl
|
||||
|
||||
SCRIPT_DIR="$(dirname ${0})"
|
||||
|
||||
cd "${SCRIPT_DIR}/docker/"
|
||||
|
||||
cp ../../../target/x86_64-unknown-linux-musl/release/harmony_inventory_builder .
|
||||
|
||||
docker build . -t hub.nationtech.io/harmony/harmony_inventory_builder
|
||||
|
||||
docker push hub.nationtech.io/harmony/harmony_inventory_builder
|
||||
10
examples/harmony_inventory_builder/docker/Dockerfile
Normal file
10
examples/harmony_inventory_builder/docker/Dockerfile
Normal file
@@ -0,0 +1,10 @@
|
||||
FROM debian:12-slim
|
||||
|
||||
RUN mkdir /app
|
||||
WORKDIR /app/
|
||||
|
||||
COPY harmony_inventory_builder /app/
|
||||
|
||||
ENV RUST_LOG=info
|
||||
|
||||
CMD ["sleep", "infinity"]
|
||||
36
examples/harmony_inventory_builder/src/main.rs
Normal file
36
examples/harmony_inventory_builder/src/main.rs
Normal file
@@ -0,0 +1,36 @@
|
||||
use harmony::{
|
||||
inventory::{HostRole, Inventory},
|
||||
modules::inventory::{DiscoverHostForRoleScore, HarmonyDiscoveryStrategy},
|
||||
topology::LocalhostTopology,
|
||||
};
|
||||
use harmony_macros::cidrv4;
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() {
|
||||
let discover_worker = DiscoverHostForRoleScore {
|
||||
role: HostRole::Worker,
|
||||
number_desired_hosts: 3,
|
||||
discovery_strategy: HarmonyDiscoveryStrategy::SUBNET {
|
||||
cidr: cidrv4!("192.168.0.1/25"),
|
||||
port: 25000,
|
||||
},
|
||||
};
|
||||
|
||||
let discover_control_plane = DiscoverHostForRoleScore {
|
||||
role: HostRole::ControlPlane,
|
||||
number_desired_hosts: 3,
|
||||
discovery_strategy: HarmonyDiscoveryStrategy::SUBNET {
|
||||
cidr: cidrv4!("192.168.0.1/25"),
|
||||
port: 25000,
|
||||
},
|
||||
};
|
||||
|
||||
harmony_cli::run(
|
||||
Inventory::autoload(),
|
||||
LocalhostTopology::new(),
|
||||
vec![Box::new(discover_worker), Box::new(discover_control_plane)],
|
||||
None,
|
||||
)
|
||||
.await
|
||||
.unwrap();
|
||||
}
|
||||
@@ -1,6 +1,6 @@
|
||||
use std::{
|
||||
net::{IpAddr, Ipv4Addr},
|
||||
sync::Arc,
|
||||
sync::{Arc, OnceLock},
|
||||
};
|
||||
|
||||
use brocade::BrocadeOptions;
|
||||
@@ -39,10 +39,10 @@ async fn main() {
|
||||
.expect("Failed to get credentials");
|
||||
|
||||
let switches: Vec<IpAddr> = vec![ip!("192.168.33.101")];
|
||||
let brocade_options = Some(BrocadeOptions {
|
||||
let brocade_options = BrocadeOptions {
|
||||
dry_run: *harmony::config::DRY_RUN,
|
||||
..Default::default()
|
||||
});
|
||||
};
|
||||
let switch_client = BrocadeSwitchClient::init(
|
||||
&switches,
|
||||
&switch_auth.username,
|
||||
@@ -61,6 +61,7 @@ async fn main() {
|
||||
let gateway_ipv4 = Ipv4Addr::new(192, 168, 33, 1);
|
||||
let gateway_ip = IpAddr::V4(gateway_ipv4);
|
||||
let topology = harmony::topology::HAClusterTopology {
|
||||
kubeconfig: None,
|
||||
domain_name: "ncd0.harmony.mcd".to_string(), // TODO this must be set manually correctly
|
||||
// when setting up the opnsense firewall
|
||||
router: Arc::new(UnmanagedRouter::new(
|
||||
@@ -106,6 +107,7 @@ async fn main() {
|
||||
},
|
||||
],
|
||||
switch_client: switch_client.clone(),
|
||||
network_manager: OnceLock::new(),
|
||||
};
|
||||
|
||||
let inventory = Inventory {
|
||||
|
||||
@@ -9,7 +9,10 @@ use harmony::{
|
||||
use harmony_macros::{ip, ipv4};
|
||||
use harmony_secret::{Secret, SecretManager};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::{net::IpAddr, sync::Arc};
|
||||
use std::{
|
||||
net::IpAddr,
|
||||
sync::{Arc, OnceLock},
|
||||
};
|
||||
|
||||
#[derive(Secret, Serialize, Deserialize, Debug, PartialEq)]
|
||||
struct OPNSenseFirewallConfig {
|
||||
@@ -28,10 +31,10 @@ pub async fn get_topology() -> HAClusterTopology {
|
||||
.expect("Failed to get credentials");
|
||||
|
||||
let switches: Vec<IpAddr> = vec![ip!("192.168.1.101")]; // TODO: Adjust me
|
||||
let brocade_options = Some(BrocadeOptions {
|
||||
let brocade_options = BrocadeOptions {
|
||||
dry_run: *harmony::config::DRY_RUN,
|
||||
..Default::default()
|
||||
});
|
||||
};
|
||||
let switch_client = BrocadeSwitchClient::init(
|
||||
&switches,
|
||||
&switch_auth.username,
|
||||
@@ -59,6 +62,7 @@ pub async fn get_topology() -> HAClusterTopology {
|
||||
let gateway_ipv4 = ipv4!("192.168.1.1");
|
||||
let gateway_ip = IpAddr::V4(gateway_ipv4);
|
||||
harmony::topology::HAClusterTopology {
|
||||
kubeconfig: None,
|
||||
domain_name: "demo.harmony.mcd".to_string(),
|
||||
router: Arc::new(UnmanagedRouter::new(
|
||||
gateway_ip,
|
||||
@@ -80,6 +84,7 @@ pub async fn get_topology() -> HAClusterTopology {
|
||||
},
|
||||
workers: vec![],
|
||||
switch_client: switch_client.clone(),
|
||||
network_manager: OnceLock::new(),
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -10,7 +10,10 @@ use harmony::{
|
||||
use harmony_macros::{ip, ipv4};
|
||||
use harmony_secret::{Secret, SecretManager};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::{net::IpAddr, sync::Arc};
|
||||
use std::{
|
||||
net::IpAddr,
|
||||
sync::{Arc, OnceLock},
|
||||
};
|
||||
|
||||
pub async fn get_topology() -> HAClusterTopology {
|
||||
let firewall = harmony::topology::LogicalHost {
|
||||
@@ -23,10 +26,10 @@ pub async fn get_topology() -> HAClusterTopology {
|
||||
.expect("Failed to get credentials");
|
||||
|
||||
let switches: Vec<IpAddr> = vec![ip!("192.168.1.101")]; // TODO: Adjust me
|
||||
let brocade_options = Some(BrocadeOptions {
|
||||
let brocade_options = BrocadeOptions {
|
||||
dry_run: *harmony::config::DRY_RUN,
|
||||
..Default::default()
|
||||
});
|
||||
};
|
||||
let switch_client = BrocadeSwitchClient::init(
|
||||
&switches,
|
||||
&switch_auth.username,
|
||||
@@ -54,6 +57,7 @@ pub async fn get_topology() -> HAClusterTopology {
|
||||
let gateway_ipv4 = ipv4!("192.168.1.1");
|
||||
let gateway_ip = IpAddr::V4(gateway_ipv4);
|
||||
harmony::topology::HAClusterTopology {
|
||||
kubeconfig: None,
|
||||
domain_name: "demo.harmony.mcd".to_string(),
|
||||
router: Arc::new(UnmanagedRouter::new(
|
||||
gateway_ip,
|
||||
@@ -75,6 +79,7 @@ pub async fn get_topology() -> HAClusterTopology {
|
||||
},
|
||||
workers: vec![],
|
||||
switch_client: switch_client.clone(),
|
||||
network_manager: OnceLock::new(),
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
use std::{
|
||||
net::{IpAddr, Ipv4Addr},
|
||||
sync::Arc,
|
||||
sync::{Arc, OnceLock},
|
||||
};
|
||||
|
||||
use brocade::BrocadeOptions;
|
||||
@@ -35,10 +35,10 @@ async fn main() {
|
||||
.expect("Failed to get credentials");
|
||||
|
||||
let switches: Vec<IpAddr> = vec![ip!("192.168.5.101")]; // TODO: Adjust me
|
||||
let brocade_options = Some(BrocadeOptions {
|
||||
let brocade_options = BrocadeOptions {
|
||||
dry_run: *harmony::config::DRY_RUN,
|
||||
..Default::default()
|
||||
});
|
||||
};
|
||||
let switch_client = BrocadeSwitchClient::init(
|
||||
&switches,
|
||||
&switch_auth.username,
|
||||
@@ -57,6 +57,7 @@ async fn main() {
|
||||
let gateway_ipv4 = Ipv4Addr::new(10, 100, 8, 1);
|
||||
let gateway_ip = IpAddr::V4(gateway_ipv4);
|
||||
let topology = harmony::topology::HAClusterTopology {
|
||||
kubeconfig: None,
|
||||
domain_name: "demo.harmony.mcd".to_string(),
|
||||
router: Arc::new(UnmanagedRouter::new(
|
||||
gateway_ip,
|
||||
@@ -78,6 +79,7 @@ async fn main() {
|
||||
},
|
||||
workers: vec![],
|
||||
switch_client: switch_client.clone(),
|
||||
network_manager: OnceLock::new(),
|
||||
};
|
||||
|
||||
let inventory = Inventory {
|
||||
|
||||
@@ -16,7 +16,6 @@ use harmony_types::net::Url;
|
||||
async fn main() {
|
||||
let application = Arc::new(RustWebapp {
|
||||
name: "test-rhob-monitoring".to_string(),
|
||||
dns: "test-rhob-monitoring.harmony.mcd".to_string(),
|
||||
project_root: PathBuf::from("./webapp"), // Relative from 'harmony-path' param
|
||||
framework: Some(RustWebFramework::Leptos),
|
||||
service_port: 3000,
|
||||
|
||||
@@ -19,7 +19,6 @@ use harmony_macros::hurl;
|
||||
async fn main() {
|
||||
let application = Arc::new(RustWebapp {
|
||||
name: "harmony-example-rust-webapp".to_string(),
|
||||
dns: "harmony-example-rust-webapp.harmony.mcd".to_string(),
|
||||
project_root: PathBuf::from("./webapp"),
|
||||
framework: Some(RustWebFramework::Leptos),
|
||||
service_port: 3000,
|
||||
|
||||
@@ -2,11 +2,12 @@ use harmony::{
|
||||
inventory::Inventory,
|
||||
modules::{
|
||||
application::{
|
||||
features::{rhob_monitoring::Monitoring, PackagingDeployment}, ApplicationScore, RustWebFramework, RustWebapp
|
||||
ApplicationScore, RustWebFramework, RustWebapp,
|
||||
features::{PackagingDeployment, rhob_monitoring::Monitoring},
|
||||
},
|
||||
monitoring::alert_channel::discord_alert_channel::DiscordWebhook,
|
||||
},
|
||||
topology::{K8sAnywhereTopology, LocalhostTopology},
|
||||
topology::K8sAnywhereTopology,
|
||||
};
|
||||
use harmony_macros::hurl;
|
||||
use std::{path::PathBuf, sync::Arc};
|
||||
@@ -21,8 +22,8 @@ async fn main() {
|
||||
});
|
||||
|
||||
let discord_webhook = DiscordWebhook {
|
||||
name: "harmony-demo".to_string(),
|
||||
url: hurl!("https://discord.com/api/webhooks/1415391405681021050/V6KzV41vQ7yvbn7BchejRu9C8OANxy0i2ESZOz2nvCxG8xAY3-2i3s5MS38k568JKTzH"),
|
||||
name: "harmony_demo".to_string(),
|
||||
url: hurl!("http://not_a_url.com"),
|
||||
};
|
||||
|
||||
let app = ApplicationScore {
|
||||
|
||||
@@ -3,7 +3,7 @@ use harmony::{
|
||||
modules::{
|
||||
application::{
|
||||
ApplicationScore, RustWebFramework, RustWebapp,
|
||||
features::{PackagingDeployment, rhob_monitoring::Monitoring},
|
||||
features::{Monitoring, PackagingDeployment},
|
||||
},
|
||||
monitoring::alert_channel::discord_alert_channel::DiscordWebhook,
|
||||
},
|
||||
@@ -16,7 +16,6 @@ use std::{path::PathBuf, sync::Arc};
|
||||
async fn main() {
|
||||
let application = Arc::new(RustWebapp {
|
||||
name: "harmony-example-tryrust".to_string(),
|
||||
dns: "tryrust.example.harmony.mcd".to_string(),
|
||||
project_root: PathBuf::from("./tryrust.org"), // <== Project root, in this case it is a
|
||||
// submodule
|
||||
framework: Some(RustWebFramework::Leptos),
|
||||
|
||||
@@ -152,10 +152,10 @@ impl PhysicalHost {
|
||||
pub fn parts_list(&self) -> String {
|
||||
let PhysicalHost {
|
||||
id,
|
||||
category,
|
||||
category: _,
|
||||
network,
|
||||
storage,
|
||||
labels,
|
||||
labels: _,
|
||||
memory_modules,
|
||||
cpus,
|
||||
} = self;
|
||||
@@ -226,8 +226,8 @@ impl PhysicalHost {
|
||||
speed_mhz,
|
||||
manufacturer,
|
||||
part_number,
|
||||
serial_number,
|
||||
rank,
|
||||
serial_number: _,
|
||||
rank: _,
|
||||
} = mem;
|
||||
parts_list.push_str(&format!(
|
||||
"\n{}Gb, {}Mhz, Manufacturer ({}), Part Number ({})",
|
||||
|
||||
@@ -4,6 +4,8 @@ use std::error::Error;
|
||||
use async_trait::async_trait;
|
||||
use derive_new::new;
|
||||
|
||||
use crate::inventory::HostRole;
|
||||
|
||||
use super::{
|
||||
data::Version, executors::ExecutorError, inventory::Inventory, topology::PreparationError,
|
||||
};
|
||||
|
||||
@@ -1,32 +1,26 @@
|
||||
use async_trait::async_trait;
|
||||
use brocade::PortOperatingMode;
|
||||
use harmony_macros::ip;
|
||||
use harmony_types::{
|
||||
id::Id,
|
||||
net::{MacAddress, Url},
|
||||
switch::PortLocation,
|
||||
};
|
||||
use k8s_openapi::api::core::v1::Namespace;
|
||||
use kube::api::ObjectMeta;
|
||||
use log::debug;
|
||||
use log::info;
|
||||
|
||||
use crate::data::FileContent;
|
||||
use crate::executors::ExecutorError;
|
||||
use crate::hardware::PhysicalHost;
|
||||
use crate::modules::okd::crd::{
|
||||
InstallPlanApproval, OperatorGroup, OperatorGroupSpec, Subscription, SubscriptionSpec,
|
||||
nmstate::{self, NMState, NodeNetworkConfigurationPolicy, NodeNetworkConfigurationPolicySpec},
|
||||
};
|
||||
use crate::{infra::network_manager::OpenShiftNmStateNetworkManager, topology::PortConfig};
|
||||
use crate::topology::PxeOptions;
|
||||
use crate::{data::FileContent, executors::ExecutorError};
|
||||
|
||||
use super::{
|
||||
DHCPStaticEntry, DhcpServer, DnsRecord, DnsRecordType, DnsServer, Firewall, HostNetworkConfig,
|
||||
HttpServer, IpAddress, K8sclient, LoadBalancer, LoadBalancerService, LogicalHost,
|
||||
PreparationError, PreparationOutcome, Router, Switch, SwitchClient, SwitchError, TftpServer,
|
||||
Topology, k8s::K8sClient,
|
||||
HttpServer, IpAddress, K8sclient, LoadBalancer, LoadBalancerService, LogicalHost, NetworkError,
|
||||
NetworkManager, PreparationError, PreparationOutcome, Router, Switch, SwitchClient,
|
||||
SwitchError, TftpServer, Topology, k8s::K8sClient,
|
||||
};
|
||||
|
||||
use std::collections::BTreeMap;
|
||||
use std::sync::Arc;
|
||||
use std::sync::{Arc, OnceLock};
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct HAClusterTopology {
|
||||
@@ -42,6 +36,8 @@ pub struct HAClusterTopology {
|
||||
pub bootstrap_host: LogicalHost,
|
||||
pub control_plane: Vec<LogicalHost>,
|
||||
pub workers: Vec<LogicalHost>,
|
||||
pub kubeconfig: Option<String>,
|
||||
pub network_manager: OnceLock<Arc<dyn NetworkManager>>,
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
@@ -60,9 +56,17 @@ impl Topology for HAClusterTopology {
|
||||
#[async_trait]
|
||||
impl K8sclient for HAClusterTopology {
|
||||
async fn k8s_client(&self) -> Result<Arc<K8sClient>, String> {
|
||||
Ok(Arc::new(
|
||||
K8sClient::try_default().await.map_err(|e| e.to_string())?,
|
||||
))
|
||||
match &self.kubeconfig {
|
||||
None => Ok(Arc::new(
|
||||
K8sClient::try_default().await.map_err(|e| e.to_string())?,
|
||||
)),
|
||||
Some(kubeconfig) => {
|
||||
let Some(client) = K8sClient::from_kubeconfig(kubeconfig).await else {
|
||||
return Err("Failed to create k8s client".to_string());
|
||||
};
|
||||
Ok(Arc::new(client))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -87,208 +91,12 @@ impl HAClusterTopology {
|
||||
.to_string()
|
||||
}
|
||||
|
||||
async fn ensure_nmstate_operator_installed(&self) -> Result<(), String> {
|
||||
// FIXME: Find a way to check nmstate is already available (get pod -n openshift-nmstate)
|
||||
debug!("Installing NMState operator...");
|
||||
let k8s_client = self.k8s_client().await?;
|
||||
pub async fn network_manager(&self) -> &dyn NetworkManager {
|
||||
let k8s_client = self.k8s_client().await.unwrap();
|
||||
|
||||
let nmstate_namespace = Namespace {
|
||||
metadata: ObjectMeta {
|
||||
name: Some("openshift-nmstate".to_string()),
|
||||
finalizers: Some(vec!["kubernetes".to_string()]),
|
||||
..Default::default()
|
||||
},
|
||||
..Default::default()
|
||||
};
|
||||
debug!("Creating NMState namespace: {nmstate_namespace:#?}");
|
||||
k8s_client
|
||||
.apply(&nmstate_namespace, None)
|
||||
.await
|
||||
.map_err(|e| e.to_string())?;
|
||||
|
||||
let nmstate_operator_group = OperatorGroup {
|
||||
metadata: ObjectMeta {
|
||||
name: Some("openshift-nmstate".to_string()),
|
||||
namespace: Some("openshift-nmstate".to_string()),
|
||||
..Default::default()
|
||||
},
|
||||
spec: OperatorGroupSpec {
|
||||
target_namespaces: vec!["openshift-nmstate".to_string()],
|
||||
},
|
||||
};
|
||||
debug!("Creating NMState operator group: {nmstate_operator_group:#?}");
|
||||
k8s_client
|
||||
.apply(&nmstate_operator_group, None)
|
||||
.await
|
||||
.map_err(|e| e.to_string())?;
|
||||
|
||||
let nmstate_subscription = Subscription {
|
||||
metadata: ObjectMeta {
|
||||
name: Some("kubernetes-nmstate-operator".to_string()),
|
||||
namespace: Some("openshift-nmstate".to_string()),
|
||||
..Default::default()
|
||||
},
|
||||
spec: SubscriptionSpec {
|
||||
channel: Some("stable".to_string()),
|
||||
install_plan_approval: Some(InstallPlanApproval::Automatic),
|
||||
name: "kubernetes-nmstate-operator".to_string(),
|
||||
source: "redhat-operators".to_string(),
|
||||
source_namespace: "openshift-marketplace".to_string(),
|
||||
},
|
||||
};
|
||||
debug!("Subscribing to NMState Operator: {nmstate_subscription:#?}");
|
||||
k8s_client
|
||||
.apply(&nmstate_subscription, None)
|
||||
.await
|
||||
.map_err(|e| e.to_string())?;
|
||||
|
||||
let nmstate = NMState {
|
||||
metadata: ObjectMeta {
|
||||
name: Some("nmstate".to_string()),
|
||||
..Default::default()
|
||||
},
|
||||
..Default::default()
|
||||
};
|
||||
debug!("Creating NMState: {nmstate:#?}");
|
||||
k8s_client
|
||||
.apply(&nmstate, None)
|
||||
.await
|
||||
.map_err(|e| e.to_string())?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn get_next_bond_id(&self) -> u8 {
|
||||
42 // FIXME: Find a better way to declare the bond id
|
||||
}
|
||||
|
||||
async fn configure_bond(
|
||||
&self,
|
||||
host: &PhysicalHost,
|
||||
config: &HostNetworkConfig,
|
||||
) -> Result<(), SwitchError> {
|
||||
self.ensure_nmstate_operator_installed()
|
||||
.await
|
||||
.map_err(|e| {
|
||||
SwitchError::new(format!(
|
||||
"Can't configure bond, NMState operator not available: {e}"
|
||||
))
|
||||
})?;
|
||||
|
||||
let bond_config = self.create_bond_configuration(host, config);
|
||||
debug!("Configuring bond for host {host:?}: {bond_config:#?}");
|
||||
self.k8s_client()
|
||||
.await
|
||||
.unwrap()
|
||||
.apply(&bond_config, None)
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
todo!()
|
||||
}
|
||||
|
||||
fn create_bond_configuration(
|
||||
&self,
|
||||
host: &PhysicalHost,
|
||||
config: &HostNetworkConfig,
|
||||
) -> NodeNetworkConfigurationPolicy {
|
||||
let host_name = host.id.clone();
|
||||
|
||||
let bond_id = self.get_next_bond_id();
|
||||
let bond_name = format!("bond{bond_id}");
|
||||
let mut bond_mtu: Option<u32> = None;
|
||||
let mut bond_mac_address: Option<String> = None;
|
||||
let mut bond_ports = Vec::new();
|
||||
let mut interfaces: Vec<nmstate::InterfaceSpec> = Vec::new();
|
||||
|
||||
for switch_port in &config.switch_ports {
|
||||
let interface_name = switch_port.interface.name.clone();
|
||||
|
||||
interfaces.push(nmstate::InterfaceSpec {
|
||||
name: interface_name.clone(),
|
||||
description: Some(format!("Member of bond {bond_name}")),
|
||||
r#type: "ethernet".to_string(),
|
||||
state: "up".to_string(),
|
||||
mtu: Some(switch_port.interface.mtu),
|
||||
mac_address: Some(switch_port.interface.mac_address.to_string()),
|
||||
ipv4: Some(nmstate::IpStackSpec {
|
||||
enabled: Some(false),
|
||||
..Default::default()
|
||||
}),
|
||||
ipv6: Some(nmstate::IpStackSpec {
|
||||
enabled: Some(false),
|
||||
..Default::default()
|
||||
}),
|
||||
link_aggregation: None,
|
||||
..Default::default()
|
||||
});
|
||||
|
||||
bond_ports.push(interface_name);
|
||||
|
||||
// Use the first port's details for the bond mtu and mac address
|
||||
if bond_mtu.is_none() {
|
||||
bond_mtu = Some(switch_port.interface.mtu);
|
||||
}
|
||||
if bond_mac_address.is_none() {
|
||||
bond_mac_address = Some(switch_port.interface.mac_address.to_string());
|
||||
}
|
||||
}
|
||||
|
||||
interfaces.push(nmstate::InterfaceSpec {
|
||||
name: bond_name.clone(),
|
||||
description: Some(format!("Network bond for host {host_name}")),
|
||||
r#type: "bond".to_string(),
|
||||
state: "up".to_string(),
|
||||
mtu: bond_mtu,
|
||||
mac_address: bond_mac_address,
|
||||
ipv4: Some(nmstate::IpStackSpec {
|
||||
dhcp: Some(true),
|
||||
enabled: Some(true),
|
||||
..Default::default()
|
||||
}),
|
||||
ipv6: Some(nmstate::IpStackSpec {
|
||||
dhcp: Some(true),
|
||||
autoconf: Some(true),
|
||||
enabled: Some(true),
|
||||
..Default::default()
|
||||
}),
|
||||
link_aggregation: Some(nmstate::BondSpec {
|
||||
mode: "802.3ad".to_string(),
|
||||
ports: bond_ports,
|
||||
..Default::default()
|
||||
}),
|
||||
..Default::default()
|
||||
});
|
||||
|
||||
NodeNetworkConfigurationPolicy {
|
||||
metadata: ObjectMeta {
|
||||
name: Some(format!("{host_name}-bond-config")),
|
||||
..Default::default()
|
||||
},
|
||||
spec: NodeNetworkConfigurationPolicySpec {
|
||||
node_selector: Some(BTreeMap::from([(
|
||||
"kubernetes.io/hostname".to_string(),
|
||||
host_name.to_string(),
|
||||
)])),
|
||||
desired_state: nmstate::DesiredStateSpec { interfaces },
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
async fn configure_port_channel(
|
||||
&self,
|
||||
host: &PhysicalHost,
|
||||
config: &HostNetworkConfig,
|
||||
) -> Result<(), SwitchError> {
|
||||
debug!("Configuring port channel: {config:#?}");
|
||||
let switch_ports = config.switch_ports.iter().map(|s| s.port.clone()).collect();
|
||||
|
||||
self.switch_client
|
||||
.configure_port_channel(&format!("Harmony_{}", host.id), switch_ports)
|
||||
.await
|
||||
.map_err(|e| SwitchError::new(format!("Failed to configure switch: {e}")))?;
|
||||
|
||||
Ok(())
|
||||
self.network_manager
|
||||
.get_or_init(|| Arc::new(OpenShiftNmStateNetworkManager::new(k8s_client.clone())))
|
||||
.as_ref()
|
||||
}
|
||||
|
||||
pub fn autoload() -> Self {
|
||||
@@ -299,6 +107,7 @@ impl HAClusterTopology {
|
||||
};
|
||||
|
||||
Self {
|
||||
kubeconfig: None,
|
||||
domain_name: "DummyTopology".to_string(),
|
||||
router: dummy_infra.clone(),
|
||||
load_balancer: dummy_infra.clone(),
|
||||
@@ -311,6 +120,7 @@ impl HAClusterTopology {
|
||||
bootstrap_host: dummy_host,
|
||||
control_plane: vec![],
|
||||
workers: vec![],
|
||||
network_manager: OnceLock::new(),
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -468,25 +278,50 @@ impl HttpServer for HAClusterTopology {
|
||||
#[async_trait]
|
||||
impl Switch for HAClusterTopology {
|
||||
async fn setup_switch(&self) -> Result<(), SwitchError> {
|
||||
self.switch_client.setup().await?;
|
||||
Ok(())
|
||||
self.switch_client.setup().await.map(|_| ())
|
||||
}
|
||||
|
||||
async fn get_port_for_mac_address(
|
||||
&self,
|
||||
mac_address: &MacAddress,
|
||||
) -> Result<Option<PortLocation>, SwitchError> {
|
||||
let port = self.switch_client.find_port(mac_address).await?;
|
||||
Ok(port)
|
||||
self.switch_client.find_port(mac_address).await
|
||||
}
|
||||
|
||||
async fn configure_host_network(
|
||||
async fn configure_port_channel(&self, config: &HostNetworkConfig) -> Result<(), SwitchError> {
|
||||
debug!("Configuring port channel: {config:#?}");
|
||||
let switch_ports = config.switch_ports.iter().map(|s| s.port.clone()).collect();
|
||||
|
||||
self.switch_client
|
||||
.configure_port_channel(&format!("Harmony_{}", config.host_id), switch_ports)
|
||||
.await
|
||||
.map_err(|e| SwitchError::new(format!("Failed to configure port-channel: {e}")))?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn clear_port_channel(&self, ids: &Vec<Id>) -> Result<(), SwitchError> {
|
||||
todo!()
|
||||
}
|
||||
async fn configure_interface(
|
||||
&self,
|
||||
host: &PhysicalHost,
|
||||
config: HostNetworkConfig,
|
||||
ports: &Vec<PortConfig>,
|
||||
) -> Result<(), SwitchError> {
|
||||
self.configure_bond(host, &config).await?;
|
||||
self.configure_port_channel(host, &config).await
|
||||
todo!()
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl NetworkManager for HAClusterTopology {
|
||||
async fn ensure_network_manager_installed(&self) -> Result<(), NetworkError> {
|
||||
self.network_manager()
|
||||
.await
|
||||
.ensure_network_manager_installed()
|
||||
.await
|
||||
}
|
||||
|
||||
async fn configure_bond(&self, config: &HostNetworkConfig) -> Result<(), NetworkError> {
|
||||
self.network_manager().await.configure_bond(config).await
|
||||
}
|
||||
}
|
||||
|
||||
@@ -697,4 +532,6 @@ impl SwitchClient for DummyInfra {
|
||||
) -> Result<u8, SwitchError> {
|
||||
unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA)
|
||||
}
|
||||
async fn clear_port_channel(&self, ids: &Vec<Id>) -> Result<(), SwitchError> {todo!()}
|
||||
async fn configure_interface(&self, ports: &Vec<PortConfig>) -> Result<(), SwitchError> {todo!()}
|
||||
}
|
||||
|
||||
@@ -1,17 +1,22 @@
|
||||
use std::{collections::HashMap, time::Duration};
|
||||
use std::time::Duration;
|
||||
|
||||
use derive_new::new;
|
||||
use k8s_openapi::{
|
||||
ClusterResourceScope, NamespaceResourceScope,
|
||||
api::{apps::v1::Deployment, core::v1::Pod},
|
||||
apiextensions_apiserver::pkg::apis::apiextensions::v1::CustomResourceDefinition,
|
||||
api::{
|
||||
apps::v1::Deployment,
|
||||
core::v1::{Node, Pod, ServiceAccount},
|
||||
},
|
||||
apimachinery::pkg::version::Info,
|
||||
};
|
||||
use kube::{
|
||||
Client, Config, Discovery, Error, Resource,
|
||||
api::{Api, AttachParams, DeleteParams, ListParams, Patch, PatchParams, ResourceExt},
|
||||
api::{
|
||||
Api, AttachParams, DeleteParams, ListParams, ObjectList, Patch, PatchParams, ResourceExt,
|
||||
},
|
||||
config::{KubeConfigOptions, Kubeconfig},
|
||||
core::ErrorResponse,
|
||||
discovery::{ApiCapabilities, Scope},
|
||||
error::DiscoveryError,
|
||||
runtime::reflector::Lookup,
|
||||
};
|
||||
@@ -20,11 +25,12 @@ use kube::{
|
||||
api::{ApiResource, GroupVersionKind},
|
||||
runtime::wait::await_condition,
|
||||
};
|
||||
use log::{debug, error, info, trace};
|
||||
use log::{debug, error, trace, warn};
|
||||
use serde::{Serialize, de::DeserializeOwned};
|
||||
use serde_json::{json, Value};
|
||||
use serde_json::json;
|
||||
use similar::TextDiff;
|
||||
use tokio::{io::AsyncReadExt, time::sleep};
|
||||
use url::Url;
|
||||
|
||||
#[derive(new, Clone)]
|
||||
pub struct K8sClient {
|
||||
@@ -58,146 +64,9 @@ impl K8sClient {
|
||||
})
|
||||
}
|
||||
|
||||
// Returns true if any deployment in the given namespace matching the label selector
|
||||
// has status.availableReplicas > 0 (or condition Available=True).
|
||||
pub async fn has_healthy_deployment_with_label(
|
||||
&self,
|
||||
namespace: &str,
|
||||
label_selector: &str,
|
||||
) -> Result<bool, Error> {
|
||||
let api: Api<Deployment> = Api::namespaced(self.client.clone(), namespace);
|
||||
let lp = ListParams::default().labels(label_selector);
|
||||
let list = api.list(&lp).await?;
|
||||
for d in list.items {
|
||||
// Check AvailableReplicas > 0 or Available condition
|
||||
let available = d
|
||||
.status
|
||||
.as_ref()
|
||||
.and_then(|s| s.available_replicas)
|
||||
.unwrap_or(0);
|
||||
if available > 0 {
|
||||
return Ok(true);
|
||||
}
|
||||
// Fallback: scan conditions
|
||||
if let Some(conds) = d.status.as_ref().and_then(|s| s.conditions.as_ref()) {
|
||||
if conds.iter().any(|c| {
|
||||
c.type_ == "Available"
|
||||
&& c.status == "True"
|
||||
}) {
|
||||
return Ok(true);
|
||||
}
|
||||
}
|
||||
}
|
||||
Ok(false)
|
||||
}
|
||||
|
||||
// Cluster-wide: returns namespaces that have at least one healthy deployment
|
||||
// matching the label selector (equivalent to kubectl -A -l ...).
|
||||
pub async fn list_namespaces_with_healthy_deployments(
|
||||
&self,
|
||||
label_selector: &str,
|
||||
) -> Result<Vec<String>, Error> {
|
||||
let api: Api<Deployment> = Api::all(self.client.clone());
|
||||
let lp = ListParams::default().labels(label_selector);
|
||||
let list = api.list(&lp).await?;
|
||||
|
||||
let mut healthy_ns: HashMap<String, bool> = HashMap::new();
|
||||
for d in list.items {
|
||||
let ns = match d.metadata.namespace.clone() {
|
||||
Some(n) => n,
|
||||
None => continue,
|
||||
};
|
||||
let available = d
|
||||
.status
|
||||
.as_ref()
|
||||
.and_then(|s| s.available_replicas)
|
||||
.unwrap_or(0);
|
||||
let is_healthy = if available > 0 {
|
||||
true
|
||||
} else {
|
||||
d.status
|
||||
.as_ref()
|
||||
.and_then(|s| s.conditions.as_ref())
|
||||
.map(|conds| {
|
||||
conds.iter().any(|c| {
|
||||
c.type_ == "Available"
|
||||
&& c.status == "True"
|
||||
})
|
||||
})
|
||||
.unwrap_or(false)
|
||||
};
|
||||
if is_healthy {
|
||||
healthy_ns.insert(ns, true);
|
||||
}
|
||||
}
|
||||
|
||||
Ok(healthy_ns.into_keys().collect())
|
||||
}
|
||||
|
||||
// Get the application-controller ServiceAccount name (fallback to default)
|
||||
pub async fn get_argocd_controller_sa_name(&self, ns: &str) -> Result<String, Error> {
|
||||
let api: Api<Deployment> = Api::namespaced(self.client.clone(), ns);
|
||||
let lp = ListParams::default().labels("app.kubernetes.io/component=controller");
|
||||
let list = api.list(&lp).await?;
|
||||
if let Some(dep) = list.items.get(0) {
|
||||
if let Some(sa) = dep
|
||||
.spec
|
||||
.as_ref()
|
||||
.and_then(|ds| ds.template.spec.as_ref())
|
||||
.and_then(|ps| ps.service_account_name.clone())
|
||||
{
|
||||
return Ok(sa);
|
||||
}
|
||||
}
|
||||
Ok("argocd-application-controller".to_string())
|
||||
}
|
||||
|
||||
// List ClusterRoleBindings dynamically and return as JSON values
|
||||
pub async fn list_clusterrolebindings_json(&self) -> Result<Vec<Value>, Error> {
|
||||
let gvk = kube::api::GroupVersionKind::gvk(
|
||||
"rbac.authorization.k8s.io",
|
||||
"v1",
|
||||
"ClusterRoleBinding",
|
||||
);
|
||||
let ar = kube::api::ApiResource::from_gvk(&gvk);
|
||||
let api: Api<kube::api::DynamicObject> = Api::all_with(self.client.clone(), &ar);
|
||||
let crbs = api.list(&ListParams::default()).await?;
|
||||
let mut out = Vec::new();
|
||||
for o in crbs {
|
||||
let v = serde_json::to_value(&o).unwrap_or(Value::Null);
|
||||
out.push(v);
|
||||
}
|
||||
Ok(out)
|
||||
}
|
||||
|
||||
// Determine if Argo controller in ns has cluster-wide permissions via CRBs
|
||||
// TODO This does not belong in the generic k8s client, should be refactored at some point
|
||||
pub async fn is_argocd_cluster_wide(&self, ns: &str) -> Result<bool, Error> {
|
||||
let sa = self.get_argocd_controller_sa_name(ns).await?;
|
||||
let crbs = self.list_clusterrolebindings_json().await?;
|
||||
let sa_user = format!("system:serviceaccount:{}:{}", ns, sa);
|
||||
for crb in crbs {
|
||||
if let Some(subjects) = crb.get("subjects").and_then(|s| s.as_array()) {
|
||||
for subj in subjects {
|
||||
let kind = subj.get("kind").and_then(|v| v.as_str()).unwrap_or("");
|
||||
let name = subj.get("name").and_then(|v| v.as_str()).unwrap_or("");
|
||||
let subj_ns = subj.get("namespace").and_then(|v| v.as_str()).unwrap_or("");
|
||||
if (kind == "ServiceAccount" && name == sa && subj_ns == ns)
|
||||
|| (kind == "User" && name == sa_user)
|
||||
{
|
||||
return Ok(true);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
Ok(false)
|
||||
}
|
||||
|
||||
pub async fn has_crd(&self, name: &str) -> Result<bool, Error> {
|
||||
let api: Api<CustomResourceDefinition> = Api::all(self.client.clone());
|
||||
let lp = ListParams::default().fields(&format!("metadata.name={}", name));
|
||||
let crds = api.list(&lp).await?;
|
||||
Ok(!crds.items.is_empty())
|
||||
pub async fn service_account_api(&self, namespace: &str) -> Api<ServiceAccount> {
|
||||
let api: Api<ServiceAccount> = Api::namespaced(self.client.clone(), namespace);
|
||||
api
|
||||
}
|
||||
|
||||
pub async fn get_apiserver_version(&self) -> Result<Info, Error> {
|
||||
@@ -223,7 +92,8 @@ impl K8sClient {
|
||||
} else {
|
||||
Api::default_namespaced_with(self.client.clone(), &gvk)
|
||||
};
|
||||
Ok(resource.get(name).await?)
|
||||
|
||||
resource.get(name).await
|
||||
}
|
||||
|
||||
pub async fn get_deployment(
|
||||
@@ -238,8 +108,9 @@ impl K8sClient {
|
||||
debug!("getting default namespace deployment");
|
||||
Api::default_namespaced(self.client.clone())
|
||||
};
|
||||
|
||||
debug!("getting deployment {} in ns {}", name, namespace.unwrap());
|
||||
Ok(deps.get_opt(name).await?)
|
||||
deps.get_opt(name).await
|
||||
}
|
||||
|
||||
pub async fn get_pod(&self, name: &str, namespace: Option<&str>) -> Result<Option<Pod>, Error> {
|
||||
@@ -248,7 +119,8 @@ impl K8sClient {
|
||||
} else {
|
||||
Api::default_namespaced(self.client.clone())
|
||||
};
|
||||
Ok(pods.get_opt(name).await?)
|
||||
|
||||
pods.get_opt(name).await
|
||||
}
|
||||
|
||||
pub async fn scale_deployment(
|
||||
@@ -291,9 +163,9 @@ impl K8sClient {
|
||||
|
||||
pub async fn wait_until_deployment_ready(
|
||||
&self,
|
||||
name: String,
|
||||
name: &str,
|
||||
namespace: Option<&str>,
|
||||
timeout: Option<u64>,
|
||||
timeout: Option<Duration>,
|
||||
) -> Result<(), String> {
|
||||
let api: Api<Deployment>;
|
||||
|
||||
@@ -303,9 +175,9 @@ impl K8sClient {
|
||||
api = Api::default_namespaced(self.client.clone());
|
||||
}
|
||||
|
||||
let establish = await_condition(api, name.as_str(), conditions::is_deployment_completed());
|
||||
let t = timeout.unwrap_or(300);
|
||||
let res = tokio::time::timeout(std::time::Duration::from_secs(t), establish).await;
|
||||
let establish = await_condition(api, name, conditions::is_deployment_completed());
|
||||
let timeout = timeout.unwrap_or(Duration::from_secs(120));
|
||||
let res = tokio::time::timeout(timeout, establish).await;
|
||||
|
||||
if res.is_ok() {
|
||||
Ok(())
|
||||
@@ -395,7 +267,7 @@ impl K8sClient {
|
||||
|
||||
if let Some(s) = status.status {
|
||||
let mut stdout_buf = String::new();
|
||||
if let Some(mut stdout) = process.stdout().take() {
|
||||
if let Some(mut stdout) = process.stdout() {
|
||||
stdout
|
||||
.read_to_string(&mut stdout_buf)
|
||||
.await
|
||||
@@ -501,14 +373,14 @@ impl K8sClient {
|
||||
Ok(current) => {
|
||||
trace!("Received current value {current:#?}");
|
||||
// The resource exists, so we calculate and display a diff.
|
||||
println!("\nPerforming dry-run for resource: '{}'", name);
|
||||
println!("\nPerforming dry-run for resource: '{name}'");
|
||||
let mut current_yaml = serde_yaml::to_value(¤t).unwrap_or_else(|_| {
|
||||
panic!("Could not serialize current value : {current:#?}")
|
||||
});
|
||||
if current_yaml.is_mapping() && current_yaml.get("status").is_some() {
|
||||
let map = current_yaml.as_mapping_mut().unwrap();
|
||||
let removed = map.remove_entry("status");
|
||||
trace!("Removed status {:?}", removed);
|
||||
trace!("Removed status {removed:?}");
|
||||
} else {
|
||||
trace!(
|
||||
"Did not find status entry for current object {}/{}",
|
||||
@@ -537,14 +409,14 @@ impl K8sClient {
|
||||
similar::ChangeTag::Insert => "+",
|
||||
similar::ChangeTag::Equal => " ",
|
||||
};
|
||||
print!("{}{}", sign, change);
|
||||
print!("{sign}{change}");
|
||||
}
|
||||
// In a dry run, we return the new resource state that would have been applied.
|
||||
Ok(resource.clone())
|
||||
}
|
||||
Err(Error::Api(ErrorResponse { code: 404, .. })) => {
|
||||
// The resource does not exist, so the "diff" is the entire new resource.
|
||||
println!("\nPerforming dry-run for new resource: '{}'", name);
|
||||
println!("\nPerforming dry-run for new resource: '{name}'");
|
||||
println!(
|
||||
"Resource does not exist. It would be created with the following content:"
|
||||
);
|
||||
@@ -553,14 +425,14 @@ impl K8sClient {
|
||||
|
||||
// Print each line of the new resource with a '+' prefix.
|
||||
for line in new_yaml.lines() {
|
||||
println!("+{}", line);
|
||||
println!("+{line}");
|
||||
}
|
||||
// In a dry run, we return the new resource state that would have been created.
|
||||
Ok(resource.clone())
|
||||
}
|
||||
Err(e) => {
|
||||
// Another API error occurred.
|
||||
error!("Failed to get resource '{}': {}", name, e);
|
||||
error!("Failed to get resource '{name}': {e}");
|
||||
Err(e)
|
||||
}
|
||||
}
|
||||
@@ -575,7 +447,7 @@ impl K8sClient {
|
||||
where
|
||||
K: Resource + Clone + std::fmt::Debug + DeserializeOwned + serde::Serialize,
|
||||
<K as Resource>::Scope: ApplyStrategy<K>,
|
||||
<K as kube::Resource>::DynamicType: Default,
|
||||
<K as Resource>::DynamicType: Default,
|
||||
{
|
||||
let mut result = Vec::new();
|
||||
for r in resource.iter() {
|
||||
@@ -640,10 +512,7 @@ impl K8sClient {
|
||||
|
||||
// 6. Apply the object to the cluster using Server-Side Apply.
|
||||
// This will create the resource if it doesn't exist, or update it if it does.
|
||||
println!(
|
||||
"Applying Argo Application '{}' in namespace '{}'...",
|
||||
name, namespace
|
||||
);
|
||||
println!("Applying '{name}' in namespace '{namespace}'...",);
|
||||
let patch_params = PatchParams::apply("harmony"); // Use a unique field manager name
|
||||
let result = api.patch(name, &patch_params, &Patch::Apply(&obj)).await?;
|
||||
|
||||
@@ -652,7 +521,103 @@ impl K8sClient {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub(crate) async fn from_kubeconfig(path: &str) -> Option<K8sClient> {
|
||||
/// Apply a resource from a URL
|
||||
///
|
||||
/// It is the equivalent of `kubectl apply -f <url>`
|
||||
pub async fn apply_url(&self, url: Url, ns: Option<&str>) -> Result<(), Error> {
|
||||
let patch_params = PatchParams::apply("harmony");
|
||||
let discovery = kube::Discovery::new(self.client.clone()).run().await?;
|
||||
|
||||
let yaml = reqwest::get(url)
|
||||
.await
|
||||
.expect("Could not get URL")
|
||||
.text()
|
||||
.await
|
||||
.expect("Could not get content from URL");
|
||||
|
||||
for doc in multidoc_deserialize(&yaml).expect("failed to parse YAML from file") {
|
||||
let obj: DynamicObject =
|
||||
serde_yaml::from_value(doc).expect("cannot apply without valid YAML");
|
||||
let namespace = obj.metadata.namespace.as_deref().or(ns);
|
||||
let type_meta = obj
|
||||
.types
|
||||
.as_ref()
|
||||
.expect("cannot apply object without valid TypeMeta");
|
||||
let gvk = GroupVersionKind::try_from(type_meta)
|
||||
.expect("cannot apply object without valid GroupVersionKind");
|
||||
let name = obj.name_any();
|
||||
|
||||
if let Some((ar, caps)) = discovery.resolve_gvk(&gvk) {
|
||||
let api = get_dynamic_api(ar, caps, self.client.clone(), namespace, false);
|
||||
trace!(
|
||||
"Applying {}: \n{}",
|
||||
gvk.kind,
|
||||
serde_yaml::to_string(&obj).expect("Failed to serialize YAML")
|
||||
);
|
||||
let data: serde_json::Value =
|
||||
serde_json::to_value(&obj).expect("Failed to serialize JSON");
|
||||
let _r = api.patch(&name, &patch_params, &Patch::Apply(data)).await?;
|
||||
debug!("applied {} {}", gvk.kind, name);
|
||||
} else {
|
||||
warn!("Cannot apply document for unknown {gvk:?}");
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Gets a single named resource of a specific type `K`.
|
||||
///
|
||||
/// This function uses the `ApplyStrategy` trait to correctly determine
|
||||
/// whether to look in a specific namespace or in the entire cluster.
|
||||
///
|
||||
/// Returns `Ok(None)` if the resource is not found (404).
|
||||
pub async fn get_resource<K>(
|
||||
&self,
|
||||
name: &str,
|
||||
namespace: Option<&str>,
|
||||
) -> Result<Option<K>, Error>
|
||||
where
|
||||
K: Resource + Clone + std::fmt::Debug + DeserializeOwned,
|
||||
<K as Resource>::Scope: ApplyStrategy<K>,
|
||||
<K as kube::Resource>::DynamicType: Default,
|
||||
{
|
||||
let api: Api<K> =
|
||||
<<K as Resource>::Scope as ApplyStrategy<K>>::get_api(&self.client, namespace);
|
||||
|
||||
api.get_opt(name).await
|
||||
}
|
||||
|
||||
/// Lists all resources of a specific type `K`.
|
||||
///
|
||||
/// This function uses the `ApplyStrategy` trait to correctly determine
|
||||
/// whether to list from a specific namespace or from the entire cluster.
|
||||
pub async fn list_resources<K>(
|
||||
&self,
|
||||
namespace: Option<&str>,
|
||||
list_params: Option<ListParams>,
|
||||
) -> Result<ObjectList<K>, Error>
|
||||
where
|
||||
K: Resource + Clone + std::fmt::Debug + DeserializeOwned,
|
||||
<K as Resource>::Scope: ApplyStrategy<K>,
|
||||
<K as kube::Resource>::DynamicType: Default,
|
||||
{
|
||||
let api: Api<K> =
|
||||
<<K as Resource>::Scope as ApplyStrategy<K>>::get_api(&self.client, namespace);
|
||||
|
||||
let list_params = list_params.unwrap_or_default();
|
||||
api.list(&list_params).await
|
||||
}
|
||||
|
||||
/// Fetches a list of all Nodes in the cluster.
|
||||
pub async fn get_nodes(
|
||||
&self,
|
||||
list_params: Option<ListParams>,
|
||||
) -> Result<ObjectList<Node>, Error> {
|
||||
self.list_resources(None, list_params).await
|
||||
}
|
||||
|
||||
pub async fn from_kubeconfig(path: &str) -> Option<K8sClient> {
|
||||
let k = match Kubeconfig::read_from(path) {
|
||||
Ok(k) => k,
|
||||
Err(e) => {
|
||||
@@ -671,6 +636,31 @@ impl K8sClient {
|
||||
}
|
||||
}
|
||||
|
||||
fn get_dynamic_api(
|
||||
resource: ApiResource,
|
||||
capabilities: ApiCapabilities,
|
||||
client: Client,
|
||||
ns: Option<&str>,
|
||||
all: bool,
|
||||
) -> Api<DynamicObject> {
|
||||
if capabilities.scope == Scope::Cluster || all {
|
||||
Api::all_with(client, &resource)
|
||||
} else if let Some(namespace) = ns {
|
||||
Api::namespaced_with(client, namespace, &resource)
|
||||
} else {
|
||||
Api::default_namespaced_with(client, &resource)
|
||||
}
|
||||
}
|
||||
|
||||
fn multidoc_deserialize(data: &str) -> Result<Vec<serde_yaml::Value>, serde_yaml::Error> {
|
||||
use serde::Deserialize;
|
||||
let mut docs = vec![];
|
||||
for de in serde_yaml::Deserializer::from_str(data) {
|
||||
docs.push(serde_yaml::Value::deserialize(de)?);
|
||||
}
|
||||
Ok(docs)
|
||||
}
|
||||
|
||||
pub trait ApplyStrategy<K: Resource> {
|
||||
fn get_api(client: &Client, ns: Option<&str>) -> Api<K>;
|
||||
}
|
||||
|
||||
@@ -1,8 +1,13 @@
|
||||
use std::{process::Command, sync::Arc};
|
||||
use std::{collections::BTreeMap, process::Command, sync::Arc, time::Duration};
|
||||
|
||||
use async_trait::async_trait;
|
||||
use kube::api::GroupVersionKind;
|
||||
use log::{debug, info, trace, warn};
|
||||
use base64::{Engine, engine::general_purpose};
|
||||
use k8s_openapi::api::{
|
||||
core::v1::Secret,
|
||||
rbac::v1::{ClusterRoleBinding, RoleRef, Subject},
|
||||
};
|
||||
use kube::api::{DynamicObject, GroupVersionKind, ObjectMeta};
|
||||
use log::{debug, info, warn};
|
||||
use serde::Serialize;
|
||||
use tokio::sync::OnceCell;
|
||||
|
||||
@@ -12,14 +17,26 @@ use crate::{
|
||||
inventory::Inventory,
|
||||
modules::{
|
||||
k3d::K3DInstallationScore,
|
||||
monitoring::kube_prometheus::crd::{
|
||||
crd_alertmanager_config::CRDPrometheus,
|
||||
prometheus_operator::prometheus_operator_helm_chart_score,
|
||||
rhob_alertmanager_config::RHOBObservability,
|
||||
k8s::ingress::{K8sIngressScore, PathType},
|
||||
monitoring::{
|
||||
grafana::{grafana::Grafana, helm::helm_grafana::grafana_helm_chart_score},
|
||||
kube_prometheus::crd::{
|
||||
crd_alertmanager_config::CRDPrometheus,
|
||||
crd_grafana::{
|
||||
Grafana as GrafanaCRD, GrafanaCom, GrafanaDashboard,
|
||||
GrafanaDashboardDatasource, GrafanaDashboardSpec, GrafanaDatasource,
|
||||
GrafanaDatasourceConfig, GrafanaDatasourceJsonData,
|
||||
GrafanaDatasourceSecureJsonData, GrafanaDatasourceSpec, GrafanaSpec,
|
||||
},
|
||||
crd_prometheuses::LabelSelector,
|
||||
prometheus_operator::prometheus_operator_helm_chart_score,
|
||||
rhob_alertmanager_config::RHOBObservability,
|
||||
service_monitor::ServiceMonitor,
|
||||
},
|
||||
},
|
||||
prometheus::{
|
||||
k8s_prometheus_alerting_score::K8sPrometheusCRDAlertingScore,
|
||||
prometheus::PrometheusApplicationMonitoring, rhob_alerting_score::RHOBAlertingScore,
|
||||
prometheus::PrometheusMonitoring, rhob_alerting_score::RHOBAlertingScore,
|
||||
},
|
||||
},
|
||||
score::Score,
|
||||
@@ -71,7 +88,6 @@ pub struct K8sAnywhereTopology {
|
||||
#[async_trait]
|
||||
impl K8sclient for K8sAnywhereTopology {
|
||||
async fn k8s_client(&self) -> Result<Arc<K8sClient>, String> {
|
||||
trace!("getting k8s client");
|
||||
let state = match self.k8s_state.get() {
|
||||
Some(state) => state,
|
||||
None => return Err("K8s state not initialized yet".to_string()),
|
||||
@@ -87,41 +103,172 @@ impl K8sclient for K8sAnywhereTopology {
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl PrometheusApplicationMonitoring<CRDPrometheus> for K8sAnywhereTopology {
|
||||
impl Grafana for K8sAnywhereTopology {
|
||||
async fn ensure_grafana_operator(
|
||||
&self,
|
||||
inventory: &Inventory,
|
||||
) -> Result<PreparationOutcome, PreparationError> {
|
||||
debug!("ensure grafana operator");
|
||||
let client = self.k8s_client().await.unwrap();
|
||||
let grafana_gvk = GroupVersionKind {
|
||||
group: "grafana.integreatly.org".to_string(),
|
||||
version: "v1beta1".to_string(),
|
||||
kind: "Grafana".to_string(),
|
||||
};
|
||||
let name = "grafanas.grafana.integreatly.org";
|
||||
let ns = "grafana";
|
||||
|
||||
let grafana_crd = client
|
||||
.get_resource_json_value(name, Some(ns), &grafana_gvk)
|
||||
.await;
|
||||
match grafana_crd {
|
||||
Ok(_) => {
|
||||
return Ok(PreparationOutcome::Success {
|
||||
details: "Found grafana CRDs in cluster".to_string(),
|
||||
});
|
||||
}
|
||||
|
||||
Err(_) => {
|
||||
return self
|
||||
.install_grafana_operator(inventory, Some("grafana"))
|
||||
.await;
|
||||
}
|
||||
};
|
||||
}
|
||||
async fn install_grafana(&self) -> Result<PreparationOutcome, PreparationError> {
|
||||
let ns = "grafana";
|
||||
|
||||
let mut label = BTreeMap::new();
|
||||
|
||||
label.insert("dashboards".to_string(), "grafana".to_string());
|
||||
|
||||
let label_selector = LabelSelector {
|
||||
match_labels: label.clone(),
|
||||
match_expressions: vec![],
|
||||
};
|
||||
|
||||
let client = self.k8s_client().await?;
|
||||
|
||||
let grafana = self.build_grafana(ns, &label);
|
||||
|
||||
client.apply(&grafana, Some(ns)).await?;
|
||||
//TODO change this to a ensure ready or something better than just a timeout
|
||||
client
|
||||
.wait_until_deployment_ready(
|
||||
"grafana-grafana-deployment",
|
||||
Some("grafana"),
|
||||
Some(Duration::from_secs(30)),
|
||||
)
|
||||
.await?;
|
||||
|
||||
let sa_name = "grafana-grafana-sa";
|
||||
let token_secret_name = "grafana-sa-token-secret";
|
||||
|
||||
let sa_token_secret = self.build_sa_token_secret(token_secret_name, sa_name, ns);
|
||||
|
||||
client.apply(&sa_token_secret, Some(ns)).await?;
|
||||
let secret_gvk = GroupVersionKind {
|
||||
group: "".to_string(),
|
||||
version: "v1".to_string(),
|
||||
kind: "Secret".to_string(),
|
||||
};
|
||||
|
||||
let secret = client
|
||||
.get_resource_json_value(token_secret_name, Some(ns), &secret_gvk)
|
||||
.await?;
|
||||
|
||||
let token = format!(
|
||||
"Bearer {}",
|
||||
self.extract_and_normalize_token(&secret).unwrap()
|
||||
);
|
||||
|
||||
debug!("creating grafana clusterrole binding");
|
||||
|
||||
let clusterrolebinding =
|
||||
self.build_cluster_rolebinding(sa_name, "cluster-monitoring-view", ns);
|
||||
|
||||
client.apply(&clusterrolebinding, Some(ns)).await?;
|
||||
|
||||
debug!("creating grafana datasource crd");
|
||||
|
||||
let thanos_url = format!(
|
||||
"https://{}",
|
||||
self.get_domain("thanos-querier-openshift-monitoring")
|
||||
.await
|
||||
.unwrap()
|
||||
);
|
||||
|
||||
let thanos_openshift_datasource = self.build_grafana_datasource(
|
||||
"thanos-openshift-monitoring",
|
||||
ns,
|
||||
&label_selector,
|
||||
&thanos_url,
|
||||
&token,
|
||||
);
|
||||
|
||||
client.apply(&thanos_openshift_datasource, Some(ns)).await?;
|
||||
|
||||
debug!("creating grafana dashboard crd");
|
||||
let dashboard = self.build_grafana_dashboard(ns, &label_selector);
|
||||
|
||||
client.apply(&dashboard, Some(ns)).await?;
|
||||
debug!("creating grafana ingress");
|
||||
let grafana_ingress = self.build_grafana_ingress(ns).await;
|
||||
|
||||
grafana_ingress
|
||||
.interpret(&Inventory::empty(), self)
|
||||
.await
|
||||
.map_err(|e| PreparationError::new(e.to_string()))?;
|
||||
|
||||
Ok(PreparationOutcome::Success {
|
||||
details: "Installed grafana composants".to_string(),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl PrometheusMonitoring<CRDPrometheus> for K8sAnywhereTopology {
|
||||
async fn install_prometheus(
|
||||
&self,
|
||||
sender: &CRDPrometheus,
|
||||
inventory: &Inventory,
|
||||
receivers: Option<Vec<Box<dyn AlertReceiver<CRDPrometheus>>>>,
|
||||
_inventory: &Inventory,
|
||||
_receivers: Option<Vec<Box<dyn AlertReceiver<CRDPrometheus>>>>,
|
||||
) -> Result<PreparationOutcome, PreparationError> {
|
||||
let client = self.k8s_client().await?;
|
||||
|
||||
for monitor in sender.service_monitor.iter() {
|
||||
client
|
||||
.apply(monitor, Some(&sender.namespace))
|
||||
.await
|
||||
.map_err(|e| PreparationError::new(e.to_string()))?;
|
||||
}
|
||||
Ok(PreparationOutcome::Success {
|
||||
details: "successfuly installed prometheus components".to_string(),
|
||||
})
|
||||
}
|
||||
|
||||
async fn ensure_prometheus_operator(
|
||||
&self,
|
||||
sender: &CRDPrometheus,
|
||||
_inventory: &Inventory,
|
||||
) -> Result<PreparationOutcome, PreparationError> {
|
||||
let po_result = self.ensure_prometheus_operator(sender).await?;
|
||||
|
||||
if po_result == PreparationOutcome::Noop {
|
||||
debug!("Skipping Prometheus CR installation due to missing operator.");
|
||||
return Ok(po_result);
|
||||
}
|
||||
|
||||
let result = self
|
||||
.get_k8s_prometheus_application_score(sender.clone(), receivers)
|
||||
.await
|
||||
.interpret(inventory, self)
|
||||
.await;
|
||||
|
||||
match result {
|
||||
Ok(outcome) => match outcome.status {
|
||||
InterpretStatus::SUCCESS => Ok(PreparationOutcome::Success {
|
||||
details: outcome.message,
|
||||
}),
|
||||
InterpretStatus::NOOP => Ok(PreparationOutcome::Noop),
|
||||
_ => Err(PreparationError::new(outcome.message)),
|
||||
},
|
||||
Err(err) => Err(PreparationError::new(err.to_string())),
|
||||
match po_result {
|
||||
PreparationOutcome::Success { details: _ } => {
|
||||
debug!("Detected prometheus crds operator present in cluster.");
|
||||
return Ok(po_result);
|
||||
}
|
||||
PreparationOutcome::Noop => {
|
||||
debug!("Skipping Prometheus CR installation due to missing operator.");
|
||||
return Ok(po_result);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl PrometheusApplicationMonitoring<RHOBObservability> for K8sAnywhereTopology {
|
||||
impl PrometheusMonitoring<RHOBObservability> for K8sAnywhereTopology {
|
||||
async fn install_prometheus(
|
||||
&self,
|
||||
sender: &RHOBObservability,
|
||||
@@ -155,6 +302,14 @@ impl PrometheusApplicationMonitoring<RHOBObservability> for K8sAnywhereTopology
|
||||
Err(err) => Err(PreparationError::new(err.to_string())),
|
||||
}
|
||||
}
|
||||
|
||||
async fn ensure_prometheus_operator(
|
||||
&self,
|
||||
sender: &RHOBObservability,
|
||||
inventory: &Inventory,
|
||||
) -> Result<PreparationOutcome, PreparationError> {
|
||||
todo!()
|
||||
}
|
||||
}
|
||||
|
||||
impl Serialize for K8sAnywhereTopology {
|
||||
@@ -216,6 +371,180 @@ impl K8sAnywhereTopology {
|
||||
.await
|
||||
}
|
||||
|
||||
fn extract_and_normalize_token(&self, secret: &DynamicObject) -> Option<String> {
|
||||
let token_b64 = secret
|
||||
.data
|
||||
.get("token")
|
||||
.or_else(|| secret.data.get("data").and_then(|d| d.get("token")))
|
||||
.and_then(|v| v.as_str())?;
|
||||
|
||||
let bytes = general_purpose::STANDARD.decode(token_b64).ok()?;
|
||||
|
||||
let s = String::from_utf8(bytes).ok()?;
|
||||
|
||||
let cleaned = s
|
||||
.trim_matches(|c: char| c.is_whitespace() || c == '\0')
|
||||
.to_string();
|
||||
Some(cleaned)
|
||||
}
|
||||
|
||||
pub fn build_cluster_rolebinding(
|
||||
&self,
|
||||
service_account_name: &str,
|
||||
clusterrole_name: &str,
|
||||
ns: &str,
|
||||
) -> ClusterRoleBinding {
|
||||
ClusterRoleBinding {
|
||||
metadata: ObjectMeta {
|
||||
name: Some(format!("{}-view-binding", service_account_name)),
|
||||
..Default::default()
|
||||
},
|
||||
role_ref: RoleRef {
|
||||
api_group: "rbac.authorization.k8s.io".into(),
|
||||
kind: "ClusterRole".into(),
|
||||
name: clusterrole_name.into(),
|
||||
},
|
||||
subjects: Some(vec![Subject {
|
||||
kind: "ServiceAccount".into(),
|
||||
name: service_account_name.into(),
|
||||
namespace: Some(ns.into()),
|
||||
..Default::default()
|
||||
}]),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn build_sa_token_secret(
|
||||
&self,
|
||||
secret_name: &str,
|
||||
service_account_name: &str,
|
||||
ns: &str,
|
||||
) -> Secret {
|
||||
let mut annotations = BTreeMap::new();
|
||||
annotations.insert(
|
||||
"kubernetes.io/service-account.name".to_string(),
|
||||
service_account_name.to_string(),
|
||||
);
|
||||
|
||||
Secret {
|
||||
metadata: ObjectMeta {
|
||||
name: Some(secret_name.into()),
|
||||
namespace: Some(ns.into()),
|
||||
annotations: Some(annotations),
|
||||
..Default::default()
|
||||
},
|
||||
type_: Some("kubernetes.io/service-account-token".to_string()),
|
||||
..Default::default()
|
||||
}
|
||||
}
|
||||
|
||||
fn build_grafana_datasource(
|
||||
&self,
|
||||
name: &str,
|
||||
ns: &str,
|
||||
label_selector: &LabelSelector,
|
||||
url: &str,
|
||||
token: &str,
|
||||
) -> GrafanaDatasource {
|
||||
let mut json_data = BTreeMap::new();
|
||||
json_data.insert("timeInterval".to_string(), "5s".to_string());
|
||||
|
||||
GrafanaDatasource {
|
||||
metadata: ObjectMeta {
|
||||
name: Some(name.to_string()),
|
||||
namespace: Some(ns.to_string()),
|
||||
..Default::default()
|
||||
},
|
||||
spec: GrafanaDatasourceSpec {
|
||||
instance_selector: label_selector.clone(),
|
||||
allow_cross_namespace_import: Some(true),
|
||||
values_from: None,
|
||||
datasource: GrafanaDatasourceConfig {
|
||||
access: "proxy".to_string(),
|
||||
name: name.to_string(),
|
||||
r#type: "prometheus".to_string(),
|
||||
url: url.to_string(),
|
||||
database: None,
|
||||
json_data: Some(GrafanaDatasourceJsonData {
|
||||
time_interval: Some("60s".to_string()),
|
||||
http_header_name1: Some("Authorization".to_string()),
|
||||
tls_skip_verify: Some(true),
|
||||
oauth_pass_thru: Some(true),
|
||||
}),
|
||||
secure_json_data: Some(GrafanaDatasourceSecureJsonData {
|
||||
http_header_value1: Some(format!("Bearer {token}")),
|
||||
}),
|
||||
is_default: Some(false),
|
||||
editable: Some(true),
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
fn build_grafana_dashboard(
|
||||
&self,
|
||||
ns: &str,
|
||||
label_selector: &LabelSelector,
|
||||
) -> GrafanaDashboard {
|
||||
let graf_dashboard = GrafanaDashboard {
|
||||
metadata: ObjectMeta {
|
||||
name: Some(format!("grafana-dashboard-{}", ns)),
|
||||
namespace: Some(ns.to_string()),
|
||||
..Default::default()
|
||||
},
|
||||
spec: GrafanaDashboardSpec {
|
||||
resync_period: Some("30s".to_string()),
|
||||
instance_selector: label_selector.clone(),
|
||||
datasources: Some(vec![GrafanaDashboardDatasource {
|
||||
input_name: "DS_PROMETHEUS".to_string(),
|
||||
datasource_name: "thanos-openshift-monitoring".to_string(),
|
||||
}]),
|
||||
json: None,
|
||||
grafana_com: Some(GrafanaCom {
|
||||
id: 17406,
|
||||
revision: None,
|
||||
}),
|
||||
},
|
||||
};
|
||||
graf_dashboard
|
||||
}
|
||||
|
||||
fn build_grafana(&self, ns: &str, labels: &BTreeMap<String, String>) -> GrafanaCRD {
|
||||
let grafana = GrafanaCRD {
|
||||
metadata: ObjectMeta {
|
||||
name: Some(format!("grafana-{}", ns)),
|
||||
namespace: Some(ns.to_string()),
|
||||
labels: Some(labels.clone()),
|
||||
..Default::default()
|
||||
},
|
||||
spec: GrafanaSpec {
|
||||
config: None,
|
||||
admin_user: None,
|
||||
admin_password: None,
|
||||
ingress: None,
|
||||
persistence: None,
|
||||
resources: None,
|
||||
},
|
||||
};
|
||||
grafana
|
||||
}
|
||||
|
||||
async fn build_grafana_ingress(&self, ns: &str) -> K8sIngressScore {
|
||||
let domain = self.get_domain(&format!("grafana-{}", ns)).await.unwrap();
|
||||
let name = format!("{}-grafana", ns);
|
||||
let backend_service = format!("grafana-{}-service", ns);
|
||||
|
||||
K8sIngressScore {
|
||||
name: fqdn::fqdn!(&name),
|
||||
host: fqdn::fqdn!(&domain),
|
||||
backend_service: fqdn::fqdn!(&backend_service),
|
||||
port: 3000,
|
||||
path: Some("/".to_string()),
|
||||
path_type: Some(PathType::Prefix),
|
||||
namespace: Some(fqdn::fqdn!(&ns)),
|
||||
ingress_class_name: Some("openshift-default".to_string()),
|
||||
}
|
||||
}
|
||||
|
||||
async fn get_cluster_observability_operator_prometheus_application_score(
|
||||
&self,
|
||||
sender: RHOBObservability,
|
||||
@@ -233,13 +562,14 @@ impl K8sAnywhereTopology {
|
||||
&self,
|
||||
sender: CRDPrometheus,
|
||||
receivers: Option<Vec<Box<dyn AlertReceiver<CRDPrometheus>>>>,
|
||||
service_monitors: Option<Vec<ServiceMonitor>>,
|
||||
) -> K8sPrometheusCRDAlertingScore {
|
||||
K8sPrometheusCRDAlertingScore {
|
||||
return K8sPrometheusCRDAlertingScore {
|
||||
sender,
|
||||
receivers: receivers.unwrap_or_default(),
|
||||
service_monitors: vec![],
|
||||
service_monitors: service_monitors.unwrap_or_default(),
|
||||
prometheus_rules: vec![],
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
async fn openshift_ingress_operator_available(&self) -> Result<(), PreparationError> {
|
||||
@@ -507,6 +837,30 @@ impl K8sAnywhereTopology {
|
||||
details: "prometheus operator present in cluster".into(),
|
||||
})
|
||||
}
|
||||
|
||||
async fn install_grafana_operator(
|
||||
&self,
|
||||
inventory: &Inventory,
|
||||
ns: Option<&str>,
|
||||
) -> Result<PreparationOutcome, PreparationError> {
|
||||
let namespace = ns.unwrap_or("grafana");
|
||||
info!("installing grafana operator in ns {namespace}");
|
||||
let tenant = self.get_k8s_tenant_manager()?.get_tenant_config().await;
|
||||
let mut namespace_scope = false;
|
||||
if tenant.is_some() {
|
||||
namespace_scope = true;
|
||||
}
|
||||
let _grafana_operator_score = grafana_helm_chart_score(namespace, namespace_scope)
|
||||
.interpret(inventory, self)
|
||||
.await
|
||||
.map_err(|e| PreparationError::new(e.to_string()));
|
||||
Ok(PreparationOutcome::Success {
|
||||
details: format!(
|
||||
"Successfully installed grafana operator in ns {}",
|
||||
ns.unwrap()
|
||||
),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
@@ -621,56 +975,36 @@ impl TenantManager for K8sAnywhereTopology {
|
||||
|
||||
#[async_trait]
|
||||
impl Ingress for K8sAnywhereTopology {
|
||||
//TODO this is specifically for openshift/okd which violates the k8sanywhere idea
|
||||
async fn get_domain(&self, service: &str) -> Result<String, PreparationError> {
|
||||
use log::{trace, debug, warn};
|
||||
|
||||
let client = self.k8s_client().await?;
|
||||
|
||||
if let Some(Some(k8s_state)) = self.k8s_state.get() {
|
||||
match k8s_state.source {
|
||||
K8sSource::LocalK3d => {
|
||||
// Local developer UX
|
||||
return Ok(format!("{service}.local.k3d"));
|
||||
}
|
||||
K8sSource::LocalK3d => Ok(format!("{service}.local.k3d")),
|
||||
K8sSource::Kubeconfig => {
|
||||
trace!("K8sSource is kubeconfig; attempting to detect domain");
|
||||
self.openshift_ingress_operator_available().await?;
|
||||
|
||||
// 1) Try OpenShift IngressController domain (backward compatible)
|
||||
if self.openshift_ingress_operator_available().await.is_ok() {
|
||||
trace!("OpenShift ingress operator detected; using IngressController");
|
||||
let gvk = GroupVersionKind {
|
||||
group: "operator.openshift.io".into(),
|
||||
version: "v1".into(),
|
||||
kind: "IngressController".into(),
|
||||
};
|
||||
let ic = client
|
||||
.get_resource_json_value("default", Some("openshift-ingress-operator"), &gvk)
|
||||
.await
|
||||
.map_err(|_| PreparationError::new("Failed to fetch IngressController".to_string()))?;
|
||||
let gvk = GroupVersionKind {
|
||||
group: "operator.openshift.io".into(),
|
||||
version: "v1".into(),
|
||||
kind: "IngressController".into(),
|
||||
};
|
||||
let ic = client
|
||||
.get_resource_json_value(
|
||||
"default",
|
||||
Some("openshift-ingress-operator"),
|
||||
&gvk,
|
||||
)
|
||||
.await
|
||||
.map_err(|_| {
|
||||
PreparationError::new("Failed to fetch IngressController".to_string())
|
||||
})?;
|
||||
|
||||
if let Some(domain) = ic.data["status"]["domain"].as_str() {
|
||||
return Ok(format!("{service}.{domain}"));
|
||||
} else {
|
||||
warn!("OpenShift IngressController present but no status.domain set");
|
||||
}
|
||||
} else {
|
||||
trace!("OpenShift ingress operator not detected; trying generic Kubernetes");
|
||||
match ic.data["status"]["domain"].as_str() {
|
||||
Some(domain) => Ok(format!("{service}.{domain}")),
|
||||
None => Err(PreparationError::new("Could not find domain".to_string())),
|
||||
}
|
||||
|
||||
// 2) Try NGINX Ingress Controller common setups
|
||||
// 2.a) Well-known namespace/name for the controller Service
|
||||
// - upstream default: namespace "ingress-nginx", service "ingress-nginx-controller"
|
||||
// - some distros: "ingress-nginx-controller" svc in "ingress-nginx" ns
|
||||
// If found with LoadBalancer ingress hostname, use its base domain.
|
||||
if let Some(domain) = try_nginx_lb_domain(&client).await? {
|
||||
return Ok(format!("{service}.{domain}"));
|
||||
}
|
||||
|
||||
// 3) Fallback: internal cluster DNS suffix (service.namespace.svc.cluster.local)
|
||||
// We don't have tenant namespace here, so we fallback to 'default' with a warning.
|
||||
warn!("Could not determine external ingress domain; falling back to internal-only DNS");
|
||||
let internal = format!("{service}.default.svc.cluster.local");
|
||||
Ok(internal)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
@@ -680,57 +1014,3 @@ impl Ingress for K8sAnywhereTopology {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async fn try_nginx_lb_domain(client: &K8sClient) -> Result<Option<String>, PreparationError> {
|
||||
use log::{trace, debug};
|
||||
|
||||
// Try common service path: svc/ingress-nginx-controller in ns/ingress-nginx
|
||||
let svc_gvk = GroupVersionKind {
|
||||
group: "".into(), // core
|
||||
version: "v1".into(),
|
||||
kind: "Service".into(),
|
||||
};
|
||||
|
||||
let candidates = [
|
||||
("ingress-nginx", "ingress-nginx-controller"),
|
||||
("ingress-nginx", "ingress-nginx-controller-internal"),
|
||||
("ingress-nginx", "ingress-nginx"), // some charts name the svc like this
|
||||
("kube-system", "ingress-nginx-controller"), // less common but seen
|
||||
];
|
||||
|
||||
for (ns, name) in candidates {
|
||||
trace!("Checking NGINX Service {ns}/{name} for LoadBalancer hostname");
|
||||
if let Ok(svc) = client.get_resource_json_value(ns, Some(name), &svc_gvk).await {
|
||||
let lb_hosts = svc.data["status"]["loadBalancer"]["ingress"].as_array().cloned().unwrap_or_default();
|
||||
for entry in lb_hosts {
|
||||
if let Some(host) = entry.get("hostname").and_then(|v| v.as_str()) {
|
||||
debug!("Found NGINX LB hostname: {host}");
|
||||
if let Some(domain) = extract_base_domain(host) {
|
||||
return Ok(Some(domain.to_string()));
|
||||
} else {
|
||||
return Ok(Some(host.to_string())); // already a domain
|
||||
}
|
||||
}
|
||||
if let Some(ip) = entry.get("ip").and_then(|v| v.as_str()) {
|
||||
// If only an IP is exposed, we can't create a hostname; return None to keep searching
|
||||
debug!("NGINX LB exposes IP {ip} (no hostname); skipping");
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Ok(None)
|
||||
}
|
||||
|
||||
fn extract_base_domain(host: &str) -> Option<String> {
|
||||
// For a host like a1b2c3d4e5f6abcdef.elb.amazonaws.com -> base domain elb.amazonaws.com
|
||||
// For a managed DNS like xyz.example.com -> base domain example.com (keep 2+ labels)
|
||||
// Heuristic: keep last 2 labels by default; special-case known multi-label TLDs if needed.
|
||||
let parts: Vec<&str> = host.split('.').collect();
|
||||
if parts.len() >= 2 {
|
||||
// Very conservative: last 2 labels
|
||||
Some(parts[parts.len() - 2..].join("."))
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
@@ -186,7 +186,7 @@ impl TopologyState {
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, PartialEq)]
|
||||
#[derive(Debug)]
|
||||
pub enum DeploymentTarget {
|
||||
LocalDev,
|
||||
Staging,
|
||||
|
||||
@@ -7,14 +7,16 @@ use std::{
|
||||
};
|
||||
|
||||
use async_trait::async_trait;
|
||||
use brocade::PortOperatingMode;
|
||||
use derive_new::new;
|
||||
use harmony_types::{
|
||||
id::Id,
|
||||
net::{IpAddress, MacAddress},
|
||||
switch::PortLocation,
|
||||
};
|
||||
use serde::Serialize;
|
||||
|
||||
use crate::{executors::ExecutorError, hardware::PhysicalHost};
|
||||
use crate::executors::ExecutorError;
|
||||
|
||||
use super::{LogicalHost, k8s::K8sClient};
|
||||
|
||||
@@ -182,6 +184,39 @@ impl FromStr for DnsRecordType {
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
pub trait NetworkManager: Debug + Send + Sync {
|
||||
async fn ensure_network_manager_installed(&self) -> Result<(), NetworkError>;
|
||||
async fn configure_bond(&self, config: &HostNetworkConfig) -> Result<(), NetworkError>;
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, new)]
|
||||
pub struct NetworkError {
|
||||
msg: String,
|
||||
}
|
||||
|
||||
impl fmt::Display for NetworkError {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
f.write_str(&self.msg)
|
||||
}
|
||||
}
|
||||
|
||||
impl Error for NetworkError {}
|
||||
|
||||
impl From<kube::Error> for NetworkError {
|
||||
fn from(value: kube::Error) -> Self {
|
||||
NetworkError::new(value.to_string())
|
||||
}
|
||||
}
|
||||
|
||||
impl From<String> for NetworkError {
|
||||
fn from(value: String) -> Self {
|
||||
NetworkError::new(value)
|
||||
}
|
||||
}
|
||||
|
||||
pub type PortConfig = (PortLocation, PortOperatingMode);
|
||||
|
||||
#[async_trait]
|
||||
pub trait Switch: Send + Sync {
|
||||
async fn setup_switch(&self) -> Result<(), SwitchError>;
|
||||
@@ -191,15 +226,14 @@ pub trait Switch: Send + Sync {
|
||||
mac_address: &MacAddress,
|
||||
) -> Result<Option<PortLocation>, SwitchError>;
|
||||
|
||||
async fn configure_host_network(
|
||||
&self,
|
||||
host: &PhysicalHost,
|
||||
config: HostNetworkConfig,
|
||||
) -> Result<(), SwitchError>;
|
||||
async fn configure_port_channel(&self, config: &HostNetworkConfig) -> Result<(), SwitchError>;
|
||||
async fn clear_port_channel(&self, ids: &Vec<Id>) -> Result<(), SwitchError>;
|
||||
async fn configure_interface(&self, ports: &Vec<PortConfig>) -> Result<(), SwitchError>;
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, PartialEq)]
|
||||
pub struct HostNetworkConfig {
|
||||
pub host_id: Id,
|
||||
pub switch_ports: Vec<SwitchPort>,
|
||||
}
|
||||
|
||||
@@ -254,6 +288,9 @@ pub trait SwitchClient: Debug + Send + Sync {
|
||||
channel_name: &str,
|
||||
switch_ports: Vec<PortLocation>,
|
||||
) -> Result<u8, SwitchError>;
|
||||
|
||||
async fn clear_port_channel(&self, ids: &Vec<Id>) -> Result<(), SwitchError>;
|
||||
async fn configure_interface(&self, ports: &Vec<PortConfig>) -> Result<(), SwitchError>;
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
|
||||
@@ -31,6 +31,7 @@ impl<S: AlertSender + Installable<T>, T: Topology> Interpret<T> for AlertingInte
|
||||
inventory: &Inventory,
|
||||
topology: &T,
|
||||
) -> Result<Outcome, InterpretError> {
|
||||
debug!("hit sender configure for AlertingInterpret");
|
||||
self.sender.configure(inventory, topology).await?;
|
||||
for receiver in self.receivers.iter() {
|
||||
receiver.install(&self.sender).await?;
|
||||
@@ -86,4 +87,5 @@ pub trait AlertRule<S: AlertSender>: std::fmt::Debug + Send + Sync {
|
||||
#[async_trait]
|
||||
pub trait ScrapeTarget<S: AlertSender>: std::fmt::Debug + Send + Sync {
|
||||
async fn install(&self, sender: &S) -> Result<Outcome, InterpretError>;
|
||||
fn clone_box(&self) -> Box<dyn ScrapeTarget<S>>;
|
||||
}
|
||||
|
||||
@@ -14,7 +14,7 @@ use k8s_openapi::{
|
||||
},
|
||||
apimachinery::pkg::util::intstr::IntOrString,
|
||||
};
|
||||
use kube::Resource;
|
||||
use kube::{Resource, api::DynamicObject};
|
||||
use log::debug;
|
||||
use serde::de::DeserializeOwned;
|
||||
use serde_json::json;
|
||||
|
||||
@@ -1,12 +1,13 @@
|
||||
use async_trait::async_trait;
|
||||
use brocade::{BrocadeClient, BrocadeOptions, InterSwitchLink, InterfaceStatus, PortOperatingMode};
|
||||
use harmony_types::{
|
||||
id::Id,
|
||||
net::{IpAddress, MacAddress},
|
||||
switch::{PortDeclaration, PortLocation},
|
||||
};
|
||||
use option_ext::OptionExt;
|
||||
|
||||
use crate::topology::{SwitchClient, SwitchError};
|
||||
use crate::topology::{PortConfig, SwitchClient, SwitchError};
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct BrocadeSwitchClient {
|
||||
@@ -18,9 +19,9 @@ impl BrocadeSwitchClient {
|
||||
ip_addresses: &[IpAddress],
|
||||
username: &str,
|
||||
password: &str,
|
||||
options: Option<BrocadeOptions>,
|
||||
options: BrocadeOptions,
|
||||
) -> Result<Self, brocade::Error> {
|
||||
let brocade = brocade::init(ip_addresses, 22, username, password, options).await?;
|
||||
let brocade = brocade::init(ip_addresses, username, password, options).await?;
|
||||
Ok(Self { brocade })
|
||||
}
|
||||
}
|
||||
@@ -59,7 +60,7 @@ impl SwitchClient for BrocadeSwitchClient {
|
||||
}
|
||||
|
||||
self.brocade
|
||||
.configure_interfaces(interfaces)
|
||||
.configure_interfaces(&interfaces)
|
||||
.await
|
||||
.map_err(|e| SwitchError::new(e.to_string()))?;
|
||||
|
||||
@@ -111,6 +112,24 @@ impl SwitchClient for BrocadeSwitchClient {
|
||||
|
||||
Ok(channel_id)
|
||||
}
|
||||
async fn clear_port_channel(&self, ids: &Vec<Id>) -> Result<(), SwitchError> {
|
||||
for i in ids {
|
||||
self.brocade
|
||||
.clear_port_channel(&i.to_string())
|
||||
.await
|
||||
.map_err(|e| SwitchError::new(e.to_string()))?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
async fn configure_interface(&self, ports: &Vec<PortConfig>) -> Result<(), SwitchError> {
|
||||
// FIXME hardcoded TenGigabitEthernet = bad
|
||||
let ports = ports.iter().map(|p| (format!("TenGigabitEthernet {}", p.0), p.1.clone())).collect();
|
||||
self.brocade
|
||||
.configure_interfaces(&ports)
|
||||
.await
|
||||
.map_err(|e| SwitchError::new(e.to_string()))?;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
@@ -147,8 +166,8 @@ mod tests {
|
||||
|
||||
let configured_interfaces = brocade.configured_interfaces.lock().unwrap();
|
||||
assert_that!(*configured_interfaces).contains_exactly(vec![
|
||||
(first_interface.name.clone(), PortOperatingMode::Access),
|
||||
(second_interface.name.clone(), PortOperatingMode::Access),
|
||||
(first_interface.port_location, PortOperatingMode::Access),
|
||||
(second_interface.port_location, PortOperatingMode::Access),
|
||||
]);
|
||||
}
|
||||
|
||||
@@ -255,10 +274,10 @@ mod tests {
|
||||
|
||||
async fn configure_interfaces(
|
||||
&self,
|
||||
interfaces: Vec<(String, PortOperatingMode)>,
|
||||
interfaces: &Vec<(String, PortOperatingMode)>,
|
||||
) -> Result<(), Error> {
|
||||
let mut configured_interfaces = self.configured_interfaces.lock().unwrap();
|
||||
*configured_interfaces = interfaces;
|
||||
*configured_interfaces = interfaces.clone();
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@@ -11,7 +11,7 @@ pub struct InventoryRepositoryFactory;
|
||||
impl InventoryRepositoryFactory {
|
||||
pub async fn build() -> Result<Box<dyn InventoryRepository>, RepoError> {
|
||||
Ok(Box::new(
|
||||
SqliteInventoryRepository::new(&(*DATABASE_URL)).await?,
|
||||
SqliteInventoryRepository::new(&DATABASE_URL).await?,
|
||||
))
|
||||
}
|
||||
}
|
||||
|
||||
182
harmony/src/infra/kube.rs
Normal file
182
harmony/src/infra/kube.rs
Normal file
@@ -0,0 +1,182 @@
|
||||
use k8s_openapi::Resource as K8sResource;
|
||||
use kube::api::{ApiResource, DynamicObject, GroupVersionKind};
|
||||
use kube::core::TypeMeta;
|
||||
use serde::Serialize;
|
||||
use serde::de::DeserializeOwned;
|
||||
use serde_json::Value;
|
||||
|
||||
/// Convert a typed Kubernetes resource `K` into a `DynamicObject`.
|
||||
///
|
||||
/// Requirements:
|
||||
/// - `K` must be a k8s_openapi resource (provides static GVK via `Resource`).
|
||||
/// - `K` must have standard Kubernetes shape (metadata + payload fields).
|
||||
///
|
||||
/// Notes:
|
||||
/// - We set `types` (apiVersion/kind) and copy `metadata`.
|
||||
/// - We place the remaining top-level fields into `obj.data` as JSON.
|
||||
/// - Scope is not encoded on the object itself; you still need the corresponding
|
||||
/// `DynamicResource` (derived from K::group/version/kind) when constructing an Api.
|
||||
///
|
||||
/// Example usage:
|
||||
/// let dyn_obj = kube_resource_to_dynamic(secret)?;
|
||||
/// let api: Api<DynamicObject> = Api::namespaced_with(client, "ns", &dr);
|
||||
/// api.patch(&dyn_obj.name_any(), &PatchParams::apply("mgr"), &Patch::Apply(dyn_obj)).await?;
|
||||
pub fn kube_resource_to_dynamic<K>(res: &K) -> Result<DynamicObject, String>
|
||||
where
|
||||
K: K8sResource + Serialize + DeserializeOwned,
|
||||
{
|
||||
// Serialize the typed resource to JSON so we can split metadata and payload
|
||||
let mut v = serde_json::to_value(res).map_err(|e| format!("Failed to serialize : {e}"))?;
|
||||
let obj = v
|
||||
.as_object_mut()
|
||||
.ok_or_else(|| "expected object JSON".to_string())?;
|
||||
|
||||
// Extract and parse metadata into kube::core::ObjectMeta
|
||||
let metadata_value = obj
|
||||
.remove("metadata")
|
||||
.ok_or_else(|| "missing metadata".to_string())?;
|
||||
let metadata: kube::core::ObjectMeta = serde_json::from_value(metadata_value)
|
||||
.map_err(|e| format!("Failed to deserialize : {e}"))?;
|
||||
|
||||
// Name is required for DynamicObject::new; prefer metadata.name
|
||||
let name = metadata
|
||||
.name
|
||||
.clone()
|
||||
.ok_or_else(|| "metadata.name is required".to_string())?;
|
||||
|
||||
// Remaining fields (spec/status/data/etc.) become the dynamic payload
|
||||
let payload = Value::Object(obj.clone());
|
||||
|
||||
// Construct the DynamicObject
|
||||
let mut dyn_obj = DynamicObject::new(
|
||||
&name,
|
||||
&ApiResource::from_gvk(&GroupVersionKind::gvk(K::GROUP, K::VERSION, K::KIND)),
|
||||
);
|
||||
dyn_obj.types = Some(TypeMeta {
|
||||
api_version: api_version_for::<K>(),
|
||||
kind: K::KIND.into(),
|
||||
});
|
||||
|
||||
// Preserve namespace/labels/annotations/etc.
|
||||
dyn_obj.metadata = metadata;
|
||||
|
||||
// Attach payload
|
||||
dyn_obj.data = payload;
|
||||
|
||||
Ok(dyn_obj)
|
||||
}
|
||||
|
||||
/// Helper: compute apiVersion string ("group/version" or "v1" for core).
|
||||
fn api_version_for<K>() -> String
|
||||
where
|
||||
K: K8sResource,
|
||||
{
|
||||
let group = K::GROUP;
|
||||
let version = K::VERSION;
|
||||
if group.is_empty() {
|
||||
version.to_string() // core/v1 => "v1"
|
||||
} else {
|
||||
format!("{}/{}", group, version)
|
||||
}
|
||||
}
|
||||
#[cfg(test)]
mod test {
    use super::*;
    use k8s_openapi::api::{
        apps::v1::{Deployment, DeploymentSpec},
        core::v1::{PodTemplateSpec, Secret},
    };
    use kube::api::ObjectMeta;
    use pretty_assertions::assert_eq;

    // Verifies that converting a typed Secret to a DynamicObject preserves the
    // exact serialized JSON shape, and that the conversion deep-copies (later
    // mutations of the original must not show up in the converted object).
    #[test]
    fn secret_to_dynamic_roundtrip() {
        // Create a sample Secret resource
        let mut secret = Secret {
            metadata: ObjectMeta {
                name: Some("my-secret".to_string()),
                ..Default::default()
            },
            type_: Some("kubernetes.io/service-account-token".to_string()),
            ..Default::default()
        };

        // Convert to DynamicResource
        let dynamic: DynamicObject =
            kube_resource_to_dynamic(&secret).expect("Failed to convert Secret to DynamicResource");

        // Serialize both the original and dynamic resources to Value
        let original_value = serde_json::to_value(&secret).expect("Failed to serialize Secret");
        let dynamic_value =
            serde_json::to_value(&dynamic).expect("Failed to serialize DynamicResource");

        // Assert that they are identical
        assert_eq!(original_value, dynamic_value);

        // Mutate the original after conversion: the already-converted object
        // must NOT reflect the change (proves there is no shared state).
        secret.metadata.namespace = Some("false".to_string());
        let modified_value = serde_json::to_value(&secret).expect("Failed to serialize Secret");
        assert_ne!(modified_value, dynamic_value);
    }

    // Same roundtrip guarantee for a resource with deeply nested payload
    // (Deployment spec -> template -> metadata -> labels), plus spot checks
    // that nested values are reachable through `DynamicObject::data`.
    #[test]
    fn deployment_to_dynamic_roundtrip() {
        // Create a sample Deployment with nested structures
        let deployment = Deployment {
            metadata: ObjectMeta {
                name: Some("my-deployment".to_string()),
                labels: Some({
                    let mut map = std::collections::BTreeMap::new();
                    map.insert("app".to_string(), "nginx".to_string());
                    map
                }),
                ..Default::default()
            },
            spec: Some(DeploymentSpec {
                replicas: Some(3),
                selector: Default::default(),
                template: PodTemplateSpec {
                    metadata: Some(ObjectMeta {
                        labels: Some({
                            let mut map = std::collections::BTreeMap::new();
                            map.insert("app".to_string(), "nginx".to_string());
                            map
                        }),
                        ..Default::default()
                    }),
                    spec: Some(Default::default()), // PodSpec with empty containers for simplicity
                },
                ..Default::default()
            }),
            ..Default::default()
        };

        let dynamic = kube_resource_to_dynamic(&deployment).expect("Failed to convert Deployment");

        let original_value = serde_json::to_value(&deployment).unwrap();
        let dynamic_value = serde_json::to_value(&dynamic).unwrap();

        assert_eq!(original_value, dynamic_value);

        // Spot-check: top-level non-metadata fields land in `data` untouched.
        assert_eq!(
            dynamic.data.get("spec").unwrap().get("replicas").unwrap(),
            3
        );
        assert_eq!(
            dynamic
                .data
                .get("spec")
                .unwrap()
                .get("template")
                .unwrap()
                .get("metadata")
                .unwrap()
                .get("labels")
                .unwrap()
                .get("app")
                .unwrap()
                .as_str()
                .unwrap(),
            "nginx".to_string()
        );
    }
}
|
||||
@@ -3,5 +3,7 @@ pub mod executors;
|
||||
pub mod hp_ilo;
|
||||
pub mod intel_amt;
|
||||
pub mod inventory;
|
||||
pub mod kube;
|
||||
pub mod network_manager;
|
||||
pub mod opnsense;
|
||||
mod sqlx;
|
||||
|
||||
259
harmony/src/infra/network_manager.rs
Normal file
259
harmony/src/infra/network_manager.rs
Normal file
@@ -0,0 +1,259 @@
|
||||
use std::{
|
||||
collections::{BTreeMap, HashSet},
|
||||
sync::Arc,
|
||||
};
|
||||
|
||||
use async_trait::async_trait;
|
||||
use harmony_types::id::Id;
|
||||
use k8s_openapi::api::core::v1::Node;
|
||||
use kube::{
|
||||
ResourceExt,
|
||||
api::{ObjectList, ObjectMeta},
|
||||
};
|
||||
use log::{debug, info};
|
||||
|
||||
use crate::{
|
||||
modules::okd::crd::nmstate,
|
||||
topology::{HostNetworkConfig, NetworkError, NetworkManager, k8s::K8sClient},
|
||||
};
|
||||
|
||||
/// `NetworkManager` implementation that drives host network configuration on
/// an OpenShift cluster through the kubernetes-nmstate operator (installed by
/// `ensure_network_manager_installed`); all changes are applied as Kubernetes
/// resources via the wrapped client.
pub struct OpenShiftNmStateNetworkManager {
    // Shared client used to apply NMState manifests, CRs and policies.
    k8s_client: Arc<K8sClient>,
}
|
||||
|
||||
impl std::fmt::Debug for OpenShiftNmStateNetworkManager {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
f.debug_struct("OpenShiftNmStateNetworkManager").finish()
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl NetworkManager for OpenShiftNmStateNetworkManager {
|
||||
async fn ensure_network_manager_installed(&self) -> Result<(), NetworkError> {
|
||||
debug!("Installing NMState controller...");
|
||||
self.k8s_client.apply_url(url::Url::parse("https://github.com/nmstate/kubernetes-nmstate/releases/download/v0.84.0/nmstate.io_nmstates.yaml
|
||||
").unwrap(), Some("nmstate"))
|
||||
.await?;
|
||||
|
||||
debug!("Creating NMState namespace...");
|
||||
self.k8s_client.apply_url(url::Url::parse("https://github.com/nmstate/kubernetes-nmstate/releases/download/v0.84.0/namespace.yaml
|
||||
").unwrap(), Some("nmstate"))
|
||||
.await?;
|
||||
|
||||
debug!("Creating NMState service account...");
|
||||
self.k8s_client.apply_url(url::Url::parse("https://github.com/nmstate/kubernetes-nmstate/releases/download/v0.84.0/service_account.yaml
|
||||
").unwrap(), Some("nmstate"))
|
||||
.await?;
|
||||
|
||||
debug!("Creating NMState role...");
|
||||
self.k8s_client.apply_url(url::Url::parse("https://github.com/nmstate/kubernetes-nmstate/releases/download/v0.84.0/role.yaml
|
||||
").unwrap(), Some("nmstate"))
|
||||
.await?;
|
||||
|
||||
debug!("Creating NMState role binding...");
|
||||
self.k8s_client.apply_url(url::Url::parse("https://github.com/nmstate/kubernetes-nmstate/releases/download/v0.84.0/role_binding.yaml
|
||||
").unwrap(), Some("nmstate"))
|
||||
.await?;
|
||||
|
||||
debug!("Creating NMState operator...");
|
||||
self.k8s_client.apply_url(url::Url::parse("https://github.com/nmstate/kubernetes-nmstate/releases/download/v0.84.0/operator.yaml
|
||||
").unwrap(), Some("nmstate"))
|
||||
.await?;
|
||||
|
||||
self.k8s_client
|
||||
.wait_until_deployment_ready("nmstate-operator", Some("nmstate"), None)
|
||||
.await?;
|
||||
|
||||
let nmstate = nmstate::NMState {
|
||||
metadata: ObjectMeta {
|
||||
name: Some("nmstate".to_string()),
|
||||
..Default::default()
|
||||
},
|
||||
..Default::default()
|
||||
};
|
||||
debug!(
|
||||
"Creating NMState:\n{}",
|
||||
serde_yaml::to_string(&nmstate).unwrap()
|
||||
);
|
||||
self.k8s_client.apply(&nmstate, None).await?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn configure_bond(&self, config: &HostNetworkConfig) -> Result<(), NetworkError> {
|
||||
let hostname = self.get_hostname(&config.host_id).await.map_err(|e| {
|
||||
NetworkError::new(format!(
|
||||
"Can't configure bond, can't get hostname for host '{}': {e}",
|
||||
config.host_id
|
||||
))
|
||||
})?;
|
||||
let bond_id = self.get_next_bond_id(&hostname).await.map_err(|e| {
|
||||
NetworkError::new(format!(
|
||||
"Can't configure bond, can't get an available bond id for host '{}': {e}",
|
||||
config.host_id
|
||||
))
|
||||
})?;
|
||||
let bond_config = self.create_bond_configuration(&hostname, &bond_id, config);
|
||||
|
||||
debug!(
|
||||
"Applying NMState bond config for host {}:\n{}",
|
||||
config.host_id,
|
||||
serde_yaml::to_string(&bond_config).unwrap(),
|
||||
);
|
||||
self.k8s_client
|
||||
.apply(&bond_config, None)
|
||||
.await
|
||||
.map_err(|e| NetworkError::new(format!("Failed to configure bond: {e}")))?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl OpenShiftNmStateNetworkManager {
|
||||
pub fn new(k8s_client: Arc<K8sClient>) -> Self {
|
||||
Self { k8s_client }
|
||||
}
|
||||
|
||||
fn create_bond_configuration(
|
||||
&self,
|
||||
host: &str,
|
||||
bond_name: &str,
|
||||
config: &HostNetworkConfig,
|
||||
) -> nmstate::NodeNetworkConfigurationPolicy {
|
||||
info!("Configuring bond '{bond_name}' for host '{host}'...");
|
||||
|
||||
let mut bond_mtu: Option<u32> = None;
|
||||
let mut copy_mac_from: Option<String> = None;
|
||||
let mut bond_ports = Vec::new();
|
||||
let mut interfaces: Vec<nmstate::Interface> = Vec::new();
|
||||
|
||||
for switch_port in &config.switch_ports {
|
||||
let interface_name = switch_port.interface.name.clone();
|
||||
|
||||
interfaces.push(nmstate::Interface {
|
||||
name: interface_name.clone(),
|
||||
description: Some(format!("Member of bond {bond_name}")),
|
||||
r#type: nmstate::InterfaceType::Ethernet,
|
||||
state: "up".to_string(),
|
||||
mtu: Some(switch_port.interface.mtu),
|
||||
mac_address: Some(switch_port.interface.mac_address.to_string()),
|
||||
ipv4: Some(nmstate::IpStackSpec {
|
||||
enabled: Some(false),
|
||||
..Default::default()
|
||||
}),
|
||||
ipv6: Some(nmstate::IpStackSpec {
|
||||
enabled: Some(false),
|
||||
..Default::default()
|
||||
}),
|
||||
link_aggregation: None,
|
||||
..Default::default()
|
||||
});
|
||||
|
||||
bond_ports.push(interface_name.clone());
|
||||
|
||||
// Use the first port's details for the bond mtu and mac address
|
||||
if bond_mtu.is_none() {
|
||||
bond_mtu = Some(switch_port.interface.mtu);
|
||||
}
|
||||
if copy_mac_from.is_none() {
|
||||
copy_mac_from = Some(interface_name);
|
||||
}
|
||||
}
|
||||
|
||||
interfaces.push(nmstate::Interface {
|
||||
name: bond_name.to_string(),
|
||||
description: Some(format!("Network bond for host {host}")),
|
||||
r#type: nmstate::InterfaceType::Bond,
|
||||
state: "up".to_string(),
|
||||
copy_mac_from,
|
||||
ipv4: Some(nmstate::IpStackSpec {
|
||||
dhcp: Some(true),
|
||||
enabled: Some(true),
|
||||
..Default::default()
|
||||
}),
|
||||
ipv6: Some(nmstate::IpStackSpec {
|
||||
dhcp: Some(true),
|
||||
autoconf: Some(true),
|
||||
enabled: Some(true),
|
||||
..Default::default()
|
||||
}),
|
||||
link_aggregation: Some(nmstate::BondSpec {
|
||||
mode: "802.3ad".to_string(),
|
||||
ports: bond_ports,
|
||||
..Default::default()
|
||||
}),
|
||||
..Default::default()
|
||||
});
|
||||
|
||||
nmstate::NodeNetworkConfigurationPolicy {
|
||||
metadata: ObjectMeta {
|
||||
name: Some(format!("{host}-bond-config")),
|
||||
..Default::default()
|
||||
},
|
||||
spec: nmstate::NodeNetworkConfigurationPolicySpec {
|
||||
node_selector: Some(BTreeMap::from([(
|
||||
"kubernetes.io/hostname".to_string(),
|
||||
host.to_string(),
|
||||
)])),
|
||||
desired_state: nmstate::NetworkState {
|
||||
interfaces,
|
||||
..Default::default()
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
async fn get_hostname(&self, host_id: &Id) -> Result<String, String> {
|
||||
let nodes: ObjectList<Node> = self
|
||||
.k8s_client
|
||||
.list_resources(None, None)
|
||||
.await
|
||||
.map_err(|e| format!("Failed to list nodes: {e}"))?;
|
||||
|
||||
let Some(node) = nodes.iter().find(|n| {
|
||||
n.status
|
||||
.as_ref()
|
||||
.and_then(|s| s.node_info.as_ref())
|
||||
.map(|i| i.system_uuid == host_id.to_string())
|
||||
.unwrap_or(false)
|
||||
}) else {
|
||||
return Err(format!("No node found for host '{host_id}'"));
|
||||
};
|
||||
|
||||
node.labels()
|
||||
.get("kubernetes.io/hostname")
|
||||
.ok_or(format!(
|
||||
"Node '{host_id}' has no kubernetes.io/hostname label"
|
||||
))
|
||||
.cloned()
|
||||
}
|
||||
|
||||
async fn get_next_bond_id(&self, hostname: &str) -> Result<String, String> {
|
||||
let network_state: Option<nmstate::NodeNetworkState> = self
|
||||
.k8s_client
|
||||
.get_resource(hostname, None)
|
||||
.await
|
||||
.map_err(|e| format!("Failed to list nodes: {e}"))?;
|
||||
|
||||
let interfaces = vec![];
|
||||
let existing_bonds: Vec<&nmstate::Interface> = network_state
|
||||
.as_ref()
|
||||
.and_then(|network_state| network_state.status.current_state.as_ref())
|
||||
.map_or(&interfaces, |current_state| ¤t_state.interfaces)
|
||||
.iter()
|
||||
.filter(|i| i.r#type == nmstate::InterfaceType::Bond)
|
||||
.collect();
|
||||
|
||||
let used_ids: HashSet<u32> = existing_bonds
|
||||
.iter()
|
||||
.filter_map(|i| {
|
||||
i.name
|
||||
.strip_prefix("bond")
|
||||
.and_then(|id| id.parse::<u32>().ok())
|
||||
})
|
||||
.collect();
|
||||
|
||||
let next_id = (0..).find(|id| !used_ids.contains(id)).unwrap();
|
||||
Ok(format!("bond{next_id}"))
|
||||
}
|
||||
}
|
||||
@@ -10,7 +10,7 @@ use super::OPNSenseFirewall;
|
||||
|
||||
#[async_trait]
|
||||
impl DnsServer for OPNSenseFirewall {
|
||||
async fn register_hosts(&self, _hosts: Vec<DnsRecord>) -> Result<(), ExecutorError> {
|
||||
async fn register_hosts(&self, hosts: Vec<DnsRecord>) -> Result<(), ExecutorError> {
|
||||
todo!("Refactor this to use dnsmasq")
|
||||
// let mut writable_opnsense = self.opnsense_config.write().await;
|
||||
// let mut dns = writable_opnsense.dns();
|
||||
@@ -68,7 +68,7 @@ impl DnsServer for OPNSenseFirewall {
|
||||
self.host.clone()
|
||||
}
|
||||
|
||||
async fn register_dhcp_leases(&self, _register: bool) -> Result<(), ExecutorError> {
|
||||
async fn register_dhcp_leases(&self, register: bool) -> Result<(), ExecutorError> {
|
||||
todo!("Refactor this to use dnsmasq")
|
||||
// let mut writable_opnsense = self.opnsense_config.write().await;
|
||||
// let mut dns = writable_opnsense.dns();
|
||||
|
||||
@@ -8,7 +8,6 @@ mod tftp;
|
||||
use std::sync::Arc;
|
||||
|
||||
pub use management::*;
|
||||
use opnsense_config_xml::Host;
|
||||
use tokio::sync::RwLock;
|
||||
|
||||
use crate::{executors::ExecutorError, topology::LogicalHost};
|
||||
|
||||
@@ -21,7 +21,7 @@ pub struct Helm {
|
||||
pub skip_schema_validation: Option<bool>,
|
||||
pub version: Option<String>,
|
||||
pub kube_version: Option<String>,
|
||||
// pub api_versions: Vec<String>,
|
||||
pub api_versions: Vec<String>,
|
||||
pub namespace: Option<String>,
|
||||
}
|
||||
|
||||
@@ -105,7 +105,7 @@ impl Default for ArgoApplication {
|
||||
skip_schema_validation: None,
|
||||
version: None,
|
||||
kube_version: None,
|
||||
// api_versions: vec![],
|
||||
api_versions: vec![],
|
||||
namespace: None,
|
||||
},
|
||||
path: "".to_string(),
|
||||
@@ -155,7 +155,7 @@ impl From<CDApplicationConfig> for ArgoApplication {
|
||||
skip_schema_validation: None,
|
||||
version: None,
|
||||
kube_version: None,
|
||||
// api_versions: vec![],
|
||||
api_versions: vec![],
|
||||
namespace: None,
|
||||
},
|
||||
},
|
||||
@@ -181,11 +181,13 @@ impl From<CDApplicationConfig> for ArgoApplication {
|
||||
}
|
||||
|
||||
impl ArgoApplication {
|
||||
pub fn to_yaml(&self, target_namespace: Option<&str>) -> serde_yaml::Value {
|
||||
pub fn to_yaml(&self) -> serde_yaml::Value {
|
||||
let name = &self.name;
|
||||
let default_ns = "argocd".to_string();
|
||||
let namespace: &str =
|
||||
target_namespace.unwrap_or(self.namespace.as_ref().unwrap_or(&default_ns));
|
||||
let namespace = if let Some(ns) = self.namespace.as_ref() {
|
||||
ns
|
||||
} else {
|
||||
"argocd"
|
||||
};
|
||||
let project = &self.project;
|
||||
|
||||
let yaml_str = format!(
|
||||
@@ -283,7 +285,7 @@ mod tests {
|
||||
skip_schema_validation: None,
|
||||
version: None,
|
||||
kube_version: None,
|
||||
// api_versions: vec![],
|
||||
api_versions: vec![],
|
||||
namespace: None,
|
||||
},
|
||||
path: "".to_string(),
|
||||
@@ -343,7 +345,7 @@ spec:
|
||||
|
||||
assert_eq!(
|
||||
expected_yaml_output.trim(),
|
||||
serde_yaml::to_string(&app.clone().to_yaml(None))
|
||||
serde_yaml::to_string(&app.clone().to_yaml())
|
||||
.unwrap()
|
||||
.trim()
|
||||
);
|
||||
|
||||
@@ -1,21 +1,22 @@
|
||||
use async_trait::async_trait;
|
||||
use harmony_macros::hurl;
|
||||
use kube::{Api, api::GroupVersionKind};
|
||||
use log::{debug, info, trace, warn};
|
||||
use log::{debug, warn};
|
||||
use non_blank_string_rs::NonBlankString;
|
||||
use serde::Serialize;
|
||||
use std::{str::FromStr, sync::Arc};
|
||||
use serde::de::DeserializeOwned;
|
||||
use std::{process::Command, str::FromStr, sync::Arc};
|
||||
|
||||
use crate::{
|
||||
data::Version,
|
||||
interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
|
||||
inventory::Inventory,
|
||||
modules::{
|
||||
argocd::{ArgoDeploymentType, detect_argo_deployment_type},
|
||||
helm::chart::{HelmChartScore, HelmRepository},
|
||||
},
|
||||
modules::helm::chart::{HelmChartScore, HelmRepository},
|
||||
score::Score,
|
||||
topology::{HelmCommand, K8sclient, Topology, ingress::Ingress, k8s::K8sClient},
|
||||
topology::{
|
||||
HelmCommand, K8sclient, PreparationError, PreparationOutcome, Topology, ingress::Ingress,
|
||||
k8s::K8sClient,
|
||||
},
|
||||
};
|
||||
use harmony_types::id::Id;
|
||||
|
||||
@@ -24,7 +25,6 @@ use super::ArgoApplication;
|
||||
#[derive(Debug, Serialize, Clone)]
|
||||
pub struct ArgoHelmScore {
|
||||
pub namespace: String,
|
||||
// TODO: remove and rely on topology (it now knows the flavor)
|
||||
pub openshift: bool,
|
||||
pub argo_apps: Vec<ArgoApplication>,
|
||||
}
|
||||
@@ -55,101 +55,29 @@ impl<T: Topology + K8sclient + HelmCommand + Ingress> Interpret<T> for ArgoInter
|
||||
inventory: &Inventory,
|
||||
topology: &T,
|
||||
) -> Result<Outcome, InterpretError> {
|
||||
trace!("Starting ArgoInterpret execution {self:?}");
|
||||
let k8s_client: Arc<K8sClient> = topology.k8s_client().await?;
|
||||
trace!("Got k8s client");
|
||||
let desired_ns = self.score.namespace.clone();
|
||||
|
||||
debug!("ArgoInterpret detecting cluster configuration");
|
||||
let svc = format!("argo-{}", desired_ns);
|
||||
let k8s_client = topology.k8s_client().await?;
|
||||
let svc = format!("argo-{}", self.score.namespace.clone());
|
||||
let domain = topology.get_domain(&svc).await?;
|
||||
debug!("Resolved Argo service domain for '{}': {}", svc, domain);
|
||||
let helm_score =
|
||||
argo_helm_chart_score(&self.score.namespace, self.score.openshift, &domain);
|
||||
|
||||
// Detect current Argo deployment type
|
||||
let current = detect_argo_deployment_type(&k8s_client, &desired_ns).await?;
|
||||
info!("Detected Argo deployment type: {:?}", current);
|
||||
helm_score.interpret(inventory, topology).await?;
|
||||
|
||||
// Decide control namespace and whether we must install
|
||||
let (control_ns, must_install) = match current.clone() {
|
||||
ArgoDeploymentType::NotInstalled => {
|
||||
info!(
|
||||
"Argo CD not installed. Will install via Helm into namespace '{}'.",
|
||||
desired_ns
|
||||
);
|
||||
(desired_ns.clone(), true)
|
||||
}
|
||||
ArgoDeploymentType::AvailableInDesiredNamespace(ns) => {
|
||||
info!(
|
||||
"Argo CD already installed by Harmony in '{}'. Skipping install.",
|
||||
ns
|
||||
);
|
||||
(ns, false)
|
||||
}
|
||||
ArgoDeploymentType::InstalledClusterWide(ns) => {
|
||||
info!(
|
||||
"Argo CD installed cluster-wide in namespace '{}'.",
|
||||
ns
|
||||
);
|
||||
(ns, false)
|
||||
}
|
||||
ArgoDeploymentType::InstalledNamespaceScoped(ns) => {
|
||||
// TODO we could support this use case by installing a new argo instance. But that
|
||||
// means handling a few cases that are out of scope for now :
|
||||
// - Wether argo operator is installed
|
||||
// - Managing CRD versions compatibility
|
||||
// - Potentially handling the various k8s flavors and setups we might encounter
|
||||
//
|
||||
// There is a possibility that the helm chart already handles most or even all of these use cases but they are out of scope for now.
|
||||
let msg = format!(
|
||||
"Argo CD found in '{}' but it is namespace-scoped and not supported for attachment yet.",
|
||||
ns
|
||||
);
|
||||
warn!("{}", msg);
|
||||
return Err(InterpretError::new(msg));
|
||||
}
|
||||
};
|
||||
|
||||
info!("ArgoCD will be installed : {must_install} . Current argocd status : {current:?} ");
|
||||
|
||||
if must_install {
|
||||
let helm_score = argo_helm_chart_score(&desired_ns, self.score.openshift, &domain);
|
||||
info!(
|
||||
"Installing Argo CD via Helm into namespace '{}' ...",
|
||||
desired_ns
|
||||
);
|
||||
helm_score.interpret(inventory, topology).await?;
|
||||
info!("Argo CD install complete in '{}'.", desired_ns);
|
||||
}
|
||||
|
||||
let yamls: Vec<serde_yaml::Value> = self
|
||||
.argo_apps
|
||||
.iter()
|
||||
.map(|a| a.to_yaml(Some(&control_ns)))
|
||||
.collect();
|
||||
info!(
|
||||
"Applying {} Argo application object(s) into control namespace '{}'.",
|
||||
yamls.len(),
|
||||
control_ns
|
||||
);
|
||||
k8s_client
|
||||
.apply_yaml_many(&yamls, Some(control_ns.as_str()))
|
||||
.apply_yaml_many(&self.argo_apps.iter().map(|a| a.to_yaml()).collect(), None)
|
||||
.await
|
||||
.map_err(|e| InterpretError::new(format!("Failed applying Argo CRs: {e}")))?;
|
||||
.unwrap();
|
||||
|
||||
Ok(Outcome::success_with_details(
|
||||
format!(
|
||||
"ArgoCD {} {}",
|
||||
self.argo_apps.len(),
|
||||
if self.argo_apps.len() == 1 {
|
||||
"application"
|
||||
} else {
|
||||
"applications"
|
||||
match self.argo_apps.len() {
|
||||
1 => "application",
|
||||
_ => "applications",
|
||||
}
|
||||
),
|
||||
vec![
|
||||
format!("control_namespace={}", control_ns),
|
||||
format!("argo ui: http://{}", domain),
|
||||
],
|
||||
vec![format!("argo application: http://{}", domain)],
|
||||
))
|
||||
}
|
||||
|
||||
@@ -158,7 +86,7 @@ impl<T: Topology + K8sclient + HelmCommand + Ingress> Interpret<T> for ArgoInter
|
||||
}
|
||||
|
||||
fn get_version(&self) -> Version {
|
||||
Version::from("0.1.0").unwrap()
|
||||
todo!()
|
||||
}
|
||||
|
||||
fn get_status(&self) -> InterpretStatus {
|
||||
@@ -166,7 +94,39 @@ impl<T: Topology + K8sclient + HelmCommand + Ingress> Interpret<T> for ArgoInter
|
||||
}
|
||||
|
||||
fn get_children(&self) -> Vec<Id> {
|
||||
vec![]
|
||||
todo!()
|
||||
}
|
||||
}
|
||||
|
||||
impl ArgoInterpret {
|
||||
pub async fn get_host_domain(
|
||||
&self,
|
||||
client: Arc<K8sClient>,
|
||||
openshift: bool,
|
||||
) -> Result<String, InterpretError> {
|
||||
//This should be the job of the topology to determine if we are in
|
||||
//openshift, potentially we need on openshift topology the same way we create a
|
||||
//localhosttopology
|
||||
match openshift {
|
||||
true => {
|
||||
let gvk = GroupVersionKind {
|
||||
group: "operator.openshift.io".into(),
|
||||
version: "v1".into(),
|
||||
kind: "IngressController".into(),
|
||||
};
|
||||
let ic = client
|
||||
.get_resource_json_value("default", Some("openshift-ingress-operator"), &gvk)
|
||||
.await?;
|
||||
|
||||
match ic.data["status"]["domain"].as_str() {
|
||||
Some(domain) => return Ok(domain.to_string()),
|
||||
None => return Err(InterpretError::new("Could not find domain".to_string())),
|
||||
}
|
||||
}
|
||||
false => {
|
||||
todo!()
|
||||
}
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -2,7 +2,11 @@ use crate::modules::application::{
|
||||
Application, ApplicationFeature, InstallationError, InstallationOutcome,
|
||||
};
|
||||
use crate::modules::monitoring::application_monitoring::application_monitoring_score::ApplicationMonitoringScore;
|
||||
use crate::modules::monitoring::grafana::grafana::Grafana;
|
||||
use crate::modules::monitoring::kube_prometheus::crd::crd_alertmanager_config::CRDPrometheus;
|
||||
use crate::modules::monitoring::kube_prometheus::crd::service_monitor::{
|
||||
ServiceMonitor, ServiceMonitorSpec,
|
||||
};
|
||||
use crate::topology::MultiTargetTopology;
|
||||
use crate::topology::ingress::Ingress;
|
||||
use crate::{
|
||||
@@ -14,7 +18,7 @@ use crate::{
|
||||
topology::{HelmCommand, K8sclient, Topology, tenant::TenantManager},
|
||||
};
|
||||
use crate::{
|
||||
modules::prometheus::prometheus::PrometheusApplicationMonitoring,
|
||||
modules::prometheus::prometheus::PrometheusMonitoring,
|
||||
topology::oberservability::monitoring::AlertReceiver,
|
||||
};
|
||||
use async_trait::async_trait;
|
||||
@@ -22,6 +26,7 @@ use base64::{Engine as _, engine::general_purpose};
|
||||
use harmony_secret::SecretManager;
|
||||
use harmony_secret_derive::Secret;
|
||||
use harmony_types::net::Url;
|
||||
use kube::api::ObjectMeta;
|
||||
use log::{debug, info};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::sync::Arc;
|
||||
@@ -40,7 +45,8 @@ impl<
|
||||
+ TenantManager
|
||||
+ K8sclient
|
||||
+ MultiTargetTopology
|
||||
+ PrometheusApplicationMonitoring<CRDPrometheus>
|
||||
+ PrometheusMonitoring<CRDPrometheus>
|
||||
+ Grafana
|
||||
+ Ingress
|
||||
+ std::fmt::Debug,
|
||||
> ApplicationFeature<T> for Monitoring
|
||||
@@ -57,10 +63,20 @@ impl<
|
||||
.unwrap_or_else(|| self.application.name());
|
||||
let domain = topology.get_domain("ntfy").await.unwrap();
|
||||
|
||||
let app_service_monitor = ServiceMonitor {
|
||||
metadata: ObjectMeta {
|
||||
name: Some(self.application.name()),
|
||||
namespace: Some(namespace.clone()),
|
||||
..Default::default()
|
||||
},
|
||||
spec: ServiceMonitorSpec::default(),
|
||||
};
|
||||
|
||||
let mut alerting_score = ApplicationMonitoringScore {
|
||||
sender: CRDPrometheus {
|
||||
namespace: namespace.clone(),
|
||||
client: topology.k8s_client().await.unwrap(),
|
||||
service_monitor: vec![app_service_monitor],
|
||||
},
|
||||
application: self.application.clone(),
|
||||
receivers: self.alert_receiver.clone(),
|
||||
|
||||
@@ -10,11 +10,12 @@ use crate::{
|
||||
data::Version,
|
||||
inventory::Inventory,
|
||||
modules::application::{
|
||||
features::{ArgoApplication, ArgoHelmScore}, webapp::Webapp, ApplicationFeature, HelmPackage, InstallationError, InstallationOutcome, OCICompliant
|
||||
ApplicationFeature, HelmPackage, InstallationError, InstallationOutcome, OCICompliant,
|
||||
features::{ArgoApplication, ArgoHelmScore},
|
||||
},
|
||||
score::Score,
|
||||
topology::{
|
||||
ingress::Ingress, DeploymentTarget, HelmCommand, K8sclient, MultiTargetTopology, Topology
|
||||
DeploymentTarget, HelmCommand, K8sclient, MultiTargetTopology, Topology, ingress::Ingress,
|
||||
},
|
||||
};
|
||||
|
||||
@@ -46,11 +47,11 @@ use crate::{
|
||||
/// - ArgoCD to install/upgrade/rollback/inspect k8s resources
|
||||
/// - Kubernetes for runtime orchestration
|
||||
#[derive(Debug, Default, Clone)]
|
||||
pub struct PackagingDeployment<A: OCICompliant + HelmPackage + Webapp> {
|
||||
pub struct PackagingDeployment<A: OCICompliant + HelmPackage> {
|
||||
pub application: Arc<A>,
|
||||
}
|
||||
|
||||
impl<A: OCICompliant + HelmPackage + Webapp> PackagingDeployment<A> {
|
||||
impl<A: OCICompliant + HelmPackage> PackagingDeployment<A> {
|
||||
async fn deploy_to_local_k3d(
|
||||
&self,
|
||||
app_name: String,
|
||||
@@ -136,7 +137,7 @@ impl<A: OCICompliant + HelmPackage + Webapp> PackagingDeployment<A> {
|
||||
|
||||
#[async_trait]
|
||||
impl<
|
||||
A: OCICompliant + HelmPackage + Webapp + Clone + 'static,
|
||||
A: OCICompliant + HelmPackage + Clone + 'static,
|
||||
T: Topology + HelmCommand + MultiTargetTopology + K8sclient + Ingress + 'static,
|
||||
> ApplicationFeature<T> for PackagingDeployment<A>
|
||||
{
|
||||
@@ -145,15 +146,10 @@ impl<
|
||||
topology: &T,
|
||||
) -> Result<InstallationOutcome, InstallationError> {
|
||||
let image = self.application.image_name();
|
||||
|
||||
let domain = if topology.current_target() == DeploymentTarget::Production {
|
||||
self.application.dns()
|
||||
} else {
|
||||
topology
|
||||
let domain = topology
|
||||
.get_domain(&self.application.name())
|
||||
.await
|
||||
.map_err(|e| e.to_string())?
|
||||
};
|
||||
.map_err(|e| e.to_string())?;
|
||||
|
||||
// TODO Write CI/CD workflow files
|
||||
// we can autotedect the CI type using the remote url (default to github action for github
|
||||
@@ -198,7 +194,7 @@ impl<
|
||||
openshift: true,
|
||||
argo_apps: vec![ArgoApplication::from(CDApplicationConfig {
|
||||
// helm pull oci://hub.nationtech.io/harmony/harmony-example-rust-webapp-chart --version 0.1.0
|
||||
version: Version::from("0.2.1").unwrap(),
|
||||
version: Version::from("0.1.0").unwrap(),
|
||||
helm_chart_repo_url: "hub.nationtech.io/harmony".to_string(),
|
||||
helm_chart_name: format!("{}-chart", self.application.name()),
|
||||
values_overrides: None,
|
||||
|
||||
@@ -3,6 +3,7 @@ use std::sync::Arc;
|
||||
use crate::modules::application::{
|
||||
Application, ApplicationFeature, InstallationError, InstallationOutcome,
|
||||
};
|
||||
use crate::modules::monitoring::application_monitoring::application_monitoring_score::ApplicationMonitoringScore;
|
||||
use crate::modules::monitoring::application_monitoring::rhobs_application_monitoring_score::ApplicationRHOBMonitoringScore;
|
||||
|
||||
use crate::modules::monitoring::kube_prometheus::crd::rhob_alertmanager_config::RHOBObservability;
|
||||
@@ -17,7 +18,7 @@ use crate::{
|
||||
topology::{HelmCommand, K8sclient, Topology, tenant::TenantManager},
|
||||
};
|
||||
use crate::{
|
||||
modules::prometheus::prometheus::PrometheusApplicationMonitoring,
|
||||
modules::prometheus::prometheus::PrometheusMonitoring,
|
||||
topology::oberservability::monitoring::AlertReceiver,
|
||||
};
|
||||
use async_trait::async_trait;
|
||||
@@ -41,7 +42,7 @@ impl<
|
||||
+ MultiTargetTopology
|
||||
+ Ingress
|
||||
+ std::fmt::Debug
|
||||
+ PrometheusApplicationMonitoring<RHOBObservability>,
|
||||
+ PrometheusMonitoring<RHOBObservability>,
|
||||
> ApplicationFeature<T> for Monitoring
|
||||
{
|
||||
async fn ensure_installed(
|
||||
|
||||
@@ -2,7 +2,6 @@ mod feature;
|
||||
pub mod features;
|
||||
pub mod oci;
|
||||
mod rust;
|
||||
mod webapp;
|
||||
use std::sync::Arc;
|
||||
|
||||
pub use feature::*;
|
||||
|
||||
@@ -16,7 +16,6 @@ use tar::{Builder, Header};
|
||||
use walkdir::WalkDir;
|
||||
|
||||
use crate::config::{REGISTRY_PROJECT, REGISTRY_URL};
|
||||
use crate::modules::application::webapp::Webapp;
|
||||
use crate::{score::Score, topology::Topology};
|
||||
|
||||
use super::{Application, ApplicationFeature, ApplicationInterpret, HelmPackage, OCICompliant};
|
||||
@@ -61,10 +60,6 @@ pub struct RustWebapp {
|
||||
pub project_root: PathBuf,
|
||||
pub service_port: u32,
|
||||
pub framework: Option<RustWebFramework>,
|
||||
/// Host name that will be used in production environment.
|
||||
///
|
||||
/// This is the place to put the public host name if this is a public facing webapp.
|
||||
pub dns: String,
|
||||
}
|
||||
|
||||
impl Application for RustWebapp {
|
||||
@@ -73,12 +68,6 @@ impl Application for RustWebapp {
|
||||
}
|
||||
}
|
||||
|
||||
impl Webapp for RustWebapp {
|
||||
fn dns(&self) -> String {
|
||||
self.dns.clone()
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl HelmPackage for RustWebapp {
|
||||
async fn build_push_helm_package(
|
||||
@@ -205,10 +194,10 @@ impl RustWebapp {
|
||||
Some(body_full(tar_data.into())),
|
||||
);
|
||||
|
||||
while let Some(msg) = image_build_stream.next().await {
|
||||
while let Some(mut msg) = image_build_stream.next().await {
|
||||
trace!("Got bollard msg {msg:?}");
|
||||
match msg {
|
||||
Ok(msg) => {
|
||||
Ok(mut msg) => {
|
||||
if let Some(progress) = msg.progress_detail {
|
||||
info!(
|
||||
"Build progress {}/{}",
|
||||
@@ -268,6 +257,7 @@ impl RustWebapp {
|
||||
".harmony_generated",
|
||||
"harmony",
|
||||
"node_modules",
|
||||
"Dockerfile.harmony",
|
||||
];
|
||||
let mut entries: Vec<_> = WalkDir::new(project_root)
|
||||
.into_iter()
|
||||
@@ -471,53 +461,52 @@ impl RustWebapp {
|
||||
|
||||
let (image_repo, image_tag) = image_url.rsplit_once(':').unwrap_or((image_url, "latest"));
|
||||
|
||||
let app_name = &self.name;
|
||||
let service_port = self.service_port;
|
||||
// Create Chart.yaml
|
||||
let chart_yaml = format!(
|
||||
r#"
|
||||
apiVersion: v2
|
||||
name: {chart_name}
|
||||
description: A Helm chart for the {app_name} web application.
|
||||
name: {}
|
||||
description: A Helm chart for the {} web application.
|
||||
type: application
|
||||
version: 0.2.1
|
||||
appVersion: "{image_tag}"
|
||||
version: 0.1.0
|
||||
appVersion: "{}"
|
||||
"#,
|
||||
chart_name, self.name, image_tag
|
||||
);
|
||||
fs::write(chart_dir.join("Chart.yaml"), chart_yaml)?;
|
||||
|
||||
// Create values.yaml
|
||||
let values_yaml = format!(
|
||||
r#"
|
||||
# Default values for {chart_name}.
|
||||
# Default values for {}.
|
||||
# This is a YAML-formatted file.
|
||||
# Declare variables to be passed into your templates.
|
||||
|
||||
replicaCount: 1
|
||||
|
||||
image:
|
||||
repository: {image_repo}
|
||||
repository: {}
|
||||
pullPolicy: IfNotPresent
|
||||
# Overridden by the chart's appVersion
|
||||
tag: "{image_tag}"
|
||||
tag: "{}"
|
||||
|
||||
service:
|
||||
type: ClusterIP
|
||||
port: {service_port}
|
||||
port: {}
|
||||
|
||||
ingress:
|
||||
enabled: true
|
||||
tls: true
|
||||
# Annotations for cert-manager to handle SSL.
|
||||
annotations:
|
||||
# Add other annotations like nginx ingress class if needed
|
||||
# kubernetes.io/ingress.class: nginx
|
||||
hosts:
|
||||
- host: {domain}
|
||||
- host: {}
|
||||
paths:
|
||||
- path: /
|
||||
pathType: ImplementationSpecific
|
||||
"#,
|
||||
chart_name, image_repo, image_tag, self.service_port, domain,
|
||||
);
|
||||
fs::write(chart_dir.join("values.yaml"), values_yaml)?;
|
||||
|
||||
@@ -594,11 +583,7 @@ spec:
|
||||
);
|
||||
fs::write(templates_dir.join("deployment.yaml"), deployment_yaml)?;
|
||||
|
||||
let service_port = self.service_port;
|
||||
|
||||
// Create templates/ingress.yaml
|
||||
// TODO get issuer name and tls config from topology as it may be different from one
|
||||
// cluster to another, also from one version to another
|
||||
let ingress_yaml = format!(
|
||||
r#"
|
||||
{{{{- if $.Values.ingress.enabled -}}}}
|
||||
@@ -611,11 +596,13 @@ metadata:
|
||||
spec:
|
||||
{{{{- if $.Values.ingress.tls }}}}
|
||||
tls:
|
||||
- secretName: {{{{ include "chart.fullname" . }}}}-tls
|
||||
hosts:
|
||||
{{{{- range $.Values.ingress.hosts }}}}
|
||||
- {{{{ .host | quote }}}}
|
||||
{{{{- range $.Values.ingress.tls }}}}
|
||||
- hosts:
|
||||
{{{{- range .hosts }}}}
|
||||
- {{{{ . | quote }}}}
|
||||
{{{{- end }}}}
|
||||
secretName: {{{{ .secretName }}}}
|
||||
{{{{- end }}}}
|
||||
{{{{- end }}}}
|
||||
rules:
|
||||
{{{{- range $.Values.ingress.hosts }}}}
|
||||
@@ -629,11 +616,12 @@ spec:
|
||||
service:
|
||||
name: {{{{ include "chart.fullname" $ }}}}
|
||||
port:
|
||||
number: {{{{ $.Values.service.port | default {service_port} }}}}
|
||||
number: {{{{ $.Values.service.port | default {} }}}}
|
||||
{{{{- end }}}}
|
||||
{{{{- end }}}}
|
||||
{{{{- end }}}}
|
||||
"#,
|
||||
self.service_port
|
||||
);
|
||||
fs::write(templates_dir.join("ingress.yaml"), ingress_yaml)?;
|
||||
|
||||
|
||||
@@ -1,7 +0,0 @@
|
||||
use super::Application;
|
||||
use async_trait::async_trait;
|
||||
|
||||
#[async_trait]
|
||||
pub trait Webapp: Application {
|
||||
fn dns(&self) -> String;
|
||||
}
|
||||
@@ -1,203 +0,0 @@
|
||||
use std::sync::Arc;
|
||||
|
||||
use log::{debug, info};
|
||||
|
||||
use crate::{interpret::InterpretError, topology::k8s::K8sClient};
|
||||
|
||||
#[derive(Clone, Debug, PartialEq, Eq)]
|
||||
pub enum ArgoScope {
|
||||
ClusterWide(String),
|
||||
NamespaceScoped(String),
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct DiscoveredArgo {
|
||||
pub control_namespace: String,
|
||||
pub scope: ArgoScope,
|
||||
pub has_crds: bool,
|
||||
pub has_applicationset: bool,
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, PartialEq, Eq)]
|
||||
pub enum ArgoDeploymentType {
|
||||
NotInstalled,
|
||||
AvailableInDesiredNamespace(String),
|
||||
InstalledClusterWide(String),
|
||||
InstalledNamespaceScoped(String),
|
||||
}
|
||||
|
||||
pub async fn discover_argo_all(
|
||||
k8s: &Arc<K8sClient>,
|
||||
) -> Result<Vec<DiscoveredArgo>, InterpretError> {
|
||||
use log::{debug, info, trace, warn};
|
||||
|
||||
trace!("Starting Argo discovery");
|
||||
|
||||
// CRDs
|
||||
let mut has_crds = true;
|
||||
let required_crds = vec!["applications.argoproj.io", "appprojects.argoproj.io"];
|
||||
trace!("Checking required Argo CRDs: {:?}", required_crds);
|
||||
|
||||
for crd in required_crds {
|
||||
trace!("Verifying CRD presence: {crd}");
|
||||
let crd_exists = k8s.has_crd(crd).await.map_err(|e| {
|
||||
InterpretError::new(format!("Failed to verify existence of CRD {crd}: {e}"))
|
||||
})?;
|
||||
|
||||
debug!("CRD {crd} exists: {crd_exists}");
|
||||
if !crd_exists {
|
||||
info!(
|
||||
"Missing Argo CRD {crd}, looks like Argo CD is not installed (or partially installed)"
|
||||
);
|
||||
has_crds = false;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
trace!(
|
||||
"Listing namespaces with healthy Argo CD deployments using selector app.kubernetes.io/part-of=argocd"
|
||||
);
|
||||
let mut candidate_namespaces = k8s
|
||||
.list_namespaces_with_healthy_deployments("app.kubernetes.io/part-of=argocd")
|
||||
.await
|
||||
.map_err(|e| InterpretError::new(format!("List healthy argocd deployments: {e}")))?;
|
||||
trace!(
|
||||
"Listing namespaces with healthy Argo CD deployments using selector app.kubernetes.io/name=argo-cd"
|
||||
);
|
||||
candidate_namespaces.append(
|
||||
&mut k8s
|
||||
.list_namespaces_with_healthy_deployments("app.kubernetes.io/name=argo-cd")
|
||||
.await
|
||||
.map_err(|e| InterpretError::new(format!("List healthy argocd deployments: {e}")))?,
|
||||
);
|
||||
|
||||
debug!(
|
||||
"Discovered {} candidate namespace(s) for Argo CD: {:?}",
|
||||
candidate_namespaces.len(),
|
||||
candidate_namespaces
|
||||
);
|
||||
|
||||
let mut found = Vec::new();
|
||||
for ns in candidate_namespaces {
|
||||
trace!("Evaluating namespace '{ns}' for Argo CD instance");
|
||||
|
||||
// Require the application-controller to be healthy (sanity check)
|
||||
trace!(
|
||||
"Checking healthy deployment with label app.kubernetes.io/name=argocd-application-controller in namespace '{ns}'"
|
||||
);
|
||||
let controller_ok = k8s
|
||||
.has_healthy_deployment_with_label(
|
||||
&ns,
|
||||
"app.kubernetes.io/name=argocd-application-controller",
|
||||
)
|
||||
.await
|
||||
.unwrap_or_else(|e| {
|
||||
warn!(
|
||||
"Error while checking application-controller health in namespace '{ns}': {e}"
|
||||
);
|
||||
false
|
||||
}) || k8s
|
||||
.has_healthy_deployment_with_label(
|
||||
&ns,
|
||||
"app.kubernetes.io/component=controller",
|
||||
)
|
||||
.await
|
||||
.unwrap_or_else(|e| {
|
||||
warn!(
|
||||
"Error while checking application-controller health in namespace '{ns}': {e}"
|
||||
);
|
||||
false
|
||||
});
|
||||
debug!("Namespace '{ns}': application-controller healthy = {controller_ok}");
|
||||
|
||||
if !controller_ok {
|
||||
trace!("Skipping namespace '{ns}' because application-controller is not healthy");
|
||||
continue;
|
||||
}
|
||||
|
||||
trace!("Determining Argo CD scope for namespace '{ns}' (cluster-wide vs namespace-scoped)");
|
||||
let scope = match k8s.is_argocd_cluster_wide(&ns).await {
|
||||
Ok(true) => {
|
||||
debug!("Namespace '{ns}' identified as cluster-wide Argo CD control plane");
|
||||
ArgoScope::ClusterWide(ns.to_string())
|
||||
}
|
||||
Ok(false) => {
|
||||
debug!("Namespace '{ns}' identified as namespace-scoped Argo CD control plane");
|
||||
ArgoScope::NamespaceScoped(ns.to_string())
|
||||
}
|
||||
Err(e) => {
|
||||
warn!(
|
||||
"Failed to determine Argo CD scope for namespace '{ns}': {e}. Assuming namespace-scoped."
|
||||
);
|
||||
ArgoScope::NamespaceScoped(ns.to_string())
|
||||
}
|
||||
};
|
||||
|
||||
trace!("Checking optional ApplicationSet CRD (applicationsets.argoproj.io)");
|
||||
let has_applicationset = match k8s.has_crd("applicationsets.argoproj.io").await {
|
||||
Ok(v) => {
|
||||
debug!("applicationsets.argoproj.io present: {v}");
|
||||
v
|
||||
}
|
||||
Err(e) => {
|
||||
warn!("Failed to check applicationsets.argoproj.io CRD: {e}. Assuming absent.");
|
||||
false
|
||||
}
|
||||
};
|
||||
|
||||
let argo = DiscoveredArgo {
|
||||
control_namespace: ns.clone(),
|
||||
scope,
|
||||
has_crds,
|
||||
has_applicationset,
|
||||
};
|
||||
|
||||
debug!("Discovered Argo instance in '{ns}': {argo:?}");
|
||||
found.push(argo);
|
||||
}
|
||||
|
||||
if found.is_empty() {
|
||||
info!("No Argo CD installations discovered");
|
||||
} else {
|
||||
info!(
|
||||
"Argo CD discovery complete: {} instance(s) found",
|
||||
found.len()
|
||||
);
|
||||
}
|
||||
|
||||
Ok(found)
|
||||
}
|
||||
|
||||
pub async fn detect_argo_deployment_type(
|
||||
k8s: &Arc<K8sClient>,
|
||||
desired_namespace: &str,
|
||||
) -> Result<ArgoDeploymentType, InterpretError> {
|
||||
let discovered = discover_argo_all(k8s).await?;
|
||||
debug!("Discovered argo instances {discovered:?}");
|
||||
|
||||
if discovered.is_empty() {
|
||||
return Ok(ArgoDeploymentType::NotInstalled);
|
||||
}
|
||||
|
||||
if let Some(d) = discovered
|
||||
.iter()
|
||||
.find(|d| d.control_namespace == desired_namespace)
|
||||
{
|
||||
return Ok(ArgoDeploymentType::AvailableInDesiredNamespace(
|
||||
d.control_namespace.clone(),
|
||||
));
|
||||
}
|
||||
|
||||
if let Some(d) = discovered
|
||||
.iter()
|
||||
.find(|d| matches!(d.scope, ArgoScope::ClusterWide(_)))
|
||||
{
|
||||
return Ok(ArgoDeploymentType::InstalledClusterWide(
|
||||
d.control_namespace.clone(),
|
||||
));
|
||||
}
|
||||
|
||||
Ok(ArgoDeploymentType::InstalledNamespaceScoped(
|
||||
discovered[0].control_namespace.clone(),
|
||||
))
|
||||
}
|
||||
@@ -19,8 +19,11 @@ pub struct DhcpScore {
|
||||
pub host_binding: Vec<HostBinding>,
|
||||
pub next_server: Option<IpAddress>,
|
||||
pub boot_filename: Option<String>,
|
||||
/// Boot filename to be provided to PXE clients identifying as BIOS
|
||||
pub filename: Option<String>,
|
||||
/// Boot filename to be provided to PXE clients identifying as uefi but NOT iPXE
|
||||
pub filename64: Option<String>,
|
||||
/// Boot filename to be provided to PXE clients identifying as iPXE
|
||||
pub filenameipxe: Option<String>,
|
||||
pub dhcp_range: (IpAddress, IpAddress),
|
||||
pub domain: Option<String>,
|
||||
|
||||
@@ -5,11 +5,10 @@ use serde::{Deserialize, Serialize};
|
||||
|
||||
use crate::{
|
||||
data::Version,
|
||||
hardware::PhysicalHost,
|
||||
infra::inventory::InventoryRepositoryFactory,
|
||||
interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
|
||||
inventory::{HostRole, Inventory},
|
||||
modules::inventory::LaunchDiscoverInventoryAgentScore,
|
||||
modules::inventory::{HarmonyDiscoveryStrategy, LaunchDiscoverInventoryAgentScore},
|
||||
score::Score,
|
||||
topology::Topology,
|
||||
};
|
||||
@@ -17,11 +16,13 @@ use crate::{
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct DiscoverHostForRoleScore {
|
||||
pub role: HostRole,
|
||||
pub number_desired_hosts: i16,
|
||||
pub discovery_strategy : HarmonyDiscoveryStrategy,
|
||||
}
|
||||
|
||||
impl<T: Topology> Score<T> for DiscoverHostForRoleScore {
|
||||
fn name(&self) -> String {
|
||||
"DiscoverInventoryAgentScore".to_string()
|
||||
format!("DiscoverHostForRoleScore({:?})", self.role)
|
||||
}
|
||||
|
||||
fn create_interpret(&self) -> Box<dyn Interpret<T>> {
|
||||
@@ -48,13 +49,15 @@ impl<T: Topology> Interpret<T> for DiscoverHostForRoleInterpret {
|
||||
);
|
||||
LaunchDiscoverInventoryAgentScore {
|
||||
discovery_timeout: None,
|
||||
discovery_strategy: self.score.discovery_strategy.clone(),
|
||||
}
|
||||
.interpret(inventory, topology)
|
||||
.await?;
|
||||
|
||||
let host: PhysicalHost;
|
||||
let mut chosen_hosts = vec![];
|
||||
let host_repo = InventoryRepositoryFactory::build().await?;
|
||||
|
||||
let mut assigned_hosts = 0;
|
||||
loop {
|
||||
let all_hosts = host_repo.get_all_hosts().await?;
|
||||
|
||||
@@ -74,12 +77,25 @@ impl<T: Topology> Interpret<T> for DiscoverHostForRoleInterpret {
|
||||
|
||||
match ans {
|
||||
Ok(choice) => {
|
||||
info!("Selected {} as the bootstrap node.", choice.summary());
|
||||
info!(
|
||||
"Assigned role {:?} for node {}",
|
||||
self.score.role,
|
||||
choice.summary()
|
||||
);
|
||||
host_repo
|
||||
.save_role_mapping(&self.score.role, &choice)
|
||||
.await?;
|
||||
host = choice;
|
||||
break;
|
||||
chosen_hosts.push(choice);
|
||||
assigned_hosts += 1;
|
||||
|
||||
info!(
|
||||
"Found {assigned_hosts} hosts for role {:?}",
|
||||
self.score.role
|
||||
);
|
||||
|
||||
if assigned_hosts == self.score.number_desired_hosts {
|
||||
break;
|
||||
}
|
||||
}
|
||||
Err(inquire::InquireError::OperationCanceled) => {
|
||||
info!("Refresh requested. Fetching list of discovered hosts again...");
|
||||
@@ -90,17 +106,19 @@ impl<T: Topology> Interpret<T> for DiscoverHostForRoleInterpret {
|
||||
"Failed to select node for role {:?} : {}",
|
||||
self.score.role, e
|
||||
);
|
||||
return Err(InterpretError::new(format!(
|
||||
"Could not select host : {}",
|
||||
e.to_string()
|
||||
)));
|
||||
return Err(InterpretError::new(format!("Could not select host : {e}")));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Ok(Outcome::success(format!(
|
||||
"Successfully discovered host {} for role {:?}",
|
||||
host.summary(),
|
||||
"Successfully discovered {} hosts {} for role {:?}",
|
||||
self.score.number_desired_hosts,
|
||||
chosen_hosts
|
||||
.iter()
|
||||
.map(|h| h.summary())
|
||||
.collect::<Vec<String>>()
|
||||
.join(", "),
|
||||
self.score.role
|
||||
)))
|
||||
}
|
||||
|
||||
@@ -1,6 +1,10 @@
|
||||
mod discovery;
|
||||
pub mod inspect;
|
||||
use std::net::Ipv4Addr;
|
||||
|
||||
use cidr::{Ipv4Cidr, Ipv4Inet};
|
||||
pub use discovery::*;
|
||||
use tokio::time::{Duration, timeout};
|
||||
|
||||
use async_trait::async_trait;
|
||||
use harmony_inventory_agent::local_presence::DiscoveryEvent;
|
||||
@@ -24,6 +28,7 @@ use harmony_types::id::Id;
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct LaunchDiscoverInventoryAgentScore {
|
||||
pub discovery_timeout: Option<u64>,
|
||||
pub discovery_strategy: HarmonyDiscoveryStrategy,
|
||||
}
|
||||
|
||||
impl<T: Topology> Score<T> for LaunchDiscoverInventoryAgentScore {
|
||||
@@ -43,6 +48,12 @@ struct DiscoverInventoryAgentInterpret {
|
||||
score: LaunchDiscoverInventoryAgentScore,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub enum HarmonyDiscoveryStrategy {
|
||||
MDNS,
|
||||
SUBNET { cidr: cidr::Ipv4Cidr, port: u16 },
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl<T: Topology> Interpret<T> for DiscoverInventoryAgentInterpret {
|
||||
async fn execute(
|
||||
@@ -57,6 +68,37 @@ impl<T: Topology> Interpret<T> for DiscoverInventoryAgentInterpret {
|
||||
),
|
||||
};
|
||||
|
||||
match self.score.discovery_strategy {
|
||||
HarmonyDiscoveryStrategy::MDNS => self.launch_mdns_discovery().await,
|
||||
HarmonyDiscoveryStrategy::SUBNET { cidr, port } => {
|
||||
self.launch_cidr_discovery(&cidr, port).await
|
||||
}
|
||||
};
|
||||
|
||||
Ok(Outcome::success(
|
||||
"Discovery process completed successfully".to_string(),
|
||||
))
|
||||
}
|
||||
|
||||
fn get_name(&self) -> InterpretName {
|
||||
InterpretName::DiscoverInventoryAgent
|
||||
}
|
||||
|
||||
fn get_version(&self) -> Version {
|
||||
todo!()
|
||||
}
|
||||
|
||||
fn get_status(&self) -> InterpretStatus {
|
||||
todo!()
|
||||
}
|
||||
|
||||
fn get_children(&self) -> Vec<Id> {
|
||||
todo!()
|
||||
}
|
||||
}
|
||||
|
||||
impl DiscoverInventoryAgentInterpret {
|
||||
async fn launch_mdns_discovery(&self) {
|
||||
harmony_inventory_agent::local_presence::discover_agents(
|
||||
self.score.discovery_timeout,
|
||||
|event: DiscoveryEvent| -> Result<(), String> {
|
||||
@@ -90,12 +132,12 @@ impl<T: Topology> Interpret<T> for DiscoverInventoryAgentInterpret {
|
||||
// refactoring to do it now
|
||||
let harmony_inventory_agent::hwinfo::PhysicalHost {
|
||||
storage_drives,
|
||||
storage_controller: _,
|
||||
storage_controller,
|
||||
memory_modules,
|
||||
cpus,
|
||||
chipset: _,
|
||||
chipset,
|
||||
network_interfaces,
|
||||
management_interface: _,
|
||||
management_interface,
|
||||
host_uuid,
|
||||
} = host;
|
||||
|
||||
@@ -112,6 +154,8 @@ impl<T: Topology> Interpret<T> for DiscoverInventoryAgentInterpret {
|
||||
cpus,
|
||||
};
|
||||
|
||||
// FIXME only save the host when it is new or something changed in it.
|
||||
// we currently are saving the host every time it is discovered.
|
||||
let repo = InventoryRepositoryFactory::build()
|
||||
.await
|
||||
.map_err(|e| format!("Could not build repository : {e}"))
|
||||
@@ -132,25 +176,111 @@ impl<T: Topology> Interpret<T> for DiscoverInventoryAgentInterpret {
|
||||
Ok(())
|
||||
},
|
||||
)
|
||||
.await;
|
||||
Ok(Outcome::success(
|
||||
"Discovery process completed successfully".to_string(),
|
||||
))
|
||||
.await
|
||||
}
|
||||
|
||||
fn get_name(&self) -> InterpretName {
|
||||
InterpretName::DiscoverInventoryAgent
|
||||
}
|
||||
// async fn launch_cidr_discovery(&self, cidr : &Ipv4Cidr, port: u16) {
|
||||
// todo!("launnch cidr discovery for {cidr} : {port}
|
||||
// - Iterate over all possible addresses in cidr
|
||||
// - make calls in batches of 20 attempting to reach harmony inventory agent on <addr, port> using same as above harmony_inventory_agent::client::get_host_inventory(&address, port)
|
||||
// - Log warn when response is 404, it means the port was used by something else unexpected
|
||||
// - Log error when response is 5xx
|
||||
// - Log debug when no response (timeout 15 seconds)
|
||||
// - Log info when found and response is 2xx
|
||||
// ");
|
||||
// }
|
||||
async fn launch_cidr_discovery(&self, cidr: &Ipv4Cidr, port: u16) {
|
||||
let addrs: Vec<Ipv4Inet> = cidr.iter().collect();
|
||||
let total = addrs.len();
|
||||
info!(
|
||||
"Starting CIDR discovery for {} hosts on {}/{} (port {})",
|
||||
total,
|
||||
cidr.network_length(),
|
||||
cidr,
|
||||
port
|
||||
);
|
||||
|
||||
fn get_version(&self) -> Version {
|
||||
todo!()
|
||||
}
|
||||
let batch_size: usize = 20;
|
||||
let timeout_secs = 5;
|
||||
let request_timeout = Duration::from_secs(timeout_secs);
|
||||
|
||||
fn get_status(&self) -> InterpretStatus {
|
||||
todo!()
|
||||
}
|
||||
let mut current_batch = 0;
|
||||
let num_batches = addrs.len() / batch_size;
|
||||
|
||||
fn get_children(&self) -> Vec<Id> {
|
||||
todo!()
|
||||
for batch in addrs.chunks(batch_size) {
|
||||
current_batch += 1;
|
||||
info!("Starting query batch {current_batch} of {num_batches}, timeout {timeout_secs}");
|
||||
let mut tasks = Vec::with_capacity(batch.len());
|
||||
|
||||
for addr in batch {
|
||||
let addr = addr.address().to_string();
|
||||
let port = port;
|
||||
|
||||
let task = tokio::spawn(async move {
|
||||
match timeout(
|
||||
request_timeout,
|
||||
harmony_inventory_agent::client::get_host_inventory(&addr, port),
|
||||
)
|
||||
.await
|
||||
{
|
||||
Ok(Ok(host)) => {
|
||||
info!("Found and response is 2xx for {addr}:{port}");
|
||||
|
||||
// Reuse the same conversion to PhysicalHost as MDNS flow
|
||||
let harmony_inventory_agent::hwinfo::PhysicalHost {
|
||||
storage_drives,
|
||||
storage_controller,
|
||||
memory_modules,
|
||||
cpus,
|
||||
chipset,
|
||||
network_interfaces,
|
||||
management_interface,
|
||||
host_uuid,
|
||||
} = host;
|
||||
|
||||
let host = PhysicalHost {
|
||||
id: Id::from(host_uuid),
|
||||
category: HostCategory::Server,
|
||||
network: network_interfaces,
|
||||
storage: storage_drives,
|
||||
labels: vec![Label {
|
||||
name: "discovered-by".to_string(),
|
||||
value: "harmony-inventory-agent".to_string(),
|
||||
}],
|
||||
memory_modules,
|
||||
cpus,
|
||||
};
|
||||
|
||||
// Save host to inventory
|
||||
let repo = InventoryRepositoryFactory::build()
|
||||
.await
|
||||
.map_err(|e| format!("Could not build repository : {e}"))
|
||||
.unwrap();
|
||||
if let Err(e) = repo.save(&host).await {
|
||||
log::debug!("Failed to save host {}: {e}", host.id);
|
||||
} else {
|
||||
info!("Saved host id {}, summary : {}", host.id, host.summary());
|
||||
}
|
||||
}
|
||||
Ok(Err(e)) => {
|
||||
log::info!("Error querying inventory agent on {addr}:{port} : {e}");
|
||||
}
|
||||
Err(_) => {
|
||||
// Timeout for this host
|
||||
log::debug!("No response (timeout) for {addr}:{port}");
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
tasks.push(task);
|
||||
}
|
||||
|
||||
// Wait for this batch to complete
|
||||
for t in tasks {
|
||||
let _ = t.await;
|
||||
}
|
||||
}
|
||||
|
||||
info!("CIDR discovery completed");
|
||||
}
|
||||
}
|
||||
|
||||
@@ -38,13 +38,15 @@ impl<
|
||||
+ 'static
|
||||
+ Send
|
||||
+ Clone,
|
||||
T: Topology,
|
||||
T: Topology + K8sclient,
|
||||
> Score<T> for K8sResourceScore<K>
|
||||
where
|
||||
<K as kube::Resource>::DynamicType: Default,
|
||||
{
|
||||
fn create_interpret(&self) -> Box<dyn Interpret<T>> {
|
||||
todo!()
|
||||
Box::new(K8sResourceInterpret {
|
||||
score: self.clone(),
|
||||
})
|
||||
}
|
||||
|
||||
fn name(&self) -> String {
|
||||
|
||||
@@ -17,4 +17,3 @@ pub mod prometheus;
|
||||
pub mod storage;
|
||||
pub mod tenant;
|
||||
pub mod tftp;
|
||||
pub mod argocd;
|
||||
|
||||
@@ -1,21 +1,23 @@
|
||||
use std::sync::Arc;
|
||||
|
||||
use async_trait::async_trait;
|
||||
use log::debug;
|
||||
use serde::Serialize;
|
||||
|
||||
use crate::{
|
||||
data::Version,
|
||||
interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
|
||||
inventory::Inventory,
|
||||
interpret::Interpret,
|
||||
modules::{
|
||||
application::Application,
|
||||
monitoring::kube_prometheus::crd::crd_alertmanager_config::CRDPrometheus,
|
||||
prometheus::prometheus::PrometheusApplicationMonitoring,
|
||||
monitoring::{
|
||||
grafana::grafana::Grafana, kube_prometheus::crd::crd_alertmanager_config::CRDPrometheus,
|
||||
},
|
||||
prometheus::prometheus::PrometheusMonitoring,
|
||||
},
|
||||
score::Score,
|
||||
topology::{PreparationOutcome, Topology, oberservability::monitoring::AlertReceiver},
|
||||
topology::{
|
||||
K8sclient, Topology,
|
||||
oberservability::monitoring::{AlertReceiver, AlertingInterpret, ScrapeTarget},
|
||||
},
|
||||
};
|
||||
use harmony_types::id::Id;
|
||||
|
||||
#[derive(Debug, Clone, Serialize)]
|
||||
pub struct ApplicationMonitoringScore {
|
||||
@@ -24,12 +26,16 @@ pub struct ApplicationMonitoringScore {
|
||||
pub receivers: Vec<Box<dyn AlertReceiver<CRDPrometheus>>>,
|
||||
}
|
||||
|
||||
impl<T: Topology + PrometheusApplicationMonitoring<CRDPrometheus>> Score<T>
|
||||
impl<T: Topology + PrometheusMonitoring<CRDPrometheus> + K8sclient + Grafana> Score<T>
|
||||
for ApplicationMonitoringScore
|
||||
{
|
||||
fn create_interpret(&self) -> Box<dyn Interpret<T>> {
|
||||
Box::new(ApplicationMonitoringInterpret {
|
||||
score: self.clone(),
|
||||
debug!("creating alerting interpret");
|
||||
Box::new(AlertingInterpret {
|
||||
sender: self.sender.clone(),
|
||||
receivers: self.receivers.clone(),
|
||||
rules: vec![],
|
||||
scrape_targets: None,
|
||||
})
|
||||
}
|
||||
|
||||
@@ -40,55 +46,3 @@ impl<T: Topology + PrometheusApplicationMonitoring<CRDPrometheus>> Score<T>
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct ApplicationMonitoringInterpret {
|
||||
score: ApplicationMonitoringScore,
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl<T: Topology + PrometheusApplicationMonitoring<CRDPrometheus>> Interpret<T>
|
||||
for ApplicationMonitoringInterpret
|
||||
{
|
||||
async fn execute(
|
||||
&self,
|
||||
inventory: &Inventory,
|
||||
topology: &T,
|
||||
) -> Result<Outcome, InterpretError> {
|
||||
let result = topology
|
||||
.install_prometheus(
|
||||
&self.score.sender,
|
||||
inventory,
|
||||
Some(self.score.receivers.clone()),
|
||||
)
|
||||
.await;
|
||||
|
||||
match result {
|
||||
Ok(outcome) => match outcome {
|
||||
PreparationOutcome::Success { details: _ } => {
|
||||
Ok(Outcome::success("Prometheus installed".into()))
|
||||
}
|
||||
PreparationOutcome::Noop => {
|
||||
Ok(Outcome::noop("Prometheus installation skipped".into()))
|
||||
}
|
||||
},
|
||||
Err(err) => Err(InterpretError::from(err)),
|
||||
}
|
||||
}
|
||||
|
||||
fn get_name(&self) -> InterpretName {
|
||||
InterpretName::ApplicationMonitoring
|
||||
}
|
||||
|
||||
fn get_version(&self) -> Version {
|
||||
todo!()
|
||||
}
|
||||
|
||||
fn get_status(&self) -> InterpretStatus {
|
||||
todo!()
|
||||
}
|
||||
|
||||
fn get_children(&self) -> Vec<Id> {
|
||||
todo!()
|
||||
}
|
||||
}
|
||||
|
||||
@@ -12,7 +12,7 @@ use crate::{
|
||||
monitoring::kube_prometheus::crd::{
|
||||
crd_alertmanager_config::CRDPrometheus, rhob_alertmanager_config::RHOBObservability,
|
||||
},
|
||||
prometheus::prometheus::PrometheusApplicationMonitoring,
|
||||
prometheus::prometheus::PrometheusMonitoring,
|
||||
},
|
||||
score::Score,
|
||||
topology::{PreparationOutcome, Topology, oberservability::monitoring::AlertReceiver},
|
||||
@@ -26,7 +26,7 @@ pub struct ApplicationRHOBMonitoringScore {
|
||||
pub receivers: Vec<Box<dyn AlertReceiver<RHOBObservability>>>,
|
||||
}
|
||||
|
||||
impl<T: Topology + PrometheusApplicationMonitoring<RHOBObservability>> Score<T>
|
||||
impl<T: Topology + PrometheusMonitoring<RHOBObservability>> Score<T>
|
||||
for ApplicationRHOBMonitoringScore
|
||||
{
|
||||
fn create_interpret(&self) -> Box<dyn Interpret<T>> {
|
||||
@@ -49,7 +49,7 @@ pub struct ApplicationRHOBMonitoringInterpret {
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl<T: Topology + PrometheusApplicationMonitoring<RHOBObservability>> Interpret<T>
|
||||
impl<T: Topology + PrometheusMonitoring<RHOBObservability>> Interpret<T>
|
||||
for ApplicationRHOBMonitoringInterpret
|
||||
{
|
||||
async fn execute(
|
||||
|
||||
17
harmony/src/modules/monitoring/grafana/grafana.rs
Normal file
17
harmony/src/modules/monitoring/grafana/grafana.rs
Normal file
@@ -0,0 +1,17 @@
|
||||
use async_trait::async_trait;
|
||||
use k8s_openapi::Resource;
|
||||
|
||||
use crate::{
|
||||
inventory::Inventory,
|
||||
topology::{PreparationError, PreparationOutcome},
|
||||
};
|
||||
|
||||
#[async_trait]
|
||||
pub trait Grafana {
|
||||
async fn ensure_grafana_operator(
|
||||
&self,
|
||||
inventory: &Inventory,
|
||||
) -> Result<PreparationOutcome, PreparationError>;
|
||||
|
||||
async fn install_grafana(&self) -> Result<PreparationOutcome, PreparationError>;
|
||||
}
|
||||
@@ -1,27 +1,28 @@
|
||||
use harmony_macros::hurl;
|
||||
use non_blank_string_rs::NonBlankString;
|
||||
use std::str::FromStr;
|
||||
use std::{collections::HashMap, str::FromStr};
|
||||
|
||||
use crate::modules::helm::chart::HelmChartScore;
|
||||
|
||||
pub fn grafana_helm_chart_score(ns: &str) -> HelmChartScore {
|
||||
let values = r#"
|
||||
rbac:
|
||||
namespaced: true
|
||||
sidecar:
|
||||
dashboards:
|
||||
enabled: true
|
||||
"#
|
||||
.to_string();
|
||||
use crate::modules::helm::chart::{HelmChartScore, HelmRepository};
|
||||
|
||||
pub fn grafana_helm_chart_score(ns: &str, namespace_scope: bool) -> HelmChartScore {
|
||||
let mut values_overrides = HashMap::new();
|
||||
values_overrides.insert(
|
||||
NonBlankString::from_str("namespaceScope").unwrap(),
|
||||
namespace_scope.to_string(),
|
||||
);
|
||||
HelmChartScore {
|
||||
namespace: Some(NonBlankString::from_str(ns).unwrap()),
|
||||
release_name: NonBlankString::from_str("grafana").unwrap(),
|
||||
chart_name: NonBlankString::from_str("oci://ghcr.io/grafana/helm-charts/grafana").unwrap(),
|
||||
release_name: NonBlankString::from_str("grafana-operator").unwrap(),
|
||||
chart_name: NonBlankString::from_str("grafana/grafana-operator").unwrap(),
|
||||
chart_version: None,
|
||||
values_overrides: None,
|
||||
values_yaml: Some(values.to_string()),
|
||||
values_overrides: Some(values_overrides),
|
||||
values_yaml: None,
|
||||
create_namespace: true,
|
||||
install_only: true,
|
||||
repository: None,
|
||||
repository: Some(HelmRepository::new(
|
||||
"grafana".to_string(),
|
||||
hurl!("https://grafana.github.io/helm-charts"),
|
||||
true,
|
||||
)),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1 +1,2 @@
|
||||
pub mod grafana;
|
||||
pub mod helm;
|
||||
|
||||
@@ -1,12 +1,25 @@
|
||||
use std::sync::Arc;
|
||||
|
||||
use async_trait::async_trait;
|
||||
use kube::CustomResource;
|
||||
use schemars::JsonSchema;
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use crate::topology::{
|
||||
k8s::K8sClient,
|
||||
oberservability::monitoring::{AlertReceiver, AlertSender},
|
||||
use crate::{
|
||||
interpret::{InterpretError, Outcome},
|
||||
inventory::Inventory,
|
||||
modules::{
|
||||
monitoring::{
|
||||
grafana::grafana::Grafana, kube_prometheus::crd::service_monitor::ServiceMonitor,
|
||||
},
|
||||
prometheus::prometheus::PrometheusMonitoring,
|
||||
},
|
||||
topology::{
|
||||
K8sclient, Topology,
|
||||
installable::Installable,
|
||||
k8s::K8sClient,
|
||||
oberservability::monitoring::{AlertReceiver, AlertSender, ScrapeTarget},
|
||||
},
|
||||
};
|
||||
|
||||
#[derive(CustomResource, Serialize, Deserialize, Debug, Clone, JsonSchema)]
|
||||
@@ -26,6 +39,7 @@ pub struct AlertmanagerConfigSpec {
|
||||
pub struct CRDPrometheus {
|
||||
pub namespace: String,
|
||||
pub client: Arc<K8sClient>,
|
||||
pub service_monitor: Vec<ServiceMonitor>,
|
||||
}
|
||||
|
||||
impl AlertSender for CRDPrometheus {
|
||||
@@ -40,6 +54,12 @@ impl Clone for Box<dyn AlertReceiver<CRDPrometheus>> {
|
||||
}
|
||||
}
|
||||
|
||||
impl Clone for Box<dyn ScrapeTarget<CRDPrometheus>> {
|
||||
fn clone(&self) -> Self {
|
||||
self.clone_box()
|
||||
}
|
||||
}
|
||||
|
||||
impl Serialize for Box<dyn AlertReceiver<CRDPrometheus>> {
|
||||
fn serialize<S>(&self, _serializer: S) -> Result<S::Ok, S::Error>
|
||||
where
|
||||
@@ -48,3 +68,24 @@ impl Serialize for Box<dyn AlertReceiver<CRDPrometheus>> {
|
||||
todo!()
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl<T: Topology + K8sclient + PrometheusMonitoring<CRDPrometheus> + Grafana> Installable<T>
|
||||
for CRDPrometheus
|
||||
{
|
||||
async fn configure(&self, inventory: &Inventory, topology: &T) -> Result<(), InterpretError> {
|
||||
topology.ensure_grafana_operator(inventory).await?;
|
||||
topology.ensure_prometheus_operator(self, inventory).await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn ensure_installed(
|
||||
&self,
|
||||
inventory: &Inventory,
|
||||
topology: &T,
|
||||
) -> Result<(), InterpretError> {
|
||||
topology.install_grafana().await?;
|
||||
topology.install_prometheus(&self, inventory, None).await?;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
@@ -103,9 +103,34 @@ pub struct GrafanaDashboardSpec {
|
||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||
pub resync_period: Option<String>,
|
||||
|
||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||
pub datasources: Option<Vec<GrafanaDashboardDatasource>>,
|
||||
|
||||
pub instance_selector: LabelSelector,
|
||||
|
||||
pub json: String,
|
||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||
pub json: Option<String>,
|
||||
|
||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||
pub grafana_com: Option<GrafanaCom>,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, Clone, JsonSchema)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct GrafanaDashboardDatasource {
|
||||
pub input_name: String,
|
||||
pub datasource_name: String,
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------------------------------------------
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, Clone, JsonSchema)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct GrafanaCom {
|
||||
pub id: u32,
|
||||
|
||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||
pub revision: Option<u32>,
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------------------------------------------
|
||||
@@ -126,20 +151,79 @@ pub struct GrafanaDatasourceSpec {
|
||||
pub allow_cross_namespace_import: Option<bool>,
|
||||
|
||||
pub datasource: GrafanaDatasourceConfig,
|
||||
|
||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||
pub values_from: Option<Vec<GrafanaValueFrom>>,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, Clone, JsonSchema)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct GrafanaValueFrom {
|
||||
pub target_path: String,
|
||||
pub value_from: GrafanaValueSource,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, Clone, JsonSchema)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct GrafanaValueSource {
|
||||
pub secret_key_ref: GrafanaSecretKeyRef,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, Clone, JsonSchema)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct GrafanaSecretKeyRef {
|
||||
pub name: String,
|
||||
pub key: String,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, Clone, JsonSchema)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct GrafanaDatasourceConfig {
|
||||
pub access: String,
|
||||
pub database: Option<String>,
|
||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||
pub json_data: Option<BTreeMap<String, String>>,
|
||||
pub database: Option<String>,
|
||||
pub name: String,
|
||||
pub r#type: String,
|
||||
pub url: String,
|
||||
/// Represents jsonData in the GrafanaDatasource spec
|
||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||
pub json_data: Option<GrafanaDatasourceJsonData>,
|
||||
|
||||
/// Represents secureJsonData (secrets)
|
||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||
pub secure_json_data: Option<GrafanaDatasourceSecureJsonData>,
|
||||
|
||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||
pub is_default: Option<bool>,
|
||||
|
||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||
pub editable: Option<bool>,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, Clone, JsonSchema)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct GrafanaDatasourceJsonData {
|
||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||
pub time_interval: Option<String>,
|
||||
|
||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||
pub http_header_name1: Option<String>,
|
||||
|
||||
/// Disable TLS skip verification (false = verify)
|
||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||
pub tls_skip_verify: Option<bool>,
|
||||
|
||||
/// Auth type - set to "forward" for OpenShift OAuth identity
|
||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||
pub oauth_pass_thru: Option<bool>,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, Clone, JsonSchema)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct GrafanaDatasourceSecureJsonData {
|
||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||
pub http_header_value1: Option<String>,
|
||||
}
|
||||
// ------------------------------------------------------------------------------------------------
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, Clone, JsonSchema, Default)]
|
||||
|
||||
@@ -1,8 +1,12 @@
|
||||
use std::collections::BTreeMap;
|
||||
|
||||
use kube::CustomResource;
|
||||
use schemars::JsonSchema;
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use crate::modules::monitoring::kube_prometheus::crd::rhob_prometheuses::LabelSelector;
|
||||
use crate::modules::monitoring::kube_prometheus::crd::rhob_prometheuses::{
|
||||
LabelSelector, PrometheusSpec,
|
||||
};
|
||||
|
||||
/// MonitoringStack CRD for monitoring.rhobs/v1alpha1
|
||||
#[derive(CustomResource, Serialize, Deserialize, Debug, Clone, JsonSchema)]
|
||||
|
||||
@@ -100,11 +100,7 @@ impl<T: Topology + HelmCommand + K8sclient + MultiTargetTopology> Interpret<T> f
|
||||
|
||||
info!("deploying ntfy...");
|
||||
client
|
||||
.wait_until_deployment_ready(
|
||||
"ntfy".to_string(),
|
||||
Some(self.score.namespace.as_str()),
|
||||
None,
|
||||
)
|
||||
.wait_until_deployment_ready("ntfy", Some(self.score.namespace.as_str()), None)
|
||||
.await?;
|
||||
info!("ntfy deployed");
|
||||
|
||||
|
||||
@@ -114,7 +114,7 @@ impl Prometheus {
|
||||
};
|
||||
|
||||
if let Some(ns) = namespace.as_deref() {
|
||||
grafana_helm_chart_score(ns)
|
||||
grafana_helm_chart_score(ns, false)
|
||||
.interpret(inventory, topology)
|
||||
.await
|
||||
} else {
|
||||
|
||||
@@ -73,4 +73,8 @@ impl ScrapeTarget<CRDPrometheus> for Server {
|
||||
self.name.clone()
|
||||
)))
|
||||
}
|
||||
|
||||
fn clone_box(&self) -> Box<dyn ScrapeTarget<CRDPrometheus>> {
|
||||
Box::new(self.clone())
|
||||
}
|
||||
}
|
||||
|
||||
@@ -4,7 +4,7 @@ use crate::{
|
||||
infra::inventory::InventoryRepositoryFactory,
|
||||
interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
|
||||
inventory::{HostRole, Inventory},
|
||||
modules::inventory::DiscoverHostForRoleScore,
|
||||
modules::inventory::{DiscoverHostForRoleScore, HarmonyDiscoveryStrategy},
|
||||
score::Score,
|
||||
topology::HAClusterTopology,
|
||||
};
|
||||
@@ -104,6 +104,8 @@ When you can dig them, confirm to continue.
|
||||
bootstrap_host = hosts.into_iter().next().to_owned();
|
||||
DiscoverHostForRoleScore {
|
||||
role: HostRole::Bootstrap,
|
||||
number_desired_hosts: 1,
|
||||
discovery_strategy: HarmonyDiscoveryStrategy::MDNS,
|
||||
}
|
||||
.interpret(inventory, topology)
|
||||
.await?;
|
||||
|
||||
@@ -5,10 +5,8 @@ use crate::{
|
||||
interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
|
||||
inventory::{HostRole, Inventory},
|
||||
modules::{
|
||||
dhcp::DhcpHostBindingScore,
|
||||
http::IPxeMacBootFileScore,
|
||||
inventory::DiscoverHostForRoleScore,
|
||||
okd::{host_network::HostNetworkConfigurationScore, templates::BootstrapIpxeTpl},
|
||||
dhcp::DhcpHostBindingScore, http::IPxeMacBootFileScore,
|
||||
inventory::{DiscoverHostForRoleScore, HarmonyDiscoveryStrategy}, okd::templates::BootstrapIpxeTpl,
|
||||
},
|
||||
score::Score,
|
||||
topology::{HAClusterTopology, HostBinding},
|
||||
@@ -60,38 +58,39 @@ impl OKDSetup03ControlPlaneInterpret {
|
||||
inventory: &Inventory,
|
||||
topology: &HAClusterTopology,
|
||||
) -> Result<Vec<PhysicalHost>, InterpretError> {
|
||||
const REQUIRED_HOSTS: usize = 3;
|
||||
const REQUIRED_HOSTS: i16 = 3;
|
||||
let repo = InventoryRepositoryFactory::build().await?;
|
||||
let mut control_plane_hosts = repo.get_host_for_role(&HostRole::ControlPlane).await?;
|
||||
let control_plane_hosts = repo.get_host_for_role(&HostRole::ControlPlane).await?;
|
||||
|
||||
while control_plane_hosts.len() < REQUIRED_HOSTS {
|
||||
info!(
|
||||
"Discovery of {} control plane hosts in progress, current number {}",
|
||||
REQUIRED_HOSTS,
|
||||
control_plane_hosts.len()
|
||||
);
|
||||
// This score triggers the discovery agent for a specific role.
|
||||
DiscoverHostForRoleScore {
|
||||
role: HostRole::ControlPlane,
|
||||
}
|
||||
.interpret(inventory, topology)
|
||||
.await?;
|
||||
control_plane_hosts = repo.get_host_for_role(&HostRole::ControlPlane).await?;
|
||||
info!(
|
||||
"Discovery of {} control plane hosts in progress, current number {}",
|
||||
REQUIRED_HOSTS,
|
||||
control_plane_hosts.len()
|
||||
);
|
||||
// This score triggers the discovery agent for a specific role.
|
||||
DiscoverHostForRoleScore {
|
||||
role: HostRole::ControlPlane,
|
||||
number_desired_hosts: REQUIRED_HOSTS,
|
||||
discovery_strategy: HarmonyDiscoveryStrategy::MDNS,
|
||||
}
|
||||
.interpret(inventory, topology)
|
||||
.await?;
|
||||
|
||||
if control_plane_hosts.len() < REQUIRED_HOSTS {
|
||||
Err(InterpretError::new(format!(
|
||||
let control_plane_hosts = repo.get_host_for_role(&HostRole::ControlPlane).await?;
|
||||
|
||||
if control_plane_hosts.len() < REQUIRED_HOSTS as usize {
|
||||
return Err(InterpretError::new(format!(
|
||||
"OKD Requires at least {} control plane hosts, but only found {}. Cannot proceed.",
|
||||
REQUIRED_HOSTS,
|
||||
control_plane_hosts.len()
|
||||
)))
|
||||
} else {
|
||||
// Take exactly the number of required hosts to ensure consistency.
|
||||
Ok(control_plane_hosts
|
||||
.into_iter()
|
||||
.take(REQUIRED_HOSTS)
|
||||
.collect())
|
||||
)));
|
||||
}
|
||||
|
||||
// Take exactly the number of required hosts to ensure consistency.
|
||||
Ok(control_plane_hosts
|
||||
.into_iter()
|
||||
.take(REQUIRED_HOSTS as usize)
|
||||
.collect())
|
||||
}
|
||||
|
||||
/// Configures DHCP host bindings for all control plane nodes.
|
||||
@@ -205,28 +204,6 @@ impl OKDSetup03ControlPlaneInterpret {
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Placeholder for automating network bonding configuration.
|
||||
async fn persist_network_bond(
|
||||
&self,
|
||||
inventory: &Inventory,
|
||||
topology: &HAClusterTopology,
|
||||
hosts: &Vec<PhysicalHost>,
|
||||
) -> Result<(), InterpretError> {
|
||||
info!("[ControlPlane] Ensuring persistent bonding");
|
||||
let score = HostNetworkConfigurationScore {
|
||||
hosts: hosts.clone(),
|
||||
};
|
||||
score.interpret(inventory, topology).await?;
|
||||
|
||||
inquire::Confirm::new(
|
||||
"Network configuration for control plane nodes is not automated yet. Configure it manually if needed.",
|
||||
)
|
||||
.prompt()
|
||||
.map_err(|e| InterpretError::new(format!("User prompt failed: {e}")))?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
@@ -265,10 +242,6 @@ impl Interpret<HAClusterTopology> for OKDSetup03ControlPlaneInterpret {
|
||||
// 4. Reboot the nodes to start the OS installation.
|
||||
self.reboot_targets(&nodes).await?;
|
||||
|
||||
// 5. Placeholder for post-boot network configuration (e.g., bonding).
|
||||
self.persist_network_bond(inventory, topology, &nodes)
|
||||
.await?;
|
||||
|
||||
// TODO: Implement a step to wait for the control plane nodes to join the cluster
|
||||
// and for the cluster operators to become available. This would be similar to
|
||||
// the `wait-for bootstrap-complete` command.
|
||||
|
||||
130
harmony/src/modules/okd/bootstrap_persist_network_bond.rs
Normal file
130
harmony/src/modules/okd/bootstrap_persist_network_bond.rs
Normal file
@@ -0,0 +1,130 @@
|
||||
use crate::{
|
||||
data::Version,
|
||||
hardware::PhysicalHost,
|
||||
infra::inventory::InventoryRepositoryFactory,
|
||||
interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
|
||||
inventory::{HostRole, Inventory},
|
||||
modules::okd::host_network::HostNetworkConfigurationScore,
|
||||
score::Score,
|
||||
topology::HAClusterTopology,
|
||||
};
|
||||
use async_trait::async_trait;
|
||||
use derive_new::new;
|
||||
use harmony_types::id::Id;
|
||||
use log::info;
|
||||
use serde::Serialize;
|
||||
|
||||
// -------------------------------------------------------------------------------------------------
|
||||
// Persist Network Bond
|
||||
// - Persist bonding via NMState
|
||||
// - Persist port channels on the Switch
|
||||
// -------------------------------------------------------------------------------------------------
|
||||
|
||||
#[derive(Debug, Clone, Serialize, new)]
|
||||
pub struct OKDSetupPersistNetworkBondScore {}
|
||||
|
||||
impl Score<HAClusterTopology> for OKDSetupPersistNetworkBondScore {
|
||||
fn create_interpret(&self) -> Box<dyn Interpret<HAClusterTopology>> {
|
||||
Box::new(OKDSetupPersistNetworkBondInterpet::new())
|
||||
}
|
||||
|
||||
fn name(&self) -> String {
|
||||
"OKDSetupPersistNetworkBondScore".to_string()
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct OKDSetupPersistNetworkBondInterpet {
|
||||
version: Version,
|
||||
status: InterpretStatus,
|
||||
}
|
||||
|
||||
impl OKDSetupPersistNetworkBondInterpet {
|
||||
pub fn new() -> Self {
|
||||
let version = Version::from("1.0.0").unwrap();
|
||||
Self {
|
||||
version,
|
||||
status: InterpretStatus::QUEUED,
|
||||
}
|
||||
}
|
||||
|
||||
/// Ensures that three physical hosts are discovered and available for the ControlPlane role.
|
||||
/// It will trigger discovery if not enough hosts are found.
|
||||
async fn get_nodes(
|
||||
&self,
|
||||
_inventory: &Inventory,
|
||||
_topology: &HAClusterTopology,
|
||||
) -> Result<Vec<PhysicalHost>, InterpretError> {
|
||||
const REQUIRED_HOSTS: usize = 3;
|
||||
let repo = InventoryRepositoryFactory::build().await?;
|
||||
let control_plane_hosts = repo.get_host_for_role(&HostRole::ControlPlane).await?;
|
||||
|
||||
if control_plane_hosts.len() < REQUIRED_HOSTS {
|
||||
Err(InterpretError::new(format!(
|
||||
"OKD Requires at least {} control plane hosts, but only found {}. Cannot proceed.",
|
||||
REQUIRED_HOSTS,
|
||||
control_plane_hosts.len()
|
||||
)))
|
||||
} else {
|
||||
// Take exactly the number of required hosts to ensure consistency.
|
||||
Ok(control_plane_hosts
|
||||
.into_iter()
|
||||
.take(REQUIRED_HOSTS)
|
||||
.collect())
|
||||
}
|
||||
}
|
||||
|
||||
async fn persist_network_bond(
|
||||
&self,
|
||||
inventory: &Inventory,
|
||||
topology: &HAClusterTopology,
|
||||
hosts: &Vec<PhysicalHost>,
|
||||
) -> Result<(), InterpretError> {
|
||||
info!("Ensuring persistent bonding");
|
||||
|
||||
let score = HostNetworkConfigurationScore {
|
||||
hosts: hosts.clone(),
|
||||
};
|
||||
score.interpret(inventory, topology).await?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl Interpret<HAClusterTopology> for OKDSetupPersistNetworkBondInterpet {
|
||||
fn get_name(&self) -> InterpretName {
|
||||
InterpretName::Custom("OKDSetupPersistNetworkBondInterpet")
|
||||
}
|
||||
|
||||
fn get_version(&self) -> Version {
|
||||
self.version.clone()
|
||||
}
|
||||
|
||||
fn get_status(&self) -> InterpretStatus {
|
||||
self.status.clone()
|
||||
}
|
||||
|
||||
fn get_children(&self) -> Vec<Id> {
|
||||
vec![]
|
||||
}
|
||||
|
||||
async fn execute(
|
||||
&self,
|
||||
inventory: &Inventory,
|
||||
topology: &HAClusterTopology,
|
||||
) -> Result<Outcome, InterpretError> {
|
||||
let nodes = self.get_nodes(inventory, topology).await?;
|
||||
|
||||
let res = self.persist_network_bond(inventory, topology, &nodes).await;
|
||||
|
||||
match res {
|
||||
Ok(_) => Ok(Outcome::success(
|
||||
"Network bond successfully persisted".into(),
|
||||
)),
|
||||
Err(_) => Err(InterpretError::new(
|
||||
"Failed to persist network bond".to_string(),
|
||||
)),
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,41 +1 @@
|
||||
use kube::CustomResource;
|
||||
use schemars::JsonSchema;
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
pub mod nmstate;
|
||||
|
||||
#[derive(CustomResource, Deserialize, Serialize, Clone, Debug, JsonSchema)]
|
||||
#[kube(
|
||||
group = "operators.coreos.com",
|
||||
version = "v1",
|
||||
kind = "OperatorGroup",
|
||||
namespaced
|
||||
)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct OperatorGroupSpec {
|
||||
pub target_namespaces: Vec<String>,
|
||||
}
|
||||
|
||||
#[derive(CustomResource, Deserialize, Serialize, Clone, Debug, JsonSchema)]
|
||||
#[kube(
|
||||
group = "operators.coreos.com",
|
||||
version = "v1alpha1",
|
||||
kind = "Subscription",
|
||||
namespaced
|
||||
)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct SubscriptionSpec {
|
||||
pub name: String,
|
||||
pub source: String,
|
||||
pub source_namespace: String,
|
||||
pub channel: Option<String>,
|
||||
pub install_plan_approval: Option<InstallPlanApproval>,
|
||||
}
|
||||
|
||||
#[derive(Deserialize, Serialize, Clone, Debug, JsonSchema)]
|
||||
pub enum InstallPlanApproval {
|
||||
#[serde(rename = "Automatic")]
|
||||
Automatic,
|
||||
#[serde(rename = "Manual")]
|
||||
Manual,
|
||||
}
|
||||
|
||||
@@ -1,14 +1,22 @@
|
||||
use std::collections::BTreeMap;
|
||||
|
||||
use kube::CustomResource;
|
||||
use k8s_openapi::{ClusterResourceScope, Resource};
|
||||
use kube::{CustomResource, api::ObjectMeta};
|
||||
use schemars::JsonSchema;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use serde_json::Value;
|
||||
|
||||
#[derive(CustomResource, Deserialize, Serialize, Clone, Debug, JsonSchema)]
|
||||
#[kube(group = "nmstate.io", version = "v1", kind = "NMState", namespaced)]
|
||||
#[kube(
|
||||
group = "nmstate.io",
|
||||
version = "v1",
|
||||
kind = "NMState",
|
||||
plural = "nmstates",
|
||||
namespaced = false
|
||||
)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct NMStateSpec {
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub probe_configuration: Option<ProbeConfig>,
|
||||
}
|
||||
|
||||
@@ -40,55 +48,350 @@ pub struct ProbeDns {
|
||||
group = "nmstate.io",
|
||||
version = "v1",
|
||||
kind = "NodeNetworkConfigurationPolicy",
|
||||
namespaced
|
||||
namespaced = false
|
||||
)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct NodeNetworkConfigurationPolicySpec {
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub node_selector: Option<BTreeMap<String, String>>,
|
||||
pub desired_state: DesiredStateSpec,
|
||||
pub desired_state: NetworkState,
|
||||
}
|
||||
|
||||
// Currently, kube-rs derive doesn't support resources without a `spec` field, so we have
|
||||
// to implement it ourselves.
|
||||
//
|
||||
// Ref:
|
||||
// - https://github.com/kube-rs/kube/issues/1763
|
||||
// - https://github.com/kube-rs/kube/discussions/1762
|
||||
#[derive(Deserialize, Serialize, Clone, Debug)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct NodeNetworkState {
|
||||
metadata: ObjectMeta,
|
||||
pub status: NodeNetworkStateStatus,
|
||||
}
|
||||
|
||||
impl Resource for NodeNetworkState {
|
||||
const API_VERSION: &'static str = "nmstate.io/v1beta1";
|
||||
const GROUP: &'static str = "nmstate.io";
|
||||
const VERSION: &'static str = "v1beta1";
|
||||
const KIND: &'static str = "NodeNetworkState";
|
||||
const URL_PATH_SEGMENT: &'static str = "nodenetworkstates";
|
||||
type Scope = ClusterResourceScope;
|
||||
}
|
||||
|
||||
impl k8s_openapi::Metadata for NodeNetworkState {
|
||||
type Ty = ObjectMeta;
|
||||
|
||||
fn metadata(&self) -> &Self::Ty {
|
||||
&self.metadata
|
||||
}
|
||||
|
||||
fn metadata_mut(&mut self) -> &mut Self::Ty {
|
||||
&mut self.metadata
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Deserialize, Serialize, Clone, Debug, JsonSchema)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct NodeNetworkStateStatus {
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub current_state: Option<NetworkState>,
|
||||
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub handler_nmstate_version: Option<String>,
|
||||
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub host_network_manager_version: Option<String>,
|
||||
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub last_successful_update_time: Option<String>,
|
||||
}
|
||||
|
||||
/// The NetworkState is the top-level struct, representing the entire
|
||||
/// desired or current network state.
|
||||
#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
|
||||
#[serde(rename_all = "kebab-case")]
|
||||
pub struct DesiredStateSpec {
|
||||
pub interfaces: Vec<InterfaceSpec>,
|
||||
#[serde(deny_unknown_fields)]
|
||||
pub struct NetworkState {
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub hostname: Option<HostNameState>,
|
||||
#[serde(rename = "dns-resolver", skip_serializing_if = "Option::is_none")]
|
||||
pub dns: Option<DnsState>,
|
||||
#[serde(rename = "route-rules", skip_serializing_if = "Option::is_none")]
|
||||
pub rules: Option<RouteRuleState>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub routes: Option<RouteState>,
|
||||
#[serde(default, skip_serializing_if = "Vec::is_empty")]
|
||||
pub interfaces: Vec<Interface>,
|
||||
#[serde(rename = "ovs-db", skip_serializing_if = "Option::is_none")]
|
||||
pub ovsdb: Option<OvsDbGlobalConfig>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub ovn: Option<OvnConfiguration>,
|
||||
}
|
||||
|
||||
#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
|
||||
#[serde(rename_all = "kebab-case")]
|
||||
pub struct InterfaceSpec {
|
||||
pub name: String,
|
||||
pub description: Option<String>,
|
||||
pub r#type: String,
|
||||
pub state: String,
|
||||
pub mac_address: Option<String>,
|
||||
pub struct HostNameState {
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub running: Option<String>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub config: Option<String>,
|
||||
}
|
||||
|
||||
#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
|
||||
#[serde(rename_all = "kebab-case")]
|
||||
pub struct DnsState {
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub running: Option<DnsResolverConfig>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub config: Option<DnsResolverConfig>,
|
||||
}
|
||||
|
||||
#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
|
||||
#[serde(rename_all = "kebab-case")]
|
||||
pub struct DnsResolverConfig {
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub search: Option<Vec<String>>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub server: Option<Vec<String>>,
|
||||
}
|
||||
|
||||
#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
|
||||
#[serde(rename_all = "kebab-case")]
|
||||
pub struct RouteRuleState {
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub config: Option<Vec<RouteRule>>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub running: Option<Vec<RouteRule>>,
|
||||
}
|
||||
|
||||
#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
|
||||
#[serde(rename_all = "kebab-case")]
|
||||
pub struct RouteState {
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub config: Option<Vec<Route>>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub running: Option<Vec<Route>>,
|
||||
}
|
||||
|
||||
#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
|
||||
#[serde(rename_all = "kebab-case")]
|
||||
pub struct RouteRule {
|
||||
#[serde(rename = "ip-from", skip_serializing_if = "Option::is_none")]
|
||||
pub ip_from: Option<String>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub priority: Option<u32>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub route_table: Option<u32>,
|
||||
}
|
||||
|
||||
#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
|
||||
#[serde(rename_all = "kebab-case")]
|
||||
pub struct Route {
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub destination: Option<String>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub metric: Option<u32>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub next_hop_address: Option<String>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub next_hop_interface: Option<String>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub table_id: Option<u32>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub mtu: Option<u32>,
|
||||
}
|
||||
|
||||
#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
|
||||
#[serde(rename_all = "kebab-case")]
|
||||
pub struct OvsDbGlobalConfig {
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub external_ids: Option<BTreeMap<String, String>>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub other_config: Option<BTreeMap<String, String>>,
|
||||
}
|
||||
|
||||
#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
|
||||
#[serde(rename_all = "kebab-case")]
|
||||
pub struct OvnConfiguration {
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub bridge_mappings: Option<Vec<OvnBridgeMapping>>,
|
||||
}
|
||||
|
||||
#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
|
||||
#[serde(rename_all = "kebab-case")]
|
||||
pub struct OvnBridgeMapping {
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub localnet: Option<String>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub bridge: Option<String>,
|
||||
}
|
||||
|
||||
#[derive(Deserialize, Serialize, Clone, Debug, JsonSchema)]
|
||||
#[serde(untagged)]
|
||||
#[serde(rename_all = "kebab-case")]
|
||||
pub enum StpSpec {
|
||||
Bool(bool),
|
||||
Options(StpOptions),
|
||||
}
|
||||
|
||||
#[derive(Deserialize, Serialize, Clone, Debug, JsonSchema)]
|
||||
#[serde(rename_all = "kebab-case")]
|
||||
pub struct LldpState {
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub enabled: Option<bool>,
|
||||
}
|
||||
|
||||
#[derive(Deserialize, Serialize, Clone, Debug, JsonSchema)]
|
||||
#[serde(rename_all = "kebab-case")]
|
||||
pub struct OvsDb {
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub external_ids: Option<BTreeMap<String, String>>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub other_config: Option<BTreeMap<String, String>>,
|
||||
}
|
||||
|
||||
#[derive(Deserialize, Serialize, Clone, Debug, JsonSchema)]
|
||||
#[serde(rename_all = "kebab-case")]
|
||||
pub struct PatchState {
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub peer: Option<String>,
|
||||
}
|
||||
|
||||
#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
|
||||
#[serde(rename_all = "kebab-case")]
|
||||
pub struct Interface {
|
||||
pub name: String,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub description: Option<String>,
|
||||
pub r#type: InterfaceType,
|
||||
pub state: String,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub mac_address: Option<String>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub copy_mac_from: Option<String>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub mtu: Option<u32>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub controller: Option<String>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub ipv4: Option<IpStackSpec>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub ipv6: Option<IpStackSpec>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub ethernet: Option<EthernetSpec>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub link_aggregation: Option<BondSpec>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub vlan: Option<VlanSpec>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub vxlan: Option<VxlanSpec>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub mac_vtap: Option<MacVtapSpec>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub mac_vlan: Option<MacVlanSpec>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub infiniband: Option<InfinibandSpec>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub linux_bridge: Option<LinuxBridgeSpec>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
#[serde(alias = "bridge")]
|
||||
pub ovs_bridge: Option<OvsBridgeSpec>,
|
||||
pub ethtool: Option<EthtoolSpec>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub ethtool: Option<Value>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub accept_all_mac_addresses: Option<bool>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub identifier: Option<String>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub lldp: Option<LldpState>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub permanent_mac_address: Option<String>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub max_mtu: Option<u32>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub min_mtu: Option<u32>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub mptcp: Option<Value>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub profile_name: Option<String>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub wait_ip: Option<String>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub ovs_db: Option<OvsDb>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub driver: Option<String>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub patch: Option<PatchState>,
|
||||
}
|
||||
|
||||
#[derive(Deserialize, Serialize, Clone, PartialEq, Eq, PartialOrd, Ord, Debug, JsonSchema)]
|
||||
#[serde(rename_all = "kebab-case")]
|
||||
pub enum InterfaceType {
|
||||
#[serde(rename = "unknown")]
|
||||
Unknown,
|
||||
#[serde(rename = "dummy")]
|
||||
Dummy,
|
||||
#[serde(rename = "loopback")]
|
||||
Loopback,
|
||||
#[serde(rename = "linux-bridge")]
|
||||
LinuxBridge,
|
||||
#[serde(rename = "ovs-bridge")]
|
||||
OvsBridge,
|
||||
#[serde(rename = "ovs-interface")]
|
||||
OvsInterface,
|
||||
#[serde(rename = "bond")]
|
||||
Bond,
|
||||
#[serde(rename = "ipvlan")]
|
||||
IpVlan,
|
||||
#[serde(rename = "vlan")]
|
||||
Vlan,
|
||||
#[serde(rename = "vxlan")]
|
||||
Vxlan,
|
||||
#[serde(rename = "mac-vlan")]
|
||||
Macvlan,
|
||||
#[serde(rename = "mac-vtap")]
|
||||
Macvtap,
|
||||
#[serde(rename = "ethernet")]
|
||||
Ethernet,
|
||||
#[serde(rename = "infiniband")]
|
||||
Infiniband,
|
||||
#[serde(rename = "vrf")]
|
||||
Vrf,
|
||||
#[serde(rename = "veth")]
|
||||
Veth,
|
||||
#[serde(rename = "ipsec")]
|
||||
Ipsec,
|
||||
#[serde(rename = "hsr")]
|
||||
Hrs,
|
||||
}
|
||||
|
||||
impl Default for InterfaceType {
|
||||
fn default() -> Self {
|
||||
Self::Loopback
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
|
||||
#[serde(rename_all = "kebab-case")]
|
||||
pub struct IpStackSpec {
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub enabled: Option<bool>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub dhcp: Option<bool>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub autoconf: Option<bool>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub address: Option<Vec<IpAddressSpec>>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub auto_dns: Option<bool>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub auto_gateway: Option<bool>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub auto_routes: Option<bool>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub dhcp_client_id: Option<String>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub dhcp_duid: Option<String>,
|
||||
}
|
||||
|
||||
@@ -102,8 +405,11 @@ pub struct IpAddressSpec {
|
||||
#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
|
||||
#[serde(rename_all = "kebab-case")]
|
||||
pub struct EthernetSpec {
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub speed: Option<u32>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub duplex: Option<String>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub auto_negotiation: Option<bool>,
|
||||
}
|
||||
|
||||
@@ -112,6 +418,7 @@ pub struct EthernetSpec {
|
||||
pub struct BondSpec {
|
||||
pub mode: String,
|
||||
pub ports: Vec<String>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub options: Option<BTreeMap<String, Value>>,
|
||||
}
|
||||
|
||||
@@ -120,6 +427,7 @@ pub struct BondSpec {
|
||||
pub struct VlanSpec {
|
||||
pub base_iface: String,
|
||||
pub id: u16,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub protocol: Option<String>,
|
||||
}
|
||||
|
||||
@@ -129,8 +437,11 @@ pub struct VxlanSpec {
|
||||
pub base_iface: String,
|
||||
pub id: u32,
|
||||
pub remote: String,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub local: Option<String>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub learning: Option<bool>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub destination_port: Option<u16>,
|
||||
}
|
||||
|
||||
@@ -139,6 +450,7 @@ pub struct VxlanSpec {
|
||||
pub struct MacVtapSpec {
|
||||
pub base_iface: String,
|
||||
pub mode: String,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub promiscuous: Option<bool>,
|
||||
}
|
||||
|
||||
@@ -147,6 +459,7 @@ pub struct MacVtapSpec {
|
||||
pub struct MacVlanSpec {
|
||||
pub base_iface: String,
|
||||
pub mode: String,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub promiscuous: Option<bool>,
|
||||
}
|
||||
|
||||
@@ -161,25 +474,35 @@ pub struct InfinibandSpec {
|
||||
#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
|
||||
#[serde(rename_all = "kebab-case")]
|
||||
pub struct LinuxBridgeSpec {
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub options: Option<LinuxBridgeOptions>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub ports: Option<Vec<LinuxBridgePort>>,
|
||||
}
|
||||
|
||||
#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
|
||||
#[serde(rename_all = "kebab-case")]
|
||||
pub struct LinuxBridgeOptions {
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub mac_ageing_time: Option<u32>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub multicast_snooping: Option<bool>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub stp: Option<StpOptions>,
|
||||
}
|
||||
|
||||
#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
|
||||
#[serde(rename_all = "kebab-case")]
|
||||
pub struct StpOptions {
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub enabled: Option<bool>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub forward_delay: Option<u16>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub hello_time: Option<u16>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub max_age: Option<u16>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub priority: Option<u16>,
|
||||
}
|
||||
|
||||
@@ -187,15 +510,20 @@ pub struct StpOptions {
|
||||
#[serde(rename_all = "kebab-case")]
|
||||
pub struct LinuxBridgePort {
|
||||
pub name: String,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub vlan: Option<LinuxBridgePortVlan>,
|
||||
}
|
||||
|
||||
#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
|
||||
#[serde(rename_all = "kebab-case")]
|
||||
pub struct LinuxBridgePortVlan {
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub mode: Option<String>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub trunk_tags: Option<Vec<VlanTag>>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub tag: Option<u16>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub enable_native: Option<bool>,
|
||||
}
|
||||
|
||||
@@ -203,6 +531,7 @@ pub struct LinuxBridgePortVlan {
|
||||
#[serde(rename_all = "kebab-case")]
|
||||
pub struct VlanTag {
|
||||
pub id: u16,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub id_range: Option<VlanIdRange>,
|
||||
}
|
||||
|
||||
@@ -216,36 +545,35 @@ pub struct VlanIdRange {
|
||||
#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
|
||||
#[serde(rename_all = "kebab-case")]
|
||||
pub struct OvsBridgeSpec {
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub options: Option<OvsBridgeOptions>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub ports: Option<Vec<OvsPortSpec>>,
|
||||
}
|
||||
|
||||
#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
|
||||
#[serde(rename_all = "kebab-case")]
|
||||
pub struct OvsBridgeOptions {
|
||||
pub stp: Option<bool>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub stp: Option<StpSpec>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub rstp: Option<bool>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub mcast_snooping_enable: Option<bool>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub datapath: Option<String>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub fail_mode: Option<String>,
|
||||
}
|
||||
|
||||
#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
|
||||
#[serde(rename_all = "kebab-case")]
|
||||
pub struct OvsPortSpec {
|
||||
pub name: String,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub link_aggregation: Option<BondSpec>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub vlan: Option<LinuxBridgePortVlan>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub r#type: Option<String>,
|
||||
}
|
||||
|
||||
#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
|
||||
#[serde(rename_all = "kebab-case")]
|
||||
pub struct EthtoolSpec {
|
||||
// TODO: Properly describe this spec (https://nmstate.io/devel/yaml_api.html#ethtool)
|
||||
}
|
||||
|
||||
#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
|
||||
#[serde(rename_all = "kebab-case")]
|
||||
pub struct EthtoolFecSpec {
|
||||
pub auto: Option<bool>,
|
||||
pub mode: Option<String>,
|
||||
}
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
use async_trait::async_trait;
|
||||
use harmony_types::id::Id;
|
||||
use log::{debug, info};
|
||||
use log::{info, warn};
|
||||
use serde::Serialize;
|
||||
|
||||
use crate::{
|
||||
@@ -9,7 +9,7 @@ use crate::{
|
||||
interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
|
||||
inventory::Inventory,
|
||||
score::Score,
|
||||
topology::{HostNetworkConfig, NetworkInterface, Switch, SwitchPort, Topology},
|
||||
topology::{HostNetworkConfig, NetworkInterface, NetworkManager, Switch, SwitchPort, Topology},
|
||||
};
|
||||
|
||||
#[derive(Debug, Clone, Serialize)]
|
||||
@@ -17,7 +17,7 @@ pub struct HostNetworkConfigurationScore {
|
||||
pub hosts: Vec<PhysicalHost>,
|
||||
}
|
||||
|
||||
impl<T: Topology + Switch> Score<T> for HostNetworkConfigurationScore {
|
||||
impl<T: Topology + NetworkManager + Switch> Score<T> for HostNetworkConfigurationScore {
|
||||
fn name(&self) -> String {
|
||||
"HostNetworkConfigurationScore".into()
|
||||
}
|
||||
@@ -35,34 +35,91 @@ pub struct HostNetworkConfigurationInterpret {
|
||||
}
|
||||
|
||||
impl HostNetworkConfigurationInterpret {
|
||||
async fn configure_network_for_host<T: Topology + Switch>(
|
||||
async fn configure_network_for_host<T: Topology + NetworkManager + Switch>(
|
||||
&self,
|
||||
topology: &T,
|
||||
host: &PhysicalHost,
|
||||
) -> Result<(), InterpretError> {
|
||||
let switch_ports = self.collect_switch_ports_for_host(topology, host).await?;
|
||||
if !switch_ports.is_empty() {
|
||||
topology
|
||||
.configure_host_network(host, HostNetworkConfig { switch_ports })
|
||||
.await
|
||||
.map_err(|e| InterpretError::new(format!("Failed to configure host: {e}")))?;
|
||||
current_host: &usize,
|
||||
total_hosts: &usize,
|
||||
) -> Result<HostNetworkConfig, InterpretError> {
|
||||
if host.network.is_empty() {
|
||||
info!("[Host {current_host}/{total_hosts}] No interfaces to configure, skipping");
|
||||
return Ok(HostNetworkConfig {
|
||||
host_id: host.id.clone(),
|
||||
switch_ports: vec![],
|
||||
});
|
||||
}
|
||||
if host.network.len() == 1 {
|
||||
info!("[Host {current_host}/{total_hosts}] Only one interface to configure, skipping");
|
||||
return Ok(HostNetworkConfig {
|
||||
host_id: host.id.clone(),
|
||||
switch_ports: vec![],
|
||||
});
|
||||
}
|
||||
|
||||
Ok(())
|
||||
let switch_ports = self
|
||||
.collect_switch_ports_for_host(topology, host, current_host, total_hosts)
|
||||
.await?;
|
||||
|
||||
let config = HostNetworkConfig {
|
||||
host_id: host.id.clone(),
|
||||
switch_ports,
|
||||
};
|
||||
|
||||
if config.switch_ports.len() > 1 {
|
||||
info!(
|
||||
"[Host {current_host}/{total_hosts}] Found {} ports for {} interfaces",
|
||||
config.switch_ports.len(),
|
||||
host.network.len()
|
||||
);
|
||||
|
||||
info!("[Host {current_host}/{total_hosts}] Configuring host network...");
|
||||
topology.configure_bond(&config).await.map_err(|e| {
|
||||
InterpretError::new(format!("Failed to configure host network: {e}"))
|
||||
})?;
|
||||
topology
|
||||
.configure_port_channel(&config)
|
||||
.await
|
||||
.map_err(|e| {
|
||||
InterpretError::new(format!("Failed to configure host network: {e}"))
|
||||
})?;
|
||||
} else if config.switch_ports.is_empty() {
|
||||
info!(
|
||||
"[Host {current_host}/{total_hosts}] No ports found for {} interfaces, skipping",
|
||||
host.network.len()
|
||||
);
|
||||
} else {
|
||||
warn!(
|
||||
"[Host {current_host}/{total_hosts}] Found a single port for {} interfaces, skipping",
|
||||
host.network.len()
|
||||
);
|
||||
}
|
||||
|
||||
Ok(config)
|
||||
}
|
||||
|
||||
async fn collect_switch_ports_for_host<T: Topology + Switch>(
|
||||
&self,
|
||||
topology: &T,
|
||||
host: &PhysicalHost,
|
||||
current_host: &usize,
|
||||
total_hosts: &usize,
|
||||
) -> Result<Vec<SwitchPort>, InterpretError> {
|
||||
let mut switch_ports = vec![];
|
||||
|
||||
if host.network.is_empty() {
|
||||
return Ok(switch_ports);
|
||||
}
|
||||
|
||||
info!("[Host {current_host}/{total_hosts}] Collecting ports on switch...");
|
||||
for network_interface in &host.network {
|
||||
let mac_address = network_interface.mac_address;
|
||||
|
||||
match topology.get_port_for_mac_address(&mac_address).await {
|
||||
Ok(Some(port)) => {
|
||||
info!(
|
||||
"[Host {current_host}/{total_hosts}] Found port '{port}' for '{mac_address}'"
|
||||
);
|
||||
switch_ports.push(SwitchPort {
|
||||
interface: NetworkInterface {
|
||||
name: network_interface.name.clone(),
|
||||
@@ -73,7 +130,7 @@ impl HostNetworkConfigurationInterpret {
|
||||
port,
|
||||
});
|
||||
}
|
||||
Ok(None) => debug!("No port found for host '{}', skipping", host.id),
|
||||
Ok(None) => {}
|
||||
Err(e) => {
|
||||
return Err(InterpretError::new(format!(
|
||||
"Failed to get port for host '{}': {}",
|
||||
@@ -85,10 +142,42 @@ impl HostNetworkConfigurationInterpret {
|
||||
|
||||
Ok(switch_ports)
|
||||
}
|
||||
|
||||
fn format_host_configuration(&self, configs: Vec<HostNetworkConfig>) -> Vec<String> {
|
||||
let mut report = vec![
|
||||
"Network Configuration Report".to_string(),
|
||||
"------------------------------------------------------------------".to_string(),
|
||||
];
|
||||
|
||||
for config in configs {
|
||||
if config.switch_ports.is_empty() {
|
||||
report.push(format!(
|
||||
"⏭️ Host {}: SKIPPED (No matching switch ports found)",
|
||||
config.host_id
|
||||
));
|
||||
} else {
|
||||
let mappings: Vec<String> = config
|
||||
.switch_ports
|
||||
.iter()
|
||||
.map(|p| format!("[{} -> {}]", p.interface.name, p.port))
|
||||
.collect();
|
||||
|
||||
report.push(format!(
|
||||
"✅ Host {}: Bonded {} port(s) {}",
|
||||
config.host_id,
|
||||
config.switch_ports.len(),
|
||||
mappings.join(", ")
|
||||
));
|
||||
}
|
||||
}
|
||||
report
|
||||
.push("------------------------------------------------------------------".to_string());
|
||||
report
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl<T: Topology + Switch> Interpret<T> for HostNetworkConfigurationInterpret {
|
||||
impl<T: Topology + NetworkManager + Switch> Interpret<T> for HostNetworkConfigurationInterpret {
|
||||
fn get_name(&self) -> InterpretName {
|
||||
InterpretName::Custom("HostNetworkConfigurationInterpret")
|
||||
}
|
||||
@@ -114,27 +203,45 @@ impl<T: Topology + Switch> Interpret<T> for HostNetworkConfigurationInterpret {
|
||||
return Ok(Outcome::noop("No hosts to configure".into()));
|
||||
}
|
||||
|
||||
info!(
|
||||
"Started network configuration for {} host(s)...",
|
||||
self.score.hosts.len()
|
||||
);
|
||||
let host_count = self.score.hosts.len();
|
||||
info!("Started network configuration for {host_count} host(s)...",);
|
||||
|
||||
info!("Setting up NetworkManager...",);
|
||||
topology
|
||||
.ensure_network_manager_installed()
|
||||
.await
|
||||
.map_err(|e| InterpretError::new(format!("NetworkManager setup failed: {e}")))?;
|
||||
|
||||
info!("Setting up switch with sane defaults...");
|
||||
topology
|
||||
.setup_switch()
|
||||
.await
|
||||
.map_err(|e| InterpretError::new(format!("Switch setup failed: {e}")))?;
|
||||
info!("Switch ready");
|
||||
|
||||
let mut current_host = 1;
|
||||
let mut host_configurations = vec![];
|
||||
|
||||
let mut configured_host_count = 0;
|
||||
for host in &self.score.hosts {
|
||||
self.configure_network_for_host(topology, host).await?;
|
||||
configured_host_count += 1;
|
||||
let host_configuration = self
|
||||
.configure_network_for_host(topology, host, ¤t_host, &host_count)
|
||||
.await?;
|
||||
|
||||
host_configurations.push(host_configuration);
|
||||
current_host += 1;
|
||||
}
|
||||
|
||||
if configured_host_count > 0 {
|
||||
Ok(Outcome::success(format!(
|
||||
"Configured {configured_host_count}/{} host(s)",
|
||||
self.score.hosts.len()
|
||||
)))
|
||||
if current_host > 1 {
|
||||
let details = self.format_host_configuration(host_configurations);
|
||||
|
||||
Ok(Outcome::success_with_details(
|
||||
format!(
|
||||
"Configured {}/{} host(s)",
|
||||
current_host - 1,
|
||||
self.score.hosts.len()
|
||||
),
|
||||
details,
|
||||
))
|
||||
} else {
|
||||
Ok(Outcome::noop("No hosts configured".into()))
|
||||
}
|
||||
@@ -144,13 +251,14 @@ impl<T: Topology + Switch> Interpret<T> for HostNetworkConfigurationInterpret {
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use assertor::*;
|
||||
use brocade::PortOperatingMode;
|
||||
use harmony_types::{net::MacAddress, switch::PortLocation};
|
||||
use lazy_static::lazy_static;
|
||||
|
||||
use crate::{
|
||||
hardware::HostCategory,
|
||||
topology::{
|
||||
HostNetworkConfig, PreparationError, PreparationOutcome, SwitchError, SwitchPort,
|
||||
HostNetworkConfig, NetworkError, PortConfig, PreparationError, PreparationOutcome, SwitchError, SwitchPort
|
||||
},
|
||||
};
|
||||
use std::{
|
||||
@@ -175,6 +283,18 @@ mod tests {
|
||||
speed_mbps: None,
|
||||
mtu: 1,
|
||||
};
|
||||
pub static ref YET_ANOTHER_EXISTING_INTERFACE: NetworkInterface = NetworkInterface {
|
||||
mac_address: MacAddress::try_from("AA:BB:CC:DD:EE:F3".to_string()).unwrap(),
|
||||
name: "interface-3".into(),
|
||||
speed_mbps: None,
|
||||
mtu: 1,
|
||||
};
|
||||
pub static ref LAST_EXISTING_INTERFACE: NetworkInterface = NetworkInterface {
|
||||
mac_address: MacAddress::try_from("AA:BB:CC:DD:EE:F4".to_string()).unwrap(),
|
||||
name: "interface-4".into(),
|
||||
speed_mbps: None,
|
||||
mtu: 1,
|
||||
};
|
||||
pub static ref UNKNOWN_INTERFACE: NetworkInterface = NetworkInterface {
|
||||
mac_address: MacAddress::try_from("11:22:33:44:55:61".to_string()).unwrap(),
|
||||
name: "unknown-interface".into(),
|
||||
@@ -183,6 +303,8 @@ mod tests {
|
||||
};
|
||||
pub static ref PORT: PortLocation = PortLocation(1, 0, 42);
|
||||
pub static ref ANOTHER_PORT: PortLocation = PortLocation(2, 0, 42);
|
||||
pub static ref YET_ANOTHER_PORT: PortLocation = PortLocation(1, 0, 45);
|
||||
pub static ref LAST_PORT: PortLocation = PortLocation(2, 0, 45);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
@@ -198,27 +320,33 @@ mod tests {
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn host_with_one_mac_address_should_create_bond_with_one_interface() {
|
||||
async fn should_setup_network_manager() {
|
||||
let host = given_host(&HOST_ID, vec![EXISTING_INTERFACE.clone()]);
|
||||
let score = given_score(vec![host]);
|
||||
let topology = TopologyWithSwitch::new();
|
||||
|
||||
let _ = score.interpret(&Inventory::empty(), &topology).await;
|
||||
|
||||
let configured_host_networks = topology.configured_host_networks.lock().unwrap();
|
||||
assert_that!(*configured_host_networks).contains_exactly(vec![(
|
||||
HOST_ID.clone(),
|
||||
HostNetworkConfig {
|
||||
switch_ports: vec![SwitchPort {
|
||||
interface: EXISTING_INTERFACE.clone(),
|
||||
port: PORT.clone(),
|
||||
}],
|
||||
},
|
||||
)]);
|
||||
let network_manager_setup = topology.network_manager_setup.lock().unwrap();
|
||||
assert_that!(*network_manager_setup).is_true();
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn host_with_multiple_mac_addresses_should_create_one_bond_with_all_interfaces() {
|
||||
async fn host_with_one_mac_address_should_skip_host_configuration() {
|
||||
let host = given_host(&HOST_ID, vec![EXISTING_INTERFACE.clone()]);
|
||||
let score = given_score(vec![host]);
|
||||
let topology = TopologyWithSwitch::new();
|
||||
|
||||
let _ = score.interpret(&Inventory::empty(), &topology).await;
|
||||
|
||||
let config = topology.configured_bonds.lock().unwrap();
|
||||
assert_that!(*config).is_empty();
|
||||
let config = topology.configured_port_channels.lock().unwrap();
|
||||
assert_that!(*config).is_empty();
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn host_with_multiple_mac_addresses_should_configure_one_bond_with_all_interfaces() {
|
||||
let score = given_score(vec![given_host(
|
||||
&HOST_ID,
|
||||
vec![
|
||||
@@ -230,10 +358,11 @@ mod tests {
|
||||
|
||||
let _ = score.interpret(&Inventory::empty(), &topology).await;
|
||||
|
||||
let configured_host_networks = topology.configured_host_networks.lock().unwrap();
|
||||
assert_that!(*configured_host_networks).contains_exactly(vec![(
|
||||
let config = topology.configured_bonds.lock().unwrap();
|
||||
assert_that!(*config).contains_exactly(vec![(
|
||||
HOST_ID.clone(),
|
||||
HostNetworkConfig {
|
||||
host_id: HOST_ID.clone(),
|
||||
switch_ports: vec![
|
||||
SwitchPort {
|
||||
interface: EXISTING_INTERFACE.clone(),
|
||||
@@ -249,47 +378,183 @@ mod tests {
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn multiple_hosts_should_create_one_bond_per_host() {
|
||||
async fn host_with_multiple_mac_addresses_should_configure_one_port_channel_with_all_interfaces()
|
||||
{
|
||||
let score = given_score(vec![given_host(
|
||||
&HOST_ID,
|
||||
vec![
|
||||
EXISTING_INTERFACE.clone(),
|
||||
ANOTHER_EXISTING_INTERFACE.clone(),
|
||||
],
|
||||
)]);
|
||||
let topology = TopologyWithSwitch::new();
|
||||
|
||||
let _ = score.interpret(&Inventory::empty(), &topology).await;
|
||||
|
||||
let config = topology.configured_port_channels.lock().unwrap();
|
||||
assert_that!(*config).contains_exactly(vec![(
|
||||
HOST_ID.clone(),
|
||||
HostNetworkConfig {
|
||||
host_id: HOST_ID.clone(),
|
||||
switch_ports: vec![
|
||||
SwitchPort {
|
||||
interface: EXISTING_INTERFACE.clone(),
|
||||
port: PORT.clone(),
|
||||
},
|
||||
SwitchPort {
|
||||
interface: ANOTHER_EXISTING_INTERFACE.clone(),
|
||||
port: ANOTHER_PORT.clone(),
|
||||
},
|
||||
],
|
||||
},
|
||||
)]);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn multiple_hosts_should_configure_one_bond_per_host() {
|
||||
let score = given_score(vec![
|
||||
given_host(&HOST_ID, vec![EXISTING_INTERFACE.clone()]),
|
||||
given_host(&ANOTHER_HOST_ID, vec![ANOTHER_EXISTING_INTERFACE.clone()]),
|
||||
given_host(
|
||||
&HOST_ID,
|
||||
vec![
|
||||
EXISTING_INTERFACE.clone(),
|
||||
ANOTHER_EXISTING_INTERFACE.clone(),
|
||||
],
|
||||
),
|
||||
given_host(
|
||||
&ANOTHER_HOST_ID,
|
||||
vec![
|
||||
YET_ANOTHER_EXISTING_INTERFACE.clone(),
|
||||
LAST_EXISTING_INTERFACE.clone(),
|
||||
],
|
||||
),
|
||||
]);
|
||||
let topology = TopologyWithSwitch::new();
|
||||
|
||||
let _ = score.interpret(&Inventory::empty(), &topology).await;
|
||||
|
||||
let configured_host_networks = topology.configured_host_networks.lock().unwrap();
|
||||
assert_that!(*configured_host_networks).contains_exactly(vec![
|
||||
let config = topology.configured_bonds.lock().unwrap();
|
||||
assert_that!(*config).contains_exactly(vec![
|
||||
(
|
||||
HOST_ID.clone(),
|
||||
HostNetworkConfig {
|
||||
switch_ports: vec![SwitchPort {
|
||||
interface: EXISTING_INTERFACE.clone(),
|
||||
port: PORT.clone(),
|
||||
}],
|
||||
host_id: HOST_ID.clone(),
|
||||
switch_ports: vec![
|
||||
SwitchPort {
|
||||
interface: EXISTING_INTERFACE.clone(),
|
||||
port: PORT.clone(),
|
||||
},
|
||||
SwitchPort {
|
||||
interface: ANOTHER_EXISTING_INTERFACE.clone(),
|
||||
port: ANOTHER_PORT.clone(),
|
||||
},
|
||||
],
|
||||
},
|
||||
),
|
||||
(
|
||||
ANOTHER_HOST_ID.clone(),
|
||||
HostNetworkConfig {
|
||||
switch_ports: vec![SwitchPort {
|
||||
interface: ANOTHER_EXISTING_INTERFACE.clone(),
|
||||
port: ANOTHER_PORT.clone(),
|
||||
}],
|
||||
host_id: ANOTHER_HOST_ID.clone(),
|
||||
switch_ports: vec![
|
||||
SwitchPort {
|
||||
interface: YET_ANOTHER_EXISTING_INTERFACE.clone(),
|
||||
port: YET_ANOTHER_PORT.clone(),
|
||||
},
|
||||
SwitchPort {
|
||||
interface: LAST_EXISTING_INTERFACE.clone(),
|
||||
port: LAST_PORT.clone(),
|
||||
},
|
||||
],
|
||||
},
|
||||
),
|
||||
]);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn port_not_found_for_mac_address_should_not_configure_interface() {
|
||||
async fn multiple_hosts_should_configure_one_port_channel_per_host() {
|
||||
let score = given_score(vec![
|
||||
given_host(
|
||||
&HOST_ID,
|
||||
vec![
|
||||
EXISTING_INTERFACE.clone(),
|
||||
ANOTHER_EXISTING_INTERFACE.clone(),
|
||||
],
|
||||
),
|
||||
given_host(
|
||||
&ANOTHER_HOST_ID,
|
||||
vec![
|
||||
YET_ANOTHER_EXISTING_INTERFACE.clone(),
|
||||
LAST_EXISTING_INTERFACE.clone(),
|
||||
],
|
||||
),
|
||||
]);
|
||||
let topology = TopologyWithSwitch::new();
|
||||
|
||||
let _ = score.interpret(&Inventory::empty(), &topology).await;
|
||||
|
||||
let config = topology.configured_port_channels.lock().unwrap();
|
||||
assert_that!(*config).contains_exactly(vec![
|
||||
(
|
||||
HOST_ID.clone(),
|
||||
HostNetworkConfig {
|
||||
host_id: HOST_ID.clone(),
|
||||
switch_ports: vec![
|
||||
SwitchPort {
|
||||
interface: EXISTING_INTERFACE.clone(),
|
||||
port: PORT.clone(),
|
||||
},
|
||||
SwitchPort {
|
||||
interface: ANOTHER_EXISTING_INTERFACE.clone(),
|
||||
port: ANOTHER_PORT.clone(),
|
||||
},
|
||||
],
|
||||
},
|
||||
),
|
||||
(
|
||||
ANOTHER_HOST_ID.clone(),
|
||||
HostNetworkConfig {
|
||||
host_id: ANOTHER_HOST_ID.clone(),
|
||||
switch_ports: vec![
|
||||
SwitchPort {
|
||||
interface: YET_ANOTHER_EXISTING_INTERFACE.clone(),
|
||||
port: YET_ANOTHER_PORT.clone(),
|
||||
},
|
||||
SwitchPort {
|
||||
interface: LAST_EXISTING_INTERFACE.clone(),
|
||||
port: LAST_PORT.clone(),
|
||||
},
|
||||
],
|
||||
},
|
||||
),
|
||||
]);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn port_not_found_for_mac_address_should_not_configure_host() {
|
||||
let score = given_score(vec![given_host(&HOST_ID, vec![UNKNOWN_INTERFACE.clone()])]);
|
||||
let topology = TopologyWithSwitch::new_port_not_found();
|
||||
|
||||
let _ = score.interpret(&Inventory::empty(), &topology).await;
|
||||
|
||||
let configured_host_networks = topology.configured_host_networks.lock().unwrap();
|
||||
assert_that!(*configured_host_networks).is_empty();
|
||||
let config = topology.configured_port_channels.lock().unwrap();
|
||||
assert_that!(*config).is_empty();
|
||||
let config = topology.configured_bonds.lock().unwrap();
|
||||
assert_that!(*config).is_empty();
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn only_one_port_found_for_multiple_mac_addresses_should_not_configure_host() {
|
||||
let score = given_score(vec![given_host(
|
||||
&HOST_ID,
|
||||
vec![EXISTING_INTERFACE.clone(), UNKNOWN_INTERFACE.clone()],
|
||||
)]);
|
||||
let topology = TopologyWithSwitch::new_single_port_found();
|
||||
|
||||
let _ = score.interpret(&Inventory::empty(), &topology).await;
|
||||
|
||||
let config = topology.configured_port_channels.lock().unwrap();
|
||||
assert_that!(*config).is_empty();
|
||||
let config = topology.configured_bonds.lock().unwrap();
|
||||
assert_that!(*config).is_empty();
|
||||
}
|
||||
|
||||
fn given_score(hosts: Vec<PhysicalHost>) -> HostNetworkConfigurationScore {
|
||||
@@ -326,26 +591,48 @@ mod tests {
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
struct TopologyWithSwitch {
|
||||
available_ports: Arc<Mutex<Vec<PortLocation>>>,
|
||||
configured_host_networks: Arc<Mutex<Vec<(Id, HostNetworkConfig)>>>,
|
||||
configured_port_channels: Arc<Mutex<Vec<(Id, HostNetworkConfig)>>>,
|
||||
switch_setup: Arc<Mutex<bool>>,
|
||||
network_manager_setup: Arc<Mutex<bool>>,
|
||||
configured_bonds: Arc<Mutex<Vec<(Id, HostNetworkConfig)>>>,
|
||||
}
|
||||
|
||||
impl TopologyWithSwitch {
|
||||
fn new() -> Self {
|
||||
Self {
|
||||
available_ports: Arc::new(Mutex::new(vec![PORT.clone(), ANOTHER_PORT.clone()])),
|
||||
configured_host_networks: Arc::new(Mutex::new(vec![])),
|
||||
available_ports: Arc::new(Mutex::new(vec![
|
||||
PORT.clone(),
|
||||
ANOTHER_PORT.clone(),
|
||||
YET_ANOTHER_PORT.clone(),
|
||||
LAST_PORT.clone(),
|
||||
])),
|
||||
configured_port_channels: Arc::new(Mutex::new(vec![])),
|
||||
switch_setup: Arc::new(Mutex::new(false)),
|
||||
network_manager_setup: Arc::new(Mutex::new(false)),
|
||||
configured_bonds: Arc::new(Mutex::new(vec![])),
|
||||
}
|
||||
}
|
||||
|
||||
fn new_port_not_found() -> Self {
|
||||
Self {
|
||||
available_ports: Arc::new(Mutex::new(vec![])),
|
||||
configured_host_networks: Arc::new(Mutex::new(vec![])),
|
||||
configured_port_channels: Arc::new(Mutex::new(vec![])),
|
||||
switch_setup: Arc::new(Mutex::new(false)),
|
||||
network_manager_setup: Arc::new(Mutex::new(false)),
|
||||
configured_bonds: Arc::new(Mutex::new(vec![])),
|
||||
}
|
||||
}
|
||||
|
||||
fn new_single_port_found() -> Self {
|
||||
Self {
|
||||
available_ports: Arc::new(Mutex::new(vec![PORT.clone()])),
|
||||
configured_port_channels: Arc::new(Mutex::new(vec![])),
|
||||
switch_setup: Arc::new(Mutex::new(false)),
|
||||
network_manager_setup: Arc::new(Mutex::new(false)),
|
||||
configured_bonds: Arc::new(Mutex::new(vec![])),
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -361,6 +648,22 @@ mod tests {
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl NetworkManager for TopologyWithSwitch {
|
||||
async fn ensure_network_manager_installed(&self) -> Result<(), NetworkError> {
|
||||
let mut network_manager_installed = self.network_manager_setup.lock().unwrap();
|
||||
*network_manager_installed = true;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn configure_bond(&self, config: &HostNetworkConfig) -> Result<(), NetworkError> {
|
||||
let mut configured_bonds = self.configured_bonds.lock().unwrap();
|
||||
configured_bonds.push((config.host_id.clone(), config.clone()));
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl Switch for TopologyWithSwitch {
|
||||
async fn setup_switch(&self) -> Result<(), SwitchError> {
|
||||
@@ -380,15 +683,23 @@ mod tests {
|
||||
Ok(Some(ports.remove(0)))
|
||||
}
|
||||
|
||||
async fn configure_host_network(
|
||||
async fn configure_port_channel(
|
||||
&self,
|
||||
host: &PhysicalHost,
|
||||
config: HostNetworkConfig,
|
||||
config: &HostNetworkConfig,
|
||||
) -> Result<(), SwitchError> {
|
||||
let mut configured_host_networks = self.configured_host_networks.lock().unwrap();
|
||||
configured_host_networks.push((host.id.clone(), config.clone()));
|
||||
let mut configured_port_channels = self.configured_port_channels.lock().unwrap();
|
||||
configured_port_channels.push((config.host_id.clone(), config.clone()));
|
||||
|
||||
Ok(())
|
||||
}
|
||||
async fn clear_port_channel(&self, ids: &Vec<Id>) -> Result<(), SwitchError> {
|
||||
todo!()
|
||||
}
|
||||
async fn configure_interface(
|
||||
&self,
|
||||
port_config: &Vec<PortConfig>,
|
||||
) -> Result<(), SwitchError> {
|
||||
todo!()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -50,7 +50,7 @@
|
||||
use crate::{
|
||||
modules::okd::{
|
||||
OKDSetup01InventoryScore, OKDSetup02BootstrapScore, OKDSetup03ControlPlaneScore,
|
||||
OKDSetup04WorkersScore, OKDSetup05SanityCheckScore,
|
||||
OKDSetup04WorkersScore, OKDSetup05SanityCheckScore, OKDSetupPersistNetworkBondScore,
|
||||
bootstrap_06_installation_report::OKDSetup06InstallationReportScore,
|
||||
},
|
||||
score::Score,
|
||||
@@ -65,6 +65,7 @@ impl OKDInstallationPipeline {
|
||||
Box::new(OKDSetup01InventoryScore::new()),
|
||||
Box::new(OKDSetup02BootstrapScore::new()),
|
||||
Box::new(OKDSetup03ControlPlaneScore::new()),
|
||||
Box::new(OKDSetupPersistNetworkBondScore::new()),
|
||||
Box::new(OKDSetup04WorkersScore::new()),
|
||||
Box::new(OKDSetup05SanityCheckScore::new()),
|
||||
Box::new(OKDSetup06InstallationReportScore::new()),
|
||||
|
||||
@@ -6,6 +6,7 @@ mod bootstrap_05_sanity_check;
|
||||
mod bootstrap_06_installation_report;
|
||||
pub mod bootstrap_dhcp;
|
||||
pub mod bootstrap_load_balancer;
|
||||
mod bootstrap_persist_network_bond;
|
||||
pub mod dhcp;
|
||||
pub mod dns;
|
||||
pub mod installation;
|
||||
@@ -19,5 +20,6 @@ pub use bootstrap_03_control_plane::*;
|
||||
pub use bootstrap_04_workers::*;
|
||||
pub use bootstrap_05_sanity_check::*;
|
||||
pub use bootstrap_06_installation_report::*;
|
||||
pub use bootstrap_persist_network_bond::*;
|
||||
pub mod crd;
|
||||
pub mod host_network;
|
||||
|
||||
@@ -12,7 +12,8 @@ use crate::modules::monitoring::kube_prometheus::crd::crd_alertmanager_config::C
|
||||
use crate::modules::monitoring::kube_prometheus::crd::crd_default_rules::build_default_application_rules;
|
||||
use crate::modules::monitoring::kube_prometheus::crd::crd_grafana::{
|
||||
Grafana, GrafanaDashboard, GrafanaDashboardSpec, GrafanaDatasource, GrafanaDatasourceConfig,
|
||||
GrafanaDatasourceSpec, GrafanaSpec,
|
||||
GrafanaDatasourceJsonData, GrafanaDatasourceSpec, GrafanaSecretKeyRef, GrafanaSpec,
|
||||
GrafanaValueFrom, GrafanaValueSource,
|
||||
};
|
||||
use crate::modules::monitoring::kube_prometheus::crd::crd_prometheus_rules::{
|
||||
PrometheusRule, PrometheusRuleSpec, RuleGroup,
|
||||
@@ -39,7 +40,7 @@ use crate::{
|
||||
};
|
||||
use harmony_types::id::Id;
|
||||
|
||||
use super::prometheus::PrometheusApplicationMonitoring;
|
||||
use super::prometheus::PrometheusMonitoring;
|
||||
|
||||
#[derive(Clone, Debug, Serialize)]
|
||||
pub struct K8sPrometheusCRDAlertingScore {
|
||||
@@ -49,7 +50,7 @@ pub struct K8sPrometheusCRDAlertingScore {
|
||||
pub prometheus_rules: Vec<RuleGroup>,
|
||||
}
|
||||
|
||||
impl<T: Topology + K8sclient + PrometheusApplicationMonitoring<CRDPrometheus>> Score<T>
|
||||
impl<T: Topology + K8sclient + PrometheusMonitoring<CRDPrometheus>> Score<T>
|
||||
for K8sPrometheusCRDAlertingScore
|
||||
{
|
||||
fn create_interpret(&self) -> Box<dyn crate::interpret::Interpret<T>> {
|
||||
@@ -75,7 +76,7 @@ pub struct K8sPrometheusCRDAlertingInterpret {
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl<T: Topology + K8sclient + PrometheusApplicationMonitoring<CRDPrometheus>> Interpret<T>
|
||||
impl<T: Topology + K8sclient + PrometheusMonitoring<CRDPrometheus>> Interpret<T>
|
||||
for K8sPrometheusCRDAlertingInterpret
|
||||
{
|
||||
async fn execute(
|
||||
@@ -466,10 +467,13 @@ impl K8sPrometheusCRDAlertingInterpret {
|
||||
match_labels: label.clone(),
|
||||
match_expressions: vec![],
|
||||
};
|
||||
let mut json_data = BTreeMap::new();
|
||||
json_data.insert("timeInterval".to_string(), "5s".to_string());
|
||||
let namespace = self.sender.namespace.clone();
|
||||
|
||||
let json_data = GrafanaDatasourceJsonData {
|
||||
time_interval: Some("5s".to_string()),
|
||||
http_header_name1: None,
|
||||
tls_skip_verify: Some(true),
|
||||
oauth_pass_thru: Some(true),
|
||||
};
|
||||
let json = build_default_dashboard(&namespace);
|
||||
|
||||
let graf_data_source = GrafanaDatasource {
|
||||
@@ -495,7 +499,11 @@ impl K8sPrometheusCRDAlertingInterpret {
|
||||
"http://prometheus-operated.{}.svc.cluster.local:9090",
|
||||
self.sender.namespace.clone()
|
||||
),
|
||||
secure_json_data: None,
|
||||
is_default: None,
|
||||
editable: None,
|
||||
},
|
||||
values_from: None,
|
||||
},
|
||||
};
|
||||
|
||||
@@ -516,7 +524,9 @@ impl K8sPrometheusCRDAlertingInterpret {
|
||||
spec: GrafanaDashboardSpec {
|
||||
resync_period: Some("30s".to_string()),
|
||||
instance_selector: labels.clone(),
|
||||
json,
|
||||
json: Some(json),
|
||||
grafana_com: None,
|
||||
datasources: None,
|
||||
},
|
||||
};
|
||||
|
||||
|
||||
@@ -9,11 +9,17 @@ use crate::{
|
||||
};
|
||||
|
||||
#[async_trait]
|
||||
pub trait PrometheusApplicationMonitoring<S: AlertSender> {
|
||||
pub trait PrometheusMonitoring<S: AlertSender> {
|
||||
async fn install_prometheus(
|
||||
&self,
|
||||
sender: &S,
|
||||
inventory: &Inventory,
|
||||
receivers: Option<Vec<Box<dyn AlertReceiver<S>>>>,
|
||||
) -> Result<PreparationOutcome, PreparationError>;
|
||||
|
||||
async fn ensure_prometheus_operator(
|
||||
&self,
|
||||
sender: &S,
|
||||
inventory: &Inventory,
|
||||
) -> Result<PreparationOutcome, PreparationError>;
|
||||
}
|
||||
|
||||
@@ -38,7 +38,7 @@ use crate::{
|
||||
};
|
||||
use harmony_types::id::Id;
|
||||
|
||||
use super::prometheus::PrometheusApplicationMonitoring;
|
||||
use super::prometheus::PrometheusMonitoring;
|
||||
|
||||
#[derive(Clone, Debug, Serialize)]
|
||||
pub struct RHOBAlertingScore {
|
||||
@@ -48,8 +48,8 @@ pub struct RHOBAlertingScore {
|
||||
pub prometheus_rules: Vec<RuleGroup>,
|
||||
}
|
||||
|
||||
impl<T: Topology + K8sclient + Ingress + PrometheusApplicationMonitoring<RHOBObservability>>
|
||||
Score<T> for RHOBAlertingScore
|
||||
impl<T: Topology + K8sclient + Ingress + PrometheusMonitoring<RHOBObservability>> Score<T>
|
||||
for RHOBAlertingScore
|
||||
{
|
||||
fn create_interpret(&self) -> Box<dyn crate::interpret::Interpret<T>> {
|
||||
Box::new(RHOBAlertingInterpret {
|
||||
@@ -74,8 +74,8 @@ pub struct RHOBAlertingInterpret {
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl<T: Topology + K8sclient + Ingress + PrometheusApplicationMonitoring<RHOBObservability>>
|
||||
Interpret<T> for RHOBAlertingInterpret
|
||||
impl<T: Topology + K8sclient + Ingress + PrometheusMonitoring<RHOBObservability>> Interpret<T>
|
||||
for RHOBAlertingInterpret
|
||||
{
|
||||
async fn execute(
|
||||
&self,
|
||||
|
||||
@@ -40,7 +40,7 @@ pub fn init() {
|
||||
HarmonyEvent::HarmonyFinished => {
|
||||
if !details.is_empty() {
|
||||
println!(
|
||||
"\n{} All done! Here's what's next for you:",
|
||||
"\n{} All done! Here's a few info for you:",
|
||||
theme::EMOJI_SUMMARY
|
||||
);
|
||||
for detail in details.iter() {
|
||||
|
||||
11
harmony_inventory_agent/build_docker.sh
Executable file
11
harmony_inventory_agent/build_docker.sh
Executable file
@@ -0,0 +1,11 @@
|
||||
cargo build -p harmony_inventory_agent --release --target x86_64-unknown-linux-musl
|
||||
|
||||
SCRIPT_DIR="$(dirname ${0})"
|
||||
|
||||
cd "${SCRIPT_DIR}/docker/"
|
||||
|
||||
cp ../../target/x86_64-unknown-linux-musl/release/harmony_inventory_agent .
|
||||
|
||||
docker build . -t hub.nationtech.io/harmony/harmony_inventory_agent
|
||||
|
||||
docker push hub.nationtech.io/harmony/harmony_inventory_agent
|
||||
1
harmony_inventory_agent/docker/.gitignore
vendored
Normal file
1
harmony_inventory_agent/docker/.gitignore
vendored
Normal file
@@ -0,0 +1 @@
|
||||
harmony_inventory_agent
|
||||
17
harmony_inventory_agent/docker/Dockerfile
Normal file
17
harmony_inventory_agent/docker/Dockerfile
Normal file
@@ -0,0 +1,17 @@
|
||||
FROM debian:12-slim
|
||||
|
||||
# install packages required to make these commands available : lspci, lsmod, dmidecode, smartctl, ip
|
||||
RUN apt-get update && \
|
||||
apt-get install -y --no-install-recommends pciutils kmod dmidecode smartmontools iproute2 && \
|
||||
rm -rf /var/lib/apt/lists/*
|
||||
|
||||
|
||||
RUN mkdir /app
|
||||
WORKDIR /app/
|
||||
|
||||
COPY harmony_inventory_agent /app/
|
||||
|
||||
ENV RUST_LOG=info
|
||||
|
||||
CMD [ "/app/harmony_inventory_agent" ]
|
||||
|
||||
117
harmony_inventory_agent/harmony-inventory-agent-daemonset.yaml
Normal file
117
harmony_inventory_agent/harmony-inventory-agent-daemonset.yaml
Normal file
@@ -0,0 +1,117 @@
|
||||
apiVersion: v1
|
||||
kind: Namespace
|
||||
metadata:
|
||||
name: harmony-inventory-agent
|
||||
labels:
|
||||
pod-security.kubernetes.io/enforce: privileged
|
||||
pod-security.kubernetes.io/audit: privileged
|
||||
pod-security.kubernetes.io/warn: privileged
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: harmony-inventory-agent
|
||||
namespace: harmony-inventory-agent
|
||||
---
|
||||
# Grant the built-in "privileged" SCC to the SA
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: Role
|
||||
metadata:
|
||||
name: use-privileged-scc
|
||||
namespace: harmony-inventory-agent
|
||||
rules:
|
||||
- apiGroups: ["security.openshift.io"]
|
||||
resources: ["securitycontextconstraints"]
|
||||
resourceNames: ["privileged"]
|
||||
verbs: ["use"]
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: RoleBinding
|
||||
metadata:
|
||||
name: use-privileged-scc
|
||||
namespace: harmony-inventory-agent
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: harmony-inventory-agent
|
||||
namespace: harmony-inventory-agent
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: Role
|
||||
name: use-privileged-scc
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
kind: DaemonSet
|
||||
metadata:
|
||||
name: harmony-inventory-agent
|
||||
namespace: harmony-inventory-agent
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
app: harmony-inventory-agent
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: harmony-inventory-agent
|
||||
spec:
|
||||
serviceAccountName: harmony-inventory-agent
|
||||
hostNetwork: true
|
||||
dnsPolicy: ClusterFirstWithHostNet
|
||||
tolerations:
|
||||
- key: "node-role.kubernetes.io/master"
|
||||
operator: "Exists"
|
||||
effect: "NoSchedule"
|
||||
containers:
|
||||
- name: inventory-agent
|
||||
image: hub.nationtech.io/harmony/harmony_inventory_agent
|
||||
imagePullPolicy: Always
|
||||
env:
|
||||
- name: RUST_LOG
|
||||
value: "harmony_inventory_agent=trace,info"
|
||||
resources:
|
||||
limits:
|
||||
cpu: 200m
|
||||
memory: 256Mi
|
||||
requests:
|
||||
cpu: 100m
|
||||
memory: 128Mi
|
||||
securityContext:
|
||||
privileged: true
|
||||
# optional: leave the rest unset since privileged SCC allows it
|
||||
#
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: harmony-inventory-builder
|
||||
namespace: harmony-inventory-agent
|
||||
spec:
|
||||
replicas: 1
|
||||
strategy: {}
|
||||
selector:
|
||||
matchLabels:
|
||||
app: harmony-inventory-builder
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: harmony-inventory-builder
|
||||
spec:
|
||||
serviceAccountName: harmony-inventory-agent
|
||||
hostNetwork: true
|
||||
dnsPolicy: ClusterFirstWithHostNet
|
||||
containers:
|
||||
- name: inventory-agent
|
||||
image: hub.nationtech.io/harmony/harmony_inventory_builder
|
||||
imagePullPolicy: Always
|
||||
env:
|
||||
- name: RUST_LOG
|
||||
value: "harmony_inventory_builder=trace,info"
|
||||
resources:
|
||||
limits:
|
||||
cpu: 200m
|
||||
memory: 256Mi
|
||||
requests:
|
||||
cpu: 100m
|
||||
memory: 128Mi
|
||||
securityContext:
|
||||
privileged: true
|
||||
# optional: leave the rest unset since privileged SCC allows it
|
||||
@@ -1,5 +1,5 @@
|
||||
use harmony_types::net::MacAddress;
|
||||
use log::{debug, warn};
|
||||
use log::{debug, trace, warn};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use serde_json::Value;
|
||||
use std::fs;
|
||||
@@ -121,20 +121,48 @@ pub struct ManagementInterface {
|
||||
|
||||
impl PhysicalHost {
|
||||
pub fn gather() -> Result<Self, String> {
|
||||
trace!("Start gathering physical host information");
|
||||
let mut sys = System::new_all();
|
||||
trace!("System new_all called");
|
||||
sys.refresh_all();
|
||||
trace!("System refresh_all called");
|
||||
|
||||
Self::all_tools_available()?;
|
||||
|
||||
trace!("All tools_available success");
|
||||
|
||||
let storage_drives = Self::gather_storage_drives()?;
|
||||
trace!("got storage drives");
|
||||
|
||||
let storage_controller = Self::gather_storage_controller()?;
|
||||
trace!("got storage controller");
|
||||
|
||||
let memory_modules = Self::gather_memory_modules()?;
|
||||
trace!("got memory_modules");
|
||||
|
||||
let cpus = Self::gather_cpus(&sys)?;
|
||||
trace!("got cpus");
|
||||
|
||||
let chipset = Self::gather_chipset()?;
|
||||
trace!("got chipsets");
|
||||
|
||||
let network_interfaces = Self::gather_network_interfaces()?;
|
||||
trace!("got network_interfaces");
|
||||
|
||||
let management_interface = Self::gather_management_interface()?;
|
||||
trace!("got management_interface");
|
||||
|
||||
let host_uuid = Self::get_host_uuid()?;
|
||||
|
||||
Ok(Self {
|
||||
storage_drives: Self::gather_storage_drives()?,
|
||||
storage_controller: Self::gather_storage_controller()?,
|
||||
memory_modules: Self::gather_memory_modules()?,
|
||||
cpus: Self::gather_cpus(&sys)?,
|
||||
chipset: Self::gather_chipset()?,
|
||||
network_interfaces: Self::gather_network_interfaces()?,
|
||||
management_interface: Self::gather_management_interface()?,
|
||||
host_uuid: Self::get_host_uuid()?,
|
||||
storage_drives,
|
||||
storage_controller,
|
||||
memory_modules,
|
||||
cpus,
|
||||
chipset,
|
||||
network_interfaces,
|
||||
management_interface,
|
||||
host_uuid,
|
||||
})
|
||||
}
|
||||
|
||||
@@ -208,6 +236,8 @@ impl PhysicalHost {
|
||||
));
|
||||
}
|
||||
|
||||
debug!("All tools found!");
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@@ -231,7 +261,10 @@ impl PhysicalHost {
|
||||
fn gather_storage_drives() -> Result<Vec<StorageDrive>, String> {
|
||||
let mut drives = Vec::new();
|
||||
|
||||
trace!("Starting storage drive discovery using lsblk");
|
||||
|
||||
// Use lsblk with JSON output for robust parsing
|
||||
trace!("Executing 'lsblk -d -o NAME,MODEL,SERIAL,SIZE,ROTA,WWN -n -e 7 --json'");
|
||||
let output = Command::new("lsblk")
|
||||
.args([
|
||||
"-d",
|
||||
@@ -245,13 +278,18 @@ impl PhysicalHost {
|
||||
.output()
|
||||
.map_err(|e| format!("Failed to execute lsblk: {}", e))?;
|
||||
|
||||
trace!(
|
||||
"lsblk command executed successfully (status: {:?})",
|
||||
output.status
|
||||
);
|
||||
|
||||
if !output.status.success() {
|
||||
return Err(format!(
|
||||
"lsblk command failed: {}",
|
||||
String::from_utf8_lossy(&output.stderr)
|
||||
));
|
||||
let stderr_str = String::from_utf8_lossy(&output.stderr);
|
||||
debug!("lsblk command failed: {stderr_str}");
|
||||
return Err(format!("lsblk command failed: {stderr_str}"));
|
||||
}
|
||||
|
||||
trace!("Parsing lsblk JSON output");
|
||||
let json: Value = serde_json::from_slice(&output.stdout)
|
||||
.map_err(|e| format!("Failed to parse lsblk JSON output: {}", e))?;
|
||||
|
||||
@@ -260,6 +298,8 @@ impl PhysicalHost {
|
||||
.and_then(|v| v.as_array())
|
||||
.ok_or("Invalid lsblk JSON: missing 'blockdevices' array")?;
|
||||
|
||||
trace!("Found {} blockdevices in lsblk output", blockdevices.len());
|
||||
|
||||
for device in blockdevices {
|
||||
let name = device
|
||||
.get("name")
|
||||
@@ -268,52 +308,72 @@ impl PhysicalHost {
|
||||
.to_string();
|
||||
|
||||
if name.is_empty() {
|
||||
trace!("Skipping unnamed device entry: {:?}", device);
|
||||
continue;
|
||||
}
|
||||
|
||||
trace!("Inspecting block device: {name}");
|
||||
|
||||
// Extract metadata fields
|
||||
let model = device
|
||||
.get("model")
|
||||
.and_then(|v| v.as_str())
|
||||
.map(|s| s.trim().to_string())
|
||||
.unwrap_or_default();
|
||||
trace!("Model for {name}: '{}'", model);
|
||||
|
||||
let serial = device
|
||||
.get("serial")
|
||||
.and_then(|v| v.as_str())
|
||||
.map(|s| s.trim().to_string())
|
||||
.unwrap_or_default();
|
||||
trace!("Serial for {name}: '{}'", serial);
|
||||
|
||||
let size_str = device
|
||||
.get("size")
|
||||
.and_then(|v| v.as_str())
|
||||
.ok_or("Missing 'size' in lsblk device")?;
|
||||
trace!("Reported size for {name}: {}", size_str);
|
||||
let size_bytes = Self::parse_size(size_str)?;
|
||||
trace!("Parsed size for {name}: {} bytes", size_bytes);
|
||||
|
||||
let rotational = device
|
||||
.get("rota")
|
||||
.and_then(|v| v.as_bool())
|
||||
.ok_or("Missing 'rota' in lsblk device")?;
|
||||
trace!("Rotational flag for {name}: {}", rotational);
|
||||
|
||||
let wwn = device
|
||||
.get("wwn")
|
||||
.and_then(|v| v.as_str())
|
||||
.map(|s| s.trim().to_string())
|
||||
.filter(|s| !s.is_empty() && s != "null");
|
||||
trace!("WWN for {name}: {:?}", wwn);
|
||||
|
||||
let device_path = Path::new("/sys/block").join(&name);
|
||||
trace!("Sysfs path for {name}: {:?}", device_path);
|
||||
|
||||
trace!("Reading logical block size for {name}");
|
||||
let logical_block_size = Self::read_sysfs_u32(
|
||||
&device_path.join("queue/logical_block_size"),
|
||||
)
|
||||
.map_err(|e| format!("Failed to read logical block size for {}: {}", name, e))?;
|
||||
trace!("Logical block size for {name}: {}", logical_block_size);
|
||||
|
||||
trace!("Reading physical block size for {name}");
|
||||
let physical_block_size = Self::read_sysfs_u32(
|
||||
&device_path.join("queue/physical_block_size"),
|
||||
)
|
||||
.map_err(|e| format!("Failed to read physical block size for {}: {}", name, e))?;
|
||||
trace!("Physical block size for {name}: {}", physical_block_size);
|
||||
|
||||
trace!("Determining interface type for {name}");
|
||||
let interface_type = Self::get_interface_type(&name, &device_path)?;
|
||||
trace!("Interface type for {name}: {}", interface_type);
|
||||
|
||||
trace!("Getting SMART status for {name}");
|
||||
let smart_status = Self::get_smart_status(&name)?;
|
||||
trace!("SMART status for {name}: {:?}", smart_status);
|
||||
|
||||
let mut drive = StorageDrive {
|
||||
name: name.clone(),
|
||||
@@ -330,19 +390,31 @@ impl PhysicalHost {
|
||||
|
||||
// Enhance with additional sysfs info if available
|
||||
if device_path.exists() {
|
||||
trace!("Enhancing drive {name} with extra sysfs metadata");
|
||||
if drive.model.is_empty() {
|
||||
trace!("Reading model from sysfs for {name}");
|
||||
drive.model = Self::read_sysfs_string(&device_path.join("device/model"))
|
||||
.unwrap_or(format!("Failed to read model for {}", name));
|
||||
.unwrap_or_else(|_| format!("Failed to read model for {}", name));
|
||||
}
|
||||
if drive.serial.is_empty() {
|
||||
trace!("Reading serial from sysfs for {name}");
|
||||
drive.serial = Self::read_sysfs_string(&device_path.join("device/serial"))
|
||||
.unwrap_or(format!("Failed to read serial for {}", name));
|
||||
.unwrap_or_else(|_| format!("Failed to read serial for {}", name));
|
||||
}
|
||||
} else {
|
||||
trace!(
|
||||
"Sysfs path {:?} not found for drive {name}, skipping extra metadata",
|
||||
device_path
|
||||
);
|
||||
}
|
||||
|
||||
debug!("Discovered storage drive: {drive:?}");
|
||||
drives.push(drive);
|
||||
}
|
||||
|
||||
debug!("Discovered total {} storage drives", drives.len());
|
||||
trace!("All discovered dives: {drives:?}");
|
||||
|
||||
Ok(drives)
|
||||
}
|
||||
|
||||
@@ -418,6 +490,8 @@ impl PhysicalHost {
|
||||
}
|
||||
}
|
||||
|
||||
debug!("Found storage controller {controller:?}");
|
||||
|
||||
Ok(controller)
|
||||
}
|
||||
|
||||
@@ -486,6 +560,7 @@ impl PhysicalHost {
|
||||
}
|
||||
}
|
||||
|
||||
debug!("Found memory modules {modules:?}");
|
||||
Ok(modules)
|
||||
}
|
||||
|
||||
@@ -501,22 +576,30 @@ impl PhysicalHost {
|
||||
frequency_mhz: global_cpu.frequency(),
|
||||
});
|
||||
|
||||
debug!("Found cpus {cpus:?}");
|
||||
|
||||
Ok(cpus)
|
||||
}
|
||||
|
||||
fn gather_chipset() -> Result<Chipset, String> {
|
||||
Ok(Chipset {
|
||||
let chipset = Chipset {
|
||||
name: Self::read_dmi("baseboard-product-name")?,
|
||||
vendor: Self::read_dmi("baseboard-manufacturer")?,
|
||||
})
|
||||
};
|
||||
|
||||
debug!("Found chipset {chipset:?}");
|
||||
|
||||
Ok(chipset)
|
||||
}
|
||||
|
||||
fn gather_network_interfaces() -> Result<Vec<NetworkInterface>, String> {
|
||||
let mut interfaces = Vec::new();
|
||||
let sys_net_path = Path::new("/sys/class/net");
|
||||
trace!("Reading /sys/class/net");
|
||||
|
||||
let entries = fs::read_dir(sys_net_path)
|
||||
.map_err(|e| format!("Failed to read /sys/class/net: {}", e))?;
|
||||
trace!("Got entries {entries:?}");
|
||||
|
||||
for entry in entries {
|
||||
let entry = entry.map_err(|e| format!("Failed to read directory entry: {}", e))?;
|
||||
@@ -525,6 +608,7 @@ impl PhysicalHost {
|
||||
.into_string()
|
||||
.map_err(|_| "Invalid UTF-8 in interface name")?;
|
||||
let iface_path = entry.path();
|
||||
trace!("Inspecting interface {iface_name} path {iface_path:?}");
|
||||
|
||||
// Skip virtual interfaces
|
||||
if iface_name.starts_with("lo")
|
||||
@@ -535,70 +619,101 @@ impl PhysicalHost {
|
||||
|| iface_name.starts_with("tun")
|
||||
|| iface_name.starts_with("wg")
|
||||
{
|
||||
trace!(
|
||||
"Skipping interface {iface_name} because it appears to be virtual/unsupported"
|
||||
);
|
||||
continue;
|
||||
}
|
||||
|
||||
// Check if it's a physical interface by looking for device directory
|
||||
if !iface_path.join("device").exists() {
|
||||
trace!(
|
||||
"Skipping interface {iface_name} since {iface_path:?}/device does not exist"
|
||||
);
|
||||
continue;
|
||||
}
|
||||
|
||||
trace!("Reading MAC address for {iface_name}");
|
||||
let mac_address = Self::read_sysfs_string(&iface_path.join("address"))
|
||||
.map_err(|e| format!("Failed to read MAC address for {}: {}", iface_name, e))?;
|
||||
let mac_address = MacAddress::try_from(mac_address).map_err(|e| e.to_string())?;
|
||||
trace!("MAC address for {iface_name}: {mac_address}");
|
||||
|
||||
let speed_mbps = if iface_path.join("speed").exists() {
|
||||
match Self::read_sysfs_u32(&iface_path.join("speed")) {
|
||||
Ok(speed) => Some(speed),
|
||||
let speed_path = iface_path.join("speed");
|
||||
let speed_mbps = if speed_path.exists() {
|
||||
trace!("Reading speed for {iface_name} from {:?}", speed_path);
|
||||
match Self::read_sysfs_u32(&speed_path) {
|
||||
Ok(speed) => {
|
||||
trace!("Speed for {iface_name}: {speed} Mbps");
|
||||
Some(speed)
|
||||
}
|
||||
Err(e) => {
|
||||
debug!(
|
||||
"Failed to read speed for {}: {} . This is expected to fail on wifi interfaces.",
|
||||
"Failed to read speed for {}: {} (this may be expected on Wi‑Fi interfaces)",
|
||||
iface_name, e
|
||||
);
|
||||
None
|
||||
}
|
||||
}
|
||||
} else {
|
||||
trace!("Speed file not found for {iface_name}, skipping");
|
||||
None
|
||||
};
|
||||
|
||||
trace!("Reading operstate for {iface_name}");
|
||||
let operstate = Self::read_sysfs_string(&iface_path.join("operstate"))
|
||||
.map_err(|e| format!("Failed to read operstate for {}: {}", iface_name, e))?;
|
||||
trace!("Operstate for {iface_name}: {operstate}");
|
||||
|
||||
trace!("Reading MTU for {iface_name}");
|
||||
let mtu = Self::read_sysfs_u32(&iface_path.join("mtu"))
|
||||
.map_err(|e| format!("Failed to read MTU for {}: {}", iface_name, e))?;
|
||||
trace!("MTU for {iface_name}: {mtu}");
|
||||
|
||||
trace!("Reading driver for {iface_name}");
|
||||
let driver =
|
||||
Self::read_sysfs_symlink_basename(&iface_path.join("device/driver/module"))
|
||||
.map_err(|e| format!("Failed to read driver for {}: {}", iface_name, e))?;
|
||||
trace!("Driver for {iface_name}: {driver}");
|
||||
|
||||
trace!("Reading firmware version for {iface_name}");
|
||||
let firmware_version = Self::read_sysfs_opt_string(
|
||||
&iface_path.join("device/firmware_version"),
|
||||
)
|
||||
.map_err(|e| format!("Failed to read firmware version for {}: {}", iface_name, e))?;
|
||||
trace!("Firmware version for {iface_name}: {firmware_version:?}");
|
||||
|
||||
// Get IP addresses using ip command with JSON output
|
||||
trace!("Fetching IP addresses for {iface_name}");
|
||||
let (ipv4_addresses, ipv6_addresses) = Self::get_interface_ips_json(&iface_name)
|
||||
.map_err(|e| format!("Failed to get IP addresses for {}: {}", iface_name, e))?;
|
||||
trace!("Interface {iface_name} has IPv4: {ipv4_addresses:?}, IPv6: {ipv6_addresses:?}");
|
||||
|
||||
interfaces.push(NetworkInterface {
|
||||
name: iface_name,
|
||||
let is_up = operstate == "up";
|
||||
trace!("Constructing NetworkInterface for {iface_name} (is_up={is_up})");
|
||||
|
||||
let iface = NetworkInterface {
|
||||
name: iface_name.clone(),
|
||||
mac_address,
|
||||
speed_mbps,
|
||||
is_up: operstate == "up",
|
||||
is_up,
|
||||
mtu,
|
||||
ipv4_addresses,
|
||||
ipv6_addresses,
|
||||
driver,
|
||||
firmware_version,
|
||||
});
|
||||
};
|
||||
|
||||
debug!("Discovered interface: {iface:?}");
|
||||
interfaces.push(iface);
|
||||
}
|
||||
|
||||
debug!("Discovered total {} network interfaces", interfaces.len());
|
||||
trace!("Interfaces collected: {interfaces:?}");
|
||||
Ok(interfaces)
|
||||
}
|
||||
|
||||
fn gather_management_interface() -> Result<Option<ManagementInterface>, String> {
|
||||
if Path::new("/dev/ipmi0").exists() {
|
||||
let mgmt = if Path::new("/dev/ipmi0").exists() {
|
||||
Ok(Some(ManagementInterface {
|
||||
kind: "IPMI".to_string(),
|
||||
address: None,
|
||||
@@ -612,11 +727,16 @@ impl PhysicalHost {
|
||||
}))
|
||||
} else {
|
||||
Ok(None)
|
||||
}
|
||||
};
|
||||
|
||||
debug!("Found management interface {mgmt:?}");
|
||||
mgmt
|
||||
}
|
||||
|
||||
fn get_host_uuid() -> Result<String, String> {
|
||||
Self::read_dmi("system-uuid")
|
||||
let uuid = Self::read_dmi("system-uuid");
|
||||
debug!("Found uuid {uuid:?}");
|
||||
uuid
|
||||
}
|
||||
|
||||
// Helper methods
|
||||
@@ -709,7 +829,8 @@ impl PhysicalHost {
|
||||
Ok("Ramdisk".to_string())
|
||||
} else {
|
||||
// Try to determine from device path
|
||||
let subsystem = Self::read_sysfs_string(&device_path.join("device/subsystem"))?;
|
||||
let subsystem = Self::read_sysfs_string(&device_path.join("device/subsystem"))
|
||||
.unwrap_or(String::new());
|
||||
Ok(subsystem
|
||||
.split('/')
|
||||
.next_back()
|
||||
@@ -779,6 +900,8 @@ impl PhysicalHost {
|
||||
size.map(|s| s as u64)
|
||||
}
|
||||
|
||||
// FIXME when scanning an interface that is part of a bond/bridge we won't get an address on the
|
||||
// interface, we should be looking at the bond/bridge device. For example, br-ex on k8s nodes.
|
||||
fn get_interface_ips_json(iface_name: &str) -> Result<(Vec<String>, Vec<String>), String> {
|
||||
let mut ipv4 = Vec::new();
|
||||
let mut ipv6 = Vec::new();
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
use log::{debug, error, info, warn};
|
||||
use log::{debug, error, info, trace, warn};
|
||||
use mdns_sd::{ServiceDaemon, ServiceInfo};
|
||||
use std::collections::HashMap;
|
||||
|
||||
@@ -12,6 +12,7 @@ use crate::{
|
||||
/// This function is synchronous and non-blocking. It spawns a background Tokio task
|
||||
/// to handle the mDNS advertisement for the lifetime of the application.
|
||||
pub fn advertise(service_port: u16) -> Result<(), PresenceError> {
|
||||
trace!("starting advertisement process for port {service_port}");
|
||||
let host_id = match PhysicalHost::gather() {
|
||||
Ok(host) => Some(host.host_uuid),
|
||||
Err(e) => {
|
||||
@@ -20,11 +21,15 @@ pub fn advertise(service_port: u16) -> Result<(), PresenceError> {
|
||||
}
|
||||
};
|
||||
|
||||
trace!("Found host id {host_id:?}");
|
||||
|
||||
let instance_name = format!(
|
||||
"inventory-agent-{}",
|
||||
host_id.clone().unwrap_or("unknown".to_string())
|
||||
);
|
||||
|
||||
trace!("Found host id {host_id:?}, name : {instance_name}");
|
||||
|
||||
let spawned_msg = format!("Spawned local presence advertisement task for '{instance_name}'.");
|
||||
|
||||
tokio::spawn(async move {
|
||||
|
||||
@@ -28,7 +28,7 @@ async fn inventory() -> impl Responder {
|
||||
async fn main() -> std::io::Result<()> {
|
||||
env_logger::init();
|
||||
|
||||
let port = env::var("HARMONY_INVENTORY_AGENT_PORT").unwrap_or_else(|_| "8080".to_string());
|
||||
let port = env::var("HARMONY_INVENTORY_AGENT_PORT").unwrap_or_else(|_| "25000".to_string());
|
||||
let port = port
|
||||
.parse::<u16>()
|
||||
.expect(&format!("Invalid port number, cannot parse to u16 {port}"));
|
||||
|
||||
@@ -9,3 +9,4 @@ license.workspace = true
|
||||
serde.workspace = true
|
||||
url.workspace = true
|
||||
rand.workspace = true
|
||||
log.workspace = true
|
||||
|
||||
@@ -1,4 +1,6 @@
|
||||
use std::{fmt, str::FromStr};
|
||||
use log::trace;
|
||||
use serde::Serialize;
|
||||
|
||||
/// Simple error type for port parsing failures.
|
||||
#[derive(Debug)]
|
||||
@@ -21,7 +23,7 @@ impl fmt::Display for PortParseError {
|
||||
/// Represents the atomic, physical location of a switch port: `<Stack>/<Module>/<Port>`.
|
||||
///
|
||||
/// Example: `1/1/1`
|
||||
#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Clone)]
|
||||
#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Clone, Serialize)]
|
||||
pub struct PortLocation(pub u8, pub u8, pub u8);
|
||||
|
||||
impl fmt::Display for PortLocation {
|
||||
@@ -70,6 +72,11 @@ impl FromStr for PortLocation {
|
||||
pub enum PortDeclaration {
|
||||
/// A single switch port defined by its location. Example: `PortDeclaration::Single(1/1/1)`
|
||||
Single(PortLocation),
|
||||
/// A Named port, often used for virtual ports such as PortChannels. Example
|
||||
/// ```rust
|
||||
/// PortDeclaration::Named("1".to_string())
|
||||
/// ```
|
||||
Named(String),
|
||||
/// A strictly sequential range defined by two endpoints using the hyphen separator (`-`).
|
||||
/// All ports between the endpoints (inclusive) are implicitly included.
|
||||
/// Example: `PortDeclaration::Range(1/1/1, 1/1/4)`
|
||||
@@ -130,8 +137,14 @@ impl PortDeclaration {
|
||||
return Ok(PortDeclaration::Set(start_port, end_port));
|
||||
}
|
||||
|
||||
let location = PortLocation::from_str(port_str)?;
|
||||
Ok(PortDeclaration::Single(location))
|
||||
match PortLocation::from_str(port_str) {
|
||||
Ok(loc) => Ok(PortDeclaration::Single(loc)),
|
||||
Err(e) => {
|
||||
trace!("Failed to parse PortLocation {port_str} : {e}");
|
||||
trace!("Falling back on named port");
|
||||
Ok(PortDeclaration::Named(port_str.to_string()))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -141,6 +154,7 @@ impl fmt::Display for PortDeclaration {
|
||||
PortDeclaration::Single(port) => write!(f, "{port}"),
|
||||
PortDeclaration::Range(start, end) => write!(f, "{start}-{end}"),
|
||||
PortDeclaration::Set(start, end) => write!(f, "{start}*{end}"),
|
||||
PortDeclaration::Named(name) => write!(f, "{name}"),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -9,7 +9,7 @@ pub struct Interface {
|
||||
pub physical_interface_name: String,
|
||||
pub descr: Option<MaybeString>,
|
||||
pub mtu: Option<MaybeString>,
|
||||
pub enable: MaybeString,
|
||||
pub enable: Option<MaybeString>,
|
||||
pub lock: Option<MaybeString>,
|
||||
#[yaserde(rename = "spoofmac")]
|
||||
pub spoof_mac: Option<MaybeString>,
|
||||
@@ -134,19 +134,15 @@ mod test {
|
||||
<interfaces>
|
||||
<paul>
|
||||
<if></if>
|
||||
<enable/>
|
||||
</paul>
|
||||
<anotherpaul>
|
||||
<if></if>
|
||||
<enable/>
|
||||
</anotherpaul>
|
||||
<thirdone>
|
||||
<if></if>
|
||||
<enable/>
|
||||
</thirdone>
|
||||
<andgofor4>
|
||||
<if></if>
|
||||
<enable/>
|
||||
</andgofor4>
|
||||
</interfaces>
|
||||
<bar>foo</bar>
|
||||
|
||||
@@ -195,7 +195,7 @@ pub struct System {
|
||||
pub disablechecksumoffloading: u8,
|
||||
pub disablesegmentationoffloading: u8,
|
||||
pub disablelargereceiveoffloading: u8,
|
||||
pub ipv6allow: u8,
|
||||
pub ipv6allow: Option<u8>,
|
||||
pub powerd_ac_mode: String,
|
||||
pub powerd_battery_mode: String,
|
||||
pub powerd_normal_mode: String,
|
||||
@@ -226,6 +226,7 @@ pub struct System {
|
||||
pub dns6gw: Option<String>,
|
||||
pub dns7gw: Option<String>,
|
||||
pub dns8gw: Option<String>,
|
||||
pub prefer_ipv4: Option<String>,
|
||||
pub dnsallowoverride: u8,
|
||||
pub dnsallowoverride_exclude: Option<MaybeString>,
|
||||
}
|
||||
@@ -329,6 +330,7 @@ pub struct Range {
|
||||
pub struct StaticMap {
|
||||
pub mac: String,
|
||||
pub ipaddr: String,
|
||||
pub cid: Option<MaybeString>,
|
||||
pub hostname: String,
|
||||
pub descr: Option<MaybeString>,
|
||||
pub winsserver: MaybeString,
|
||||
@@ -764,9 +766,19 @@ pub struct Jobs {
|
||||
pub struct Job {
|
||||
#[yaserde(attribute = true)]
|
||||
pub uuid: MaybeString,
|
||||
#[yaserde(rename = "name")]
|
||||
pub name: MaybeString,
|
||||
pub name: Option<MaybeString>,
|
||||
// Add other fields as needed
|
||||
pub origin: Option<MaybeString>,
|
||||
pub enabled: Option<MaybeString>,
|
||||
pub minutes: Option<MaybeString>,
|
||||
pub hours: Option<MaybeString>,
|
||||
pub days: Option<MaybeString>,
|
||||
pub months: Option<MaybeString>,
|
||||
pub weekdays: Option<MaybeString>,
|
||||
pub who: Option<MaybeString>,
|
||||
pub command: Option<MaybeString>,
|
||||
pub parameters: Option<MaybeString>,
|
||||
pub description: Option<MaybeString>,
|
||||
}
|
||||
|
||||
#[derive(Default, PartialEq, Debug, YaSerialize, YaDeserialize)]
|
||||
@@ -895,28 +907,28 @@ pub struct Proxy {
|
||||
#[derive(Default, PartialEq, Debug, YaSerialize, YaDeserialize)]
|
||||
pub struct ProxyGeneral {
|
||||
pub enabled: i8,
|
||||
pub error_pages: String,
|
||||
pub error_pages: Option<MaybeString>,
|
||||
#[yaserde(rename = "icpPort")]
|
||||
pub icp_port: MaybeString,
|
||||
pub logging: Logging,
|
||||
#[yaserde(rename = "alternateDNSservers")]
|
||||
pub alternate_dns_servers: MaybeString,
|
||||
#[yaserde(rename = "dnsV4First")]
|
||||
pub dns_v4_first: i8,
|
||||
pub dns_v4_first: Option<MaybeString>,
|
||||
#[yaserde(rename = "forwardedForHandling")]
|
||||
pub forwarded_for_handling: String,
|
||||
pub forwarded_for_handling: Option<MaybeString>,
|
||||
#[yaserde(rename = "uriWhitespaceHandling")]
|
||||
pub uri_whitespace_handling: String,
|
||||
pub uri_whitespace_handling: Option<MaybeString>,
|
||||
#[yaserde(rename = "enablePinger")]
|
||||
pub enable_pinger: i8,
|
||||
#[yaserde(rename = "useViaHeader")]
|
||||
pub use_via_header: i8,
|
||||
pub use_via_header: Option<MaybeString>,
|
||||
#[yaserde(rename = "suppressVersion")]
|
||||
pub suppress_version: i32,
|
||||
pub suppress_version: Option<MaybeString>,
|
||||
#[yaserde(rename = "connecttimeout")]
|
||||
pub connect_timeout: MaybeString,
|
||||
pub connect_timeout: Option<MaybeString>,
|
||||
#[yaserde(rename = "VisibleEmail")]
|
||||
pub visible_email: String,
|
||||
pub visible_email: Option<MaybeString>,
|
||||
#[yaserde(rename = "VisibleHostname")]
|
||||
pub visible_hostname: MaybeString,
|
||||
pub cache: Cache,
|
||||
@@ -953,7 +965,7 @@ pub struct LocalCache {
|
||||
pub cache_mem: i32,
|
||||
pub maximum_object_size: MaybeString,
|
||||
pub maximum_object_size_in_memory: MaybeString,
|
||||
pub memory_cache_mode: String,
|
||||
pub memory_cache_mode: MaybeString,
|
||||
pub size: i32,
|
||||
pub l1: i32,
|
||||
pub l2: i32,
|
||||
@@ -965,13 +977,13 @@ pub struct LocalCache {
|
||||
pub struct Traffic {
|
||||
pub enabled: i32,
|
||||
#[yaserde(rename = "maxDownloadSize")]
|
||||
pub max_download_size: i32,
|
||||
pub max_download_size: MaybeString,
|
||||
#[yaserde(rename = "maxUploadSize")]
|
||||
pub max_upload_size: i32,
|
||||
pub max_upload_size: MaybeString,
|
||||
#[yaserde(rename = "OverallBandwidthTrotteling")]
|
||||
pub overall_bandwidth_trotteling: i32,
|
||||
pub overall_bandwidth_trotteling: MaybeString,
|
||||
#[yaserde(rename = "perHostTrotteling")]
|
||||
pub per_host_trotteling: i32,
|
||||
pub per_host_trotteling: MaybeString,
|
||||
}
|
||||
|
||||
#[derive(Default, PartialEq, Debug, YaSerialize, YaDeserialize)]
|
||||
@@ -988,7 +1000,7 @@ pub struct ParentProxy {
|
||||
|
||||
#[derive(Default, PartialEq, Debug, YaSerialize, YaDeserialize)]
|
||||
pub struct Forward {
|
||||
pub interfaces: String,
|
||||
pub interfaces: MaybeString,
|
||||
pub port: i32,
|
||||
pub sslbumpport: i32,
|
||||
pub sslbump: i32,
|
||||
@@ -1033,9 +1045,9 @@ pub struct Acl {
|
||||
pub google_apps: MaybeString,
|
||||
pub youtube: MaybeString,
|
||||
#[yaserde(rename = "safePorts")]
|
||||
pub safe_ports: String,
|
||||
pub safe_ports: MaybeString,
|
||||
#[yaserde(rename = "sslPorts")]
|
||||
pub ssl_ports: String,
|
||||
pub ssl_ports: MaybeString,
|
||||
#[yaserde(rename = "remoteACLs")]
|
||||
pub remote_acls: RemoteAcls,
|
||||
}
|
||||
@@ -1051,9 +1063,9 @@ pub struct RemoteAcls {
|
||||
pub struct Icap {
|
||||
pub enable: i32,
|
||||
#[yaserde(rename = "RequestURL")]
|
||||
pub request_url: String,
|
||||
pub request_url: MaybeString,
|
||||
#[yaserde(rename = "ResponseURL")]
|
||||
pub response_url: String,
|
||||
pub response_url: MaybeString,
|
||||
#[yaserde(rename = "SendClientIP")]
|
||||
pub send_client_ip: i32,
|
||||
#[yaserde(rename = "SendUsername")]
|
||||
@@ -1061,7 +1073,7 @@ pub struct Icap {
|
||||
#[yaserde(rename = "EncodeUsername")]
|
||||
pub encode_username: i32,
|
||||
#[yaserde(rename = "UsernameHeader")]
|
||||
pub username_header: String,
|
||||
pub username_header: MaybeString,
|
||||
#[yaserde(rename = "EnablePreview")]
|
||||
pub enable_preview: i32,
|
||||
#[yaserde(rename = "PreviewSize")]
|
||||
@@ -1076,9 +1088,9 @@ pub struct Authentication {
|
||||
pub method: MaybeString,
|
||||
#[yaserde(rename = "authEnforceGroup")]
|
||||
pub auth_enforce_group: MaybeString,
|
||||
pub realm: String,
|
||||
pub credentialsttl: i32, // This field is already in snake_case
|
||||
pub children: i32,
|
||||
pub realm: MaybeString,
|
||||
pub credentialsttl: MaybeString, // This field is already in snake_case
|
||||
pub children: MaybeString,
|
||||
}
|
||||
|
||||
#[derive(Default, PartialEq, Debug, YaSerialize, YaDeserialize)]
|
||||
@@ -1293,6 +1305,7 @@ pub struct WireguardServerItem {
|
||||
pub peers: String,
|
||||
pub endpoint: MaybeString,
|
||||
pub peer_dns: MaybeString,
|
||||
pub debug: Option<MaybeString>,
|
||||
}
|
||||
|
||||
#[derive(Default, PartialEq, Debug, YaSerialize, YaDeserialize)]
|
||||
@@ -1477,6 +1490,7 @@ pub struct Ppp {
|
||||
pub ports: Option<MaybeString>,
|
||||
pub username: Option<MaybeString>,
|
||||
pub password: Option<MaybeString>,
|
||||
pub provider: Option<MaybeString>,
|
||||
}
|
||||
|
||||
#[derive(Default, PartialEq, Debug, YaSerialize, YaDeserialize)]
|
||||
|
||||
@@ -86,10 +86,7 @@ impl<'a> DhcpConfigLegacyISC<'a> {
|
||||
mac,
|
||||
ipaddr: ipaddr.to_string(),
|
||||
hostname,
|
||||
descr: Default::default(),
|
||||
winsserver: Default::default(),
|
||||
dnsserver: Default::default(),
|
||||
ntpserver: Default::default(),
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
existing_mappings.push(static_map);
|
||||
@@ -126,9 +123,7 @@ impl<'a> DhcpConfigLegacyISC<'a> {
|
||||
ipaddr: entry["ipaddr"].as_str().unwrap_or_default().to_string(),
|
||||
hostname: entry["hostname"].as_str().unwrap_or_default().to_string(),
|
||||
descr: entry["descr"].as_str().map(MaybeString::from),
|
||||
winsserver: MaybeString::default(),
|
||||
dnsserver: MaybeString::default(),
|
||||
ntpserver: MaybeString::default(),
|
||||
..Default::default()
|
||||
})
|
||||
.collect();
|
||||
|
||||
|
||||
Reference in New Issue
Block a user