Compare commits

5 Commits

79e406f126  feat(argocd): Can now detect argocd instance when already installed and write crd accordingly. One major caveat though is that crd versions are not managed properly yet
            Checks failed: Run Check Script / check (pull_request), failing after 16s
            2025-10-15 22:27:15 -04:00

0700e30299  wip: install argocd app depending on how argocd is already installed in the cluster
            2025-10-15 17:26:48 -04:00

528ee8a696  wip: argocd discovery
            Checks failed: Run Check Script / check (pull_request), failing after 13s
            2025-10-14 21:16:02 -04:00

69a159711a  feat: Support tls enabled by default on rust web app
            Checks failed: Run Check Script / check (pull_request), failing after 15s
            2025-10-14 15:53:23 -04:00

b0ad7bb4c4  feat(application): Webapp feature with production dns
            Checks failed: Run Check Script / check (pull_request), failing after 17s
            2025-10-14 15:19:12 -04:00
110 changed files with 1373 additions and 5803 deletions

Cargo.lock (generated)

@@ -429,15 +429,6 @@ dependencies = [
  "wait-timeout",
 ]
 
-[[package]]
-name = "assertor"
-version = "0.0.4"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4ff24d87260733dc86d38a11c60d9400ce4a74a05d0dafa2a6f5ab249cd857cb"
-dependencies = [
- "num-traits",
-]
-
 [[package]]
 name = "async-broadcast"
 version = "0.7.2"
@@ -674,22 +665,6 @@ dependencies = [
  "serde_with",
 ]
 
-[[package]]
-name = "brocade"
-version = "0.1.0"
-dependencies = [
- "async-trait",
- "env_logger",
- "harmony_secret",
- "harmony_types",
- "log",
- "regex",
- "russh",
- "russh-keys",
- "serde",
- "tokio",
-]
-
 [[package]]
 name = "brotli"
 version = "8.0.2"
@@ -1780,7 +1755,6 @@ dependencies = [
 name = "example-nanodc"
 version = "0.1.0"
 dependencies = [
- "brocade",
  "cidr",
  "env_logger",
  "harmony",
@@ -1789,7 +1763,6 @@ dependencies = [
  "harmony_tui",
  "harmony_types",
  "log",
- "serde",
  "tokio",
  "url",
 ]
@@ -1808,7 +1781,6 @@ dependencies = [
 name = "example-okd-install"
 version = "0.1.0"
 dependencies = [
- "brocade",
  "cidr",
  "env_logger",
  "harmony",
@@ -1823,32 +1795,17 @@ dependencies = [
  "url",
 ]
 
-[[package]]
-name = "example-openbao"
-version = "0.1.0"
-dependencies = [
- "harmony",
- "harmony_cli",
- "harmony_macros",
- "harmony_types",
- "tokio",
- "url",
-]
-
 [[package]]
 name = "example-opnsense"
 version = "0.1.0"
 dependencies = [
- "brocade",
  "cidr",
  "env_logger",
  "harmony",
  "harmony_macros",
- "harmony_secret",
  "harmony_tui",
  "harmony_types",
  "log",
- "serde",
  "tokio",
  "url",
 ]
@@ -1857,7 +1814,6 @@ dependencies = [
 name = "example-pxe"
 version = "0.1.0"
 dependencies = [
- "brocade",
  "cidr",
  "env_logger",
  "harmony",
@@ -1872,15 +1828,6 @@ dependencies = [
  "url",
 ]
 
-[[package]]
-name = "example-remove-rook-osd"
-version = "0.1.0"
-dependencies = [
- "harmony",
- "harmony_cli",
- "tokio",
-]
-
 [[package]]
 name = "example-rust"
 version = "0.1.0"
@@ -2358,11 +2305,9 @@ name = "harmony"
 version = "0.1.0"
 dependencies = [
  "askama",
- "assertor",
  "async-trait",
  "base64 0.22.1",
  "bollard",
- "brocade",
  "chrono",
  "cidr",
  "convert_case",
@@ -2393,7 +2338,6 @@ dependencies = [
  "once_cell",
  "opnsense-config",
  "opnsense-config-xml",
- "option-ext",
  "pretty_assertions",
  "reqwest 0.11.27",
  "russh",
@@ -3934,7 +3878,6 @@ checksum = "d05e27ee213611ffe7d6348b942e8f942b37114c00cc03cec254295a4a17852e"
 name = "opnsense-config"
 version = "0.1.0"
 dependencies = [
- "assertor",
  "async-trait",
  "chrono",
  "env_logger",
@@ -4594,9 +4537,9 @@ dependencies = [
 [[package]]
 name = "regex"
-version = "1.11.3"
+version = "1.11.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8b5288124840bee7b386bc413c487869b360b2b4ec421ea56425128692f2a82c"
+checksum = "23d7fd106d8c02486a8d64e778353d1cffe08ce79ac2e82f540c86d0facf6912"
 dependencies = [
  "aho-corasick 1.1.3",
  "memchr",
@@ -4606,9 +4549,9 @@ dependencies = [
 [[package]]
 name = "regex-automata"
-version = "0.4.11"
+version = "0.4.10"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "833eb9ce86d40ef33cb1306d8accf7bc8ec2bfea4355cbdebb3df68b40925cad"
+checksum = "6b9458fa0bfeeac22b5ca447c63aaf45f28439a709ccd244698632f9aa6394d6"
 dependencies = [
  "aho-corasick 1.1.3",
  "memchr",


@@ -14,9 +14,7 @@ members = [
     "harmony_composer",
     "harmony_inventory_agent",
     "harmony_secret_derive",
-    "harmony_secret",
-    "adr/agent_discovery/mdns",
-    "brocade",
+    "harmony_secret", "adr/agent_discovery/mdns",
 ]
 
 [workspace.package]
@@ -68,12 +66,5 @@ thiserror = "2.0.14"
 serde = { version = "1.0.209", features = ["derive", "rc"] }
 serde_json = "1.0.127"
 askama = "0.14"
-sqlx = { version = "0.8", features = ["runtime-tokio", "sqlite"] }
-reqwest = { version = "0.12", features = [
-    "blocking",
-    "stream",
-    "rustls-tls",
-    "http2",
-    "json",
-], default-features = false }
-assertor = "0.0.4"
+sqlx = { version = "0.8", features = ["runtime-tokio", "sqlite" ] }
+reqwest = { version = "0.12", features = ["blocking", "stream", "rustls-tls", "http2", "json"], default-features = false }


@@ -1,18 +0,0 @@
[package]
name = "brocade"
edition = "2024"
version.workspace = true
readme.workspace = true
license.workspace = true
[dependencies]
async-trait.workspace = true
harmony_types = { path = "../harmony_types" }
russh.workspace = true
russh-keys.workspace = true
tokio.workspace = true
log.workspace = true
env_logger.workspace = true
regex = "1.11.3"
harmony_secret = { path = "../harmony_secret" }
serde.workspace = true


@@ -1,70 +0,0 @@
use std::net::{IpAddr, Ipv4Addr};
use brocade::BrocadeOptions;
use harmony_secret::{Secret, SecretManager};
use harmony_types::switch::PortLocation;
use serde::{Deserialize, Serialize};
#[derive(Secret, Clone, Debug, Serialize, Deserialize)]
struct BrocadeSwitchAuth {
username: String,
password: String,
}
#[tokio::main]
async fn main() {
env_logger::Builder::from_env(env_logger::Env::default().default_filter_or("info")).init();
// let ip = IpAddr::V4(Ipv4Addr::new(10, 0, 0, 250)); // old brocade @ ianlet
let ip = IpAddr::V4(Ipv4Addr::new(192, 168, 55, 101)); // brocade @ sto1
// let ip = IpAddr::V4(Ipv4Addr::new(192, 168, 4, 11)); // brocade @ st
let switch_addresses = vec![ip];
let config = SecretManager::get_or_prompt::<BrocadeSwitchAuth>()
.await
.unwrap();
let brocade = brocade::init(
&switch_addresses,
22,
&config.username,
&config.password,
Some(BrocadeOptions {
dry_run: true,
..Default::default()
}),
)
.await
.expect("Brocade client failed to connect");
let entries = brocade.get_stack_topology().await.unwrap();
println!("Stack topology: {entries:#?}");
let entries = brocade.get_interfaces().await.unwrap();
println!("Interfaces: {entries:#?}");
let version = brocade.version().await.unwrap();
println!("Version: {version:?}");
println!("--------------");
let mac_addresses = brocade.get_mac_address_table().await.unwrap();
println!("VLAN\tMAC\t\t\tPORT");
for mac in mac_addresses {
println!("{}\t{}\t{}", mac.vlan, mac.mac_address, mac.port);
}
println!("--------------");
let channel_name = "1";
brocade.clear_port_channel(channel_name).await.unwrap();
println!("--------------");
let channel_id = brocade.find_available_channel_id().await.unwrap();
println!("--------------");
let channel_name = "HARMONY_LAG";
let ports = [PortLocation(2, 0, 35)];
brocade
.create_port_channel(channel_id, channel_name, &ports)
.await
.unwrap();
}


@@ -1,212 +0,0 @@
use super::BrocadeClient;
use crate::{
BrocadeInfo, Error, ExecutionMode, InterSwitchLink, InterfaceInfo, MacAddressEntry,
PortChannelId, PortOperatingMode, parse_brocade_mac_address, shell::BrocadeShell,
};
use async_trait::async_trait;
use harmony_types::switch::{PortDeclaration, PortLocation};
use log::{debug, info};
use regex::Regex;
use std::{collections::HashSet, str::FromStr};
#[derive(Debug)]
pub struct FastIronClient {
shell: BrocadeShell,
version: BrocadeInfo,
}
impl FastIronClient {
pub fn init(mut shell: BrocadeShell, version_info: BrocadeInfo) -> Self {
shell.before_all(vec!["skip-page-display".into()]);
shell.after_all(vec!["page".into()]);
Self {
shell,
version: version_info,
}
}
fn parse_mac_entry(&self, line: &str) -> Option<Result<MacAddressEntry, Error>> {
debug!("[Brocade] Parsing mac address entry: {line}");
let parts: Vec<&str> = line.split_whitespace().collect();
if parts.len() < 3 {
return None;
}
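// FastIron prints either "VLAN MAC PORT" (three columns) or a table without a
// VLAN column; in the latter case assume the default VLAN 1.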
let (vlan, mac_address, port) = match parts.len() {
3 => (
u16::from_str(parts[0]).ok()?,
parse_brocade_mac_address(parts[1]).ok()?,
parts[2].to_string(),
),
_ => (
1,
parse_brocade_mac_address(parts[0]).ok()?,
parts[1].to_string(),
),
};
let port =
PortDeclaration::parse(&port).map_err(|e| Error::UnexpectedError(format!("{e}")));
match port {
Ok(p) => Some(Ok(MacAddressEntry {
vlan,
mac_address,
port: p,
})),
Err(e) => Some(Err(e)),
}
}
fn parse_stack_port_entry(&self, line: &str) -> Option<Result<InterSwitchLink, Error>> {
debug!("[Brocade] Parsing stack port entry: {line}");
let parts: Vec<&str> = line.split_whitespace().collect();
if parts.len() < 10 {
return None;
}
let local_port = PortLocation::from_str(parts[0]).ok()?;
Some(Ok(InterSwitchLink {
local_port,
remote_port: None,
}))
}
fn build_port_channel_commands(
&self,
channel_id: PortChannelId,
channel_name: &str,
ports: &[PortLocation],
) -> Vec<String> {
let mut commands = vec![
"configure terminal".to_string(),
format!("lag {channel_name} static id {channel_id}"),
];
for port in ports {
commands.push(format!("ports ethernet {port}"));
}
commands.push(format!("primary-port {}", ports[0]));
commands.push("deploy".into());
commands.push("exit".into());
commands.push("write memory".into());
commands.push("exit".into());
commands
}
}
#[async_trait]
impl BrocadeClient for FastIronClient {
async fn version(&self) -> Result<BrocadeInfo, Error> {
Ok(self.version.clone())
}
async fn get_mac_address_table(&self) -> Result<Vec<MacAddressEntry>, Error> {
info!("[Brocade] Showing MAC address table...");
let output = self
.shell
.run_command("show mac-address", ExecutionMode::Regular)
.await?;
output
.lines()
.skip(2)
.filter_map(|line| self.parse_mac_entry(line))
.collect()
}
async fn get_stack_topology(&self) -> Result<Vec<InterSwitchLink>, Error> {
let output = self
.shell
.run_command("show interface stack-ports", crate::ExecutionMode::Regular)
.await?;
output
.lines()
.skip(1)
.filter_map(|line| self.parse_stack_port_entry(line))
.collect()
}
async fn get_interfaces(&self) -> Result<Vec<InterfaceInfo>, Error> {
todo!()
}
async fn configure_interfaces(
&self,
_interfaces: Vec<(String, PortOperatingMode)>,
) -> Result<(), Error> {
todo!()
}
async fn find_available_channel_id(&self) -> Result<PortChannelId, Error> {
info!("[Brocade] Finding next available channel id...");
let output = self
.shell
.run_command("show lag", ExecutionMode::Regular)
.await?;
let re = Regex::new(r"=== LAG .* ID\s+(\d+)").expect("Invalid regex");
let used_ids: HashSet<u8> = output
.lines()
.filter_map(|line| {
re.captures(line)
.and_then(|c| c.get(1))
.and_then(|id_match| id_match.as_str().parse().ok())
})
.collect();
let mut next_id: u8 = 1;
loop {
if !used_ids.contains(&next_id) {
break;
}
next_id += 1;
}
info!("[Brocade] Found channel id: {next_id}");
Ok(next_id)
}
async fn create_port_channel(
&self,
channel_id: PortChannelId,
channel_name: &str,
ports: &[PortLocation],
) -> Result<(), Error> {
info!(
"[Brocade] Configuring port-channel '{channel_name} {channel_id}' with ports: {ports:?}"
);
let commands = self.build_port_channel_commands(channel_id, channel_name, ports);
self.shell
.run_commands(commands, ExecutionMode::Privileged)
.await?;
info!("[Brocade] Port-channel '{channel_name}' configured.");
Ok(())
}
async fn clear_port_channel(&self, channel_name: &str) -> Result<(), Error> {
info!("[Brocade] Clearing port-channel: {channel_name}");
let commands = vec![
"configure terminal".to_string(),
format!("no lag {channel_name}"),
"write memory".to_string(),
];
self.shell
.run_commands(commands, ExecutionMode::Privileged)
.await?;
info!("[Brocade] Port-channel '{channel_name}' cleared.");
Ok(())
}
}


@@ -1,338 +0,0 @@
use std::net::IpAddr;
use std::{
fmt::{self, Display},
time::Duration,
};
use crate::network_operating_system::NetworkOperatingSystemClient;
use crate::{
fast_iron::FastIronClient,
shell::{BrocadeSession, BrocadeShell},
};
use async_trait::async_trait;
use harmony_types::net::MacAddress;
use harmony_types::switch::{PortDeclaration, PortLocation};
use regex::Regex;
mod fast_iron;
mod network_operating_system;
mod shell;
mod ssh;
#[derive(Default, Clone, Debug)]
pub struct BrocadeOptions {
pub dry_run: bool,
pub ssh: ssh::SshOptions,
pub timeouts: TimeoutConfig,
}
#[derive(Clone, Debug)]
pub struct TimeoutConfig {
pub shell_ready: Duration,
pub command_execution: Duration,
pub command_output: Duration,
pub cleanup: Duration,
pub message_wait: Duration,
}
impl Default for TimeoutConfig {
fn default() -> Self {
Self {
shell_ready: Duration::from_secs(10),
command_execution: Duration::from_secs(60), // Commands like `deploy` (for a LAG) can take a while
command_output: Duration::from_secs(5), // Delay to start logging "waiting for command output"
cleanup: Duration::from_secs(10),
message_wait: Duration::from_millis(500),
}
}
}
enum ExecutionMode {
Regular,
Privileged,
}
#[derive(Clone, Debug)]
pub struct BrocadeInfo {
os: BrocadeOs,
version: String,
}
#[derive(Clone, Debug)]
pub enum BrocadeOs {
NetworkOperatingSystem,
FastIron,
Unknown,
}
#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Clone)]
pub struct MacAddressEntry {
pub vlan: u16,
pub mac_address: MacAddress,
pub port: PortDeclaration,
}
pub type PortChannelId = u8;
/// Represents a single physical or logical link connecting two switches within a stack or fabric.
///
/// This structure provides a standardized view of the topology regardless of the
/// underlying Brocade OS configuration (stacking vs. fabric).
#[derive(Debug, PartialEq, Eq, Clone)]
pub struct InterSwitchLink {
/// The local port on the switch where the topology command was run.
pub local_port: PortLocation,
/// The port on the directly connected neighboring switch.
pub remote_port: Option<PortLocation>,
}
/// Represents the key running configuration status of a single switch interface.
#[derive(Debug, PartialEq, Eq, Clone)]
pub struct InterfaceInfo {
/// The full configuration name (e.g., "TenGigabitEthernet 1/0/1", "FortyGigabitEthernet 2/0/2").
pub name: String,
/// The physical location of the interface.
pub port_location: PortLocation,
/// The parsed type and name prefix of the interface.
pub interface_type: InterfaceType,
/// The primary configuration mode defining the interface's behavior (L2, L3, Fabric).
pub operating_mode: Option<PortOperatingMode>,
/// Indicates the current state of the interface.
pub status: InterfaceStatus,
}
/// Categorizes the functional type of a switch interface.
#[derive(Debug, PartialEq, Eq, Clone)]
pub enum InterfaceType {
/// Physical or virtual Ethernet interface (e.g., TenGigabitEthernet, FortyGigabitEthernet).
Ethernet(String),
}
impl fmt::Display for InterfaceType {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
InterfaceType::Ethernet(name) => write!(f, "{name}"),
}
}
}
/// Defines the primary configuration mode of a switch interface, representing mutually exclusive roles.
#[derive(Debug, PartialEq, Eq, Clone)]
pub enum PortOperatingMode {
/// The interface is explicitly configured for Brocade fabric roles (ISL or Trunk enabled).
Fabric,
/// The interface is configured for standard Layer 2 switching as Trunk port (`switchport mode trunk`).
Trunk,
/// The interface is configured for standard Layer 2 switching as Access port (`switchport` without trunk mode).
Access,
}
/// Defines the possible status of an interface.
#[derive(Debug, PartialEq, Eq, Clone)]
pub enum InterfaceStatus {
/// The interface is connected.
Connected,
/// The interface is not connected and is not expected to be.
NotConnected,
/// The interface is not connected but is expected to be (configured with `no shutdown`).
SfpAbsent,
}
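/// Connects to the first switch address, runs `show version` to detect the OS
/// family, and returns the matching client implementation.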
pub async fn init(
ip_addresses: &[IpAddr],
port: u16,
username: &str,
password: &str,
options: Option<BrocadeOptions>,
) -> Result<Box<dyn BrocadeClient + Send + Sync>, Error> {
let shell = BrocadeShell::init(ip_addresses, port, username, password, options).await?;
let version_info = shell
.with_session(ExecutionMode::Regular, |session| {
Box::pin(get_brocade_info(session))
})
.await?;
Ok(match version_info.os {
BrocadeOs::FastIron => Box::new(FastIronClient::init(shell, version_info)),
BrocadeOs::NetworkOperatingSystem => {
Box::new(NetworkOperatingSystemClient::init(shell, version_info))
}
BrocadeOs::Unknown => todo!(),
})
}
#[async_trait]
pub trait BrocadeClient: std::fmt::Debug {
/// Retrieves the operating system and version details from the connected Brocade switch.
///
/// This is typically the first call made after establishing a connection to determine
/// the switch OS family (e.g., FastIron, NOS) for feature compatibility.
///
/// # Returns
///
/// A `BrocadeInfo` structure containing parsed OS type and version string.
async fn version(&self) -> Result<BrocadeInfo, Error>;
/// Retrieves the dynamically learned MAC address table from the switch.
///
/// This is crucial for discovering where specific network endpoints (MAC addresses)
/// are currently located on the physical ports.
///
/// # Returns
///
/// A vector of `MacAddressEntry`, where each entry typically contains VLAN, MAC address,
/// and the associated port name/index.
async fn get_mac_address_table(&self) -> Result<Vec<MacAddressEntry>, Error>;
/// Derives the physical connections used to link multiple switches together
/// to form a single logical entity (stack, fabric, etc.).
///
/// This abstracts the underlying configuration (e.g., stack ports, fabric ports)
/// to return a standardized view of the topology.
///
/// # Returns
///
/// A vector of `InterSwitchLink` structs detailing which ports are used for stacking/fabric.
/// If the switch is not stacked, returns an empty vector.
async fn get_stack_topology(&self) -> Result<Vec<InterSwitchLink>, Error>;
/// Retrieves the status for all interfaces
///
/// # Returns
///
/// A vector of `InterfaceInfo` structures.
async fn get_interfaces(&self) -> Result<Vec<InterfaceInfo>, Error>;
/// Configures a set of interfaces to be operated with a specified mode (access ports, ISL, etc.).
async fn configure_interfaces(
&self,
interfaces: Vec<(String, PortOperatingMode)>,
) -> Result<(), Error>;
/// Scans the existing configuration to find the next available (unused)
/// Port-Channel ID (`lag` or `trunk`) for assignment.
///
/// # Returns
///
/// The smallest, unassigned `PortChannelId` within the supported range.
async fn find_available_channel_id(&self) -> Result<PortChannelId, Error>;
/// Creates and configures a new Port-Channel (Link Aggregation Group or LAG)
/// using the specified channel ID and ports.
///
/// The resulting configuration must be persistent (saved to startup-config).
/// Assumes a static LAG configuration mode unless specified otherwise by the implementation.
///
/// # Parameters
///
/// * `channel_id`: The ID (e.g., 1-128) for the logical port channel.
/// * `channel_name`: A descriptive name for the LAG (used in configuration context).
/// * `ports`: A slice of `PortLocation` structs defining the physical member ports.
async fn create_port_channel(
&self,
channel_id: PortChannelId,
channel_name: &str,
ports: &[PortLocation],
) -> Result<(), Error>;
/// Removes all configuration associated with the specified Port-Channel name.
///
/// This operation should be idempotent; attempting to clear a non-existent
/// channel should succeed (or return a benign error).
///
/// # Parameters
///
/// * `channel_name`: The name of the Port-Channel (LAG) to delete.
///
async fn clear_port_channel(&self, channel_name: &str) -> Result<(), Error>;
}
async fn get_brocade_info(session: &mut BrocadeSession) -> Result<BrocadeInfo, Error> {
let output = session.run_command("show version").await?;
if output.contains("Network Operating System") {
let re = Regex::new(r"Network Operating System Version:\s*(?P<version>[a-zA-Z0-9.\-]+)")
.expect("Invalid regex");
let version = re
.captures(&output)
.and_then(|cap| cap.name("version"))
.map(|m| m.as_str().to_string())
.unwrap_or_default();
return Ok(BrocadeInfo {
os: BrocadeOs::NetworkOperatingSystem,
version,
});
} else if output.contains("ICX") {
let re = Regex::new(r"(?m)^\s*SW: Version\s*(?P<version>[a-zA-Z0-9.\-]+)")
.expect("Invalid regex");
let version = re
.captures(&output)
.and_then(|cap| cap.name("version"))
.map(|m| m.as_str().to_string())
.unwrap_or_default();
return Ok(BrocadeInfo {
os: BrocadeOs::FastIron,
version,
});
}
Err(Error::UnexpectedError("Unknown Brocade OS version".into()))
}
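/// Parses Brocade's dotted-hex MAC notation (e.g. "0123.4567.89ab") into a `MacAddress`.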
fn parse_brocade_mac_address(value: &str) -> Result<MacAddress, String> {
let cleaned_mac = value.replace('.', "");
if cleaned_mac.len() != 12 {
return Err(format!("Invalid MAC address: {value}"));
}
let mut bytes = [0u8; 6];
for (i, pair) in cleaned_mac.as_bytes().chunks(2).enumerate() {
let byte_str = std::str::from_utf8(pair).map_err(|_| "Invalid UTF-8")?;
bytes[i] =
u8::from_str_radix(byte_str, 16).map_err(|_| format!("Invalid hex in MAC: {value}"))?;
}
Ok(MacAddress(bytes))
}
#[derive(Debug)]
pub enum Error {
NetworkError(String),
AuthenticationError(String),
ConfigurationError(String),
TimeoutError(String),
UnexpectedError(String),
CommandError(String),
}
impl Display for Error {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
Error::NetworkError(msg) => write!(f, "Network error: {msg}"),
Error::AuthenticationError(msg) => write!(f, "Authentication error: {msg}"),
Error::ConfigurationError(msg) => write!(f, "Configuration error: {msg}"),
Error::TimeoutError(msg) => write!(f, "Timeout error: {msg}"),
Error::UnexpectedError(msg) => write!(f, "Unexpected error: {msg}"),
Error::CommandError(msg) => write!(f, "{msg}"),
}
}
}
impl From<Error> for String {
fn from(val: Error) -> Self {
format!("{val}")
}
}
impl std::error::Error for Error {}
impl From<russh::Error> for Error {
fn from(value: russh::Error) -> Self {
Error::NetworkError(format!("Russh client error: {value}"))
}
}


@@ -1,333 +0,0 @@
use std::str::FromStr;
use async_trait::async_trait;
use harmony_types::switch::{PortDeclaration, PortLocation};
use log::{debug, info};
use regex::Regex;
use crate::{
BrocadeClient, BrocadeInfo, Error, ExecutionMode, InterSwitchLink, InterfaceInfo,
InterfaceStatus, InterfaceType, MacAddressEntry, PortChannelId, PortOperatingMode,
parse_brocade_mac_address, shell::BrocadeShell,
};
#[derive(Debug)]
pub struct NetworkOperatingSystemClient {
shell: BrocadeShell,
version: BrocadeInfo,
}
impl NetworkOperatingSystemClient {
pub fn init(mut shell: BrocadeShell, version_info: BrocadeInfo) -> Self {
shell.before_all(vec!["terminal length 0".into()]);
Self {
shell,
version: version_info,
}
}
fn parse_mac_entry(&self, line: &str) -> Option<Result<MacAddressEntry, Error>> {
debug!("[Brocade] Parsing mac address entry: {line}");
let parts: Vec<&str> = line.split_whitespace().collect();
if parts.len() < 5 {
return None;
}
let (vlan, mac_address, port) = match parts.len() {
5 => (
u16::from_str(parts[0]).ok()?,
parse_brocade_mac_address(parts[1]).ok()?,
parts[4].to_string(),
),
_ => (
u16::from_str(parts[0]).ok()?,
parse_brocade_mac_address(parts[1]).ok()?,
parts[5].to_string(),
),
};
let port =
PortDeclaration::parse(&port).map_err(|e| Error::UnexpectedError(format!("{e}")));
match port {
Ok(p) => Some(Ok(MacAddressEntry {
vlan,
mac_address,
port: p,
})),
Err(e) => Some(Err(e)),
}
}
fn parse_inter_switch_link_entry(&self, line: &str) -> Option<Result<InterSwitchLink, Error>> {
debug!("[Brocade] Parsing inter switch link entry: {line}");
let parts: Vec<&str> = line.split_whitespace().collect();
if parts.len() < 10 {
return None;
}
let local_port = PortLocation::from_str(parts[2]).ok()?;
let remote_port = PortLocation::from_str(parts[5]).ok()?;
Some(Ok(InterSwitchLink {
local_port,
remote_port: Some(remote_port),
}))
}
fn parse_interface_status_entry(&self, line: &str) -> Option<Result<InterfaceInfo, Error>> {
debug!("[Brocade] Parsing interface status entry: {line}");
let parts: Vec<&str> = line.split_whitespace().collect();
if parts.len() < 6 {
return None;
}
let interface_type = match parts[0] {
"Fo" => InterfaceType::Ethernet("FortyGigabitEthernet".to_string()),
"Te" => InterfaceType::Ethernet("TenGigabitEthernet".to_string()),
_ => return None,
};
let port_location = PortLocation::from_str(parts[1]).ok()?;
let status = match parts[2] {
"connected" => InterfaceStatus::Connected,
"notconnected" => InterfaceStatus::NotConnected,
"sfpAbsent" => InterfaceStatus::SfpAbsent,
_ => return None,
};
let operating_mode = match parts[3] {
"ISL" => Some(PortOperatingMode::Fabric),
"Trunk" => Some(PortOperatingMode::Trunk),
"Access" => Some(PortOperatingMode::Access),
"--" => None,
_ => return None,
};
Some(Ok(InterfaceInfo {
name: format!("{interface_type} {port_location}"),
port_location,
interface_type,
operating_mode,
status,
}))
}
fn map_configure_interfaces_error(&self, err: Error) -> Error {
debug!("[Brocade] {err}");
if let Error::CommandError(message) = &err {
if message.contains("switchport")
&& message.contains("Cannot configure aggregator member")
{
let re = Regex::new(r"\(conf-if-([a-zA-Z]+)-([\d/]+)\)#").unwrap();
if let Some(caps) = re.captures(message) {
let interface_type = &caps[1];
let port_location = &caps[2];
let interface = format!("{interface_type} {port_location}");
return Error::CommandError(format!(
"Cannot configure interface '{interface}', it is a member of a port-channel (LAG)"
));
}
}
}
err
}
}
#[async_trait]
impl BrocadeClient for NetworkOperatingSystemClient {
async fn version(&self) -> Result<BrocadeInfo, Error> {
Ok(self.version.clone())
}
async fn get_mac_address_table(&self) -> Result<Vec<MacAddressEntry>, Error> {
let output = self
.shell
.run_command("show mac-address-table", ExecutionMode::Regular)
.await?;
output
.lines()
.skip(1)
.filter_map(|line| self.parse_mac_entry(line))
.collect()
}
async fn get_stack_topology(&self) -> Result<Vec<InterSwitchLink>, Error> {
let output = self
.shell
.run_command("show fabric isl", ExecutionMode::Regular)
.await?;
output
.lines()
.skip(6)
.filter_map(|line| self.parse_inter_switch_link_entry(line))
.collect()
}
async fn get_interfaces(&self) -> Result<Vec<InterfaceInfo>, Error> {
let output = self
.shell
.run_command(
"show interface status rbridge-id all",
ExecutionMode::Regular,
)
.await?;
output
.lines()
.skip(2)
.filter_map(|line| self.parse_interface_status_entry(line))
.collect()
}
async fn configure_interfaces(
&self,
interfaces: Vec<(String, PortOperatingMode)>,
) -> Result<(), Error> {
info!("[Brocade] Configuring {} interface(s)...", interfaces.len());
let mut commands = vec!["configure terminal".to_string()];
for interface in interfaces {
commands.push(format!("interface {}", interface.0));
match interface.1 {
PortOperatingMode::Fabric => {
commands.push("fabric isl enable".into());
commands.push("fabric trunk enable".into());
}
PortOperatingMode::Trunk => {
commands.push("switchport".into());
commands.push("switchport mode trunk".into());
commands.push("no spanning-tree shutdown".into());
commands.push("no fabric isl enable".into());
commands.push("no fabric trunk enable".into());
}
PortOperatingMode::Access => {
commands.push("switchport".into());
commands.push("switchport mode access".into());
commands.push("switchport access vlan 1".into());
commands.push("no spanning-tree shutdown".into());
commands.push("no fabric isl enable".into());
commands.push("no fabric trunk enable".into());
}
}
commands.push("no shutdown".into());
commands.push("exit".into());
}
self.shell
.run_commands(commands, ExecutionMode::Regular)
.await
.map_err(|err| self.map_configure_interfaces_error(err))?;
info!("[Brocade] Interfaces configured.");
Ok(())
}
async fn find_available_channel_id(&self) -> Result<PortChannelId, Error> {
info!("[Brocade] Finding next available channel id...");
let output = self
.shell
.run_command("show port-channel summary", ExecutionMode::Regular)
.await?;
let used_ids: Vec<u8> = output
.lines()
.skip(6)
.filter_map(|line| {
let parts: Vec<&str> = line.split_whitespace().collect();
if parts.len() < 8 {
return None;
}
u8::from_str(parts[0]).ok()
})
.collect();
let mut next_id: u8 = 1;
loop {
if !used_ids.contains(&next_id) {
break;
}
next_id += 1;
}
info!("[Brocade] Found channel id: {next_id}");
Ok(next_id)
}
async fn create_port_channel(
&self,
channel_id: PortChannelId,
channel_name: &str,
ports: &[PortLocation],
) -> Result<(), Error> {
info!(
"[Brocade] Configuring port-channel '{channel_id} {channel_name}' with ports: {}",
ports
.iter()
.map(|p| format!("{p}"))
.collect::<Vec<String>>()
.join(", ")
);
let interfaces = self.get_interfaces().await?;
let mut commands = vec![
"configure terminal".into(),
format!("interface port-channel {}", channel_id),
"no shutdown".into(),
"exit".into(),
];
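// Strip switchport/IP/fabric settings from each member port before binding it
// to the channel-group, then bring the port back up.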
for port in ports {
let interface = interfaces.iter().find(|i| i.port_location == *port);
let Some(interface) = interface else {
continue;
};
commands.push(format!("interface {}", interface.name));
commands.push("no switchport".into());
commands.push("no ip address".into());
commands.push("no fabric isl enable".into());
commands.push("no fabric trunk enable".into());
commands.push(format!("channel-group {channel_id} mode active"));
commands.push("no shutdown".into());
commands.push("exit".into());
}
self.shell
.run_commands(commands, ExecutionMode::Regular)
.await?;
info!("[Brocade] Port-channel '{channel_name}' configured.");
Ok(())
}
async fn clear_port_channel(&self, channel_name: &str) -> Result<(), Error> {
info!("[Brocade] Clearing port-channel: {channel_name}");
let commands = vec![
"configure terminal".into(),
format!("no interface port-channel {}", channel_name),
"exit".into(),
];
self.shell
.run_commands(commands, ExecutionMode::Regular)
.await?;
info!("[Brocade] Port-channel '{channel_name}' cleared.");
Ok(())
}
}


@@ -1,370 +0,0 @@
use std::net::IpAddr;
use std::time::Duration;
use std::time::Instant;
use crate::BrocadeOptions;
use crate::Error;
use crate::ExecutionMode;
use crate::TimeoutConfig;
use crate::ssh;
use log::debug;
use log::info;
use russh::ChannelMsg;
use tokio::time::timeout;
#[derive(Debug)]
pub struct BrocadeShell {
ip: IpAddr,
port: u16,
username: String,
password: String,
options: BrocadeOptions,
before_all_commands: Vec<String>,
after_all_commands: Vec<String>,
}
impl BrocadeShell {
pub async fn init(
ip_addresses: &[IpAddr],
port: u16,
username: &str,
password: &str,
options: Option<BrocadeOptions>,
) -> Result<Self, Error> {
let ip = ip_addresses
.first()
.ok_or_else(|| Error::ConfigurationError("No IP addresses provided".to_string()))?;
let base_options = options.unwrap_or_default();
let options = ssh::try_init_client(username, password, ip, base_options).await?;
Ok(Self {
ip: *ip,
port,
username: username.to_string(),
password: password.to_string(),
before_all_commands: vec![],
after_all_commands: vec![],
options,
})
}
pub async fn open_session(&self, mode: ExecutionMode) -> Result<BrocadeSession, Error> {
BrocadeSession::open(
self.ip,
self.port,
&self.username,
&self.password,
self.options.clone(),
mode,
)
.await
}
pub async fn with_session<F, R>(&self, mode: ExecutionMode, callback: F) -> Result<R, Error>
where
F: FnOnce(
&mut BrocadeSession,
) -> std::pin::Pin<
Box<dyn std::future::Future<Output = Result<R, Error>> + Send + '_>,
>,
{
let mut session = self.open_session(mode).await?;
let _ = session.run_commands(self.before_all_commands.clone()).await;
let result = callback(&mut session).await;
let _ = session.run_commands(self.after_all_commands.clone()).await;
session.close().await?;
result
}
pub async fn run_command(&self, command: &str, mode: ExecutionMode) -> Result<String, Error> {
let mut session = self.open_session(mode).await?;
let _ = session.run_commands(self.before_all_commands.clone()).await;
let result = session.run_command(command).await;
let _ = session.run_commands(self.after_all_commands.clone()).await;
session.close().await?;
result
}
pub async fn run_commands(
&self,
commands: Vec<String>,
mode: ExecutionMode,
) -> Result<(), Error> {
let mut session = self.open_session(mode).await?;
let _ = session.run_commands(self.before_all_commands.clone()).await;
let result = session.run_commands(commands).await;
let _ = session.run_commands(self.after_all_commands.clone()).await;
session.close().await?;
result
}
pub fn before_all(&mut self, commands: Vec<String>) {
self.before_all_commands = commands;
}
pub fn after_all(&mut self, commands: Vec<String>) {
self.after_all_commands = commands;
}
}
pub struct BrocadeSession {
pub channel: russh::Channel<russh::client::Msg>,
pub mode: ExecutionMode,
pub options: BrocadeOptions,
}
impl BrocadeSession {
pub async fn open(
ip: IpAddr,
port: u16,
username: &str,
password: &str,
options: BrocadeOptions,
mode: ExecutionMode,
) -> Result<Self, Error> {
let client = ssh::create_client(ip, port, username, password, &options).await?;
let mut channel = client.channel_open_session().await?;
channel
.request_pty(false, "vt100", 80, 24, 0, 0, &[])
.await?;
channel.request_shell(false).await?;
wait_for_shell_ready(&mut channel, &options.timeouts).await?;
if let ExecutionMode::Privileged = mode {
try_elevate_session(&mut channel, username, password, &options.timeouts).await?;
}
Ok(Self {
channel,
mode,
options,
})
}
pub async fn close(&mut self) -> Result<(), Error> {
debug!("[Brocade] Closing session...");
self.channel.data(&b"exit\n"[..]).await?;
if let ExecutionMode::Privileged = self.mode {
self.channel.data(&b"exit\n"[..]).await?;
}
let start = Instant::now();
while start.elapsed() < self.options.timeouts.cleanup {
match timeout(self.options.timeouts.message_wait, self.channel.wait()).await {
Ok(Some(ChannelMsg::Close)) => break,
Ok(Some(_)) => continue,
Ok(None) | Err(_) => break,
}
}
debug!("[Brocade] Session closed.");
Ok(())
}
pub async fn run_command(&mut self, command: &str) -> Result<String, Error> {
if self.should_skip_command(command) {
return Ok(String::new());
}
debug!("[Brocade] Running command: '{command}'...");
self.channel
.data(format!("{}\n", command).as_bytes())
.await?;
tokio::time::sleep(Duration::from_millis(100)).await;
let output = self.collect_command_output().await?;
let output = String::from_utf8(output)
.map_err(|_| Error::UnexpectedError("Invalid UTF-8 in command output".to_string()))?;
self.check_for_command_errors(&output, command)?;
Ok(output)
}
pub async fn run_commands(&mut self, commands: Vec<String>) -> Result<(), Error> {
for command in commands {
self.run_command(&command).await?;
}
Ok(())
}
fn should_skip_command(&self, command: &str) -> bool {
if (command.starts_with("write") || command.starts_with("deploy")) && self.options.dry_run {
info!("[Brocade] Dry-run mode enabled, skipping command: {command}");
return true;
}
false
}
async fn collect_command_output(&mut self) -> Result<Vec<u8>, Error> {
let mut output = Vec::new();
let start = Instant::now();
let read_timeout = Duration::from_millis(500);
let log_interval = Duration::from_secs(5);
let mut last_log = Instant::now();
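// Poll the channel until the CLI prompt ('>' or '#') appears in the accumulated
// output, which signals that the command has completed.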
loop {
if start.elapsed() > self.options.timeouts.command_execution {
return Err(Error::TimeoutError(
"Timeout waiting for command completion.".into(),
));
}
if start.elapsed() > self.options.timeouts.command_output
&& last_log.elapsed() > log_interval
{
info!("[Brocade] Waiting for command output...");
last_log = Instant::now();
}
match timeout(read_timeout, self.channel.wait()).await {
Ok(Some(ChannelMsg::Data { data } | ChannelMsg::ExtendedData { data, .. })) => {
output.extend_from_slice(&data);
let current_output = String::from_utf8_lossy(&output);
if current_output.contains('>') || current_output.contains('#') {
return Ok(output);
}
}
Ok(Some(ChannelMsg::Eof | ChannelMsg::Close)) => return Ok(output),
Ok(Some(ChannelMsg::ExitStatus { exit_status })) => {
debug!("[Brocade] Command exit status: {exit_status}");
}
Ok(Some(_)) => continue,
Ok(None) | Err(_) => {
if output.is_empty() {
if let Ok(None) = timeout(read_timeout, self.channel.wait()).await {
break;
}
continue;
}
tokio::time::sleep(Duration::from_millis(100)).await;
let current_output = String::from_utf8_lossy(&output);
if current_output.contains('>') || current_output.contains('#') {
return Ok(output);
}
}
}
}
Ok(output)
}
fn check_for_command_errors(&self, output: &str, command: &str) -> Result<(), Error> {
const ERROR_PATTERNS: &[&str] = &[
"invalid input",
"syntax error",
"command not found",
"unknown command",
"permission denied",
"access denied",
"authentication failed",
"configuration error",
"failed to",
"error:",
];
let output_lower = output.to_lowercase();
if ERROR_PATTERNS.iter().any(|&p| output_lower.contains(p)) {
return Err(Error::CommandError(format!(
"Command error: {}",
output.trim()
)));
}
if !command.starts_with("show") && output.trim().is_empty() {
return Err(Error::CommandError(format!(
"Command '{command}' produced no output"
)));
}
Ok(())
}
}
async fn wait_for_shell_ready(
channel: &mut russh::Channel<russh::client::Msg>,
timeouts: &TimeoutConfig,
) -> Result<(), Error> {
let mut buffer = Vec::new();
let start = Instant::now();
while start.elapsed() < timeouts.shell_ready {
match timeout(timeouts.message_wait, channel.wait()).await {
Ok(Some(ChannelMsg::Data { data })) => {
buffer.extend_from_slice(&data);
let output = String::from_utf8_lossy(&buffer);
let output = output.trim();
if output.ends_with('>') || output.ends_with('#') {
debug!("[Brocade] Shell ready");
return Ok(());
}
}
Ok(Some(_)) => continue,
Ok(None) => break,
Err(_) => continue,
}
}
Ok(())
}
async fn try_elevate_session(
channel: &mut russh::Channel<russh::client::Msg>,
username: &str,
password: &str,
timeouts: &TimeoutConfig,
) -> Result<(), Error> {
channel.data(&b"enable\n"[..]).await?;
let start = Instant::now();
let mut buffer = Vec::new();
while start.elapsed() < timeouts.shell_ready {
match timeout(timeouts.message_wait, channel.wait()).await {
Ok(Some(ChannelMsg::Data { data })) => {
buffer.extend_from_slice(&data);
let output = String::from_utf8_lossy(&buffer);
if output.ends_with('#') {
debug!("[Brocade] Privileged mode established");
return Ok(());
}
if output.contains("User Name:") {
channel.data(format!("{}\n", username).as_bytes()).await?;
buffer.clear();
} else if output.contains("Password:") {
channel.data(format!("{}\n", password).as_bytes()).await?;
buffer.clear();
} else if output.contains('>') {
return Err(Error::AuthenticationError(
"Enable authentication failed".into(),
));
}
}
Ok(Some(_)) => continue,
Ok(None) => break,
Err(_) => continue,
}
}
let output = String::from_utf8_lossy(&buffer);
if output.ends_with('#') {
debug!("[Brocade] Privileged mode established");
Ok(())
} else {
Err(Error::AuthenticationError(format!(
"Enable failed. Output:\n{output}"
)))
}
}


@@ -1,113 +0,0 @@
use std::borrow::Cow;
use std::sync::Arc;
use async_trait::async_trait;
use russh::client::Handler;
use russh::kex::DH_G1_SHA1;
use russh::kex::ECDH_SHA2_NISTP256;
use russh_keys::key::SSH_RSA;
use super::BrocadeOptions;
use super::Error;
#[derive(Default, Clone, Debug)]
pub struct SshOptions {
pub preferred_algorithms: russh::Preferred,
}
impl SshOptions {
fn ecdhsa_sha2_nistp256() -> Self {
Self {
preferred_algorithms: russh::Preferred {
kex: Cow::Borrowed(&[ECDH_SHA2_NISTP256]),
key: Cow::Borrowed(&[SSH_RSA]),
..Default::default()
},
}
}
fn legacy() -> Self {
Self {
preferred_algorithms: russh::Preferred {
kex: Cow::Borrowed(&[DH_G1_SHA1]),
key: Cow::Borrowed(&[SSH_RSA]),
..Default::default()
},
}
}
}
pub struct Client;
#[async_trait]
impl Handler for Client {
type Error = Error;
async fn check_server_key(
&mut self,
_server_public_key: &russh_keys::key::PublicKey,
) -> Result<bool, Self::Error> {
Ok(true)
}
}
pub async fn try_init_client(
username: &str,
password: &str,
ip: &std::net::IpAddr,
base_options: BrocadeOptions,
) -> Result<BrocadeOptions, Error> {
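// Try the modern default algorithms first, then fall back to progressively older
// key-exchange suites that legacy switch firmware still requires.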
let ssh_options = vec![
SshOptions::default(),
SshOptions::ecdhsa_sha2_nistp256(),
SshOptions::legacy(),
];
for ssh in ssh_options {
let opts = BrocadeOptions {
ssh,
..base_options.clone()
};
let client = create_client(*ip, 22, username, password, &opts).await;
match client {
Ok(_) => {
return Ok(opts);
}
Err(e) => match e {
Error::NetworkError(e) => {
if e.contains("No common key exchange algorithm") {
continue;
} else {
return Err(Error::NetworkError(e));
}
}
_ => return Err(e),
},
}
}
Err(Error::NetworkError(
"Could not establish ssh connection: wrong key exchange algorithm)".to_string(),
))
}
pub async fn create_client(
ip: std::net::IpAddr,
port: u16,
username: &str,
password: &str,
options: &BrocadeOptions,
) -> Result<russh::client::Handle<Client>, Error> {
let config = russh::client::Config {
preferred: options.ssh.preferred_algorithms.clone(),
..Default::default()
};
let mut client = russh::client::connect(Arc::new(config), (ip, port), Client {}).await?;
if !client.authenticate_password(username, password).await? {
return Err(Error::AuthenticationError(
"ssh authentication failed".to_string(),
));
}
Ok(client)
}


@@ -27,6 +27,7 @@ async fn main() {
     };
     let application = Arc::new(RustWebapp {
         name: "example-monitoring".to_string(),
+        dns: "example-monitoring.harmony.mcd".to_string(),
         project_root: PathBuf::from("./examples/rust/webapp"),
         framework: Some(RustWebFramework::Leptos),
         service_port: 3000,


@@ -17,5 +17,3 @@ harmony_secret = { path = "../../harmony_secret" }
 log = { workspace = true }
 env_logger = { workspace = true }
 url = { workspace = true }
-serde = { workspace = true }
-brocade = { path = "../../brocade" }


@@ -3,13 +3,12 @@ use std::{
     sync::Arc,
 };
 
-use brocade::BrocadeOptions;
 use cidr::Ipv4Cidr;
 use harmony::{
     config::secret::SshKeyPair,
     data::{FileContent, FilePath},
     hardware::{HostCategory, Location, PhysicalHost, SwitchGroup},
-    infra::{brocade::BrocadeSwitchClient, opnsense::OPNSenseManagementInterface},
+    infra::opnsense::OPNSenseManagementInterface,
     inventory::Inventory,
     modules::{
         http::StaticFilesHttpScore,
@@ -23,9 +22,8 @@ use harmony::{
     topology::{LogicalHost, UnmanagedRouter},
 };
 use harmony_macros::{ip, mac_address};
-use harmony_secret::{Secret, SecretManager};
+use harmony_secret::SecretManager;
 use harmony_types::net::Url;
-use serde::{Deserialize, Serialize};
 
 #[tokio::main]
 async fn main() {
@@ -34,26 +32,6 @@ async fn main() {
         name: String::from("fw0"),
     };
 
-    let switch_auth = SecretManager::get_or_prompt::<BrocadeSwitchAuth>()
-        .await
-        .expect("Failed to get credentials");
-
-    let switches: Vec<IpAddr> = vec![ip!("192.168.33.101")];
-    let brocade_options = Some(BrocadeOptions {
-        dry_run: *harmony::config::DRY_RUN,
-        ..Default::default()
-    });
-    let switch_client = BrocadeSwitchClient::init(
-        &switches,
-        &switch_auth.username,
-        &switch_auth.password,
-        brocade_options,
-    )
-    .await
-    .expect("Failed to connect to switch");
-
-    let switch_client = Arc::new(switch_client);
-
     let opnsense = Arc::new(
         harmony::infra::opnsense::OPNSenseFirewall::new(firewall, None, "root", "opnsense").await,
     );
@@ -61,7 +39,6 @@ async fn main() {
     let gateway_ipv4 = Ipv4Addr::new(192, 168, 33, 1);
     let gateway_ip = IpAddr::V4(gateway_ipv4);
     let topology = harmony::topology::HAClusterTopology {
-        kubeconfig: None,
         domain_name: "ncd0.harmony.mcd".to_string(), // TODO this must be set manually correctly
         // when setting up the opnsense firewall
         router: Arc::new(UnmanagedRouter::new(
@@ -106,8 +83,7 @@ async fn main() {
                 name: "wk2".to_string(),
             },
         ],
-        node_exporter: opnsense.clone(),
-        switch_client: switch_client.clone(),
+        switch: vec![],
     };
 
     let inventory = Inventory {
@@ -190,9 +166,3 @@ async fn main() {
     .await
     .unwrap();
 }
-
-#[derive(Secret, Serialize, Deserialize, Debug)]
-pub struct BrocadeSwitchAuth {
-    pub username: String,
-    pub password: String,
-}


@@ -19,4 +19,3 @@ log = { workspace = true }
 env_logger = { workspace = true }
 url = { workspace = true }
 serde.workspace = true
-brocade = { path = "../../brocade" }


@@ -1,8 +1,7 @@
-use brocade::BrocadeOptions;
 use cidr::Ipv4Cidr;
 use harmony::{
-    hardware::{Location, SwitchGroup},
-    infra::{brocade::BrocadeSwitchClient, opnsense::OPNSenseManagementInterface},
+    hardware::{FirewallGroup, HostCategory, Location, PhysicalHost, SwitchGroup},
+    infra::opnsense::OPNSenseManagementInterface,
     inventory::Inventory,
     topology::{HAClusterTopology, LogicalHost, UnmanagedRouter},
 };
@@ -23,26 +22,6 @@ pub async fn get_topology() -> HAClusterTopology {
         name: String::from("opnsense-1"),
     };
 
-    let switch_auth = SecretManager::get_or_prompt::<BrocadeSwitchAuth>()
-        .await
-        .expect("Failed to get credentials");
-
-    let switches: Vec<IpAddr> = vec![ip!("192.168.1.101")]; // TODO: Adjust me
-    let brocade_options = Some(BrocadeOptions {
-        dry_run: *harmony::config::DRY_RUN,
-        ..Default::default()
-    });
-    let switch_client = BrocadeSwitchClient::init(
-        &switches,
-        &switch_auth.username,
-        &switch_auth.password,
-        brocade_options,
-    )
-    .await
-    .expect("Failed to connect to switch");
-
-    let switch_client = Arc::new(switch_client);
-
     let config = SecretManager::get_or_prompt::<OPNSenseFirewallConfig>().await;
     let config = config.unwrap();
@@ -59,7 +38,6 @@ pub async fn get_topology() -> HAClusterTopology {
     let gateway_ipv4 = ipv4!("192.168.1.1");
     let gateway_ip = IpAddr::V4(gateway_ipv4);
     harmony::topology::HAClusterTopology {
-        kubeconfig: None,
         domain_name: "demo.harmony.mcd".to_string(),
         router: Arc::new(UnmanagedRouter::new(
             gateway_ip,
@@ -80,8 +58,7 @@ pub async fn get_topology() -> HAClusterTopology {
             name: "bootstrap".to_string(),
         },
         workers: vec![],
-        node_exporter: opnsense.clone(),
-        switch_client: switch_client.clone(),
+        switch: vec![],
     }
 }
@@ -98,9 +75,3 @@ pub fn get_inventory() -> Inventory {
         control_plane_host: vec![],
     }
 }
-
-#[derive(Secret, Serialize, Deserialize, Debug)]
-pub struct BrocadeSwitchAuth {
-    pub username: String,
-    pub password: String,
-}


@@ -19,4 +19,3 @@ log = { workspace = true }
 env_logger = { workspace = true }
 url = { workspace = true }
 serde.workspace = true
-brocade = { path = "../../brocade" }


@@ -1,15 +1,13 @@
-use brocade::BrocadeOptions;
 use cidr::Ipv4Cidr;
 use harmony::{
     config::secret::OPNSenseFirewallCredentials,
     hardware::{Location, SwitchGroup},
-    infra::{brocade::BrocadeSwitchClient, opnsense::OPNSenseManagementInterface},
+    infra::opnsense::OPNSenseManagementInterface,
     inventory::Inventory,
     topology::{HAClusterTopology, LogicalHost, UnmanagedRouter},
 };
 use harmony_macros::{ip, ipv4};
-use harmony_secret::{Secret, SecretManager};
-use serde::{Deserialize, Serialize};
+use harmony_secret::SecretManager;
 use std::{net::IpAddr, sync::Arc};
 
 pub async fn get_topology() -> HAClusterTopology {
@@ -18,26 +16,6 @@ pub async fn get_topology() -> HAClusterTopology {
         name: String::from("opnsense-1"),
     };
 
-    let switch_auth = SecretManager::get_or_prompt::<BrocadeSwitchAuth>()
-        .await
-        .expect("Failed to get credentials");
-
-    let switches: Vec<IpAddr> = vec![ip!("192.168.1.101")]; // TODO: Adjust me
-    let brocade_options = Some(BrocadeOptions {
-        dry_run: *harmony::config::DRY_RUN,
-        ..Default::default()
-    });
-    let switch_client = BrocadeSwitchClient::init(
-        &switches,
-        &switch_auth.username,
-        &switch_auth.password,
-        brocade_options,
-    )
-    .await
-    .expect("Failed to connect to switch");
-
-    let switch_client = Arc::new(switch_client);
-
     let config = SecretManager::get_or_prompt::<OPNSenseFirewallCredentials>().await;
     let config = config.unwrap();
@@ -54,7 +32,6 @@ pub async fn get_topology() -> HAClusterTopology {
     let gateway_ipv4 = ipv4!("192.168.1.1");
     let gateway_ip = IpAddr::V4(gateway_ipv4);
     harmony::topology::HAClusterTopology {
-        kubeconfig: None,
         domain_name: "demo.harmony.mcd".to_string(),
         router: Arc::new(UnmanagedRouter::new(
             gateway_ip,
@@ -75,8 +52,7 @@ pub async fn get_topology() -> HAClusterTopology {
             name: "cp0".to_string(),
         },
         workers: vec![],
-        node_exporter: opnsense.clone(),
-        switch_client: switch_client.clone(),
+        switch: vec![],
     }
 }
@@ -93,9 +69,3 @@ pub fn get_inventory() -> Inventory {
         control_plane_host: vec![],
     }
 }
-
-#[derive(Secret, Serialize, Deserialize, Debug)]
-pub struct BrocadeSwitchAuth {
-    pub username: String,
-    pub password: String,
-}


@@ -1,14 +0,0 @@
[package]
name = "example-openbao"
edition = "2024"
version.workspace = true
readme.workspace = true
license.workspace = true
[dependencies]
harmony = { path = "../../harmony" }
harmony_cli = { path = "../../harmony_cli" }
harmony_macros = { path = "../../harmony_macros" }
harmony_types = { path = "../../harmony_types" }
tokio.workspace = true
url.workspace = true


@@ -1,7 +0,0 @@
To install an OpenBao instance with Harmony, simply run `cargo run -p example-openbao`.
Depending on your environment configuration, it will either install a local k3d cluster and deploy OpenBao on it, or install to a remote cluster.
Then follow the OpenBao documentation to initialize and unseal the instance, which makes OpenBao usable:
https://openbao.org/docs/platform/k8s/helm/run/
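
As a minimal sketch of that last step (assuming the chart's default naming, where the server pod of the `openbao` release in the `openbao` namespace is `openbao-0`):

    kubectl exec -ti openbao-0 -n openbao -- bao operator init
    kubectl exec -ti openbao-0 -n openbao -- bao operator unseal

Repeat the unseal command until the threshold of key shares printed by `operator init` has been provided.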


@@ -1,67 +0,0 @@
use std::{collections::HashMap, str::FromStr};
use harmony::{
inventory::Inventory,
modules::helm::chart::{HelmChartScore, HelmRepository, NonBlankString},
topology::K8sAnywhereTopology,
};
use harmony_macros::hurl;
#[tokio::main]
async fn main() {
let values_yaml = Some(
r#"server:
standalone:
enabled: true
config: |
listener "tcp" {
tls_disable = true
address = "[::]:8200"
cluster_address = "[::]:8201"
}
storage "file" {
path = "/openbao/data"
}
service:
enabled: true
dataStorage:
enabled: true
size: 10Gi
storageClass: null
accessMode: ReadWriteOnce
auditStorage:
enabled: true
size: 10Gi
storageClass: null
accessMode: ReadWriteOnce"#
.to_string(),
);
let openbao = HelmChartScore {
namespace: Some(NonBlankString::from_str("openbao").unwrap()),
release_name: NonBlankString::from_str("openbao").unwrap(),
chart_name: NonBlankString::from_str("openbao/openbao").unwrap(),
chart_version: None,
values_overrides: None,
values_yaml,
create_namespace: true,
install_only: true,
repository: Some(HelmRepository::new(
"openbao".to_string(),
hurl!("https://openbao.github.io/openbao-helm"),
true,
)),
};
harmony_cli::run(
Inventory::autoload(),
K8sAnywhereTopology::from_env(),
vec![Box::new(openbao)],
None,
)
.await
.unwrap();
}


@@ -16,6 +16,3 @@ harmony_macros = { path = "../../harmony_macros" }
 log = { workspace = true }
 env_logger = { workspace = true }
 url = { workspace = true }
-harmony_secret = { path = "../../harmony_secret" }
-brocade = { path = "../../brocade" }
-serde = { workspace = true }


@@ -3,11 +3,10 @@ use std::{
     sync::Arc,
 };
 
-use brocade::BrocadeOptions;
 use cidr::Ipv4Cidr;
 use harmony::{
     hardware::{HostCategory, Location, PhysicalHost, SwitchGroup},
-    infra::{brocade::BrocadeSwitchClient, opnsense::OPNSenseManagementInterface},
+    infra::opnsense::OPNSenseManagementInterface,
     inventory::Inventory,
     modules::{
         dummy::{ErrorScore, PanicScore, SuccessScore},
@@ -19,9 +18,7 @@ use harmony::{
     topology::{LogicalHost, UnmanagedRouter},
 };
 use harmony_macros::{ip, mac_address};
-use harmony_secret::{Secret, SecretManager};
 use harmony_types::net::Url;
-use serde::{Deserialize, Serialize};
 
 #[tokio::main]
 async fn main() {
@@ -30,26 +27,6 @@ async fn main() {
         name: String::from("opnsense-1"),
     };
 
-    let switch_auth = SecretManager::get_or_prompt::<BrocadeSwitchAuth>()
-        .await
-        .expect("Failed to get credentials");
-
-    let switches: Vec<IpAddr> = vec![ip!("192.168.5.101")]; // TODO: Adjust me
-    let brocade_options = Some(BrocadeOptions {
-        dry_run: *harmony::config::DRY_RUN,
-        ..Default::default()
-    });
-    let switch_client = BrocadeSwitchClient::init(
-        &switches,
-        &switch_auth.username,
-        &switch_auth.password,
-        brocade_options,
-    )
-    .await
-    .expect("Failed to connect to switch");
-
-    let switch_client = Arc::new(switch_client);
-
     let opnsense = Arc::new(
         harmony::infra::opnsense::OPNSenseFirewall::new(firewall, None, "root", "opnsense").await,
     );
@@ -57,7 +34,6 @@ async fn main() {
     let gateway_ipv4 = Ipv4Addr::new(10, 100, 8, 1);
     let gateway_ip = IpAddr::V4(gateway_ipv4);
     let topology = harmony::topology::HAClusterTopology {
-        kubeconfig: None,
         domain_name: "demo.harmony.mcd".to_string(),
         router: Arc::new(UnmanagedRouter::new(
             gateway_ip,
@@ -78,8 +54,7 @@ async fn main() {
             name: "cp0".to_string(),
         },
         workers: vec![],
-        node_exporter: opnsense.clone(),
-        switch_client: switch_client.clone(),
+        switch: vec![],
     };
 
     let inventory = Inventory {
@@ -134,9 +109,3 @@ async fn main() {
     .await
     .unwrap();
 }
-
-#[derive(Secret, Serialize, Deserialize, Debug)]
-pub struct BrocadeSwitchAuth {
-    pub username: String,
-    pub password: String,
-}

View File

@@ -1,21 +0,0 @@
[package]
name = "example-opnsense-node-exporter"
edition = "2024"
version.workspace = true
readme.workspace = true
license.workspace = true
[dependencies]
harmony = { path = "../../harmony" }
harmony_cli = { path = "../../harmony_cli" }
harmony_types = { path = "../../harmony_types" }
harmony_secret = { path = "../../harmony_secret" }
harmony_secret_derive = { path = "../../harmony_secret_derive" }
cidr = { workspace = true }
tokio = { workspace = true }
harmony_macros = { path = "../../harmony_macros" }
log = { workspace = true }
env_logger = { workspace = true }
url = { workspace = true }
serde.workspace = true
async-trait.workspace = true

View File

@@ -1,79 +0,0 @@
use std::{
net::{IpAddr, Ipv4Addr},
sync::Arc,
};
use async_trait::async_trait;
use cidr::Ipv4Cidr;
use harmony::{
executors::ExecutorError,
hardware::{HostCategory, Location, PhysicalHost, SwitchGroup},
infra::opnsense::OPNSenseManagementInterface,
inventory::Inventory,
modules::opnsense::node_exporter::NodeExporterScore,
topology::{
HAClusterTopology, LogicalHost, PreparationError, PreparationOutcome, Topology,
UnmanagedRouter, node_exporter::NodeExporter,
},
};
use harmony_macros::{ip, ipv4, mac_address};
struct OpnSenseTopology {
node_exporter: Arc<dyn NodeExporter>,
}
#[async_trait]
impl Topology for OpnSenseTopology {
async fn ensure_ready(&self) -> Result<PreparationOutcome, PreparationError> {
Ok(PreparationOutcome::Success {
details: "Success".to_string(),
})
}
fn name(&self) -> &str {
"OpnsenseTopology"
}
}
#[async_trait]
impl NodeExporter for OpnSenseTopology {
async fn ensure_initialized(&self) -> Result<(), ExecutorError> {
self.node_exporter.ensure_initialized().await
}
async fn commit_config(&self) -> Result<(), ExecutorError> {
self.node_exporter.commit_config().await
}
async fn reload_restart(&self) -> Result<(), ExecutorError> {
self.node_exporter.reload_restart().await
}
}
#[tokio::main]
async fn main() {
let firewall = harmony::topology::LogicalHost {
ip: ip!("192.168.1.1"),
name: String::from("fw0"),
};
let opnsense = Arc::new(
harmony::infra::opnsense::OPNSenseFirewall::new(firewall, None, "root", "opnsense").await,
);
let topology = OpnSenseTopology {
node_exporter: opnsense.clone(),
};
let inventory = Inventory::empty();
let node_exporter_score = NodeExporterScore {};
harmony_cli::run(
inventory,
topology,
vec![Box::new(node_exporter_score)],
None,
)
.await
.unwrap();
}

View File

@@ -1,11 +0,0 @@
[package]
name = "example-remove-rook-osd"
edition = "2024"
version.workspace = true
readme.workspace = true
license.workspace = true
[dependencies]
harmony = { version = "0.1.0", path = "../../harmony" }
harmony_cli = { version = "0.1.0", path = "../../harmony_cli" }
tokio.workspace = true

View File

@@ -1,18 +0,0 @@
use harmony::{
inventory::Inventory, modules::storage::ceph::ceph_remove_osd_score::CephRemoveOsd,
topology::K8sAnywhereTopology,
};
#[tokio::main]
async fn main() {
let ceph_score = CephRemoveOsd {
osd_deployment_name: "rook-ceph-osd-2".to_string(),
rook_ceph_namespace: "rook-ceph".to_string(),
};
let topology = K8sAnywhereTopology::from_env();
let inventory = Inventory::autoload();
harmony_cli::run(inventory, topology, vec![Box::new(ceph_score)], None)
.await
.unwrap();
}

View File

@@ -16,6 +16,7 @@ use harmony_types::net::Url;
async fn main() { async fn main() {
let application = Arc::new(RustWebapp { let application = Arc::new(RustWebapp {
name: "test-rhob-monitoring".to_string(), name: "test-rhob-monitoring".to_string(),
dns: "test-rhob-monitoring.harmony.mcd".to_string(),
project_root: PathBuf::from("./webapp"), // Relative from 'harmony-path' param project_root: PathBuf::from("./webapp"), // Relative from 'harmony-path' param
framework: Some(RustWebFramework::Leptos), framework: Some(RustWebFramework::Leptos),
service_port: 3000, service_port: 3000,

View File

@@ -19,6 +19,7 @@ use harmony_macros::hurl;
async fn main() { async fn main() {
let application = Arc::new(RustWebapp { let application = Arc::new(RustWebapp {
name: "harmony-example-rust-webapp".to_string(), name: "harmony-example-rust-webapp".to_string(),
dns: "harmony-example-rust-webapp.harmony.mcd".to_string(),
project_root: PathBuf::from("./webapp"), project_root: PathBuf::from("./webapp"),
framework: Some(RustWebFramework::Leptos), framework: Some(RustWebFramework::Leptos),
service_port: 3000, service_port: 3000,

View File

@@ -2,12 +2,11 @@ use harmony::{
inventory::Inventory, inventory::Inventory,
modules::{ modules::{
application::{ application::{
ApplicationScore, RustWebFramework, RustWebapp, features::{rhob_monitoring::Monitoring, PackagingDeployment}, ApplicationScore, RustWebFramework, RustWebapp
features::{PackagingDeployment, rhob_monitoring::Monitoring},
}, },
monitoring::alert_channel::discord_alert_channel::DiscordWebhook, monitoring::alert_channel::discord_alert_channel::DiscordWebhook,
}, },
topology::K8sAnywhereTopology, topology::{K8sAnywhereTopology, LocalhostTopology},
}; };
use harmony_macros::hurl; use harmony_macros::hurl;
use std::{path::PathBuf, sync::Arc}; use std::{path::PathBuf, sync::Arc};
@@ -22,8 +21,8 @@ async fn main() {
}); });
let discord_webhook = DiscordWebhook { let discord_webhook = DiscordWebhook {
name: "harmony_demo".to_string(), name: "harmony-demo".to_string(),
url: hurl!("http://not_a_url.com"), url: hurl!("https://discord.com/api/webhooks/1415391405681021050/V6KzV41vQ7yvbn7BchejRu9C8OANxy0i2ESZOz2nvCxG8xAY3-2i3s5MS38k568JKTzH"),
}; };
let app = ApplicationScore { let app = ApplicationScore {

View File

@@ -3,7 +3,7 @@ use harmony::{
modules::{ modules::{
application::{ application::{
ApplicationScore, RustWebFramework, RustWebapp, ApplicationScore, RustWebFramework, RustWebapp,
features::{Monitoring, PackagingDeployment}, features::{PackagingDeployment, rhob_monitoring::Monitoring},
}, },
monitoring::alert_channel::discord_alert_channel::DiscordWebhook, monitoring::alert_channel::discord_alert_channel::DiscordWebhook,
}, },
@@ -16,6 +16,7 @@ use std::{path::PathBuf, sync::Arc};
async fn main() { async fn main() {
let application = Arc::new(RustWebapp { let application = Arc::new(RustWebapp {
name: "harmony-example-tryrust".to_string(), name: "harmony-example-tryrust".to_string(),
dns: "tryrust.example.harmony.mcd".to_string(),
project_root: PathBuf::from("./tryrust.org"), // <== Project root, in this case it is a project_root: PathBuf::from("./tryrust.org"), // <== Project root, in this case it is a
// submodule // submodule
framework: Some(RustWebFramework::Leptos), framework: Some(RustWebFramework::Leptos),

View File

@@ -77,9 +77,6 @@ harmony_secret = { path = "../harmony_secret" }
askama.workspace = true askama.workspace = true
sqlx.workspace = true sqlx.workspace = true
inquire.workspace = true inquire.workspace = true
brocade = { path = "../brocade" }
option-ext = "0.2.0"
[dev-dependencies] [dev-dependencies]
pretty_assertions.workspace = true pretty_assertions.workspace = true
assertor.workspace = true

View File

@@ -30,7 +30,6 @@ pub enum InterpretName {
Lamp, Lamp,
ApplicationMonitoring, ApplicationMonitoring,
K8sPrometheusCrdAlerting, K8sPrometheusCrdAlerting,
CephRemoveOsd,
DiscoverInventoryAgent, DiscoverInventoryAgent,
CephClusterHealth, CephClusterHealth,
Custom(&'static str), Custom(&'static str),
@@ -62,7 +61,6 @@ impl std::fmt::Display for InterpretName {
InterpretName::Lamp => f.write_str("LAMP"), InterpretName::Lamp => f.write_str("LAMP"),
InterpretName::ApplicationMonitoring => f.write_str("ApplicationMonitoring"), InterpretName::ApplicationMonitoring => f.write_str("ApplicationMonitoring"),
InterpretName::K8sPrometheusCrdAlerting => f.write_str("K8sPrometheusCrdAlerting"), InterpretName::K8sPrometheusCrdAlerting => f.write_str("K8sPrometheusCrdAlerting"),
InterpretName::CephRemoveOsd => f.write_str("CephRemoveOsd"),
InterpretName::DiscoverInventoryAgent => f.write_str("DiscoverInventoryAgent"), InterpretName::DiscoverInventoryAgent => f.write_str("DiscoverInventoryAgent"),
InterpretName::CephClusterHealth => f.write_str("CephClusterHealth"), InterpretName::CephClusterHealth => f.write_str("CephClusterHealth"),
InterpretName::Custom(name) => f.write_str(name), InterpretName::Custom(name) => f.write_str(name),

View File

@@ -1,31 +1,33 @@
use async_trait::async_trait; use async_trait::async_trait;
use harmony_macros::ip; use harmony_macros::ip;
use harmony_types::{ use harmony_types::net::MacAddress;
net::{MacAddress, Url}, use harmony_types::net::Url;
switch::PortLocation,
};
use kube::api::ObjectMeta;
use log::debug; use log::debug;
use log::info; use log::info;
use crate::data::FileContent;
use crate::executors::ExecutorError;
use crate::topology::PxeOptions; use crate::topology::PxeOptions;
use crate::{data::FileContent, modules::okd::crd::nmstate::NMState};
use crate::{
executors::ExecutorError, modules::okd::crd::nmstate::NodeNetworkConfigurationPolicySpec,
};
use crate::{
modules::okd::crd::nmstate::{self, NodeNetworkConfigurationPolicy},
topology::node_exporter::NodeExporter,
};
use super::{ use super::DHCPStaticEntry;
DHCPStaticEntry, DhcpServer, DnsRecord, DnsRecordType, DnsServer, Firewall, HostNetworkConfig, use super::DhcpServer;
HttpServer, IpAddress, K8sclient, LoadBalancer, LoadBalancerService, LogicalHost, use super::DnsRecord;
PreparationError, PreparationOutcome, Router, Switch, SwitchClient, SwitchError, TftpServer, use super::DnsRecordType;
Topology, k8s::K8sClient, use super::DnsServer;
}; use super::Firewall;
use super::HttpServer;
use super::IpAddress;
use super::K8sclient;
use super::LoadBalancer;
use super::LoadBalancerService;
use super::LogicalHost;
use super::PreparationError;
use super::PreparationOutcome;
use super::Router;
use super::TftpServer;
use std::collections::BTreeMap; use super::Topology;
use super::k8s::K8sClient;
use std::sync::Arc; use std::sync::Arc;
#[derive(Debug, Clone)] #[derive(Debug, Clone)]
@@ -38,12 +40,10 @@ pub struct HAClusterTopology {
pub tftp_server: Arc<dyn TftpServer>, pub tftp_server: Arc<dyn TftpServer>,
pub http_server: Arc<dyn HttpServer>, pub http_server: Arc<dyn HttpServer>,
pub dns_server: Arc<dyn DnsServer>, pub dns_server: Arc<dyn DnsServer>,
pub node_exporter: Arc<dyn NodeExporter>,
pub switch_client: Arc<dyn SwitchClient>,
pub bootstrap_host: LogicalHost, pub bootstrap_host: LogicalHost,
pub control_plane: Vec<LogicalHost>, pub control_plane: Vec<LogicalHost>,
pub workers: Vec<LogicalHost>, pub workers: Vec<LogicalHost>,
pub kubeconfig: Option<String>, pub switch: Vec<LogicalHost>,
} }
#[async_trait] #[async_trait]
@@ -62,17 +62,9 @@ impl Topology for HAClusterTopology {
#[async_trait] #[async_trait]
impl K8sclient for HAClusterTopology { impl K8sclient for HAClusterTopology {
async fn k8s_client(&self) -> Result<Arc<K8sClient>, String> { async fn k8s_client(&self) -> Result<Arc<K8sClient>, String> {
match &self.kubeconfig { Ok(Arc::new(
None => Ok(Arc::new( K8sClient::try_default().await.map_err(|e| e.to_string())?,
K8sClient::try_default().await.map_err(|e| e.to_string())?, ))
)),
Some(kubeconfig) => {
let Some(client) = K8sClient::from_kubeconfig(&kubeconfig).await else {
return Err("Failed to create k8s client".to_string());
};
Ok(Arc::new(client))
}
}
} }
} }
@@ -97,193 +89,6 @@ impl HAClusterTopology {
.to_string() .to_string()
} }
async fn ensure_nmstate_operator_installed(&self) -> Result<(), String> {
let k8s_client = self.k8s_client().await?;
debug!("Installing NMState controller...");
k8s_client.apply_url(url::Url::parse("https://github.com/nmstate/kubernetes-nmstate/releases/download/v0.84.0/nmstate.io_nmstates.yaml
").unwrap(), Some("nmstate"))
.await
.map_err(|e| e.to_string())?;
debug!("Creating NMState namespace...");
k8s_client.apply_url(url::Url::parse("https://github.com/nmstate/kubernetes-nmstate/releases/download/v0.84.0/namespace.yaml
").unwrap(), Some("nmstate"))
.await
.map_err(|e| e.to_string())?;
debug!("Creating NMState service account...");
k8s_client.apply_url(url::Url::parse("https://github.com/nmstate/kubernetes-nmstate/releases/download/v0.84.0/service_account.yaml
").unwrap(), Some("nmstate"))
.await
.map_err(|e| e.to_string())?;
debug!("Creating NMState role...");
k8s_client.apply_url(url::Url::parse("https://github.com/nmstate/kubernetes-nmstate/releases/download/v0.84.0/role.yaml
").unwrap(), Some("nmstate"))
.await
.map_err(|e| e.to_string())?;
debug!("Creating NMState role binding...");
k8s_client.apply_url(url::Url::parse("https://github.com/nmstate/kubernetes-nmstate/releases/download/v0.84.0/role_binding.yaml
").unwrap(), Some("nmstate"))
.await
.map_err(|e| e.to_string())?;
debug!("Creating NMState operator...");
k8s_client.apply_url(url::Url::parse("https://github.com/nmstate/kubernetes-nmstate/releases/download/v0.84.0/operator.yaml
").unwrap(), Some("nmstate"))
.await
.map_err(|e| e.to_string())?;
k8s_client
.wait_until_deployment_ready("nmstate-operator", Some("nmstate"), None)
.await?;
let nmstate = NMState {
metadata: ObjectMeta {
name: Some("nmstate".to_string()),
..Default::default()
},
..Default::default()
};
debug!("Creating NMState: {nmstate:#?}");
k8s_client
.apply(&nmstate, None)
.await
.map_err(|e| e.to_string())?;
Ok(())
}
fn get_next_bond_id(&self) -> u8 {
42 // FIXME: Find a better way to declare the bond id
}
async fn configure_bond(&self, config: &HostNetworkConfig) -> Result<(), SwitchError> {
self.ensure_nmstate_operator_installed()
.await
.map_err(|e| {
SwitchError::new(format!(
"Can't configure bond, NMState operator not available: {e}"
))
})?;
let bond_config = self.create_bond_configuration(config);
debug!(
"Applying NMState bond config for host {}: {bond_config:#?}",
config.host_id
);
self.k8s_client()
.await
.unwrap()
.apply(&bond_config, None)
.await
.map_err(|e| SwitchError::new(format!("Failed to configure bond: {e}")))?;
Ok(())
}
fn create_bond_configuration(
&self,
config: &HostNetworkConfig,
) -> NodeNetworkConfigurationPolicy {
let host_name = &config.host_id;
let bond_id = self.get_next_bond_id();
let bond_name = format!("bond{bond_id}");
info!("Configuring bond '{bond_name}' for host '{host_name}'...");
let mut bond_mtu: Option<u32> = None;
let mut copy_mac_from: Option<String> = None;
let mut bond_ports = Vec::new();
let mut interfaces: Vec<nmstate::InterfaceSpec> = Vec::new();
for switch_port in &config.switch_ports {
let interface_name = switch_port.interface.name.clone();
interfaces.push(nmstate::InterfaceSpec {
name: interface_name.clone(),
description: Some(format!("Member of bond {bond_name}")),
r#type: "ethernet".to_string(),
state: "up".to_string(),
mtu: Some(switch_port.interface.mtu),
mac_address: Some(switch_port.interface.mac_address.to_string()),
ipv4: Some(nmstate::IpStackSpec {
enabled: Some(false),
..Default::default()
}),
ipv6: Some(nmstate::IpStackSpec {
enabled: Some(false),
..Default::default()
}),
link_aggregation: None,
..Default::default()
});
bond_ports.push(interface_name.clone());
// Use the first port's details for the bond mtu and mac address
if bond_mtu.is_none() {
bond_mtu = Some(switch_port.interface.mtu);
}
if copy_mac_from.is_none() {
copy_mac_from = Some(interface_name);
}
}
interfaces.push(nmstate::InterfaceSpec {
name: bond_name.clone(),
description: Some(format!("Network bond for host {host_name}")),
r#type: "bond".to_string(),
state: "up".to_string(),
copy_mac_from,
ipv4: Some(nmstate::IpStackSpec {
dhcp: Some(true),
enabled: Some(true),
..Default::default()
}),
ipv6: Some(nmstate::IpStackSpec {
dhcp: Some(true),
autoconf: Some(true),
enabled: Some(true),
..Default::default()
}),
link_aggregation: Some(nmstate::BondSpec {
mode: "802.3ad".to_string(),
ports: bond_ports,
..Default::default()
}),
..Default::default()
});
NodeNetworkConfigurationPolicy {
metadata: ObjectMeta {
name: Some(format!("{host_name}-bond-config")),
..Default::default()
},
spec: NodeNetworkConfigurationPolicySpec {
node_selector: Some(BTreeMap::from([(
"kubernetes.io/hostname".to_string(),
host_name.to_string(),
)])),
desired_state: nmstate::DesiredStateSpec { interfaces },
},
}
}
async fn configure_port_channel(&self, config: &HostNetworkConfig) -> Result<(), SwitchError> {
debug!("Configuring port channel: {config:#?}");
let switch_ports = config.switch_ports.iter().map(|s| s.port.clone()).collect();
self.switch_client
.configure_port_channel(&format!("Harmony_{}", config.host_id), switch_ports)
.await
.map_err(|e| SwitchError::new(format!("Failed to configure switch: {e}")))?;
Ok(())
}
pub fn autoload() -> Self { pub fn autoload() -> Self {
let dummy_infra = Arc::new(DummyInfra {}); let dummy_infra = Arc::new(DummyInfra {});
let dummy_host = LogicalHost { let dummy_host = LogicalHost {
@@ -292,7 +97,6 @@ impl HAClusterTopology {
}; };
Self { Self {
kubeconfig: None,
domain_name: "DummyTopology".to_string(), domain_name: "DummyTopology".to_string(),
router: dummy_infra.clone(), router: dummy_infra.clone(),
load_balancer: dummy_infra.clone(), load_balancer: dummy_infra.clone(),
@@ -301,11 +105,10 @@ impl HAClusterTopology {
tftp_server: dummy_infra.clone(), tftp_server: dummy_infra.clone(),
http_server: dummy_infra.clone(), http_server: dummy_infra.clone(),
dns_server: dummy_infra.clone(), dns_server: dummy_infra.clone(),
node_exporter: dummy_infra.clone(),
switch_client: dummy_infra.clone(),
bootstrap_host: dummy_host, bootstrap_host: dummy_host,
control_plane: vec![], control_plane: vec![],
workers: vec![], workers: vec![],
switch: vec![],
} }
} }
} }
@@ -460,44 +263,6 @@ impl HttpServer for HAClusterTopology {
} }
} }
#[async_trait]
impl Switch for HAClusterTopology {
async fn setup_switch(&self) -> Result<(), SwitchError> {
self.switch_client.setup().await?;
Ok(())
}
async fn get_port_for_mac_address(
&self,
mac_address: &MacAddress,
) -> Result<Option<PortLocation>, SwitchError> {
let port = self.switch_client.find_port(mac_address).await?;
Ok(port)
}
async fn configure_host_network(&self, config: &HostNetworkConfig) -> Result<(), SwitchError> {
self.configure_bond(config).await?;
self.configure_port_channel(config).await
}
//TODO add snmp here
}
#[async_trait]
impl NodeExporter for HAClusterTopology {
async fn ensure_initialized(&self) -> Result<(), ExecutorError> {
self.node_exporter.ensure_initialized().await
}
async fn commit_config(&self) -> Result<(), ExecutorError> {
self.node_exporter.commit_config().await
}
async fn reload_restart(&self) -> Result<(), ExecutorError> {
self.node_exporter.reload_restart().await
}
}
#[derive(Debug)] #[derive(Debug)]
pub struct DummyInfra; pub struct DummyInfra;
@@ -567,8 +332,8 @@ impl DhcpServer for DummyInfra {
} }
async fn set_dhcp_range( async fn set_dhcp_range(
&self, &self,
_start: &IpAddress, start: &IpAddress,
_end: &IpAddress, end: &IpAddress,
) -> Result<(), ExecutorError> { ) -> Result<(), ExecutorError> {
unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA) unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA)
} }
@@ -684,40 +449,3 @@ impl DnsServer for DummyInfra {
unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA) unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA)
} }
} }
#[async_trait]
impl NodeExporter for DummyInfra {
async fn ensure_initialized(&self) -> Result<(), ExecutorError> {
unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA)
}
async fn commit_config(&self) -> Result<(), ExecutorError> {
unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA)
}
async fn reload_restart(&self) -> Result<(), ExecutorError> {
unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA)
}
}
#[async_trait]
impl SwitchClient for DummyInfra {
async fn setup(&self) -> Result<(), SwitchError> {
unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA)
}
async fn find_port(
&self,
_mac_address: &MacAddress,
) -> Result<Option<PortLocation>, SwitchError> {
unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA)
}
async fn configure_port_channel(
&self,
_channel_name: &str,
_switch_ports: Vec<PortLocation>,
) -> Result<u8, SwitchError> {
unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA)
}
}

View File

@@ -1,12 +1,10 @@
use std::time::Duration; use std::{collections::HashMap, time::Duration};
use derive_new::new; use derive_new::new;
use k8s_openapi::{ use k8s_openapi::{
ClusterResourceScope, NamespaceResourceScope, ClusterResourceScope, NamespaceResourceScope,
api::{ api::{apps::v1::Deployment, core::v1::Pod},
apps::v1::Deployment, apiextensions_apiserver::pkg::apis::apiextensions::v1::CustomResourceDefinition,
core::v1::{Pod, ServiceAccount},
},
apimachinery::pkg::version::Info, apimachinery::pkg::version::Info,
}; };
use kube::{ use kube::{
@@ -14,7 +12,6 @@ use kube::{
api::{Api, AttachParams, DeleteParams, ListParams, Patch, PatchParams, ResourceExt}, api::{Api, AttachParams, DeleteParams, ListParams, Patch, PatchParams, ResourceExt},
config::{KubeConfigOptions, Kubeconfig}, config::{KubeConfigOptions, Kubeconfig},
core::ErrorResponse, core::ErrorResponse,
discovery::{ApiCapabilities, Scope},
error::DiscoveryError, error::DiscoveryError,
runtime::reflector::Lookup, runtime::reflector::Lookup,
}; };
@@ -23,12 +20,11 @@ use kube::{
api::{ApiResource, GroupVersionKind}, api::{ApiResource, GroupVersionKind},
runtime::wait::await_condition, runtime::wait::await_condition,
}; };
use log::{debug, error, info, trace, warn}; use log::{debug, error, trace};
use serde::{Serialize, de::DeserializeOwned}; use serde::{Serialize, de::DeserializeOwned};
use serde_json::json; use serde_json::{json, Value};
use similar::TextDiff; use similar::TextDiff;
use tokio::{io::AsyncReadExt, time::sleep}; use tokio::{io::AsyncReadExt, time::sleep};
use url::Url;
#[derive(new, Clone)] #[derive(new, Clone)]
pub struct K8sClient { pub struct K8sClient {
@@ -62,9 +58,146 @@ impl K8sClient {
}) })
} }
pub async fn service_account_api(&self, namespace: &str) -> Api<ServiceAccount> { // Returns true if any deployment in the given namespace matching the label selector
let api: Api<ServiceAccount> = Api::namespaced(self.client.clone(), namespace); // has status.availableReplicas > 0 (or condition Available=True).
api pub async fn has_healthy_deployment_with_label(
&self,
namespace: &str,
label_selector: &str,
) -> Result<bool, Error> {
let api: Api<Deployment> = Api::namespaced(self.client.clone(), namespace);
let lp = ListParams::default().labels(label_selector);
let list = api.list(&lp).await?;
for d in list.items {
// Check AvailableReplicas > 0 or Available condition
let available = d
.status
.as_ref()
.and_then(|s| s.available_replicas)
.unwrap_or(0);
if available > 0 {
return Ok(true);
}
// Fallback: scan conditions
if let Some(conds) = d.status.as_ref().and_then(|s| s.conditions.as_ref()) {
if conds.iter().any(|c| {
c.type_ == "Available"
&& c.status == "True"
}) {
return Ok(true);
}
}
}
Ok(false)
}
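A hypothetical call site for the helper above, probing for an already-installed, healthy Argo CD controller; the namespace and label selector are illustrative assumptions, not values taken from this codebase:

    // Sketch of a call site (namespace and label are assumed for illustration).
    async fn argocd_controller_ready(client: &K8sClient) -> Result<bool, kube::Error> {
        client
            .has_healthy_deployment_with_label("argocd", "app.kubernetes.io/part-of=argocd")
            .await
    }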
// Cluster-wide: returns namespaces that have at least one healthy deployment
// matching the label selector (equivalent to kubectl -A -l ...).
pub async fn list_namespaces_with_healthy_deployments(
&self,
label_selector: &str,
) -> Result<Vec<String>, Error> {
let api: Api<Deployment> = Api::all(self.client.clone());
let lp = ListParams::default().labels(label_selector);
let list = api.list(&lp).await?;
let mut healthy_ns: HashMap<String, bool> = HashMap::new();
for d in list.items {
let ns = match d.metadata.namespace.clone() {
Some(n) => n,
None => continue,
};
let available = d
.status
.as_ref()
.and_then(|s| s.available_replicas)
.unwrap_or(0);
let is_healthy = if available > 0 {
true
} else {
d.status
.as_ref()
.and_then(|s| s.conditions.as_ref())
.map(|conds| {
conds.iter().any(|c| {
c.type_ == "Available"
&& c.status == "True"
})
})
.unwrap_or(false)
};
if is_healthy {
healthy_ns.insert(ns, true);
}
}
Ok(healthy_ns.into_keys().collect())
}
// Get the application-controller ServiceAccount name (fallback to default)
pub async fn get_argocd_controller_sa_name(&self, ns: &str) -> Result<String, Error> {
let api: Api<Deployment> = Api::namespaced(self.client.clone(), ns);
let lp = ListParams::default().labels("app.kubernetes.io/component=controller");
let list = api.list(&lp).await?;
if let Some(dep) = list.items.get(0) {
if let Some(sa) = dep
.spec
.as_ref()
.and_then(|ds| ds.template.spec.as_ref())
.and_then(|ps| ps.service_account_name.clone())
{
return Ok(sa);
}
}
Ok("argocd-application-controller".to_string())
}
// List ClusterRoleBindings dynamically and return as JSON values
pub async fn list_clusterrolebindings_json(&self) -> Result<Vec<Value>, Error> {
let gvk = kube::api::GroupVersionKind::gvk(
"rbac.authorization.k8s.io",
"v1",
"ClusterRoleBinding",
);
let ar = kube::api::ApiResource::from_gvk(&gvk);
let api: Api<kube::api::DynamicObject> = Api::all_with(self.client.clone(), &ar);
let crbs = api.list(&ListParams::default()).await?;
let mut out = Vec::new();
for o in crbs {
let v = serde_json::to_value(&o).unwrap_or(Value::Null);
out.push(v);
}
Ok(out)
}
// Determine if Argo controller in ns has cluster-wide permissions via CRBs
// TODO This does not belong in the generic k8s client, should be refactored at some point
pub async fn is_argocd_cluster_wide(&self, ns: &str) -> Result<bool, Error> {
let sa = self.get_argocd_controller_sa_name(ns).await?;
let crbs = self.list_clusterrolebindings_json().await?;
let sa_user = format!("system:serviceaccount:{}:{}", ns, sa);
for crb in crbs {
if let Some(subjects) = crb.get("subjects").and_then(|s| s.as_array()) {
for subj in subjects {
let kind = subj.get("kind").and_then(|v| v.as_str()).unwrap_or("");
let name = subj.get("name").and_then(|v| v.as_str()).unwrap_or("");
let subj_ns = subj.get("namespace").and_then(|v| v.as_str()).unwrap_or("");
if (kind == "ServiceAccount" && name == sa && subj_ns == ns)
|| (kind == "User" && name == sa_user)
{
return Ok(true);
}
}
}
}
Ok(false)
}
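Together these helpers support a simple discovery flow: list namespaces with a healthy controller, then classify each install as namespaced or cluster-wide. A sketch of how they could compose, with the label selector again an assumption:

    // Sketch: discover Argo CD installs and whether each is cluster-scoped.
    async fn discover_argocd(client: &K8sClient) -> Result<Vec<(String, bool)>, kube::Error> {
        let namespaces = client
            .list_namespaces_with_healthy_deployments("app.kubernetes.io/part-of=argocd")
            .await?;
        let mut out = Vec::new();
        for ns in namespaces {
            let cluster_wide = client.is_argocd_cluster_wide(&ns).await?;
            out.push((ns, cluster_wide));
        }
        Ok(out)
    }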
pub async fn has_crd(&self, name: &str) -> Result<bool, Error> {
let api: Api<CustomResourceDefinition> = Api::all(self.client.clone());
let lp = ListParams::default().fields(&format!("metadata.name={}", name));
let crds = api.list(&lp).await?;
Ok(!crds.items.is_empty())
} }
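has_crd gives a cheap existence check before writing custom resources, for example gating Application creation on the Argo CD CRD; the CRD name below is the upstream default, assumed here:

    // Sketch: bail out early when the Application CRD is absent.
    async fn can_write_applications(client: &K8sClient) -> Result<bool, kube::Error> {
        client.has_crd("applications.argoproj.io").await
    }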
pub async fn get_apiserver_version(&self) -> Result<Info, Error> { pub async fn get_apiserver_version(&self) -> Result<Info, Error> {
@@ -90,8 +223,7 @@ impl K8sClient {
} else { } else {
Api::default_namespaced_with(self.client.clone(), &gvk) Api::default_namespaced_with(self.client.clone(), &gvk)
}; };
Ok(resource.get(name).await?)
resource.get(name).await
} }
pub async fn get_deployment( pub async fn get_deployment(
@@ -100,15 +232,11 @@ impl K8sClient {
namespace: Option<&str>, namespace: Option<&str>,
) -> Result<Option<Deployment>, Error> { ) -> Result<Option<Deployment>, Error> {
let deps: Api<Deployment> = if let Some(ns) = namespace { let deps: Api<Deployment> = if let Some(ns) = namespace {
debug!("getting namespaced deployment");
Api::namespaced(self.client.clone(), ns) Api::namespaced(self.client.clone(), ns)
} else { } else {
debug!("getting default namespace deployment");
Api::default_namespaced(self.client.clone()) Api::default_namespaced(self.client.clone())
}; };
Ok(deps.get_opt(name).await?)
debug!("getting deployment {} in ns {}", name, namespace.unwrap());
deps.get_opt(name).await
} }
pub async fn get_pod(&self, name: &str, namespace: Option<&str>) -> Result<Option<Pod>, Error> { pub async fn get_pod(&self, name: &str, namespace: Option<&str>) -> Result<Option<Pod>, Error> {
@@ -117,8 +245,7 @@ impl K8sClient {
} else { } else {
Api::default_namespaced(self.client.clone()) Api::default_namespaced(self.client.clone())
}; };
Ok(pods.get_opt(name).await?)
pods.get_opt(name).await
} }
pub async fn scale_deployment( pub async fn scale_deployment(
@@ -139,7 +266,7 @@ impl K8sClient {
} }
}); });
let pp = PatchParams::default(); let pp = PatchParams::default();
let scale = Patch::Merge(&patch); let scale = Patch::Apply(&patch);
deployments.patch_scale(name, &pp, &scale).await?; deployments.patch_scale(name, &pp, &scale).await?;
Ok(()) Ok(())
} }
@@ -161,9 +288,9 @@ impl K8sClient {
pub async fn wait_until_deployment_ready( pub async fn wait_until_deployment_ready(
&self, &self,
name: &str, name: String,
namespace: Option<&str>, namespace: Option<&str>,
timeout: Option<Duration>, timeout: Option<u64>,
) -> Result<(), String> { ) -> Result<(), String> {
let api: Api<Deployment>; let api: Api<Deployment>;
@@ -173,9 +300,9 @@ impl K8sClient {
api = Api::default_namespaced(self.client.clone()); api = Api::default_namespaced(self.client.clone());
} }
let establish = await_condition(api, name, conditions::is_deployment_completed()); let establish = await_condition(api, name.as_str(), conditions::is_deployment_completed());
let timeout = timeout.unwrap_or(Duration::from_secs(120)); let t = timeout.unwrap_or(300);
let res = tokio::time::timeout(timeout, establish).await; let res = tokio::time::timeout(std::time::Duration::from_secs(t), establish).await;
if res.is_ok() { if res.is_ok() {
Ok(()) Ok(())
@@ -265,7 +392,7 @@ impl K8sClient {
if let Some(s) = status.status { if let Some(s) = status.status {
let mut stdout_buf = String::new(); let mut stdout_buf = String::new();
if let Some(mut stdout) = process.stdout() { if let Some(mut stdout) = process.stdout().take() {
stdout stdout
.read_to_string(&mut stdout_buf) .read_to_string(&mut stdout_buf)
.await .await
@@ -371,14 +498,14 @@ impl K8sClient {
Ok(current) => { Ok(current) => {
trace!("Received current value {current:#?}"); trace!("Received current value {current:#?}");
// The resource exists, so we calculate and display a diff. // The resource exists, so we calculate and display a diff.
println!("\nPerforming dry-run for resource: '{name}'"); println!("\nPerforming dry-run for resource: '{}'", name);
let mut current_yaml = serde_yaml::to_value(&current).unwrap_or_else(|_| { let mut current_yaml = serde_yaml::to_value(&current).unwrap_or_else(|_| {
panic!("Could not serialize current value : {current:#?}") panic!("Could not serialize current value : {current:#?}")
}); });
if current_yaml.is_mapping() && current_yaml.get("status").is_some() { if current_yaml.is_mapping() && current_yaml.get("status").is_some() {
let map = current_yaml.as_mapping_mut().unwrap(); let map = current_yaml.as_mapping_mut().unwrap();
let removed = map.remove_entry("status"); let removed = map.remove_entry("status");
trace!("Removed status {removed:?}"); trace!("Removed status {:?}", removed);
} else { } else {
trace!( trace!(
"Did not find status entry for current object {}/{}", "Did not find status entry for current object {}/{}",
@@ -407,14 +534,14 @@ impl K8sClient {
similar::ChangeTag::Insert => "+", similar::ChangeTag::Insert => "+",
similar::ChangeTag::Equal => " ", similar::ChangeTag::Equal => " ",
}; };
print!("{sign}{change}"); print!("{}{}", sign, change);
} }
// In a dry run, we return the new resource state that would have been applied. // In a dry run, we return the new resource state that would have been applied.
Ok(resource.clone()) Ok(resource.clone())
} }
Err(Error::Api(ErrorResponse { code: 404, .. })) => { Err(Error::Api(ErrorResponse { code: 404, .. })) => {
// The resource does not exist, so the "diff" is the entire new resource. // The resource does not exist, so the "diff" is the entire new resource.
println!("\nPerforming dry-run for new resource: '{name}'"); println!("\nPerforming dry-run for new resource: '{}'", name);
println!( println!(
"Resource does not exist. It would be created with the following content:" "Resource does not exist. It would be created with the following content:"
); );
@@ -423,14 +550,14 @@ impl K8sClient {
// Print each line of the new resource with a '+' prefix. // Print each line of the new resource with a '+' prefix.
for line in new_yaml.lines() { for line in new_yaml.lines() {
println!("+{line}"); println!("+{}", line);
} }
// In a dry run, we return the new resource state that would have been created. // In a dry run, we return the new resource state that would have been created.
Ok(resource.clone()) Ok(resource.clone())
} }
Err(e) => { Err(e) => {
// Another API error occurred. // Another API error occurred.
error!("Failed to get resource '{name}': {e}"); error!("Failed to get resource '{}': {}", name, e);
Err(e) Err(e)
} }
} }
@@ -445,7 +572,7 @@ impl K8sClient {
where where
K: Resource + Clone + std::fmt::Debug + DeserializeOwned + serde::Serialize, K: Resource + Clone + std::fmt::Debug + DeserializeOwned + serde::Serialize,
<K as Resource>::Scope: ApplyStrategy<K>, <K as Resource>::Scope: ApplyStrategy<K>,
<K as Resource>::DynamicType: Default, <K as kube::Resource>::DynamicType: Default,
{ {
let mut result = Vec::new(); let mut result = Vec::new();
for r in resource.iter() { for r in resource.iter() {
@@ -510,7 +637,10 @@ impl K8sClient {
// 6. Apply the object to the cluster using Server-Side Apply. // 6. Apply the object to the cluster using Server-Side Apply.
// This will create the resource if it doesn't exist, or update it if it does. // This will create the resource if it doesn't exist, or update it if it does.
println!("Applying '{name}' in namespace '{namespace}'...",); println!(
"Applying Argo Application '{}' in namespace '{}'...",
name, namespace
);
let patch_params = PatchParams::apply("harmony"); // Use a unique field manager name let patch_params = PatchParams::apply("harmony"); // Use a unique field manager name
let result = api.patch(name, &patch_params, &Patch::Apply(&obj)).await?; let result = api.patch(name, &patch_params, &Patch::Apply(&obj)).await?;
@@ -519,51 +649,6 @@ impl K8sClient {
Ok(()) Ok(())
} }
/// Apply a resource from a URL
///
/// It is the equivalent of `kubectl apply -f <url>`
pub async fn apply_url(&self, url: Url, ns: Option<&str>) -> Result<(), Error> {
let patch_params = PatchParams::apply("harmony");
let discovery = kube::Discovery::new(self.client.clone()).run().await?;
let yaml = reqwest::get(url)
.await
.expect("Could not get URL")
.text()
.await
.expect("Could not get content from URL");
for doc in multidoc_deserialize(&yaml).expect("failed to parse YAML from file") {
let obj: DynamicObject =
serde_yaml::from_value(doc).expect("cannot apply without valid YAML");
let namespace = obj.metadata.namespace.as_deref().or(ns);
let type_meta = obj
.types
.as_ref()
.expect("cannot apply object without valid TypeMeta");
let gvk = GroupVersionKind::try_from(type_meta)
.expect("cannot apply object without valid GroupVersionKind");
let name = obj.name_any();
if let Some((ar, caps)) = discovery.resolve_gvk(&gvk) {
let api = get_dynamic_api(ar, caps, self.client.clone(), namespace, false);
trace!(
"Applying {}: \n{}",
gvk.kind,
serde_yaml::to_string(&obj).expect("Failed to serialize YAML")
);
let data: serde_json::Value =
serde_json::to_value(&obj).expect("Failed to serialize JSON");
let _r = api.patch(&name, &patch_params, &Patch::Apply(data)).await?;
debug!("applied {} {}", gvk.kind, name);
} else {
warn!("Cannot apply document for unknown {gvk:?}");
}
}
Ok(())
}
pub(crate) async fn from_kubeconfig(path: &str) -> Option<K8sClient> { pub(crate) async fn from_kubeconfig(path: &str) -> Option<K8sClient> {
let k = match Kubeconfig::read_from(path) { let k = match Kubeconfig::read_from(path) {
Ok(k) => k, Ok(k) => k,
@@ -583,31 +668,6 @@ impl K8sClient {
} }
} }
fn get_dynamic_api(
resource: ApiResource,
capabilities: ApiCapabilities,
client: Client,
ns: Option<&str>,
all: bool,
) -> Api<DynamicObject> {
if capabilities.scope == Scope::Cluster || all {
Api::all_with(client, &resource)
} else if let Some(namespace) = ns {
Api::namespaced_with(client, namespace, &resource)
} else {
Api::default_namespaced_with(client, &resource)
}
}
fn multidoc_deserialize(data: &str) -> Result<Vec<serde_yaml::Value>, serde_yaml::Error> {
use serde::Deserialize;
let mut docs = vec![];
for de in serde_yaml::Deserializer::from_str(data) {
docs.push(serde_yaml::Value::deserialize(de)?);
}
Ok(docs)
}
pub trait ApplyStrategy<K: Resource> { pub trait ApplyStrategy<K: Resource> {
fn get_api(client: &Client, ns: Option<&str>) -> Api<K>; fn get_api(client: &Client, ns: Option<&str>) -> Api<K>;
} }

View File

@@ -1,13 +1,8 @@
use std::{collections::BTreeMap, process::Command, sync::Arc, time::Duration}; use std::{process::Command, sync::Arc};
use async_trait::async_trait; use async_trait::async_trait;
use base64::{Engine, engine::general_purpose}; use kube::api::GroupVersionKind;
use k8s_openapi::api::{ use log::{debug, info, trace, warn};
core::v1::Secret,
rbac::v1::{ClusterRoleBinding, RoleRef, Subject},
};
use kube::api::{DynamicObject, GroupVersionKind, ObjectMeta};
use log::{debug, info, warn};
use serde::Serialize; use serde::Serialize;
use tokio::sync::OnceCell; use tokio::sync::OnceCell;
@@ -17,26 +12,14 @@ use crate::{
inventory::Inventory, inventory::Inventory,
modules::{ modules::{
k3d::K3DInstallationScore, k3d::K3DInstallationScore,
k8s::ingress::{K8sIngressScore, PathType}, monitoring::kube_prometheus::crd::{
monitoring::{ crd_alertmanager_config::CRDPrometheus,
grafana::{grafana::Grafana, helm::helm_grafana::grafana_helm_chart_score}, prometheus_operator::prometheus_operator_helm_chart_score,
kube_prometheus::crd::{ rhob_alertmanager_config::RHOBObservability,
crd_alertmanager_config::CRDPrometheus,
crd_grafana::{
Grafana as GrafanaCRD, GrafanaCom, GrafanaDashboard,
GrafanaDashboardDatasource, GrafanaDashboardSpec, GrafanaDatasource,
GrafanaDatasourceConfig, GrafanaDatasourceJsonData,
GrafanaDatasourceSecureJsonData, GrafanaDatasourceSpec, GrafanaSpec,
},
crd_prometheuses::LabelSelector,
prometheus_operator::prometheus_operator_helm_chart_score,
rhob_alertmanager_config::RHOBObservability,
service_monitor::ServiceMonitor,
},
}, },
prometheus::{ prometheus::{
k8s_prometheus_alerting_score::K8sPrometheusCRDAlertingScore, k8s_prometheus_alerting_score::K8sPrometheusCRDAlertingScore,
prometheus::PrometheusMonitoring, rhob_alerting_score::RHOBAlertingScore, prometheus::PrometheusApplicationMonitoring, rhob_alerting_score::RHOBAlertingScore,
}, },
}, },
score::Score, score::Score,
@@ -81,13 +64,14 @@ enum K8sSource {
pub struct K8sAnywhereTopology { pub struct K8sAnywhereTopology {
k8s_state: Arc<OnceCell<Option<K8sState>>>, k8s_state: Arc<OnceCell<Option<K8sState>>>,
tenant_manager: Arc<OnceCell<K8sTenantManager>>, tenant_manager: Arc<OnceCell<K8sTenantManager>>,
k8s_distribution: Arc<OnceCell<KubernetesDistribution>>, flavour: Arc<OnceCell<KubernetesDistribution>>,
config: Arc<K8sAnywhereConfig>, config: Arc<K8sAnywhereConfig>,
} }
#[async_trait] #[async_trait]
impl K8sclient for K8sAnywhereTopology { impl K8sclient for K8sAnywhereTopology {
async fn k8s_client(&self) -> Result<Arc<K8sClient>, String> { async fn k8s_client(&self) -> Result<Arc<K8sClient>, String> {
trace!("getting k8s client");
let state = match self.k8s_state.get() { let state = match self.k8s_state.get() {
Some(state) => state, Some(state) => state,
None => return Err("K8s state not initialized yet".to_string()), None => return Err("K8s state not initialized yet".to_string()),
@@ -103,172 +87,41 @@ impl K8sclient for K8sAnywhereTopology {
} }
#[async_trait] #[async_trait]
impl Grafana for K8sAnywhereTopology { impl PrometheusApplicationMonitoring<CRDPrometheus> for K8sAnywhereTopology {
async fn ensure_grafana_operator(
&self,
inventory: &Inventory,
) -> Result<PreparationOutcome, PreparationError> {
debug!("ensure grafana operator");
let client = self.k8s_client().await.unwrap();
let grafana_gvk = GroupVersionKind {
group: "grafana.integreatly.org".to_string(),
version: "v1beta1".to_string(),
kind: "Grafana".to_string(),
};
let name = "grafanas.grafana.integreatly.org";
let ns = "grafana";
let grafana_crd = client
.get_resource_json_value(name, Some(ns), &grafana_gvk)
.await;
match grafana_crd {
Ok(_) => {
return Ok(PreparationOutcome::Success {
details: "Found grafana CRDs in cluster".to_string(),
});
}
Err(_) => {
return self
.install_grafana_operator(inventory, Some("grafana"))
.await;
}
};
}
async fn install_grafana(&self) -> Result<PreparationOutcome, PreparationError> {
let ns = "grafana";
let mut label = BTreeMap::new();
label.insert("dashboards".to_string(), "grafana".to_string());
let label_selector = LabelSelector {
match_labels: label.clone(),
match_expressions: vec![],
};
let client = self.k8s_client().await?;
let grafana = self.build_grafana(ns, &label);
client.apply(&grafana, Some(ns)).await?;
//TODO change this to an ensure-ready check or something better than a fixed timeout
client
.wait_until_deployment_ready(
"grafana-grafana-deployment",
Some("grafana"),
Some(Duration::from_secs(30)),
)
.await?;
let sa_name = "grafana-grafana-sa";
let token_secret_name = "grafana-sa-token-secret";
let sa_token_secret = self.build_sa_token_secret(token_secret_name, sa_name, ns);
client.apply(&sa_token_secret, Some(ns)).await?;
let secret_gvk = GroupVersionKind {
group: "".to_string(),
version: "v1".to_string(),
kind: "Secret".to_string(),
};
let secret = client
.get_resource_json_value(token_secret_name, Some(ns), &secret_gvk)
.await?;
let token = format!(
"Bearer {}",
self.extract_and_normalize_token(&secret).unwrap()
);
debug!("creating grafana clusterrole binding");
let clusterrolebinding =
self.build_cluster_rolebinding(sa_name, "cluster-monitoring-view", ns);
client.apply(&clusterrolebinding, Some(ns)).await?;
debug!("creating grafana datasource crd");
let thanos_url = format!(
"https://{}",
self.get_domain("thanos-querier-openshift-monitoring")
.await
.unwrap()
);
let thanos_openshift_datasource = self.build_grafana_datasource(
"thanos-openshift-monitoring",
ns,
&label_selector,
&thanos_url,
&token,
);
client.apply(&thanos_openshift_datasource, Some(ns)).await?;
debug!("creating grafana dashboard crd");
let dashboard = self.build_grafana_dashboard(ns, &label_selector);
client.apply(&dashboard, Some(ns)).await?;
debug!("creating grafana ingress");
let grafana_ingress = self.build_grafana_ingress(ns).await;
grafana_ingress
.interpret(&Inventory::empty(), self)
.await
.map_err(|e| PreparationError::new(e.to_string()))?;
Ok(PreparationOutcome::Success {
details: "Installed grafana composants".to_string(),
})
}
}
#[async_trait]
impl PrometheusMonitoring<CRDPrometheus> for K8sAnywhereTopology {
async fn install_prometheus( async fn install_prometheus(
&self, &self,
sender: &CRDPrometheus, sender: &CRDPrometheus,
_inventory: &Inventory, inventory: &Inventory,
_receivers: Option<Vec<Box<dyn AlertReceiver<CRDPrometheus>>>>, receivers: Option<Vec<Box<dyn AlertReceiver<CRDPrometheus>>>>,
) -> Result<PreparationOutcome, PreparationError> {
let client = self.k8s_client().await?;
for monitor in sender.service_monitor.iter() {
client
.apply(monitor, Some(&sender.namespace))
.await
.map_err(|e| PreparationError::new(e.to_string()))?;
}
Ok(PreparationOutcome::Success {
details: "successfuly installed prometheus components".to_string(),
})
}
async fn ensure_prometheus_operator(
&self,
sender: &CRDPrometheus,
_inventory: &Inventory,
) -> Result<PreparationOutcome, PreparationError> { ) -> Result<PreparationOutcome, PreparationError> {
let po_result = self.ensure_prometheus_operator(sender).await?; let po_result = self.ensure_prometheus_operator(sender).await?;
match po_result { if po_result == PreparationOutcome::Noop {
PreparationOutcome::Success { details: _ } => { debug!("Skipping Prometheus CR installation due to missing operator.");
debug!("Detected prometheus crds operator present in cluster."); return Ok(po_result);
return Ok(po_result); }
}
PreparationOutcome::Noop => { let result = self
debug!("Skipping Prometheus CR installation due to missing operator."); .get_k8s_prometheus_application_score(sender.clone(), receivers)
return Ok(po_result); .await
} .interpret(inventory, self)
.await;
match result {
Ok(outcome) => match outcome.status {
InterpretStatus::SUCCESS => Ok(PreparationOutcome::Success {
details: outcome.message,
}),
InterpretStatus::NOOP => Ok(PreparationOutcome::Noop),
_ => Err(PreparationError::new(outcome.message)),
},
Err(err) => Err(PreparationError::new(err.to_string())),
} }
} }
} }
#[async_trait] #[async_trait]
impl PrometheusMonitoring<RHOBObservability> for K8sAnywhereTopology { impl PrometheusApplicationMonitoring<RHOBObservability> for K8sAnywhereTopology {
async fn install_prometheus( async fn install_prometheus(
&self, &self,
sender: &RHOBObservability, sender: &RHOBObservability,
@@ -302,14 +155,6 @@ impl PrometheusMonitoring<RHOBObservability> for K8sAnywhereTopology {
Err(err) => Err(PreparationError::new(err.to_string())), Err(err) => Err(PreparationError::new(err.to_string())),
} }
} }
async fn ensure_prometheus_operator(
&self,
sender: &RHOBObservability,
inventory: &Inventory,
) -> Result<PreparationOutcome, PreparationError> {
todo!()
}
} }
impl Serialize for K8sAnywhereTopology { impl Serialize for K8sAnywhereTopology {
@@ -326,7 +171,7 @@ impl K8sAnywhereTopology {
Self { Self {
k8s_state: Arc::new(OnceCell::new()), k8s_state: Arc::new(OnceCell::new()),
tenant_manager: Arc::new(OnceCell::new()), tenant_manager: Arc::new(OnceCell::new()),
k8s_distribution: Arc::new(OnceCell::new()), flavour: Arc::new(OnceCell::new()),
config: Arc::new(K8sAnywhereConfig::from_env()), config: Arc::new(K8sAnywhereConfig::from_env()),
} }
} }
@@ -335,13 +180,13 @@ impl K8sAnywhereTopology {
Self { Self {
k8s_state: Arc::new(OnceCell::new()), k8s_state: Arc::new(OnceCell::new()),
tenant_manager: Arc::new(OnceCell::new()), tenant_manager: Arc::new(OnceCell::new()),
k8s_distribution: Arc::new(OnceCell::new()), flavour: Arc::new(OnceCell::new()),
config: Arc::new(config), config: Arc::new(config),
} }
} }
pub async fn get_k8s_distribution(&self) -> Result<&KubernetesDistribution, PreparationError> { pub async fn get_k8s_distribution(&self) -> Result<&KubernetesDistribution, PreparationError> {
self.k8s_distribution self.flavour
.get_or_try_init(async || { .get_or_try_init(async || {
let client = self.k8s_client().await.unwrap(); let client = self.k8s_client().await.unwrap();
@@ -371,180 +216,6 @@ impl K8sAnywhereTopology {
.await .await
} }
fn extract_and_normalize_token(&self, secret: &DynamicObject) -> Option<String> {
let token_b64 = secret
.data
.get("token")
.or_else(|| secret.data.get("data").and_then(|d| d.get("token")))
.and_then(|v| v.as_str())?;
let bytes = general_purpose::STANDARD.decode(token_b64).ok()?;
let s = String::from_utf8(bytes).ok()?;
let cleaned = s
.trim_matches(|c: char| c.is_whitespace() || c == '\0')
.to_string();
Some(cleaned)
}
pub fn build_cluster_rolebinding(
&self,
service_account_name: &str,
clusterrole_name: &str,
ns: &str,
) -> ClusterRoleBinding {
ClusterRoleBinding {
metadata: ObjectMeta {
name: Some(format!("{}-view-binding", service_account_name)),
..Default::default()
},
role_ref: RoleRef {
api_group: "rbac.authorization.k8s.io".into(),
kind: "ClusterRole".into(),
name: clusterrole_name.into(),
},
subjects: Some(vec![Subject {
kind: "ServiceAccount".into(),
name: service_account_name.into(),
namespace: Some(ns.into()),
..Default::default()
}]),
}
}
pub fn build_sa_token_secret(
&self,
secret_name: &str,
service_account_name: &str,
ns: &str,
) -> Secret {
let mut annotations = BTreeMap::new();
annotations.insert(
"kubernetes.io/service-account.name".to_string(),
service_account_name.to_string(),
);
Secret {
metadata: ObjectMeta {
name: Some(secret_name.into()),
namespace: Some(ns.into()),
annotations: Some(annotations),
..Default::default()
},
type_: Some("kubernetes.io/service-account-token".to_string()),
..Default::default()
}
}
fn build_grafana_datasource(
&self,
name: &str,
ns: &str,
label_selector: &LabelSelector,
url: &str,
token: &str,
) -> GrafanaDatasource {
let mut json_data = BTreeMap::new();
json_data.insert("timeInterval".to_string(), "5s".to_string());
GrafanaDatasource {
metadata: ObjectMeta {
name: Some(name.to_string()),
namespace: Some(ns.to_string()),
..Default::default()
},
spec: GrafanaDatasourceSpec {
instance_selector: label_selector.clone(),
allow_cross_namespace_import: Some(true),
values_from: None,
datasource: GrafanaDatasourceConfig {
access: "proxy".to_string(),
name: name.to_string(),
r#type: "prometheus".to_string(),
url: url.to_string(),
database: None,
json_data: Some(GrafanaDatasourceJsonData {
time_interval: Some("60s".to_string()),
http_header_name1: Some("Authorization".to_string()),
tls_skip_verify: Some(true),
oauth_pass_thru: Some(true),
}),
secure_json_data: Some(GrafanaDatasourceSecureJsonData {
http_header_value1: Some(format!("Bearer {token}")),
}),
is_default: Some(false),
editable: Some(true),
},
},
}
}
fn build_grafana_dashboard(
&self,
ns: &str,
label_selector: &LabelSelector,
) -> GrafanaDashboard {
let graf_dashboard = GrafanaDashboard {
metadata: ObjectMeta {
name: Some(format!("grafana-dashboard-{}", ns)),
namespace: Some(ns.to_string()),
..Default::default()
},
spec: GrafanaDashboardSpec {
resync_period: Some("30s".to_string()),
instance_selector: label_selector.clone(),
datasources: Some(vec![GrafanaDashboardDatasource {
input_name: "DS_PROMETHEUS".to_string(),
datasource_name: "thanos-openshift-monitoring".to_string(),
}]),
json: None,
grafana_com: Some(GrafanaCom {
id: 17406,
revision: None,
}),
},
};
graf_dashboard
}
fn build_grafana(&self, ns: &str, labels: &BTreeMap<String, String>) -> GrafanaCRD {
let grafana = GrafanaCRD {
metadata: ObjectMeta {
name: Some(format!("grafana-{}", ns)),
namespace: Some(ns.to_string()),
labels: Some(labels.clone()),
..Default::default()
},
spec: GrafanaSpec {
config: None,
admin_user: None,
admin_password: None,
ingress: None,
persistence: None,
resources: None,
},
};
grafana
}
async fn build_grafana_ingress(&self, ns: &str) -> K8sIngressScore {
let domain = self.get_domain(&format!("grafana-{}", ns)).await.unwrap();
let name = format!("{}-grafana", ns);
let backend_service = format!("grafana-{}-service", ns);
K8sIngressScore {
name: fqdn::fqdn!(&name),
host: fqdn::fqdn!(&domain),
backend_service: fqdn::fqdn!(&backend_service),
port: 3000,
path: Some("/".to_string()),
path_type: Some(PathType::Prefix),
namespace: Some(fqdn::fqdn!(&ns)),
ingress_class_name: Some("openshift-default".to_string()),
}
}
async fn get_cluster_observability_operator_prometheus_application_score( async fn get_cluster_observability_operator_prometheus_application_score(
&self, &self,
sender: RHOBObservability, sender: RHOBObservability,
@@ -562,14 +233,13 @@ impl K8sAnywhereTopology {
&self, &self,
sender: CRDPrometheus, sender: CRDPrometheus,
receivers: Option<Vec<Box<dyn AlertReceiver<CRDPrometheus>>>>, receivers: Option<Vec<Box<dyn AlertReceiver<CRDPrometheus>>>>,
service_monitors: Option<Vec<ServiceMonitor>>,
) -> K8sPrometheusCRDAlertingScore { ) -> K8sPrometheusCRDAlertingScore {
return K8sPrometheusCRDAlertingScore { K8sPrometheusCRDAlertingScore {
sender, sender,
receivers: receivers.unwrap_or_default(), receivers: receivers.unwrap_or_default(),
service_monitors: service_monitors.unwrap_or_default(), service_monitors: vec![],
prometheus_rules: vec![], prometheus_rules: vec![],
}; }
} }
async fn openshift_ingress_operator_available(&self) -> Result<(), PreparationError> { async fn openshift_ingress_operator_available(&self) -> Result<(), PreparationError> {
@@ -837,30 +507,6 @@ impl K8sAnywhereTopology {
details: "prometheus operator present in cluster".into(), details: "prometheus operator present in cluster".into(),
}) })
} }
async fn install_grafana_operator(
&self,
inventory: &Inventory,
ns: Option<&str>,
) -> Result<PreparationOutcome, PreparationError> {
let namespace = ns.unwrap_or("grafana");
info!("installing grafana operator in ns {namespace}");
let tenant = self.get_k8s_tenant_manager()?.get_tenant_config().await;
let mut namespace_scope = false;
if tenant.is_some() {
namespace_scope = true;
}
let _grafana_operator_score = grafana_helm_chart_score(namespace, namespace_scope)
.interpret(inventory, self)
.await
.map_err(|e| PreparationError::new(e.to_string()));
Ok(PreparationOutcome::Success {
details: format!(
"Successfully installed grafana operator in ns {}",
ns.unwrap()
),
})
}
} }
#[derive(Clone, Debug)] #[derive(Clone, Debug)]
@@ -975,36 +621,56 @@ impl TenantManager for K8sAnywhereTopology {
#[async_trait] #[async_trait]
impl Ingress for K8sAnywhereTopology { impl Ingress for K8sAnywhereTopology {
//TODO this is specifically for openshift/okd which violates the k8sanywhere idea
async fn get_domain(&self, service: &str) -> Result<String, PreparationError> { async fn get_domain(&self, service: &str) -> Result<String, PreparationError> {
use log::{trace, warn};
let client = self.k8s_client().await?; let client = self.k8s_client().await?;
if let Some(Some(k8s_state)) = self.k8s_state.get() { if let Some(Some(k8s_state)) = self.k8s_state.get() {
match k8s_state.source { match k8s_state.source {
K8sSource::LocalK3d => Ok(format!("{service}.local.k3d")), K8sSource::LocalK3d => {
// Local developer UX
return Ok(format!("{service}.local.k3d"));
}
K8sSource::Kubeconfig => { K8sSource::Kubeconfig => {
self.openshift_ingress_operator_available().await?; trace!("K8sSource is kubeconfig; attempting to detect domain");
let gvk = GroupVersionKind { // 1) Try OpenShift IngressController domain (backward compatible)
group: "operator.openshift.io".into(), if self.openshift_ingress_operator_available().await.is_ok() {
version: "v1".into(), trace!("OpenShift ingress operator detected; using IngressController");
kind: "IngressController".into(), let gvk = GroupVersionKind {
}; group: "operator.openshift.io".into(),
let ic = client version: "v1".into(),
.get_resource_json_value( kind: "IngressController".into(),
"default", };
Some("openshift-ingress-operator"), let ic = client
&gvk, .get_resource_json_value("default", Some("openshift-ingress-operator"), &gvk)
) .await
.await .map_err(|_| PreparationError::new("Failed to fetch IngressController".to_string()))?;
.map_err(|_| {
PreparationError::new("Failed to fetch IngressController".to_string())
})?;
match ic.data["status"]["domain"].as_str() { if let Some(domain) = ic.data["status"]["domain"].as_str() {
Some(domain) => Ok(format!("{service}.{domain}")), return Ok(format!("{service}.{domain}"));
None => Err(PreparationError::new("Could not find domain".to_string())), } else {
warn!("OpenShift IngressController present but no status.domain set");
}
} else {
trace!("OpenShift ingress operator not detected; trying generic Kubernetes");
} }
// 2) Try NGINX Ingress Controller common setups
// 2.a) Well-known namespace/name for the controller Service
// - upstream default: namespace "ingress-nginx", service "ingress-nginx-controller"
// - some distros: "ingress-nginx-controller" svc in "ingress-nginx" ns
// If found with LoadBalancer ingress hostname, use its base domain.
if let Some(domain) = try_nginx_lb_domain(&client).await? {
return Ok(format!("{service}.{domain}"));
}
// 3) Fallback: internal cluster DNS suffix (service.namespace.svc.cluster.local)
// We don't have tenant namespace here, so we fallback to 'default' with a warning.
warn!("Could not determine external ingress domain; falling back to internal-only DNS");
let internal = format!("{service}.default.svc.cluster.local");
Ok(internal)
} }
} }
} else { } else {
@@ -1014,3 +680,57 @@ impl Ingress for K8sAnywhereTopology {
} }
} }
} }
async fn try_nginx_lb_domain(client: &K8sClient) -> Result<Option<String>, PreparationError> {
use log::{trace, debug};
// Try common service path: svc/ingress-nginx-controller in ns/ingress-nginx
let svc_gvk = GroupVersionKind {
group: "".into(), // core
version: "v1".into(),
kind: "Service".into(),
};
let candidates = [
("ingress-nginx", "ingress-nginx-controller"),
("ingress-nginx", "ingress-nginx-controller-internal"),
("ingress-nginx", "ingress-nginx"), // some charts name the svc like this
("kube-system", "ingress-nginx-controller"), // less common but seen
];
for (ns, name) in candidates {
trace!("Checking NGINX Service {ns}/{name} for LoadBalancer hostname");
if let Ok(svc) = client.get_resource_json_value(name, Some(ns), &svc_gvk).await { // (name, then namespace)
let lb_hosts = svc.data["status"]["loadBalancer"]["ingress"].as_array().cloned().unwrap_or_default();
for entry in lb_hosts {
if let Some(host) = entry.get("hostname").and_then(|v| v.as_str()) {
debug!("Found NGINX LB hostname: {host}");
if let Some(domain) = extract_base_domain(host) {
return Ok(Some(domain.to_string()));
} else {
return Ok(Some(host.to_string())); // already a domain
}
}
if let Some(ip) = entry.get("ip").and_then(|v| v.as_str()) {
// If only an IP is exposed, we can't create a hostname; return None to keep searching
debug!("NGINX LB exposes IP {ip} (no hostname); skipping");
}
}
}
}
Ok(None)
}
fn extract_base_domain(host: &str) -> Option<String> {
    // For a host like a1b2c3d4e5f6abcdef.elb.amazonaws.com the conservative rule
    // below yields the last two labels -> amazonaws.com
    // For a managed DNS name like xyz.example.com -> base domain example.com (keep 2+ labels)
    // Heuristic: keep last 2 labels by default; special-case known multi-label TLDs if needed.
let parts: Vec<&str> = host.split('.').collect();
if parts.len() >= 2 {
// Very conservative: last 2 labels
Some(parts[parts.len() - 2..].join("."))
} else {
None
}
}
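Taken together, these helpers give the domain lookup a three-step fallback: OpenShift IngressController domain, NGINX LoadBalancer hostname, then internal cluster DNS. A quick sketch of the heuristic's behavior (illustrative values, not taken from the diff):

// Sketch: expected outputs of extract_base_domain under the "last two labels" rule.
assert_eq!(
    extract_base_domain("xyz.example.com"),
    Some("example.com".to_string())
);
assert_eq!(
    // Conservative: an ELB host also collapses to its last two labels.
    extract_base_domain("a1b2c3d4e5f6abcdef.elb.amazonaws.com"),
    Some("amazonaws.com".to_string())
);
assert_eq!(extract_base_domain("localhost"), None); // fewer than two labels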

View File

@@ -28,7 +28,13 @@ pub trait LoadBalancer: Send + Sync {
        &self,
        service: &LoadBalancerService,
    ) -> Result<(), ExecutorError> {
-        self.add_service(service).await?;
+        debug!(
+            "Listing LoadBalancer services {:?}",
+            self.list_services().await
+        );
+        if !self.list_services().await.contains(service) {
+            self.add_service(service).await?;
+        }
        Ok(())
    }
}
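The new guard makes ensure_service idempotent: a second call with the same service is a no-op rather than a duplicate registration. A minimal in-memory sketch of the same pattern (hypothetical type, assuming LoadBalancerService: PartialEq + Clone):

struct InMemoryLoadBalancer {
    services: std::sync::Mutex<Vec<LoadBalancerService>>,
}

impl InMemoryLoadBalancer {
    fn ensure_service(&self, service: &LoadBalancerService) {
        let mut services = self.services.lock().unwrap();
        // Only add when absent, so repeated calls converge to a single entry.
        if !services.contains(service) {
            services.push(service.clone());
        }
    }
}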

View File

@@ -1,6 +1,5 @@
mod ha_cluster;
pub mod ingress;
-pub mod node_exporter;
use harmony_types::net::IpAddress;
mod host_binding;
mod http;
@@ -187,7 +186,7 @@ impl TopologyState {
    }
}
-#[derive(Debug)]
+#[derive(Debug, PartialEq)]
pub enum DeploymentTarget {
    LocalDev,
    Staging,

View File

@@ -1,21 +1,10 @@
-use std::{
-    error::Error,
-    fmt::{self, Debug},
-    net::Ipv4Addr,
-    str::FromStr,
-    sync::Arc,
-};
+use std::{net::Ipv4Addr, str::FromStr, sync::Arc};
use async_trait::async_trait;
-use derive_new::new;
-use harmony_types::{
-    id::Id,
-    net::{IpAddress, MacAddress},
-    switch::PortLocation,
-};
+use harmony_types::net::{IpAddress, MacAddress};
use serde::Serialize;
-use crate::{executors::ExecutorError, hardware::PhysicalHost};
+use crate::executors::ExecutorError;
use super::{LogicalHost, k8s::K8sClient};
@@ -26,8 +15,8 @@ pub struct DHCPStaticEntry {
    pub ip: Ipv4Addr,
}
-impl fmt::Display for DHCPStaticEntry {
-    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+impl std::fmt::Display for DHCPStaticEntry {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let mac = self
            .mac
            .iter()
@@ -49,8 +38,8 @@ pub trait Firewall: Send + Sync {
    fn get_host(&self) -> LogicalHost;
}
-impl Debug for dyn Firewall {
-    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+impl std::fmt::Debug for dyn Firewall {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.write_fmt(format_args!("Firewall {}", self.get_ip()))
    }
}
@@ -72,7 +61,7 @@ pub struct PxeOptions {
}
#[async_trait]
-pub trait DhcpServer: Send + Sync + Debug {
+pub trait DhcpServer: Send + Sync + std::fmt::Debug {
    async fn add_static_mapping(&self, entry: &DHCPStaticEntry) -> Result<(), ExecutorError>;
    async fn remove_static_mapping(&self, mac: &MacAddress) -> Result<(), ExecutorError>;
    async fn list_static_mappings(&self) -> Vec<(MacAddress, IpAddress)>;
@@ -111,8 +100,8 @@ pub trait DnsServer: Send + Sync {
    }
}
-impl Debug for dyn DnsServer {
-    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+impl std::fmt::Debug for dyn DnsServer {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.write_fmt(format_args!("DnsServer {}", self.get_ip()))
    }
}
@@ -148,8 +137,8 @@ pub enum DnsRecordType {
    TXT,
}
-impl fmt::Display for DnsRecordType {
-    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+impl std::fmt::Display for DnsRecordType {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            DnsRecordType::A => write!(f, "A"),
            DnsRecordType::AAAA => write!(f, "AAAA"),
@@ -183,77 +172,6 @@ impl FromStr for DnsRecordType {
    }
}
#[async_trait]
pub trait Switch: Send + Sync {
async fn setup_switch(&self) -> Result<(), SwitchError>;
async fn get_port_for_mac_address(
&self,
mac_address: &MacAddress,
) -> Result<Option<PortLocation>, SwitchError>;
async fn configure_host_network(&self, config: &HostNetworkConfig) -> Result<(), SwitchError>;
}
#[derive(Clone, Debug, PartialEq)]
pub struct HostNetworkConfig {
pub host_id: Id,
pub switch_ports: Vec<SwitchPort>,
}
#[derive(Clone, Debug, PartialEq)]
pub struct SwitchPort {
pub interface: NetworkInterface,
pub port: PortLocation,
}
#[derive(Clone, Debug, PartialEq)]
pub struct NetworkInterface {
pub name: String,
pub mac_address: MacAddress,
pub speed_mbps: Option<u32>,
pub mtu: u32,
}
#[derive(Debug, Clone, new)]
pub struct SwitchError {
msg: String,
}
impl fmt::Display for SwitchError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_str(&self.msg)
}
}
impl Error for SwitchError {}
#[async_trait]
pub trait SwitchClient: Debug + Send + Sync {
/// Executes essential, idempotent, one-time initial configuration steps.
///
/// This is an opiniated procedure that setups a switch to provide high availability
/// capabilities as decided by the NationTech team.
///
/// This includes tasks like enabling switchport for all interfaces
/// except the ones intended for Fabric Networking, etc.
///
/// The implementation must ensure the operation is **idempotent** (safe to run multiple times)
/// and that it doesn't break existing configurations.
async fn setup(&self) -> Result<(), SwitchError>;
async fn find_port(
&self,
mac_address: &MacAddress,
) -> Result<Option<PortLocation>, SwitchError>;
async fn configure_port_channel(
&self,
channel_name: &str,
switch_ports: Vec<PortLocation>,
) -> Result<u8, SwitchError>;
}
#[cfg(test)]
mod test {
    use std::sync::Arc;

View File

@@ -1,17 +0,0 @@
use async_trait::async_trait;
use crate::executors::ExecutorError;
#[async_trait]
pub trait NodeExporter: Send + Sync {
async fn ensure_initialized(&self) -> Result<(), ExecutorError>;
async fn commit_config(&self) -> Result<(), ExecutorError>;
async fn reload_restart(&self) -> Result<(), ExecutorError>;
}
//TODO complete this impl
impl std::fmt::Debug for dyn NodeExporter {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.write_fmt(format_args!("NodeExporter ",))
}
}

View File

@@ -21,7 +21,6 @@ pub struct AlertingInterpret<S: AlertSender> {
    pub sender: S,
    pub receivers: Vec<Box<dyn AlertReceiver<S>>>,
    pub rules: Vec<Box<dyn AlertRule<S>>>,
-    pub scrape_targets: Option<Vec<Box<dyn ScrapeTarget<S>>>>,
}
#[async_trait]
@@ -31,7 +30,6 @@ impl<S: AlertSender + Installable<T>, T: Topology> Interpret<T> for AlertingInte
        inventory: &Inventory,
        topology: &T,
    ) -> Result<Outcome, InterpretError> {
-        debug!("hit sender configure for AlertingInterpret");
        self.sender.configure(inventory, topology).await?;
        for receiver in self.receivers.iter() {
            receiver.install(&self.sender).await?;
@@ -40,12 +38,6 @@ impl<S: AlertSender + Installable<T>, T: Topology> Interpret<T> for AlertingInte
debug!("installing rule: {:#?}", rule); debug!("installing rule: {:#?}", rule);
rule.install(&self.sender).await?; rule.install(&self.sender).await?;
} }
if let Some(targets) = &self.scrape_targets {
for target in targets.iter() {
debug!("installing scrape_target: {:#?}", target);
target.install(&self.sender).await?;
}
}
self.sender.ensure_installed(inventory, topology).await?; self.sender.ensure_installed(inventory, topology).await?;
Ok(Outcome::success(format!( Ok(Outcome::success(format!(
"successfully installed alert sender {}", "successfully installed alert sender {}",
@@ -85,7 +77,6 @@ pub trait AlertRule<S: AlertSender>: std::fmt::Debug + Send + Sync {
}
#[async_trait]
-pub trait ScrapeTarget<S: AlertSender>: std::fmt::Debug + Send + Sync {
-    async fn install(&self, sender: &S) -> Result<Outcome, InterpretError>;
-    fn clone_box(&self) -> Box<dyn ScrapeTarget<S>>;
+pub trait ScrapeTarget<S: AlertSender> {
+    async fn install(&self, sender: &S) -> Result<(), InterpretError>;
}

View File

@@ -1,378 +0,0 @@
use async_trait::async_trait;
use brocade::{BrocadeClient, BrocadeOptions, InterSwitchLink, InterfaceStatus, PortOperatingMode};
use harmony_types::{
net::{IpAddress, MacAddress},
switch::{PortDeclaration, PortLocation},
};
use option_ext::OptionExt;
use crate::topology::{SwitchClient, SwitchError};
#[derive(Debug)]
pub struct BrocadeSwitchClient {
brocade: Box<dyn BrocadeClient + Send + Sync>,
}
impl BrocadeSwitchClient {
pub async fn init(
ip_addresses: &[IpAddress],
username: &str,
password: &str,
options: Option<BrocadeOptions>,
) -> Result<Self, brocade::Error> {
let brocade = brocade::init(ip_addresses, 22, username, password, options).await?;
Ok(Self { brocade })
}
}
#[async_trait]
impl SwitchClient for BrocadeSwitchClient {
async fn setup(&self) -> Result<(), SwitchError> {
let stack_topology = self
.brocade
.get_stack_topology()
.await
.map_err(|e| SwitchError::new(e.to_string()))?;
let interfaces = self
.brocade
.get_interfaces()
.await
.map_err(|e| SwitchError::new(e.to_string()))?;
let interfaces: Vec<(String, PortOperatingMode)> = interfaces
.into_iter()
.filter(|interface| {
interface.operating_mode.is_none() && interface.status == InterfaceStatus::Connected
})
.filter(|interface| {
!stack_topology.iter().any(|link: &InterSwitchLink| {
link.local_port == interface.port_location
|| link.remote_port.contains(&interface.port_location)
})
})
.map(|interface| (interface.name.clone(), PortOperatingMode::Access))
.collect();
if interfaces.is_empty() {
return Ok(());
}
self.brocade
.configure_interfaces(interfaces)
.await
.map_err(|e| SwitchError::new(e.to_string()))?;
Ok(())
}
async fn find_port(
&self,
mac_address: &MacAddress,
) -> Result<Option<PortLocation>, SwitchError> {
let table = self
.brocade
.get_mac_address_table()
.await
.map_err(|e| SwitchError::new(format!("{e}")))?;
let port = table
.iter()
.find(|entry| entry.mac_address == *mac_address)
.map(|entry| match &entry.port {
PortDeclaration::Single(port_location) => Ok(port_location.clone()),
_ => Err(SwitchError::new(
"Multiple ports found for MAC address".into(),
)),
});
match port {
Some(Ok(p)) => Ok(Some(p)),
Some(Err(e)) => Err(e),
None => Ok(None),
}
}
async fn configure_port_channel(
&self,
channel_name: &str,
switch_ports: Vec<PortLocation>,
) -> Result<u8, SwitchError> {
let channel_id = self
.brocade
.find_available_channel_id()
.await
.map_err(|e| SwitchError::new(format!("{e}")))?;
self.brocade
.create_port_channel(channel_id, channel_name, &switch_ports)
.await
.map_err(|e| SwitchError::new(format!("{e}")))?;
Ok(channel_id)
}
}
#[cfg(test)]
mod tests {
use std::sync::{Arc, Mutex};
use assertor::*;
use async_trait::async_trait;
use brocade::{
BrocadeClient, BrocadeInfo, Error, InterSwitchLink, InterfaceInfo, InterfaceStatus,
InterfaceType, MacAddressEntry, PortChannelId, PortOperatingMode,
};
use harmony_types::switch::PortLocation;
use crate::{infra::brocade::BrocadeSwitchClient, topology::SwitchClient};
#[tokio::test]
async fn setup_should_configure_ethernet_interfaces_as_access_ports() {
let first_interface = given_interface()
.with_port_location(PortLocation(1, 0, 1))
.build();
let second_interface = given_interface()
.with_port_location(PortLocation(1, 0, 4))
.build();
let brocade = Box::new(FakeBrocadeClient::new(
vec![],
vec![first_interface.clone(), second_interface.clone()],
));
let client = BrocadeSwitchClient {
brocade: brocade.clone(),
};
client.setup().await.unwrap();
let configured_interfaces = brocade.configured_interfaces.lock().unwrap();
assert_that!(*configured_interfaces).contains_exactly(vec![
(first_interface.name.clone(), PortOperatingMode::Access),
(second_interface.name.clone(), PortOperatingMode::Access),
]);
}
#[tokio::test]
async fn setup_with_an_already_configured_interface_should_skip_configuration() {
let brocade = Box::new(FakeBrocadeClient::new(
vec![],
vec![
given_interface()
.with_operating_mode(Some(PortOperatingMode::Access))
.build(),
],
));
let client = BrocadeSwitchClient {
brocade: brocade.clone(),
};
client.setup().await.unwrap();
let configured_interfaces = brocade.configured_interfaces.lock().unwrap();
assert_that!(*configured_interfaces).is_empty();
}
#[tokio::test]
async fn setup_with_a_disconnected_interface_should_skip_configuration() {
let brocade = Box::new(FakeBrocadeClient::new(
vec![],
vec![
given_interface()
.with_status(InterfaceStatus::SfpAbsent)
.build(),
given_interface()
.with_status(InterfaceStatus::NotConnected)
.build(),
],
));
let client = BrocadeSwitchClient {
brocade: brocade.clone(),
};
client.setup().await.unwrap();
let configured_interfaces = brocade.configured_interfaces.lock().unwrap();
assert_that!(*configured_interfaces).is_empty();
}
#[tokio::test]
async fn setup_with_inter_switch_links_should_not_configure_interfaces_used_to_form_stack() {
let brocade = Box::new(FakeBrocadeClient::new(
vec![
given_inter_switch_link()
.between(PortLocation(1, 0, 1), PortLocation(2, 0, 1))
.build(),
given_inter_switch_link()
.between(PortLocation(2, 0, 2), PortLocation(3, 0, 1))
.build(),
],
vec![
given_interface()
.with_port_location(PortLocation(1, 0, 1))
.build(),
given_interface()
.with_port_location(PortLocation(2, 0, 1))
.build(),
given_interface()
.with_port_location(PortLocation(3, 0, 1))
.build(),
],
));
let client = BrocadeSwitchClient {
brocade: brocade.clone(),
};
client.setup().await.unwrap();
let configured_interfaces = brocade.configured_interfaces.lock().unwrap();
assert_that!(*configured_interfaces).is_empty();
}
#[derive(Debug, Clone)]
struct FakeBrocadeClient {
stack_topology: Vec<InterSwitchLink>,
interfaces: Vec<InterfaceInfo>,
configured_interfaces: Arc<Mutex<Vec<(String, PortOperatingMode)>>>,
}
#[async_trait]
impl BrocadeClient for FakeBrocadeClient {
async fn version(&self) -> Result<BrocadeInfo, Error> {
todo!()
}
async fn get_mac_address_table(&self) -> Result<Vec<MacAddressEntry>, Error> {
todo!()
}
async fn get_stack_topology(&self) -> Result<Vec<InterSwitchLink>, Error> {
Ok(self.stack_topology.clone())
}
async fn get_interfaces(&self) -> Result<Vec<InterfaceInfo>, Error> {
Ok(self.interfaces.clone())
}
async fn configure_interfaces(
&self,
interfaces: Vec<(String, PortOperatingMode)>,
) -> Result<(), Error> {
let mut configured_interfaces = self.configured_interfaces.lock().unwrap();
*configured_interfaces = interfaces;
Ok(())
}
async fn find_available_channel_id(&self) -> Result<PortChannelId, Error> {
todo!()
}
async fn create_port_channel(
&self,
_channel_id: PortChannelId,
_channel_name: &str,
_ports: &[PortLocation],
) -> Result<(), Error> {
todo!()
}
async fn clear_port_channel(&self, _channel_name: &str) -> Result<(), Error> {
todo!()
}
}
impl FakeBrocadeClient {
fn new(stack_topology: Vec<InterSwitchLink>, interfaces: Vec<InterfaceInfo>) -> Self {
Self {
stack_topology,
interfaces,
configured_interfaces: Arc::new(Mutex::new(vec![])),
}
}
}
struct InterfaceInfoBuilder {
port_location: Option<PortLocation>,
interface_type: Option<InterfaceType>,
operating_mode: Option<PortOperatingMode>,
status: Option<InterfaceStatus>,
}
impl InterfaceInfoBuilder {
fn build(&self) -> InterfaceInfo {
let interface_type = self
.interface_type
.clone()
.unwrap_or(InterfaceType::Ethernet("TenGigabitEthernet".into()));
let port_location = self.port_location.clone().unwrap_or(PortLocation(1, 0, 1));
let name = format!("{interface_type} {port_location}");
let status = self.status.clone().unwrap_or(InterfaceStatus::Connected);
InterfaceInfo {
name,
port_location,
interface_type,
operating_mode: self.operating_mode.clone(),
status,
}
}
fn with_port_location(self, port_location: PortLocation) -> Self {
Self {
port_location: Some(port_location),
..self
}
}
fn with_operating_mode(self, operating_mode: Option<PortOperatingMode>) -> Self {
Self {
operating_mode,
..self
}
}
fn with_status(self, status: InterfaceStatus) -> Self {
Self {
status: Some(status),
..self
}
}
}
struct InterSwitchLinkBuilder {
link: Option<(PortLocation, PortLocation)>,
}
impl InterSwitchLinkBuilder {
fn build(&self) -> InterSwitchLink {
let link = self
.link
.clone()
.unwrap_or((PortLocation(1, 0, 1), PortLocation(2, 0, 1)));
InterSwitchLink {
local_port: link.0,
remote_port: Some(link.1),
}
}
fn between(self, local_port: PortLocation, remote_port: PortLocation) -> Self {
Self {
link: Some((local_port, remote_port)),
}
}
}
fn given_interface() -> InterfaceInfoBuilder {
InterfaceInfoBuilder {
port_location: None,
interface_type: None,
operating_mode: None,
status: None,
}
}
fn given_inter_switch_link() -> InterSwitchLinkBuilder {
InterSwitchLinkBuilder { link: None }
}
}

View File

@@ -11,7 +11,7 @@ pub struct InventoryRepositoryFactory;
impl InventoryRepositoryFactory {
    pub async fn build() -> Result<Box<dyn InventoryRepository>, RepoError> {
        Ok(Box::new(
-            SqliteInventoryRepository::new(&DATABASE_URL).await?,
+            SqliteInventoryRepository::new(&(*DATABASE_URL)).await?,
        ))
    }
}

View File

@@ -1,4 +1,3 @@
-pub mod brocade;
pub mod executors;
pub mod hp_ilo;
pub mod intel_amt;

View File

@@ -10,7 +10,7 @@ use super::OPNSenseFirewall;
#[async_trait]
impl DnsServer for OPNSenseFirewall {
-    async fn register_hosts(&self, hosts: Vec<DnsRecord>) -> Result<(), ExecutorError> {
+    async fn register_hosts(&self, _hosts: Vec<DnsRecord>) -> Result<(), ExecutorError> {
        todo!("Refactor this to use dnsmasq")
        // let mut writable_opnsense = self.opnsense_config.write().await;
        // let mut dns = writable_opnsense.dns();
@@ -68,7 +68,7 @@ impl DnsServer for OPNSenseFirewall {
        self.host.clone()
    }
-    async fn register_dhcp_leases(&self, register: bool) -> Result<(), ExecutorError> {
+    async fn register_dhcp_leases(&self, _register: bool) -> Result<(), ExecutorError> {
        todo!("Refactor this to use dnsmasq")
        // let mut writable_opnsense = self.opnsense_config.write().await;
        // let mut dns = writable_opnsense.dns();

View File

@@ -26,13 +26,19 @@ impl LoadBalancer for OPNSenseFirewall {
    }
    async fn add_service(&self, service: &LoadBalancerService) -> Result<(), ExecutorError> {
+        warn!(
+            "TODO : the current implementation does not check / cleanup / merge with existing haproxy services properly. Make sure to manually verify that the configuration is correct after executing any operation here"
+        );
        let mut config = self.opnsense_config.write().await;
-        let mut load_balancer = config.load_balancer();
        let (frontend, backend, servers, healthcheck) =
            harmony_load_balancer_service_to_haproxy_xml(service);
-        load_balancer.configure_service(frontend, backend, servers, healthcheck);
+        let mut load_balancer = config.load_balancer();
+        load_balancer.add_backend(backend);
+        load_balancer.add_frontend(frontend);
+        load_balancer.add_servers(servers);
+        if let Some(healthcheck) = healthcheck {
+            load_balancer.add_healthcheck(healthcheck);
+        }
        Ok(())
    }
@@ -100,7 +106,7 @@ pub(crate) fn haproxy_xml_config_to_harmony_loadbalancer(
        .backends
        .backends
        .iter()
-        .find(|b| Some(b.uuid.clone()) == frontend.default_backend);
+        .find(|b| b.uuid == frontend.default_backend);
    let mut health_check = None;
    match matching_backend {
@@ -110,7 +116,8 @@ pub(crate) fn haproxy_xml_config_to_harmony_loadbalancer(
        }
        None => {
            warn!(
-                "HAProxy config could not find a matching backend for frontend {frontend:?}"
+                "HAProxy config could not find a matching backend for frontend {:?}",
+                frontend
            );
        }
    }
@@ -145,11 +152,11 @@ pub(crate) fn get_servers_for_backend(
        .servers
        .iter()
        .filter_map(|server| {
-            let address = server.address.clone()?;
-            let port = server.port?;
            if backend_servers.contains(&server.uuid.as_str()) {
-                return Some(BackendServer { address, port });
+                return Some(BackendServer {
+                    address: server.address.clone(),
+                    port: server.port,
+                });
            }
            None
        })
@@ -340,7 +347,7 @@ pub(crate) fn harmony_load_balancer_service_to_haproxy_xml(
name: format!("frontend_{}", service.listening_port), name: format!("frontend_{}", service.listening_port),
bind: service.listening_port.to_string(), bind: service.listening_port.to_string(),
mode: "tcp".to_string(), // TODO do not depend on health check here mode: "tcp".to_string(), // TODO do not depend on health check here
default_backend: Some(backend.uuid.clone()), default_backend: backend.uuid.clone(),
..Default::default() ..Default::default()
}; };
info!("HAPRoxy frontend and backend mode currently hardcoded to tcp"); info!("HAPRoxy frontend and backend mode currently hardcoded to tcp");
@@ -354,8 +361,8 @@ fn server_to_haproxy_server(server: &BackendServer) -> HAProxyServer {
        uuid: Uuid::new_v4().to_string(),
        name: format!("{}_{}", &server.address, &server.port),
        enabled: 1,
-        address: Some(server.address.clone()),
-        port: Some(server.port),
+        address: server.address.clone(),
+        port: server.port,
        mode: "active".to_string(),
        server_type: "static".to_string(),
        ..Default::default()
@@ -378,8 +385,8 @@ mod tests {
        let mut haproxy = HAProxy::default();
        let server = HAProxyServer {
            uuid: "server1".to_string(),
-            address: Some("192.168.1.1".to_string()),
-            port: Some(80),
+            address: "192.168.1.1".to_string(),
+            port: 80,
            ..Default::default()
        };
        haproxy.servers.servers.push(server);
@@ -404,8 +411,8 @@ mod tests {
        let mut haproxy = HAProxy::default();
        let server = HAProxyServer {
            uuid: "server1".to_string(),
-            address: Some("192.168.1.1".to_string()),
-            port: Some(80),
+            address: "192.168.1.1".to_string(),
+            port: 80,
            ..Default::default()
        };
        haproxy.servers.servers.push(server);
@@ -424,8 +431,8 @@ mod tests {
        let mut haproxy = HAProxy::default();
        let server = HAProxyServer {
            uuid: "server1".to_string(),
-            address: Some("192.168.1.1".to_string()),
-            port: Some(80),
+            address: "192.168.1.1".to_string(),
+            port: 80,
            ..Default::default()
        };
        haproxy.servers.servers.push(server);
@@ -446,16 +453,16 @@ mod tests {
        let mut haproxy = HAProxy::default();
        let server = HAProxyServer {
            uuid: "server1".to_string(),
-            address: Some("some-hostname.test.mcd".to_string()),
-            port: Some(80),
+            address: "some-hostname.test.mcd".to_string(),
+            port: 80,
            ..Default::default()
        };
        haproxy.servers.servers.push(server);
        let server = HAProxyServer {
            uuid: "server2".to_string(),
-            address: Some("192.168.1.2".to_string()),
-            port: Some(8080),
+            address: "192.168.1.2".to_string(),
+            port: 8080,
            ..Default::default()
        };
        haproxy.servers.servers.push(server);
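Reverting address and port to plain (non-Option) fields keeps the conversion total, with no unwrapping on either side. A usage sketch of the mapping these tests exercise (field expectations as set in server_to_haproxy_server above):

// Sketch: BackendServer -> HAProxyServer, per server_to_haproxy_server above.
let server = BackendServer {
    address: "192.168.1.1".to_string(),
    port: 80,
};
let haproxy_server = server_to_haproxy_server(&server);
assert_eq!(haproxy_server.name, "192.168.1.1_80"); // "{address}_{port}"
assert_eq!(haproxy_server.enabled, 1);
assert_eq!(haproxy_server.mode, "active");
assert_eq!(haproxy_server.server_type, "static");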

View File

@@ -4,7 +4,6 @@ mod firewall;
mod http;
mod load_balancer;
mod management;
-pub mod node_exporter;
mod tftp;
use std::sync::Arc;

View File

@@ -1,47 +0,0 @@
use async_trait::async_trait;
use log::debug;
use crate::{
executors::ExecutorError, infra::opnsense::OPNSenseFirewall,
topology::node_exporter::NodeExporter,
};
#[async_trait]
impl NodeExporter for OPNSenseFirewall {
async fn ensure_initialized(&self) -> Result<(), ExecutorError> {
let mut config = self.opnsense_config.write().await;
let node_exporter = config.node_exporter();
if let Some(config) = node_exporter.get_full_config() {
debug!(
"Node exporter available in opnsense config, assuming it is already installed. {config:?}"
);
} else {
config
.install_package("os-node_exporter")
.await
.map_err(|e| {
ExecutorError::UnexpectedError(format!("Executor failed when trying to install os-node_exporter package with error {e:?}"
))
})?;
}
config
.node_exporter()
.enable(true)
.map_err(|e| ExecutorError::UnexpectedError(e.to_string()))?;
Ok(())
}
async fn commit_config(&self) -> Result<(), ExecutorError> {
OPNSenseFirewall::commit_config(self).await
}
async fn reload_restart(&self) -> Result<(), ExecutorError> {
self.opnsense_config
.write()
.await
.node_exporter()
.reload_restart()
.await
.map_err(|e| ExecutorError::UnexpectedError(e.to_string()))
}
}

View File

@@ -21,7 +21,7 @@ pub struct Helm {
    pub skip_schema_validation: Option<bool>,
    pub version: Option<String>,
    pub kube_version: Option<String>,
-    pub api_versions: Vec<String>,
+    // pub api_versions: Vec<String>,
    pub namespace: Option<String>,
}
@@ -105,7 +105,7 @@ impl Default for ArgoApplication {
            skip_schema_validation: None,
            version: None,
            kube_version: None,
-            api_versions: vec![],
+            // api_versions: vec![],
            namespace: None,
        },
        path: "".to_string(),
@@ -155,7 +155,7 @@ impl From<CDApplicationConfig> for ArgoApplication {
                skip_schema_validation: None,
                version: None,
                kube_version: None,
-                api_versions: vec![],
+                // api_versions: vec![],
                namespace: None,
            },
        },
@@ -181,13 +181,11 @@ impl From<CDApplicationConfig> for ArgoApplication {
}
impl ArgoApplication {
-    pub fn to_yaml(&self) -> serde_yaml::Value {
+    pub fn to_yaml(&self, target_namespace: Option<&str>) -> serde_yaml::Value {
        let name = &self.name;
-        let namespace = if let Some(ns) = self.namespace.as_ref() {
-            ns
-        } else {
-            "argocd"
-        };
+        let default_ns = "argocd".to_string();
+        let namespace: &str =
+            target_namespace.unwrap_or(self.namespace.as_ref().unwrap_or(&default_ns));
        let project = &self.project;
        let yaml_str = format!(
@@ -285,7 +283,7 @@ mod tests {
                skip_schema_validation: None,
                version: None,
                kube_version: None,
-                api_versions: vec![],
+                // api_versions: vec![],
                namespace: None,
            },
            path: "".to_string(),
@@ -345,7 +343,7 @@ spec:
        assert_eq!(
            expected_yaml_output.trim(),
-            serde_yaml::to_string(&app.clone().to_yaml())
+            serde_yaml::to_string(&app.clone().to_yaml(None))
                .unwrap()
                .trim()
        );
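The new target_namespace parameter lets the interpret thread the detected control namespace into every generated manifest; resolution order is the caller's override, then the application's own namespace field, then the "argocd" default. A small sketch (the app value is hypothetical):

// Sketch: namespace resolution in to_yaml.
let app = ArgoApplication::default(); // namespace: None
let _cr = app.clone().to_yaml(Some("openshift-gitops")); // caller override wins
let _cr = app.clone().to_yaml(None); // no override, no field -> "argocd" default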

View File

@@ -1,22 +1,19 @@
use async_trait::async_trait;
-use harmony_macros::hurl;
-use kube::{Api, api::GroupVersionKind};
-use log::{debug, warn};
+use log::{debug, info, trace, warn};
use non_blank_string_rs::NonBlankString;
use serde::Serialize;
-use serde::de::DeserializeOwned;
-use std::{process::Command, str::FromStr, sync::Arc};
+use std::{str::FromStr, sync::Arc};
use crate::{
    data::Version,
    interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
    inventory::Inventory,
-    modules::helm::chart::{HelmChartScore, HelmRepository},
+    modules::{
+        argocd::{ArgoDeploymentType, detect_argo_deployment_type},
+        helm::chart::{HelmChartScore, HelmRepository},
+    },
    score::Score,
-    topology::{
-        HelmCommand, K8sclient, PreparationError, PreparationOutcome, Topology, ingress::Ingress,
-        k8s::K8sClient,
-    },
+    topology::{HelmCommand, K8sclient, Topology, ingress::Ingress, k8s::K8sClient},
};
use harmony_types::id::Id;
@@ -25,6 +22,7 @@ use super::ArgoApplication;
#[derive(Debug, Serialize, Clone)]
pub struct ArgoHelmScore {
    pub namespace: String,
+    // TODO: remove and rely on topology (it now knows the flavor)
    pub openshift: bool,
    pub argo_apps: Vec<ArgoApplication>,
}
@@ -55,29 +53,101 @@ impl<T: Topology + K8sclient + HelmCommand + Ingress> Interpret<T> for ArgoInter
        inventory: &Inventory,
        topology: &T,
    ) -> Result<Outcome, InterpretError> {
-        let k8s_client = topology.k8s_client().await?;
-        let svc = format!("argo-{}", self.score.namespace.clone());
+        trace!("Starting ArgoInterpret execution {self:?}");
+        let k8s_client: Arc<K8sClient> = topology.k8s_client().await?;
+        trace!("Got k8s client");
+        let desired_ns = self.score.namespace.clone();
+        debug!("ArgoInterpret detecting cluster configuration");
+        let svc = format!("argo-{}", desired_ns);
        let domain = topology.get_domain(&svc).await?;
-        let helm_score =
-            argo_helm_chart_score(&self.score.namespace, self.score.openshift, &domain);
-        helm_score.interpret(inventory, topology).await?;
+        debug!("Resolved Argo service domain for '{}': {}", svc, domain);
+
+        // Detect current Argo deployment type
+        let current = detect_argo_deployment_type(&k8s_client, &desired_ns).await?;
+        info!("Detected Argo deployment type: {:?}", current);
+
+        // Decide control namespace and whether we must install
+        let (control_ns, must_install) = match current.clone() {
+            ArgoDeploymentType::NotInstalled => {
+                info!(
+                    "Argo CD not installed. Will install via Helm into namespace '{}'.",
+                    desired_ns
+                );
+                (desired_ns.clone(), true)
+            }
+            ArgoDeploymentType::AvailableInDesiredNamespace(ns) => {
+                info!(
+                    "Argo CD already installed by Harmony in '{}'. Skipping install.",
+                    ns
+                );
+                (ns, false)
+            }
+            ArgoDeploymentType::InstalledClusterWide(ns) => {
+                info!(
+                    "Argo CD installed cluster-wide in namespace '{}'.",
+                    ns
+                );
+                (ns, false)
+            }
+            ArgoDeploymentType::InstalledNamespaceScoped(ns) => {
+                // TODO we could support this use case by installing a new argo instance. But that
+                // means handling a few cases that are out of scope for now :
+                // - Whether argo operator is installed
+                // - Managing CRD versions compatibility
+                // - Potentially handling the various k8s flavors and setups we might encounter
+                //
+                // There is a possibility that the helm chart already handles most or even all of these use cases but they are out of scope for now.
+                let msg = format!(
+                    "Argo CD found in '{}' but it is namespace-scoped and not supported for attachment yet.",
+                    ns
+                );
+                warn!("{}", msg);
+                return Err(InterpretError::new(msg));
+            }
+        };
+
+        info!("ArgoCD will be installed : {must_install} . Current argocd status : {current:?} ");
+        if must_install {
+            let helm_score = argo_helm_chart_score(&desired_ns, self.score.openshift, &domain);
+            info!(
+                "Installing Argo CD via Helm into namespace '{}' ...",
+                desired_ns
+            );
+            helm_score.interpret(inventory, topology).await?;
+            info!("Argo CD install complete in '{}'.", desired_ns);
+        }
+
+        let yamls: Vec<serde_yaml::Value> = self
+            .argo_apps
+            .iter()
+            .map(|a| a.to_yaml(Some(&control_ns)))
+            .collect();
+        info!(
+            "Applying {} Argo application object(s) into control namespace '{}'.",
+            yamls.len(),
+            control_ns
+        );
        k8s_client
-            .apply_yaml_many(&self.argo_apps.iter().map(|a| a.to_yaml()).collect(), None)
+            .apply_yaml_many(&yamls, Some(control_ns.as_str()))
            .await
-            .unwrap();
+            .map_err(|e| InterpretError::new(format!("Failed applying Argo CRs: {e}")))?;
        Ok(Outcome::success_with_details(
            format!(
                "ArgoCD {} {}",
                self.argo_apps.len(),
-                match self.argo_apps.len() {
-                    1 => "application",
-                    _ => "applications",
+                if self.argo_apps.len() == 1 {
+                    "application"
+                } else {
+                    "applications"
                }
            ),
-            vec![format!("argo application: http://{}", domain)],
+            vec![
+                format!("control_namespace={}", control_ns),
+                format!("argo ui: http://{}", domain),
+            ],
        ))
    }
@@ -86,7 +156,7 @@ impl<T: Topology + K8sclient + HelmCommand + Ingress> Interpret<T> for ArgoInter
    }
    fn get_version(&self) -> Version {
-        todo!()
+        Version::from("0.1.0").unwrap()
    }
    fn get_status(&self) -> InterpretStatus {
@@ -94,39 +164,7 @@ impl<T: Topology + K8sclient + HelmCommand + Ingress> Interpret<T> for ArgoInter
    }
    fn get_children(&self) -> Vec<Id> {
-        todo!()
+        vec![]
    }
}
-
-impl ArgoInterpret {
-    pub async fn get_host_domain(
-        &self,
-        client: Arc<K8sClient>,
-        openshift: bool,
-    ) -> Result<String, InterpretError> {
-        //This should be the job of the topology to determine if we are in
-        //openshift, potentially we need on openshift topology the same way we create a
-        //localhosttopology
-        match openshift {
-            true => {
-                let gvk = GroupVersionKind {
-                    group: "operator.openshift.io".into(),
-                    version: "v1".into(),
-                    kind: "IngressController".into(),
-                };
-                let ic = client
-                    .get_resource_json_value("default", Some("openshift-ingress-operator"), &gvk)
-                    .await?;
-                match ic.data["status"]["domain"].as_str() {
-                    Some(domain) => return Ok(domain.to_string()),
-                    None => return Err(InterpretError::new("Could not find domain".to_string())),
-                }
-            }
-            false => {
-                todo!()
-            }
-        };
-    }
-}
@@ -1052,7 +1090,7 @@ commitServer:
        install_only: false,
        repository: Some(HelmRepository::new(
            "argo".to_string(),
-            hurl!("https://argoproj.github.io/argo-helm"),
+            url::Url::parse("https://argoproj.github.io/argo-helm").unwrap(),
            true,
        )),
    }

View File

@@ -2,11 +2,7 @@ use crate::modules::application::{
    Application, ApplicationFeature, InstallationError, InstallationOutcome,
};
use crate::modules::monitoring::application_monitoring::application_monitoring_score::ApplicationMonitoringScore;
-use crate::modules::monitoring::grafana::grafana::Grafana;
use crate::modules::monitoring::kube_prometheus::crd::crd_alertmanager_config::CRDPrometheus;
-use crate::modules::monitoring::kube_prometheus::crd::service_monitor::{
-    ServiceMonitor, ServiceMonitorSpec,
-};
use crate::topology::MultiTargetTopology;
use crate::topology::ingress::Ingress;
use crate::{
@@ -18,7 +14,7 @@ use crate::{
    topology::{HelmCommand, K8sclient, Topology, tenant::TenantManager},
};
use crate::{
-    modules::prometheus::prometheus::PrometheusMonitoring,
+    modules::prometheus::prometheus::PrometheusApplicationMonitoring,
    topology::oberservability::monitoring::AlertReceiver,
};
use async_trait::async_trait;
@@ -26,7 +22,6 @@ use base64::{Engine as _, engine::general_purpose};
use harmony_secret::SecretManager;
use harmony_secret_derive::Secret;
use harmony_types::net::Url;
-use kube::api::ObjectMeta;
use log::{debug, info};
use serde::{Deserialize, Serialize};
use std::sync::Arc;
@@ -45,8 +40,7 @@ impl<
        + TenantManager
        + K8sclient
        + MultiTargetTopology
-        + PrometheusMonitoring<CRDPrometheus>
-        + Grafana
+        + PrometheusApplicationMonitoring<CRDPrometheus>
        + Ingress
        + std::fmt::Debug,
    > ApplicationFeature<T> for Monitoring
@@ -63,20 +57,10 @@ impl<
            .unwrap_or_else(|| self.application.name());
        let domain = topology.get_domain("ntfy").await.unwrap();
-        let app_service_monitor = ServiceMonitor {
-            metadata: ObjectMeta {
-                name: Some(self.application.name()),
-                namespace: Some(namespace.clone()),
-                ..Default::default()
-            },
-            spec: ServiceMonitorSpec::default(),
-        };
        let mut alerting_score = ApplicationMonitoringScore {
            sender: CRDPrometheus {
                namespace: namespace.clone(),
                client: topology.k8s_client().await.unwrap(),
-                service_monitor: vec![app_service_monitor],
            },
            application: self.application.clone(),
            receivers: self.alert_receiver.clone(),

View File

@@ -10,12 +10,11 @@ use crate::{
    data::Version,
    inventory::Inventory,
    modules::application::{
-        ApplicationFeature, HelmPackage, InstallationError, InstallationOutcome, OCICompliant,
-        features::{ArgoApplication, ArgoHelmScore},
+        features::{ArgoApplication, ArgoHelmScore}, webapp::Webapp, ApplicationFeature, HelmPackage, InstallationError, InstallationOutcome, OCICompliant
    },
    score::Score,
    topology::{
-        DeploymentTarget, HelmCommand, K8sclient, MultiTargetTopology, Topology, ingress::Ingress,
+        ingress::Ingress, DeploymentTarget, HelmCommand, K8sclient, MultiTargetTopology, Topology
    },
};
@@ -47,11 +46,11 @@ use crate::{
/// - ArgoCD to install/upgrade/rollback/inspect k8s resources
/// - Kubernetes for runtime orchestration
#[derive(Debug, Default, Clone)]
-pub struct PackagingDeployment<A: OCICompliant + HelmPackage> {
+pub struct PackagingDeployment<A: OCICompliant + HelmPackage + Webapp> {
    pub application: Arc<A>,
}
-impl<A: OCICompliant + HelmPackage> PackagingDeployment<A> {
+impl<A: OCICompliant + HelmPackage + Webapp> PackagingDeployment<A> {
    async fn deploy_to_local_k3d(
        &self,
        app_name: String,
@@ -137,7 +136,7 @@ impl<A: OCICompliant + HelmPackage> PackagingDeployment<A> {
#[async_trait]
impl<
-    A: OCICompliant + HelmPackage + Clone + 'static,
+    A: OCICompliant + HelmPackage + Webapp + Clone + 'static,
    T: Topology + HelmCommand + MultiTargetTopology + K8sclient + Ingress + 'static,
> ApplicationFeature<T> for PackagingDeployment<A>
{
@@ -146,10 +145,15 @@ impl<
        topology: &T,
    ) -> Result<InstallationOutcome, InstallationError> {
        let image = self.application.image_name();
-        let domain = topology
-            .get_domain(&self.application.name())
-            .await
-            .map_err(|e| e.to_string())?;
+        let domain = if topology.current_target() == DeploymentTarget::Production {
+            self.application.dns()
+        } else {
+            topology
+                .get_domain(&self.application.name())
+                .await
+                .map_err(|e| e.to_string())?
+        };
        // TODO Write CI/CD workflow files
        // we can autotedect the CI type using the remote url (default to github action for github
@@ -194,7 +198,7 @@ impl<
            openshift: true,
            argo_apps: vec![ArgoApplication::from(CDApplicationConfig {
                // helm pull oci://hub.nationtech.io/harmony/harmony-example-rust-webapp-chart --version 0.1.0
-                version: Version::from("0.1.0").unwrap(),
+                version: Version::from("0.2.1").unwrap(),
                helm_chart_repo_url: "hub.nationtech.io/harmony".to_string(),
                helm_chart_name: format!("{}-chart", self.application.name()),
                values_overrides: None,

View File

@@ -3,7 +3,6 @@ use std::sync::Arc;
use crate::modules::application::{
    Application, ApplicationFeature, InstallationError, InstallationOutcome,
};
-use crate::modules::monitoring::application_monitoring::application_monitoring_score::ApplicationMonitoringScore;
use crate::modules::monitoring::application_monitoring::rhobs_application_monitoring_score::ApplicationRHOBMonitoringScore;
use crate::modules::monitoring::kube_prometheus::crd::rhob_alertmanager_config::RHOBObservability;
@@ -18,7 +17,7 @@ use crate::{
    topology::{HelmCommand, K8sclient, Topology, tenant::TenantManager},
};
use crate::{
-    modules::prometheus::prometheus::PrometheusMonitoring,
+    modules::prometheus::prometheus::PrometheusApplicationMonitoring,
    topology::oberservability::monitoring::AlertReceiver,
};
use async_trait::async_trait;
@@ -42,7 +41,7 @@ impl<
        + MultiTargetTopology
        + Ingress
        + std::fmt::Debug
-        + PrometheusMonitoring<RHOBObservability>,
+        + PrometheusApplicationMonitoring<RHOBObservability>,
    > ApplicationFeature<T> for Monitoring
{
    async fn ensure_installed(

View File

@@ -2,6 +2,7 @@ mod feature;
pub mod features;
pub mod oci;
mod rust;
+mod webapp;
use std::sync::Arc;
pub use feature::*;

View File

@@ -16,6 +16,7 @@ use tar::{Builder, Header};
use walkdir::WalkDir;
use crate::config::{REGISTRY_PROJECT, REGISTRY_URL};
+use crate::modules::application::webapp::Webapp;
use crate::{score::Score, topology::Topology};
use super::{Application, ApplicationFeature, ApplicationInterpret, HelmPackage, OCICompliant};
@@ -60,6 +61,10 @@ pub struct RustWebapp {
    pub project_root: PathBuf,
    pub service_port: u32,
    pub framework: Option<RustWebFramework>,
+    /// Host name that will be used in production environment.
+    ///
+    /// This is the place to put the public host name if this is a public facing webapp.
+    pub dns: String,
}
impl Application for RustWebapp {
@@ -68,6 +73,12 @@ impl Application for RustWebapp {
    }
}
+impl Webapp for RustWebapp {
+    fn dns(&self) -> String {
+        self.dns.clone()
+    }
+}
#[async_trait]
impl HelmPackage for RustWebapp {
    async fn build_push_helm_package(
@@ -194,10 +205,10 @@ impl RustWebapp {
            Some(body_full(tar_data.into())),
        );
-        while let Some(mut msg) = image_build_stream.next().await {
+        while let Some(msg) = image_build_stream.next().await {
            trace!("Got bollard msg {msg:?}");
            match msg {
-                Ok(mut msg) => {
+                Ok(msg) => {
                    if let Some(progress) = msg.progress_detail {
                        info!(
                            "Build progress {}/{}",
@@ -257,7 +268,6 @@ impl RustWebapp {
".harmony_generated", ".harmony_generated",
"harmony", "harmony",
"node_modules", "node_modules",
"Dockerfile.harmony",
]; ];
let mut entries: Vec<_> = WalkDir::new(project_root) let mut entries: Vec<_> = WalkDir::new(project_root)
.into_iter() .into_iter()
@@ -461,52 +471,53 @@ impl RustWebapp {
        let (image_repo, image_tag) = image_url.rsplit_once(':').unwrap_or((image_url, "latest"));
+        let app_name = &self.name;
+        let service_port = self.service_port;
        // Create Chart.yaml
        let chart_yaml = format!(
            r#"
apiVersion: v2
-name: {}
-description: A Helm chart for the {} web application.
+name: {chart_name}
+description: A Helm chart for the {app_name} web application.
type: application
-version: 0.1.0
-appVersion: "{}"
+version: 0.2.1
+appVersion: "{image_tag}"
"#,
-            chart_name, self.name, image_tag
        );
        fs::write(chart_dir.join("Chart.yaml"), chart_yaml)?;
        // Create values.yaml
        let values_yaml = format!(
            r#"
-# Default values for {}.
+# Default values for {chart_name}.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
replicaCount: 1
image:
-  repository: {}
+  repository: {image_repo}
  pullPolicy: IfNotPresent
  # Overridden by the chart's appVersion
-  tag: "{}"
+  tag: "{image_tag}"
service:
  type: ClusterIP
-  port: {}
+  port: {service_port}
ingress:
  enabled: true
+  tls: true
  # Annotations for cert-manager to handle SSL.
  annotations:
    # Add other annotations like nginx ingress class if needed
    # kubernetes.io/ingress.class: nginx
  hosts:
-    - host: {}
+    - host: {domain}
      paths:
        - path: /
          pathType: ImplementationSpecific
"#,
-            chart_name, image_repo, image_tag, self.service_port, domain,
        );
        fs::write(chart_dir.join("values.yaml"), values_yaml)?;
@@ -583,7 +594,11 @@ spec:
        );
        fs::write(templates_dir.join("deployment.yaml"), deployment_yaml)?;
+        let service_port = self.service_port;
        // Create templates/ingress.yaml
+        // TODO get issuer name and tls config from topology as it may be different from one
+        // cluster to another, also from one version to another
        let ingress_yaml = format!(
            r#"
{{{{- if $.Values.ingress.enabled -}}}}
@@ -596,13 +611,11 @@ metadata:
spec:
{{{{- if $.Values.ingress.tls }}}}
  tls:
-  {{{{- range $.Values.ingress.tls }}}}
-  - hosts:
-    {{{{- range .hosts }}}}
-    - {{{{ . | quote }}}}
+  - secretName: {{{{ include "chart.fullname" . }}}}-tls
+    hosts:
+    {{{{- range $.Values.ingress.hosts }}}}
+    - {{{{ .host | quote }}}}
    {{{{- end }}}}
-    secretName: {{{{ .secretName }}}}
-  {{{{- end }}}}
{{{{- end }}}}
  rules:
{{{{- range $.Values.ingress.hosts }}}}
@@ -616,12 +629,11 @@ spec:
          service:
            name: {{{{ include "chart.fullname" $ }}}}
            port:
-              number: {{{{ $.Values.service.port | default {} }}}}
+              number: {{{{ $.Values.service.port | default {service_port} }}}}
{{{{- end }}}}
{{{{- end }}}}
{{{{- end }}}}
"#,
-            self.service_port
        );
        fs::write(templates_dir.join("ingress.yaml"), ingress_yaml)?;

View File

@@ -0,0 +1,7 @@
use super::Application;
use async_trait::async_trait;
#[async_trait]
pub trait Webapp: Application {
fn dns(&self) -> String;
}

View File

@@ -0,0 +1,203 @@
use std::sync::Arc;
use log::{debug, info};
use crate::{interpret::InterpretError, topology::k8s::K8sClient};
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum ArgoScope {
ClusterWide(String),
NamespaceScoped(String),
}
#[derive(Clone, Debug)]
pub struct DiscoveredArgo {
pub control_namespace: String,
pub scope: ArgoScope,
pub has_crds: bool,
pub has_applicationset: bool,
}
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum ArgoDeploymentType {
NotInstalled,
AvailableInDesiredNamespace(String),
InstalledClusterWide(String),
InstalledNamespaceScoped(String),
}
pub async fn discover_argo_all(
k8s: &Arc<K8sClient>,
) -> Result<Vec<DiscoveredArgo>, InterpretError> {
use log::{debug, info, trace, warn};
trace!("Starting Argo discovery");
// CRDs
let mut has_crds = true;
let required_crds = vec!["applications.argoproj.io", "appprojects.argoproj.io"];
trace!("Checking required Argo CRDs: {:?}", required_crds);
for crd in required_crds {
trace!("Verifying CRD presence: {crd}");
let crd_exists = k8s.has_crd(crd).await.map_err(|e| {
InterpretError::new(format!("Failed to verify existence of CRD {crd}: {e}"))
})?;
debug!("CRD {crd} exists: {crd_exists}");
if !crd_exists {
info!(
"Missing Argo CRD {crd}, looks like Argo CD is not installed (or partially installed)"
);
has_crds = false;
break;
}
}
trace!(
"Listing namespaces with healthy Argo CD deployments using selector app.kubernetes.io/part-of=argocd"
);
let mut candidate_namespaces = k8s
.list_namespaces_with_healthy_deployments("app.kubernetes.io/part-of=argocd")
.await
.map_err(|e| InterpretError::new(format!("List healthy argocd deployments: {e}")))?;
trace!(
"Listing namespaces with healthy Argo CD deployments using selector app.kubernetes.io/name=argo-cd"
);
candidate_namespaces.append(
&mut k8s
.list_namespaces_with_healthy_deployments("app.kubernetes.io/name=argo-cd")
.await
.map_err(|e| InterpretError::new(format!("List healthy argocd deployments: {e}")))?,
);
debug!(
"Discovered {} candidate namespace(s) for Argo CD: {:?}",
candidate_namespaces.len(),
candidate_namespaces
);
let mut found = Vec::new();
for ns in candidate_namespaces {
trace!("Evaluating namespace '{ns}' for Argo CD instance");
// Require the application-controller to be healthy (sanity check)
trace!(
"Checking healthy deployment with label app.kubernetes.io/name=argocd-application-controller in namespace '{ns}'"
);
let controller_ok = k8s
.has_healthy_deployment_with_label(
&ns,
"app.kubernetes.io/name=argocd-application-controller",
)
.await
.unwrap_or_else(|e| {
warn!(
"Error while checking application-controller health in namespace '{ns}': {e}"
);
false
}) || k8s
.has_healthy_deployment_with_label(
&ns,
"app.kubernetes.io/component=controller",
)
.await
.unwrap_or_else(|e| {
warn!(
"Error while checking application-controller health in namespace '{ns}': {e}"
);
false
});
debug!("Namespace '{ns}': application-controller healthy = {controller_ok}");
if !controller_ok {
trace!("Skipping namespace '{ns}' because application-controller is not healthy");
continue;
}
trace!("Determining Argo CD scope for namespace '{ns}' (cluster-wide vs namespace-scoped)");
let scope = match k8s.is_argocd_cluster_wide(&ns).await {
Ok(true) => {
debug!("Namespace '{ns}' identified as cluster-wide Argo CD control plane");
ArgoScope::ClusterWide(ns.to_string())
}
Ok(false) => {
debug!("Namespace '{ns}' identified as namespace-scoped Argo CD control plane");
ArgoScope::NamespaceScoped(ns.to_string())
}
Err(e) => {
warn!(
"Failed to determine Argo CD scope for namespace '{ns}': {e}. Assuming namespace-scoped."
);
ArgoScope::NamespaceScoped(ns.to_string())
}
};
trace!("Checking optional ApplicationSet CRD (applicationsets.argoproj.io)");
let has_applicationset = match k8s.has_crd("applicationsets.argoproj.io").await {
Ok(v) => {
debug!("applicationsets.argoproj.io present: {v}");
v
}
Err(e) => {
warn!("Failed to check applicationsets.argoproj.io CRD: {e}. Assuming absent.");
false
}
};
let argo = DiscoveredArgo {
control_namespace: ns.clone(),
scope,
has_crds,
has_applicationset,
};
debug!("Discovered Argo instance in '{ns}': {argo:?}");
found.push(argo);
}
if found.is_empty() {
info!("No Argo CD installations discovered");
} else {
info!(
"Argo CD discovery complete: {} instance(s) found",
found.len()
);
}
Ok(found)
}
pub async fn detect_argo_deployment_type(
k8s: &Arc<K8sClient>,
desired_namespace: &str,
) -> Result<ArgoDeploymentType, InterpretError> {
let discovered = discover_argo_all(k8s).await?;
debug!("Discovered argo instances {discovered:?}");
if discovered.is_empty() {
return Ok(ArgoDeploymentType::NotInstalled);
}
if let Some(d) = discovered
.iter()
.find(|d| d.control_namespace == desired_namespace)
{
return Ok(ArgoDeploymentType::AvailableInDesiredNamespace(
d.control_namespace.clone(),
));
}
if let Some(d) = discovered
.iter()
.find(|d| matches!(d.scope, ArgoScope::ClusterWide(_)))
{
return Ok(ArgoDeploymentType::InstalledClusterWide(
d.control_namespace.clone(),
));
}
Ok(ArgoDeploymentType::InstalledNamespaceScoped(
discovered[0].control_namespace.clone(),
))
}
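The precedence encoded above is: an instance in the desired namespace wins, then any cluster-wide instance, then the first namespace-scoped one (which ArgoInterpret currently rejects). A sketch of the resulting decisions for a few hypothetical clusters (the k8s handle is assumed):

// Sketch: outcomes of detect_argo_deployment_type with hypothetical namespaces.
// Instances: "argocd" (cluster-wide) and "team-a" (namespace-scoped).
//   desired "team-a" -> AvailableInDesiredNamespace("team-a") // desired match beats scope
//   desired "gitops" -> InstalledClusterWide("argocd")        // cluster-wide beats scoped
// Instances: only "team-a" (namespace-scoped).
//   desired "gitops" -> InstalledNamespaceScoped("team-a")    // ArgoInterpret refuses this case
// No instances discovered -> NotInstalled                     // triggers the Helm install path
let deployment_type = detect_argo_deployment_type(&k8s, "argocd").await?;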

View File

@@ -1,209 +0,0 @@
use std::sync::Arc;
use async_trait::async_trait;
use harmony_types::id::Id;
use kube::{CustomResource, api::ObjectMeta};
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
use crate::{
data::Version,
interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
inventory::Inventory,
score::Score,
topology::{K8sclient, Topology, k8s::K8sClient},
};
#[derive(Clone, Debug, Serialize)]
pub struct ClusterIssuerScore {
email: String,
server: String,
issuer_name: String,
namespace: String,
}
impl<T: Topology + K8sclient> Score<T> for ClusterIssuerScore {
fn name(&self) -> String {
"ClusterIssuerScore".to_string()
}
#[doc(hidden)]
fn create_interpret(&self) -> Box<dyn Interpret<T>> {
Box::new(ClusterIssuerInterpret {
score: self.clone(),
})
}
}
#[derive(Debug, Clone)]
pub struct ClusterIssuerInterpret {
score: ClusterIssuerScore,
}
#[async_trait]
impl<T: Topology + K8sclient> Interpret<T> for ClusterIssuerInterpret {
async fn execute(
&self,
_inventory: &Inventory,
topology: &T,
) -> Result<Outcome, InterpretError> {
self.apply_cluster_issuer(topology.k8s_client().await.unwrap())
.await
}
fn get_name(&self) -> InterpretName {
InterpretName::Custom("ClusterIssuer")
}
fn get_version(&self) -> Version {
todo!()
}
fn get_status(&self) -> InterpretStatus {
todo!()
}
fn get_children(&self) -> Vec<Id> {
todo!()
}
}
impl ClusterIssuerInterpret {
async fn validate_cert_manager(
&self,
client: &Arc<K8sClient>,
) -> Result<Outcome, InterpretError> {
let cert_manager = "cert-manager".to_string();
let operator_namespace = "openshift-operators".to_string();
match client
.get_deployment(&cert_manager, Some(&operator_namespace))
.await
{
Ok(Some(deployment)) => {
if let Some(status) = deployment.status {
let ready_count = status.ready_replicas.unwrap_or(0);
if ready_count >= 1 {
return Ok(Outcome::success(format!(
"'{}' is ready with {} replica(s).",
&cert_manager, ready_count
)));
} else {
return Err(InterpretError::new(
"cert-manager operator not ready in cluster".to_string(),
));
}
} else {
Err(InterpretError::new(format!(
"failed to get deployment status {} in ns {}",
&cert_manager, &operator_namespace
)))
}
}
Ok(None) => Err(InterpretError::new(format!(
"Deployment '{}' not found in namespace '{}'.",
&cert_manager, &operator_namespace
))),
Err(e) => Err(InterpretError::new(format!(
"Failed to query for deployment '{}': {}",
&cert_manager, e
))),
}
}
fn build_cluster_issuer(&self) -> Result<ClusterIssuer, InterpretError> {
let issuer_name = &self.score.issuer_name;
let email = &self.score.email;
let server = &self.score.server;
let namespace = &self.score.namespace;
let cluster_issuer = ClusterIssuer {
metadata: ObjectMeta {
name: Some(issuer_name.to_string()),
namespace: Some(namespace.to_string()),
..Default::default()
},
spec: ClusterIssuerSpec {
acme: AcmeSpec {
email: email.to_string(),
private_key_secret_ref: PrivateKeySecretRef {
name: issuer_name.to_string(),
},
server: server.to_string(),
solvers: vec![SolverSpec {
http01: Some(Http01Solver {
ingress: Http01Ingress {
class: "nginx".to_string(),
},
}),
}],
},
},
};
Ok(cluster_issuer)
}
pub async fn apply_cluster_issuer(
&self,
client: Arc<K8sClient>,
) -> Result<Outcome, InterpretError> {
let namespace = self.score.namespace.clone();
self.validate_cert_manager(&client).await?;
let cluster_issuer = self.build_cluster_issuer().unwrap();
client
.apply_yaml(
&serde_yaml::to_value(cluster_issuer).unwrap(),
Some(&namespace),
)
.await?;
Ok(Outcome::success(format!(
"successfully deployed cluster operator: {} in namespace: {}",
self.score.issuer_name, self.score.namespace
)))
}
}
#[derive(CustomResource, Deserialize, Serialize, Clone, Debug, JsonSchema)]
#[kube(
group = "cert-manager.io",
version = "v1",
kind = "ClusterIssuer",
plural = "clusterissuers"
)]
#[serde(rename_all = "camelCase")]
pub struct ClusterIssuerSpec {
pub acme: AcmeSpec,
}
#[derive(Deserialize, Serialize, Clone, Debug, JsonSchema)]
#[serde(rename_all = "camelCase")]
pub struct AcmeSpec {
pub email: String,
pub private_key_secret_ref: PrivateKeySecretRef,
pub server: String,
pub solvers: Vec<SolverSpec>,
}
#[derive(Deserialize, Serialize, Clone, Debug, JsonSchema)]
#[serde(rename_all = "camelCase")]
pub struct PrivateKeySecretRef {
pub name: String,
}
#[derive(Deserialize, Serialize, Clone, Debug, JsonSchema)]
#[serde(rename_all = "camelCase")]
pub struct SolverSpec {
pub http01: Option<Http01Solver>,
// Other solver types (e.g., dns01) would go here as Options
}
#[derive(Deserialize, Serialize, Clone, Debug, JsonSchema)]
#[serde(rename_all = "camelCase")]
pub struct Http01Solver {
pub ingress: Http01Ingress,
}
#[derive(Deserialize, Serialize, Clone, Debug, JsonSchema)]
#[serde(rename_all = "camelCase")]
pub struct Http01Ingress {
pub class: String,
}
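For reference, the removed score above would have been driven with values like these (a minimal sketch; the email and issuer name are assumed, and the server is the well-known Let's Encrypt staging directory):
let score = ClusterIssuerScore {
    email: "ops@example.com".to_string(),
    server: "https://acme-staging-v02.api.letsencrypt.org/directory".to_string(),
    issuer_name: "letsencrypt-staging".to_string(),
    namespace: "cert-manager".to_string(),
};
// `build_cluster_issuer` renders this into the ClusterIssuer CR above,
// with an HTTP-01 solver on the "nginx" ingress class.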

View File

@@ -1,6 +1,5 @@
use std::{collections::HashMap, str::FromStr}; use std::{collections::HashMap, str::FromStr};
use harmony_macros::hurl;
use non_blank_string_rs::NonBlankString; use non_blank_string_rs::NonBlankString;
use serde::Serialize; use serde::Serialize;
use url::Url; use url::Url;
@@ -34,7 +33,7 @@ impl<T: Topology + HelmCommand> Score<T> for CertManagerHelmScore {
install_only: true, install_only: true,
repository: Some(HelmRepository::new( repository: Some(HelmRepository::new(
"jetstack".to_string(), "jetstack".to_string(),
hurl!("https://charts.jetstack.io"), Url::parse("https://charts.jetstack.io").unwrap(),
true, true,
)), )),
} }

View File

@@ -1,3 +1,2 @@
pub mod cluster_issuer;
mod helm; mod helm;
pub use helm::*; pub use helm::*;

View File

@@ -5,7 +5,6 @@ use crate::score::Score;
use crate::topology::{HelmCommand, Topology}; use crate::topology::{HelmCommand, Topology};
use async_trait::async_trait; use async_trait::async_trait;
use harmony_types::id::Id; use harmony_types::id::Id;
use harmony_types::net::Url;
use helm_wrapper_rs; use helm_wrapper_rs;
use helm_wrapper_rs::blocking::{DefaultHelmExecutor, HelmExecutor}; use helm_wrapper_rs::blocking::{DefaultHelmExecutor, HelmExecutor};
use log::{debug, info, warn}; use log::{debug, info, warn};
@@ -16,6 +15,7 @@ use std::path::Path;
use std::process::{Command, Output, Stdio}; use std::process::{Command, Output, Stdio};
use std::str::FromStr; use std::str::FromStr;
use temp_file::TempFile; use temp_file::TempFile;
use url::Url;
#[derive(Debug, Clone, Serialize)] #[derive(Debug, Clone, Serialize)]
pub struct HelmRepository { pub struct HelmRepository {
@@ -78,8 +78,7 @@ impl HelmChartInterpret {
repo.name, repo.url, repo.force_update repo.name, repo.url, repo.force_update
); );
let repo_url = repo.url.to_string(); let mut add_args = vec!["repo", "add", &repo.name, repo.url.as_str()];
let mut add_args = vec!["repo", "add", &repo.name, &repo_url];
if repo.force_update { if repo.force_update {
add_args.push("--force-update"); add_args.push("--force-update");
} }

View File

@@ -0,0 +1,364 @@
use async_trait::async_trait;
use log::debug;
use serde::Serialize;
use std::collections::HashMap;
use std::io::ErrorKind;
use std::path::PathBuf;
use std::process::{Command, Output};
use temp_dir::{self, TempDir};
use temp_file::TempFile;
use crate::data::Version;
use crate::interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome};
use crate::inventory::Inventory;
use crate::score::Score;
use crate::topology::{HelmCommand, K8sclient, Topology};
use harmony_types::id::Id;
#[derive(Clone)]
pub struct HelmCommandExecutor {
pub env: HashMap<String, String>,
pub path: Option<PathBuf>,
pub args: Vec<String>,
pub api_versions: Option<Vec<String>>,
pub kube_version: String,
pub debug: Option<bool>,
pub globals: HelmGlobals,
pub chart: HelmChart,
}
#[derive(Clone)]
pub struct HelmGlobals {
pub chart_home: Option<PathBuf>,
pub config_home: Option<PathBuf>,
}
#[derive(Debug, Clone, Serialize)]
pub struct HelmChart {
pub name: String,
pub version: Option<String>,
pub repo: Option<String>,
pub release_name: Option<String>,
pub namespace: Option<String>,
pub additional_values_files: Vec<PathBuf>,
pub values_file: Option<PathBuf>,
pub values_inline: Option<String>,
pub include_crds: Option<bool>,
pub skip_hooks: Option<bool>,
pub api_versions: Option<Vec<String>>,
pub kube_version: Option<String>,
pub name_template: String,
pub skip_tests: Option<bool>,
pub debug: Option<bool>,
}
impl HelmCommandExecutor {
pub fn generate(mut self) -> Result<String, std::io::Error> {
if self.globals.chart_home.is_none() {
self.globals.chart_home = Some(PathBuf::from("charts"));
}
if self
.clone()
.chart
.clone()
.chart_exists_locally(self.clone().globals.chart_home.unwrap())
.is_none()
{
if self.chart.repo.is_none() {
return Err(std::io::Error::new(
ErrorKind::Other,
"Chart doesn't exist locally and no repo specified",
));
}
self.clone().run_command(
self.chart
.clone()
.pull_command(self.globals.chart_home.clone().unwrap()),
)?;
}
let out = self.clone().run_command(
self.chart
.clone()
.helm_args(self.globals.chart_home.clone().unwrap()),
)?;
// TODO: don't use unwrap here
let s = String::from_utf8(out.stdout).unwrap();
debug!("helm stderr: {}", String::from_utf8(out.stderr).unwrap());
debug!("helm status: {}", out.status);
debug!("helm output: {s}");
let clean = s.split_once("---").unwrap().1;
Ok(clean.to_string())
}
pub fn version(self) -> Result<String, std::io::Error> {
let out = self.run_command(vec![
"version".to_string(),
"-c".to_string(),
"--short".to_string(),
])?;
// TODO: don't use unwrap
Ok(String::from_utf8(out.stdout).unwrap())
}
pub fn run_command(mut self, mut args: Vec<String>) -> Result<Output, std::io::Error> {
if let Some(d) = self.debug {
if d {
args.push("--debug".to_string());
}
}
let path = if let Some(p) = self.path {
p
} else {
PathBuf::from("helm")
};
let config_home = match self.globals.config_home {
Some(p) => p,
None => PathBuf::from(TempDir::new()?.path()),
};
if let Some(yaml_str) = self.chart.values_inline {
let tf: TempFile = temp_file::with_contents(yaml_str.as_bytes());
self.chart
.additional_values_files
.push(PathBuf::from(tf.path()));
};
self.env.insert(
"HELM_CONFIG_HOME".to_string(),
config_home.to_str().unwrap().to_string(),
);
self.env.insert(
"HELM_CACHE_HOME".to_string(),
config_home.to_str().unwrap().to_string(),
);
self.env.insert(
"HELM_DATA_HOME".to_string(),
config_home.to_str().unwrap().to_string(),
);
Command::new(path).envs(self.env).args(args).output()
}
}
impl HelmChart {
pub fn chart_exists_locally(self, chart_home: PathBuf) -> Option<PathBuf> {
let chart_path =
PathBuf::from(chart_home.to_str().unwrap().to_string() + "/" + &self.name.to_string());
if chart_path.exists() {
Some(chart_path)
} else {
None
}
}
pub fn pull_command(self, chart_home: PathBuf) -> Vec<String> {
let mut args = vec![
"pull".to_string(),
"--untar".to_string(),
"--untardir".to_string(),
chart_home.to_str().unwrap().to_string(),
];
match self.repo {
Some(r) => {
if r.starts_with("oci://") {
args.push(
r.trim_end_matches("/").to_string() + "/" + self.name.clone().as_str(),
);
} else {
args.push("--repo".to_string());
args.push(r.to_string());
args.push(self.name);
}
}
None => args.push(self.name),
};
if let Some(v) = self.version {
args.push("--version".to_string());
args.push(v.to_string());
}
args
}
pub fn helm_args(self, chart_home: PathBuf) -> Vec<String> {
let mut args: Vec<String> = vec!["template".to_string()];
match self.release_name {
Some(rn) => args.push(rn.to_string()),
None => args.push("--generate-name".to_string()),
}
args.push(
PathBuf::from(chart_home.to_str().unwrap().to_string() + "/" + self.name.as_str())
.to_str()
.unwrap()
.to_string(),
);
if let Some(n) = self.namespace {
args.push("--namespace".to_string());
args.push(n.to_string());
}
if let Some(f) = self.values_file {
args.push("-f".to_string());
args.push(f.to_str().unwrap().to_string());
}
for f in self.additional_values_files {
args.push("-f".to_string());
args.push(f.to_str().unwrap().to_string());
}
if let Some(vv) = self.api_versions {
for v in vv {
args.push("--api-versions".to_string());
args.push(v);
}
}
if let Some(kv) = self.kube_version {
args.push("--kube-version".to_string());
args.push(kv);
}
if let Some(crd) = self.include_crds {
if crd {
args.push("--include-crds".to_string());
}
}
if let Some(st) = self.skip_tests {
if st {
args.push("--skip-tests".to_string());
}
}
if let Some(sh) = self.skip_hooks {
if sh {
args.push("--no-hooks".to_string());
}
}
if let Some(d) = self.debug {
if d {
args.push("--debug".to_string());
}
}
args
}
}
#[derive(Debug, Clone, Serialize)]
pub struct HelmChartScoreV2 {
pub chart: HelmChart,
}
impl<T: Topology + K8sclient + HelmCommand> Score<T> for HelmChartScoreV2 {
fn create_interpret(&self) -> Box<dyn Interpret<T>> {
Box::new(HelmChartInterpretV2 {
score: self.clone(),
})
}
fn name(&self) -> String {
format!(
"{} {} HelmChartScoreV2",
self.chart
.release_name
.clone()
.unwrap_or("Unknown".to_string()),
self.chart.name
)
}
}
#[derive(Debug, Serialize)]
pub struct HelmChartInterpretV2 {
pub score: HelmChartScoreV2,
}
impl HelmChartInterpretV2 {}
#[async_trait]
impl<T: Topology + K8sclient + HelmCommand> Interpret<T> for HelmChartInterpretV2 {
async fn execute(
&self,
_inventory: &Inventory,
_topology: &T,
) -> Result<Outcome, InterpretError> {
let _ns = self
.score
.chart
.namespace
.as_ref()
.unwrap_or_else(|| todo!("Get namespace from active kubernetes cluster"));
let helm_executor = HelmCommandExecutor {
env: HashMap::new(),
path: None,
args: vec![],
api_versions: None,
kube_version: "v1.33.0".to_string(),
debug: Some(false),
globals: HelmGlobals {
chart_home: None,
config_home: None,
},
chart: self.score.chart.clone(),
};
// let mut helm_options = Vec::new();
// if self.score.create_namespace {
// helm_options.push(NonBlankString::from_str("--create-namespace").unwrap());
// }
let res = helm_executor.generate();
let _output = match res {
Ok(output) => output,
Err(err) => return Err(InterpretError::new(err.to_string())),
};
// TODO: actually apply the YAML rendered by `generate` to the k8s cluster; passing raw YAML straight into the k8s client does not work yet
// let k8s_resource = k8s_openapi::serde_json::from_str(output.as_str()).unwrap();
// let client = topology
// .k8s_client()
// .await
// .expect("Environment should provide enough information to instanciate a client")
// .apply_namespaced(&vec![output], Some(ns.to_string().as_str()));
// match client.apply_yaml(output) {
// Ok(_) => return Ok(Outcome::success("Helm chart deployed".to_string())),
// Err(e) => return Err(InterpretError::new(e)),
// }
Ok(Outcome::success("Helm chart deployed".to_string()))
}
fn get_name(&self) -> InterpretName {
InterpretName::HelmCommand
}
fn get_version(&self) -> Version {
todo!()
}
fn get_status(&self) -> InterpretStatus {
todo!()
}
fn get_children(&self) -> Vec<Id> {
todo!()
}
}
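A minimal sketch of driving the executor directly (all values are placeholders; the chart and repo names are assumptions, not part of this changeset):
let executor = HelmCommandExecutor {
    env: HashMap::new(),
    path: None, // falls back to `helm` on $PATH
    args: vec![],
    api_versions: None,
    kube_version: "v1.33.0".to_string(),
    debug: Some(false),
    globals: HelmGlobals { chart_home: None, config_home: None },
    chart: HelmChart {
        name: "grafana".to_string(),
        version: None,
        repo: Some("https://grafana.github.io/helm-charts".to_string()),
        release_name: Some("grafana".to_string()),
        namespace: Some("monitoring".to_string()),
        additional_values_files: vec![],
        values_file: None,
        values_inline: None,
        include_crds: Some(true),
        skip_hooks: None,
        api_versions: None,
        kube_version: None,
        name_template: String::new(),
        skip_tests: Some(true),
        debug: None,
    },
};
// Pulls the chart into `charts/` if missing, then renders manifests via `helm template`.
let manifests = executor.generate()?;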

View File

@@ -1 +1,2 @@
pub mod chart; pub mod chart;
pub mod command;

View File

@@ -90,12 +90,12 @@ impl<T: Topology> Interpret<T> for DiscoverInventoryAgentInterpret {
// refactoring to do it now // refactoring to do it now
let harmony_inventory_agent::hwinfo::PhysicalHost { let harmony_inventory_agent::hwinfo::PhysicalHost {
storage_drives, storage_drives,
storage_controller, storage_controller: _,
memory_modules, memory_modules,
cpus, cpus,
chipset, chipset: _,
network_interfaces, network_interfaces,
management_interface, management_interface: _,
host_uuid, host_uuid,
} = host; } = host;

View File

@@ -38,15 +38,13 @@ impl<
+ 'static + 'static
+ Send + Send
+ Clone, + Clone,
T: Topology + K8sclient, T: Topology,
> Score<T> for K8sResourceScore<K> > Score<T> for K8sResourceScore<K>
where where
<K as kube::Resource>::DynamicType: Default, <K as kube::Resource>::DynamicType: Default,
{ {
fn create_interpret(&self) -> Box<dyn Interpret<T>> { fn create_interpret(&self) -> Box<dyn Interpret<T>> {
Box::new(K8sResourceInterpret { todo!()
score: self.clone(),
})
} }
fn name(&self) -> String { fn name(&self) -> String {

View File

@@ -17,3 +17,4 @@ pub mod prometheus;
pub mod storage; pub mod storage;
pub mod tenant; pub mod tenant;
pub mod tftp; pub mod tftp;
pub mod argocd;

View File

@@ -1,23 +1,21 @@
use std::sync::Arc; use std::sync::Arc;
use log::debug; use async_trait::async_trait;
use serde::Serialize; use serde::Serialize;
use crate::{ use crate::{
interpret::Interpret, data::Version,
interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
inventory::Inventory,
modules::{ modules::{
application::Application, application::Application,
monitoring::{ monitoring::kube_prometheus::crd::crd_alertmanager_config::CRDPrometheus,
grafana::grafana::Grafana, kube_prometheus::crd::crd_alertmanager_config::CRDPrometheus, prometheus::prometheus::PrometheusApplicationMonitoring,
},
prometheus::prometheus::PrometheusMonitoring,
}, },
score::Score, score::Score,
topology::{ topology::{PreparationOutcome, Topology, oberservability::monitoring::AlertReceiver},
K8sclient, Topology,
oberservability::monitoring::{AlertReceiver, AlertingInterpret, ScrapeTarget},
},
}; };
use harmony_types::id::Id;
#[derive(Debug, Clone, Serialize)] #[derive(Debug, Clone, Serialize)]
pub struct ApplicationMonitoringScore { pub struct ApplicationMonitoringScore {
@@ -26,16 +24,12 @@ pub struct ApplicationMonitoringScore {
pub receivers: Vec<Box<dyn AlertReceiver<CRDPrometheus>>>, pub receivers: Vec<Box<dyn AlertReceiver<CRDPrometheus>>>,
} }
impl<T: Topology + PrometheusMonitoring<CRDPrometheus> + K8sclient + Grafana> Score<T> impl<T: Topology + PrometheusApplicationMonitoring<CRDPrometheus>> Score<T>
for ApplicationMonitoringScore for ApplicationMonitoringScore
{ {
fn create_interpret(&self) -> Box<dyn Interpret<T>> { fn create_interpret(&self) -> Box<dyn Interpret<T>> {
debug!("creating alerting interpret"); Box::new(ApplicationMonitoringInterpret {
Box::new(AlertingInterpret { score: self.clone(),
sender: self.sender.clone(),
receivers: self.receivers.clone(),
rules: vec![],
scrape_targets: None,
}) })
} }
@@ -46,3 +40,55 @@ impl<T: Topology + PrometheusMonitoring<CRDPrometheus> + K8sclient + Grafana> Sc
) )
} }
} }
#[derive(Debug)]
pub struct ApplicationMonitoringInterpret {
score: ApplicationMonitoringScore,
}
#[async_trait]
impl<T: Topology + PrometheusApplicationMonitoring<CRDPrometheus>> Interpret<T>
for ApplicationMonitoringInterpret
{
async fn execute(
&self,
inventory: &Inventory,
topology: &T,
) -> Result<Outcome, InterpretError> {
let result = topology
.install_prometheus(
&self.score.sender,
inventory,
Some(self.score.receivers.clone()),
)
.await;
match result {
Ok(outcome) => match outcome {
PreparationOutcome::Success { details: _ } => {
Ok(Outcome::success("Prometheus installed".into()))
}
PreparationOutcome::Noop => {
Ok(Outcome::noop("Prometheus installation skipped".into()))
}
},
Err(err) => Err(InterpretError::from(err)),
}
}
fn get_name(&self) -> InterpretName {
InterpretName::ApplicationMonitoring
}
fn get_version(&self) -> Version {
todo!()
}
fn get_status(&self) -> InterpretStatus {
todo!()
}
fn get_children(&self) -> Vec<Id> {
todo!()
}
}

View File

@@ -12,7 +12,7 @@ use crate::{
monitoring::kube_prometheus::crd::{ monitoring::kube_prometheus::crd::{
crd_alertmanager_config::CRDPrometheus, rhob_alertmanager_config::RHOBObservability, crd_alertmanager_config::CRDPrometheus, rhob_alertmanager_config::RHOBObservability,
}, },
prometheus::prometheus::PrometheusMonitoring, prometheus::prometheus::PrometheusApplicationMonitoring,
}, },
score::Score, score::Score,
topology::{PreparationOutcome, Topology, oberservability::monitoring::AlertReceiver}, topology::{PreparationOutcome, Topology, oberservability::monitoring::AlertReceiver},
@@ -26,7 +26,7 @@ pub struct ApplicationRHOBMonitoringScore {
pub receivers: Vec<Box<dyn AlertReceiver<RHOBObservability>>>, pub receivers: Vec<Box<dyn AlertReceiver<RHOBObservability>>>,
} }
impl<T: Topology + PrometheusMonitoring<RHOBObservability>> Score<T> impl<T: Topology + PrometheusApplicationMonitoring<RHOBObservability>> Score<T>
for ApplicationRHOBMonitoringScore for ApplicationRHOBMonitoringScore
{ {
fn create_interpret(&self) -> Box<dyn Interpret<T>> { fn create_interpret(&self) -> Box<dyn Interpret<T>> {
@@ -49,7 +49,7 @@ pub struct ApplicationRHOBMonitoringInterpret {
} }
#[async_trait] #[async_trait]
impl<T: Topology + PrometheusMonitoring<RHOBObservability>> Interpret<T> impl<T: Topology + PrometheusApplicationMonitoring<RHOBObservability>> Interpret<T>
for ApplicationRHOBMonitoringInterpret for ApplicationRHOBMonitoringInterpret
{ {
async fn execute( async fn execute(

View File

@@ -1,17 +0,0 @@
use async_trait::async_trait;
use k8s_openapi::Resource;
use crate::{
inventory::Inventory,
topology::{PreparationError, PreparationOutcome},
};
#[async_trait]
pub trait Grafana {
async fn ensure_grafana_operator(
&self,
inventory: &Inventory,
) -> Result<PreparationOutcome, PreparationError>;
async fn install_grafana(&self) -> Result<PreparationOutcome, PreparationError>;
}

View File

@@ -1,28 +1,27 @@
use harmony_macros::hurl;
use non_blank_string_rs::NonBlankString; use non_blank_string_rs::NonBlankString;
use std::{collections::HashMap, str::FromStr}; use std::str::FromStr;
use crate::modules::helm::chart::{HelmChartScore, HelmRepository}; use crate::modules::helm::chart::HelmChartScore;
pub fn grafana_helm_chart_score(ns: &str) -> HelmChartScore {
let values = r#"
rbac:
namespaced: true
sidecar:
dashboards:
enabled: true
"#
.to_string();
pub fn grafana_helm_chart_score(ns: &str, namespace_scope: bool) -> HelmChartScore {
let mut values_overrides = HashMap::new();
values_overrides.insert(
NonBlankString::from_str("namespaceScope").unwrap(),
namespace_scope.to_string(),
);
HelmChartScore { HelmChartScore {
namespace: Some(NonBlankString::from_str(ns).unwrap()), namespace: Some(NonBlankString::from_str(ns).unwrap()),
release_name: NonBlankString::from_str("grafana-operator").unwrap(), release_name: NonBlankString::from_str("grafana").unwrap(),
chart_name: NonBlankString::from_str("grafana/grafana-operator").unwrap(), chart_name: NonBlankString::from_str("oci://ghcr.io/grafana/helm-charts/grafana").unwrap(),
chart_version: None, chart_version: None,
values_overrides: Some(values_overrides), values_overrides: None,
values_yaml: None, values_yaml: Some(values.to_string()),
create_namespace: true, create_namespace: true,
install_only: true, install_only: true,
repository: Some(HelmRepository::new( repository: None,
"grafana".to_string(),
hurl!("https://grafana.github.io/helm-charts"),
true,
)),
} }
} }

View File

@@ -1,2 +1 @@
pub mod grafana;
pub mod helm; pub mod helm;

View File

@@ -1,25 +1,12 @@
use std::sync::Arc; use std::sync::Arc;
use async_trait::async_trait;
use kube::CustomResource; use kube::CustomResource;
use schemars::JsonSchema; use schemars::JsonSchema;
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use crate::{ use crate::topology::{
interpret::{InterpretError, Outcome}, k8s::K8sClient,
inventory::Inventory, oberservability::monitoring::{AlertReceiver, AlertSender},
modules::{
monitoring::{
grafana::grafana::Grafana, kube_prometheus::crd::service_monitor::ServiceMonitor,
},
prometheus::prometheus::PrometheusMonitoring,
},
topology::{
K8sclient, Topology,
installable::Installable,
k8s::K8sClient,
oberservability::monitoring::{AlertReceiver, AlertSender, ScrapeTarget},
},
}; };
#[derive(CustomResource, Serialize, Deserialize, Debug, Clone, JsonSchema)] #[derive(CustomResource, Serialize, Deserialize, Debug, Clone, JsonSchema)]
@@ -39,7 +26,6 @@ pub struct AlertmanagerConfigSpec {
pub struct CRDPrometheus { pub struct CRDPrometheus {
pub namespace: String, pub namespace: String,
pub client: Arc<K8sClient>, pub client: Arc<K8sClient>,
pub service_monitor: Vec<ServiceMonitor>,
} }
impl AlertSender for CRDPrometheus { impl AlertSender for CRDPrometheus {
@@ -54,12 +40,6 @@ impl Clone for Box<dyn AlertReceiver<CRDPrometheus>> {
} }
} }
impl Clone for Box<dyn ScrapeTarget<CRDPrometheus>> {
fn clone(&self) -> Self {
self.clone_box()
}
}
impl Serialize for Box<dyn AlertReceiver<CRDPrometheus>> { impl Serialize for Box<dyn AlertReceiver<CRDPrometheus>> {
fn serialize<S>(&self, _serializer: S) -> Result<S::Ok, S::Error> fn serialize<S>(&self, _serializer: S) -> Result<S::Ok, S::Error>
where where
@@ -68,24 +48,3 @@ impl Serialize for Box<dyn AlertReceiver<CRDPrometheus>> {
todo!() todo!()
} }
} }
#[async_trait]
impl<T: Topology + K8sclient + PrometheusMonitoring<CRDPrometheus> + Grafana> Installable<T>
for CRDPrometheus
{
async fn configure(&self, inventory: &Inventory, topology: &T) -> Result<(), InterpretError> {
topology.ensure_grafana_operator(inventory).await?;
topology.ensure_prometheus_operator(self, inventory).await?;
Ok(())
}
async fn ensure_installed(
&self,
inventory: &Inventory,
topology: &T,
) -> Result<(), InterpretError> {
topology.install_grafana().await?;
topology.install_prometheus(&self, inventory, None).await?;
Ok(())
}
}

View File

@@ -103,34 +103,9 @@ pub struct GrafanaDashboardSpec {
#[serde(default, skip_serializing_if = "Option::is_none")] #[serde(default, skip_serializing_if = "Option::is_none")]
pub resync_period: Option<String>, pub resync_period: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub datasources: Option<Vec<GrafanaDashboardDatasource>>,
pub instance_selector: LabelSelector, pub instance_selector: LabelSelector,
#[serde(default, skip_serializing_if = "Option::is_none")] pub json: String,
pub json: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub grafana_com: Option<GrafanaCom>,
}
#[derive(Serialize, Deserialize, Debug, Clone, JsonSchema)]
#[serde(rename_all = "camelCase")]
pub struct GrafanaDashboardDatasource {
pub input_name: String,
pub datasource_name: String,
}
// ------------------------------------------------------------------------------------------------
#[derive(Serialize, Deserialize, Debug, Clone, JsonSchema)]
#[serde(rename_all = "camelCase")]
pub struct GrafanaCom {
pub id: u32,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub revision: Option<u32>,
} }
// ------------------------------------------------------------------------------------------------ // ------------------------------------------------------------------------------------------------
@@ -151,79 +126,20 @@ pub struct GrafanaDatasourceSpec {
pub allow_cross_namespace_import: Option<bool>, pub allow_cross_namespace_import: Option<bool>,
pub datasource: GrafanaDatasourceConfig, pub datasource: GrafanaDatasourceConfig,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub values_from: Option<Vec<GrafanaValueFrom>>,
}
#[derive(Serialize, Deserialize, Debug, Clone, JsonSchema)]
#[serde(rename_all = "camelCase")]
pub struct GrafanaValueFrom {
pub target_path: String,
pub value_from: GrafanaValueSource,
}
#[derive(Serialize, Deserialize, Debug, Clone, JsonSchema)]
#[serde(rename_all = "camelCase")]
pub struct GrafanaValueSource {
pub secret_key_ref: GrafanaSecretKeyRef,
}
#[derive(Serialize, Deserialize, Debug, Clone, JsonSchema)]
#[serde(rename_all = "camelCase")]
pub struct GrafanaSecretKeyRef {
pub name: String,
pub key: String,
} }
#[derive(Serialize, Deserialize, Debug, Clone, JsonSchema)] #[derive(Serialize, Deserialize, Debug, Clone, JsonSchema)]
#[serde(rename_all = "camelCase")] #[serde(rename_all = "camelCase")]
pub struct GrafanaDatasourceConfig { pub struct GrafanaDatasourceConfig {
pub access: String, pub access: String,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub database: Option<String>, pub database: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub json_data: Option<BTreeMap<String, String>>,
pub name: String, pub name: String,
pub r#type: String, pub r#type: String,
pub url: String, pub url: String,
/// Represents jsonData in the GrafanaDatasource spec
#[serde(default, skip_serializing_if = "Option::is_none")]
pub json_data: Option<GrafanaDatasourceJsonData>,
/// Represents secureJsonData (secrets)
#[serde(default, skip_serializing_if = "Option::is_none")]
pub secure_json_data: Option<GrafanaDatasourceSecureJsonData>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub is_default: Option<bool>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub editable: Option<bool>,
} }
#[derive(Serialize, Deserialize, Debug, Clone, JsonSchema)]
#[serde(rename_all = "camelCase")]
pub struct GrafanaDatasourceJsonData {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub time_interval: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub http_header_name1: Option<String>,
/// Disable TLS skip verification (false = verify)
#[serde(default, skip_serializing_if = "Option::is_none")]
pub tls_skip_verify: Option<bool>,
/// Auth type - set to "forward" for OpenShift OAuth identity
#[serde(default, skip_serializing_if = "Option::is_none")]
pub oauth_pass_thru: Option<bool>,
}
#[derive(Serialize, Deserialize, Debug, Clone, JsonSchema)]
#[serde(rename_all = "camelCase")]
pub struct GrafanaDatasourceSecureJsonData {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub http_header_value1: Option<String>,
}
// ------------------------------------------------------------------------------------------------ // ------------------------------------------------------------------------------------------------
#[derive(Serialize, Deserialize, Debug, Clone, JsonSchema, Default)] #[derive(Serialize, Deserialize, Debug, Clone, JsonSchema, Default)]

View File

@@ -1,187 +0,0 @@
use std::net::IpAddr;
use async_trait::async_trait;
use kube::CustomResource;
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
use crate::{
modules::monitoring::kube_prometheus::crd::{
crd_alertmanager_config::CRDPrometheus, crd_prometheuses::LabelSelector,
},
topology::oberservability::monitoring::ScrapeTarget,
};
#[derive(CustomResource, Serialize, Deserialize, Debug, Clone, JsonSchema)]
#[kube(
group = "monitoring.coreos.com",
version = "v1alpha1",
kind = "ScrapeConfig",
plural = "scrapeconfigs",
namespaced
)]
#[serde(rename_all = "camelCase")]
pub struct ScrapeConfigSpec {
/// List of static configurations.
pub static_configs: Option<Vec<StaticConfig>>,
/// Kubernetes service discovery.
pub kubernetes_sd_configs: Option<Vec<KubernetesSDConfig>>,
/// HTTP-based service discovery.
pub http_sd_configs: Option<Vec<HttpSDConfig>>,
/// File-based service discovery.
pub file_sd_configs: Option<Vec<FileSDConfig>>,
/// DNS-based service discovery.
pub dns_sd_configs: Option<Vec<DnsSDConfig>>,
/// Consul service discovery.
pub consul_sd_configs: Option<Vec<ConsulSDConfig>>,
/// Relabeling configuration applied to discovered targets.
pub relabel_configs: Option<Vec<RelabelConfig>>,
/// Metric relabeling configuration applied to scraped samples.
pub metric_relabel_configs: Option<Vec<RelabelConfig>>,
/// Path to scrape metrics from (defaults to `/metrics`).
pub metrics_path: Option<String>,
/// Interval at which Prometheus scrapes targets (e.g., "30s").
pub scrape_interval: Option<String>,
/// Timeout for scraping (e.g., "10s").
pub scrape_timeout: Option<String>,
/// Optional job name override.
pub job_name: Option<String>,
/// Optional scheme (http or https).
pub scheme: Option<String>,
/// Authorization parameters for SNMP walk.
pub params: Option<Params>,
}
/// Static configuration section of a ScrapeConfig.
#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)]
#[serde(rename_all = "camelCase")]
pub struct StaticConfig {
pub targets: Vec<String>,
pub labels: Option<LabelSelector>,
}
/// Relabeling configuration for target or metric relabeling.
#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)]
#[serde(rename_all = "camelCase")]
pub struct RelabelConfig {
pub source_labels: Option<Vec<String>>,
pub separator: Option<String>,
pub target_label: Option<String>,
pub regex: Option<String>,
pub modulus: Option<u64>,
pub replacement: Option<String>,
pub action: Option<String>,
}
/// Kubernetes service discovery configuration.
#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)]
#[serde(rename_all = "camelCase")]
pub struct KubernetesSDConfig {
///"pod", "service", "endpoints"pub role: String,
pub namespaces: Option<NamespaceSelector>,
pub selectors: Option<Vec<LabelSelector>>,
pub api_server: Option<String>,
pub bearer_token_file: Option<String>,
pub tls_config: Option<TLSConfig>,
}
/// Namespace selector for Kubernetes service discovery.
#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)]
#[serde(rename_all = "camelCase")]
pub struct NamespaceSelector {
pub any: Option<bool>,
pub match_names: Option<Vec<String>>,
}
/// HTTP-based service discovery configuration.
#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)]
#[serde(rename_all = "camelCase")]
pub struct HttpSDConfig {
pub url: String,
pub refresh_interval: Option<String>,
pub basic_auth: Option<BasicAuth>,
pub authorization: Option<Authorization>,
pub tls_config: Option<TLSConfig>,
}
/// File-based service discovery configuration.
#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)]
#[serde(rename_all = "camelCase")]
pub struct FileSDConfig {
pub files: Vec<String>,
pub refresh_interval: Option<String>,
}
/// DNS-based service discovery configuration.
#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)]
#[serde(rename_all = "camelCase")]
pub struct DnsSDConfig {
pub names: Vec<String>,
pub refresh_interval: Option<String>,
pub type_: Option<String>, // SRV, A, AAAA
pub port: Option<u16>,
}
/// Consul service discovery configuration.
#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)]
#[serde(rename_all = "camelCase")]
pub struct ConsulSDConfig {
pub server: String,
pub services: Option<Vec<String>>,
pub scheme: Option<String>,
pub datacenter: Option<String>,
pub tag_separator: Option<String>,
pub refresh_interval: Option<String>,
pub tls_config: Option<TLSConfig>,
}
/// Basic authentication credentials.
#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)]
#[serde(rename_all = "camelCase")]
pub struct BasicAuth {
pub username: String,
pub password: Option<String>,
pub password_file: Option<String>,
}
/// Bearer token or other auth mechanisms.
#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)]
#[serde(rename_all = "camelCase")]
pub struct Authorization {
pub credentials: Option<String>,
pub credentials_file: Option<String>,
pub type_: Option<String>,
}
/// TLS configuration for secure scraping.
#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)]
#[serde(rename_all = "camelCase")]
pub struct TLSConfig {
pub ca_file: Option<String>,
pub cert_file: Option<String>,
pub key_file: Option<String>,
pub server_name: Option<String>,
pub insecure_skip_verify: Option<bool>,
}
/// Authorization parameters for SNMP walk.
#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)]
#[serde(rename_all = "camelCase")]
pub struct Params {
pub auth: Option<Vec<String>>,
pub module: Option<Vec<String>>,
}
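To make the shape concrete, a static SNMP-style target maps onto this spec roughly as follows (a sketch with assumed values; only fields defined above are used):
let spec = ScrapeConfigSpec {
    job_name: Some("snmp_exporter/example".to_string()),
    static_configs: Some(vec![StaticConfig {
        targets: vec!["192.0.2.10".to_string()],
        labels: None,
    }]),
    metrics_path: Some("/snmp".to_string()),
    scrape_interval: Some("2m".to_string()),
    scrape_timeout: Some("2m".to_string()),
    params: Some(Params {
        auth: Some(vec!["public_v2".to_string()]),
        module: Some(vec!["if_mib".to_string()]),
    }),
    kubernetes_sd_configs: None,
    http_sd_configs: None,
    file_sd_configs: None,
    dns_sd_configs: None,
    consul_sd_configs: None,
    relabel_configs: None,
    metric_relabel_configs: None,
    scheme: None,
};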

View File

@@ -4,7 +4,6 @@ pub mod crd_default_rules;
pub mod crd_grafana; pub mod crd_grafana;
pub mod crd_prometheus_rules; pub mod crd_prometheus_rules;
pub mod crd_prometheuses; pub mod crd_prometheuses;
pub mod crd_scrape_config;
pub mod grafana_default_dashboard; pub mod grafana_default_dashboard;
pub mod grafana_operator; pub mod grafana_operator;
pub mod prometheus_operator; pub mod prometheus_operator;

View File

@@ -1,12 +1,8 @@
use std::collections::BTreeMap;
use kube::CustomResource; use kube::CustomResource;
use schemars::JsonSchema; use schemars::JsonSchema;
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use crate::modules::monitoring::kube_prometheus::crd::rhob_prometheuses::{ use crate::modules::monitoring::kube_prometheus::crd::rhob_prometheuses::LabelSelector;
LabelSelector, PrometheusSpec,
};
/// MonitoringStack CRD for monitoring.rhobs/v1alpha1 /// MonitoringStack CRD for monitoring.rhobs/v1alpha1
#[derive(CustomResource, Serialize, Deserialize, Debug, Clone, JsonSchema)] #[derive(CustomResource, Serialize, Deserialize, Debug, Clone, JsonSchema)]

View File

@@ -31,7 +31,6 @@ impl<T: Topology + HelmCommand + TenantManager> Score<T> for HelmPrometheusAlert
sender: KubePrometheus { config }, sender: KubePrometheus { config },
receivers: self.receivers.clone(), receivers: self.receivers.clone(),
rules: self.rules.clone(), rules: self.rules.clone(),
scrape_targets: None,
}) })
} }
fn name(&self) -> String { fn name(&self) -> String {

View File

@@ -6,4 +6,3 @@ pub mod kube_prometheus;
pub mod ntfy; pub mod ntfy;
pub mod okd; pub mod okd;
pub mod prometheus; pub mod prometheus;
pub mod scrape_target;

View File

@@ -100,7 +100,11 @@ impl<T: Topology + HelmCommand + K8sclient + MultiTargetTopology> Interpret<T> f
info!("deploying ntfy..."); info!("deploying ntfy...");
client client
.wait_until_deployment_ready("ntfy", Some(self.score.namespace.as_str()), None) .wait_until_deployment_ready(
"ntfy".to_string(),
Some(self.score.namespace.as_str()),
None,
)
.await?; .await?;
info!("ntfy deployed"); info!("ntfy deployed");

View File

@@ -114,7 +114,7 @@ impl Prometheus {
}; };
if let Some(ns) = namespace.as_deref() { if let Some(ns) = namespace.as_deref() {
grafana_helm_chart_score(ns, false) grafana_helm_chart_score(ns)
.interpret(inventory, topology) .interpret(inventory, topology)
.await .await
} else { } else {

View File

@@ -1 +0,0 @@
pub mod server;

View File

@@ -1,80 +0,0 @@
use std::net::IpAddr;
use async_trait::async_trait;
use kube::api::ObjectMeta;
use serde::Serialize;
use crate::{
interpret::{InterpretError, Outcome},
modules::monitoring::kube_prometheus::crd::{
crd_alertmanager_config::CRDPrometheus,
crd_scrape_config::{Params, RelabelConfig, ScrapeConfig, ScrapeConfigSpec, StaticConfig},
},
topology::oberservability::monitoring::ScrapeTarget,
};
#[derive(Debug, Clone, Serialize)]
pub struct Server {
pub name: String,
pub ip: IpAddr,
pub auth: String,
pub module: String,
pub domain: String,
}
#[async_trait]
impl ScrapeTarget<CRDPrometheus> for Server {
async fn install(&self, sender: &CRDPrometheus) -> Result<Outcome, InterpretError> {
let scrape_config_spec = ScrapeConfigSpec {
static_configs: Some(vec![StaticConfig {
targets: vec![self.ip.to_string()],
labels: None,
}]),
scrape_interval: Some("2m".to_string()),
kubernetes_sd_configs: None,
http_sd_configs: None,
file_sd_configs: None,
dns_sd_configs: None,
params: Some(Params {
auth: Some(vec![self.auth.clone()]),
module: Some(vec![self.module.clone()]),
}),
consul_sd_configs: None,
relabel_configs: Some(vec![RelabelConfig {
action: None,
source_labels: Some(vec!["__address__".to_string()]),
separator: None,
target_label: Some("__param_target".to_string()),
regex: None,
replacement: Some(format!("snmp.{}:31080", self.domain.clone())),
modulus: None,
}]),
metric_relabel_configs: None,
metrics_path: Some("/snmp".to_string()),
scrape_timeout: Some("2m".to_string()),
job_name: Some(format!("snmp_exporter/cloud/{}", self.name.clone())),
scheme: None,
};
let scrape_config = ScrapeConfig {
metadata: ObjectMeta {
name: Some(self.name.clone()),
namespace: Some(sender.namespace.clone()),
..Default::default()
},
spec: scrape_config_spec,
};
sender
.client
.apply(&scrape_config, Some(&sender.namespace.clone()))
.await?;
Ok(Outcome::success(format!(
"installed scrape target {}",
self.name.clone()
)))
}
fn clone_box(&self) -> Box<dyn ScrapeTarget<CRDPrometheus>> {
Box::new(self.clone())
}
}
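A sketch of how this target was meant to be registered (values assumed; `sender` is the CRDPrometheus handle from above):
let server = Server {
    name: "core-switch".to_string(),
    ip: "192.0.2.10".parse().unwrap(),
    auth: "public_v2".to_string(),
    module: "if_mib".to_string(),
    domain: "example.com".to_string(),
};
// Renders the ScrapeConfig above and applies it into `sender.namespace`.
server.install(&sender).await?;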

View File

@@ -28,7 +28,7 @@ pub struct OKDSetup03ControlPlaneScore {}
impl Score<HAClusterTopology> for OKDSetup03ControlPlaneScore { impl Score<HAClusterTopology> for OKDSetup03ControlPlaneScore {
fn create_interpret(&self) -> Box<dyn Interpret<HAClusterTopology>> { fn create_interpret(&self) -> Box<dyn Interpret<HAClusterTopology>> {
Box::new(OKDSetup03ControlPlaneInterpret::new()) Box::new(OKDSetup03ControlPlaneInterpret::new(self.clone()))
} }
fn name(&self) -> String { fn name(&self) -> String {
@@ -38,15 +38,17 @@ impl Score<HAClusterTopology> for OKDSetup03ControlPlaneScore {
#[derive(Debug, Clone)] #[derive(Debug, Clone)]
pub struct OKDSetup03ControlPlaneInterpret { pub struct OKDSetup03ControlPlaneInterpret {
score: OKDSetup03ControlPlaneScore,
version: Version, version: Version,
status: InterpretStatus, status: InterpretStatus,
} }
impl OKDSetup03ControlPlaneInterpret { impl OKDSetup03ControlPlaneInterpret {
pub fn new() -> Self { pub fn new(score: OKDSetup03ControlPlaneScore) -> Self {
let version = Version::from("1.0.0").unwrap(); let version = Version::from("1.0.0").unwrap();
Self { Self {
version, version,
score,
status: InterpretStatus::QUEUED, status: InterpretStatus::QUEUED,
} }
} }
@@ -157,7 +159,7 @@ impl OKDSetup03ControlPlaneInterpret {
} }
.to_string(); .to_string();
debug!("[ControlPlane] iPXE content template:\n{content}"); debug!("[ControlPlane] iPXE content template:\n{}", content);
// Create and apply an iPXE boot file for each node. // Create and apply an iPXE boot file for each node.
for node in nodes { for node in nodes {
@@ -187,13 +189,16 @@ impl OKDSetup03ControlPlaneInterpret {
/// Prompts the user to reboot the target control plane nodes. /// Prompts the user to reboot the target control plane nodes.
async fn reboot_targets(&self, nodes: &Vec<PhysicalHost>) -> Result<(), InterpretError> { async fn reboot_targets(&self, nodes: &Vec<PhysicalHost>) -> Result<(), InterpretError> {
let node_ids: Vec<String> = nodes.iter().map(|n| n.id.to_string()).collect(); let node_ids: Vec<String> = nodes.iter().map(|n| n.id.to_string()).collect();
info!("[ControlPlane] Requesting reboot for control plane nodes: {node_ids:?}",); info!(
"[ControlPlane] Requesting reboot for control plane nodes: {:?}",
node_ids
);
let confirmation = inquire::Confirm::new( let confirmation = inquire::Confirm::new(
&format!("Please reboot the {} control plane nodes ({}) to apply their PXE configuration. Press enter when ready.", nodes.len(), node_ids.join(", ")), &format!("Please reboot the {} control plane nodes ({}) to apply their PXE configuration. Press enter when ready.", nodes.len(), node_ids.join(", ")),
) )
.prompt() .prompt()
.map_err(|e| InterpretError::new(format!("User prompt failed: {e}")))?; .map_err(|e| InterpretError::new(format!("User prompt failed: {}", e)))?;
if !confirmation { if !confirmation {
return Err(InterpretError::new( return Err(InterpretError::new(
@@ -203,6 +208,19 @@ impl OKDSetup03ControlPlaneInterpret {
Ok(()) Ok(())
} }
/// Placeholder for automating network bonding configuration.
async fn persist_network_bond(&self) -> Result<(), InterpretError> {
// Generate MC or NNCP from inventory NIC data; apply via ignition or post-join.
info!("[ControlPlane] Ensuring persistent bonding via MachineConfig/NNCP");
inquire::Confirm::new(
"Network configuration for control plane nodes is not automated yet. Configure it manually if needed.",
)
.prompt()
.map_err(|e| InterpretError::new(format!("User prompt failed: {}", e)))?;
Ok(())
}
} }
#[async_trait] #[async_trait]
@@ -241,6 +259,9 @@ impl Interpret<HAClusterTopology> for OKDSetup03ControlPlaneInterpret {
// 4. Reboot the nodes to start the OS installation. // 4. Reboot the nodes to start the OS installation.
self.reboot_targets(&nodes).await?; self.reboot_targets(&nodes).await?;
// 5. Placeholder for post-boot network configuration (e.g., bonding).
self.persist_network_bond().await?;
// TODO: Implement a step to wait for the control plane nodes to join the cluster // TODO: Implement a step to wait for the control plane nodes to join the cluster
// and for the cluster operators to become available. This would be similar to // and for the cluster operators to become available. This would be similar to
// the `wait-for bootstrap-complete` command. // the `wait-for bootstrap-complete` command.

View File

@@ -77,8 +77,6 @@ impl OKDBootstrapLoadBalancerScore {
address: topology.bootstrap_host.ip.to_string(), address: topology.bootstrap_host.ip.to_string(),
port, port,
}); });
backend.dedup();
backend backend
} }
} }

View File

@@ -1,130 +0,0 @@
use crate::{
data::Version,
hardware::PhysicalHost,
infra::inventory::InventoryRepositoryFactory,
interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
inventory::{HostRole, Inventory},
modules::okd::host_network::HostNetworkConfigurationScore,
score::Score,
topology::HAClusterTopology,
};
use async_trait::async_trait;
use derive_new::new;
use harmony_types::id::Id;
use log::info;
use serde::Serialize;
// -------------------------------------------------------------------------------------------------
// Persist Network Bond
// - Persist bonding via NMState
// - Persist port channels on the Switch
// -------------------------------------------------------------------------------------------------
#[derive(Debug, Clone, Serialize, new)]
pub struct OKDSetupPersistNetworkBondScore {}
impl Score<HAClusterTopology> for OKDSetupPersistNetworkBondScore {
fn create_interpret(&self) -> Box<dyn Interpret<HAClusterTopology>> {
Box::new(OKDSetupPersistNetworkBondInterpet::new())
}
fn name(&self) -> String {
"OKDSetupPersistNetworkBondScore".to_string()
}
}
#[derive(Debug, Clone)]
pub struct OKDSetupPersistNetworkBondInterpet {
version: Version,
status: InterpretStatus,
}
impl OKDSetupPersistNetworkBondInterpet {
pub fn new() -> Self {
let version = Version::from("1.0.0").unwrap();
Self {
version,
status: InterpretStatus::QUEUED,
}
}
/// Ensures that three physical hosts are discovered and available for the ControlPlane role.
/// It will trigger discovery if not enough hosts are found.
async fn get_nodes(
&self,
_inventory: &Inventory,
_topology: &HAClusterTopology,
) -> Result<Vec<PhysicalHost>, InterpretError> {
const REQUIRED_HOSTS: usize = 3;
let repo = InventoryRepositoryFactory::build().await?;
let control_plane_hosts = repo.get_host_for_role(&HostRole::ControlPlane).await?;
if control_plane_hosts.len() < REQUIRED_HOSTS {
Err(InterpretError::new(format!(
"OKD Requires at least {} control plane hosts, but only found {}. Cannot proceed.",
REQUIRED_HOSTS,
control_plane_hosts.len()
)))
} else {
// Take exactly the number of required hosts to ensure consistency.
Ok(control_plane_hosts
.into_iter()
.take(REQUIRED_HOSTS)
.collect())
}
}
async fn persist_network_bond(
&self,
inventory: &Inventory,
topology: &HAClusterTopology,
hosts: &Vec<PhysicalHost>,
) -> Result<(), InterpretError> {
info!("Ensuring persistent bonding");
let score = HostNetworkConfigurationScore {
hosts: hosts.clone(),
};
score.interpret(inventory, topology).await?;
Ok(())
}
}
#[async_trait]
impl Interpret<HAClusterTopology> for OKDSetupPersistNetworkBondInterpet {
fn get_name(&self) -> InterpretName {
InterpretName::Custom("OKDSetupPersistNetworkBondInterpet")
}
fn get_version(&self) -> Version {
self.version.clone()
}
fn get_status(&self) -> InterpretStatus {
self.status.clone()
}
fn get_children(&self) -> Vec<Id> {
vec![]
}
async fn execute(
&self,
inventory: &Inventory,
topology: &HAClusterTopology,
) -> Result<Outcome, InterpretError> {
let nodes = self.get_nodes(inventory, topology).await?;
let res = self.persist_network_bond(inventory, topology, &nodes).await;
match res {
Ok(_) => Ok(Outcome::success(
"Network bond successfully persisted".into(),
)),
Err(_) => Err(InterpretError::new(
"Failed to persist network bond".to_string(),
)),
}
}
}
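Call-site sketch (assumed surrounding context; `inventory` and `topology` come from the OKD setup flow):
let outcome = OKDSetupPersistNetworkBondScore::new()
    .interpret(&inventory, &topology)
    .await?;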

View File

@@ -1 +0,0 @@
pub mod nmstate;

View File

@@ -1,322 +0,0 @@
use std::collections::BTreeMap;
use kube::CustomResource;
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
use serde_json::Value;
#[derive(CustomResource, Deserialize, Serialize, Clone, Debug, JsonSchema)]
#[kube(
group = "nmstate.io",
version = "v1",
kind = "NMState",
plural = "nmstates",
namespaced = false
)]
#[serde(rename_all = "camelCase")]
pub struct NMStateSpec {
#[serde(skip_serializing_if = "Option::is_none")]
pub probe_configuration: Option<ProbeConfig>,
}
impl Default for NMState {
fn default() -> Self {
Self {
metadata: Default::default(),
spec: NMStateSpec {
probe_configuration: None,
},
}
}
}
#[derive(Deserialize, Serialize, Clone, Debug, JsonSchema)]
#[serde(rename_all = "camelCase")]
pub struct ProbeConfig {
pub dns: ProbeDns,
}
#[derive(Deserialize, Serialize, Clone, Debug, JsonSchema)]
#[serde(rename_all = "camelCase")]
pub struct ProbeDns {
pub host: String,
}
#[derive(CustomResource, Deserialize, Serialize, Clone, Debug, JsonSchema)]
#[kube(
group = "nmstate.io",
version = "v1",
kind = "NodeNetworkConfigurationPolicy",
namespaced
)]
#[serde(rename_all = "camelCase")]
pub struct NodeNetworkConfigurationPolicySpec {
#[serde(skip_serializing_if = "Option::is_none")]
pub node_selector: Option<BTreeMap<String, String>>,
pub desired_state: DesiredStateSpec,
}
#[derive(Deserialize, Serialize, Clone, Debug, JsonSchema)]
#[serde(rename_all = "kebab-case")]
pub struct DesiredStateSpec {
pub interfaces: Vec<InterfaceSpec>,
}
#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
#[serde(rename_all = "kebab-case")]
pub struct InterfaceSpec {
pub name: String,
#[serde(skip_serializing_if = "Option::is_none")]
pub description: Option<String>,
pub r#type: String,
pub state: String,
#[serde(skip_serializing_if = "Option::is_none")]
pub mac_address: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub copy_mac_from: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub mtu: Option<u32>,
#[serde(skip_serializing_if = "Option::is_none")]
pub controller: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub ipv4: Option<IpStackSpec>,
#[serde(skip_serializing_if = "Option::is_none")]
pub ipv6: Option<IpStackSpec>,
#[serde(skip_serializing_if = "Option::is_none")]
pub ethernet: Option<EthernetSpec>,
#[serde(skip_serializing_if = "Option::is_none")]
pub link_aggregation: Option<BondSpec>,
#[serde(skip_serializing_if = "Option::is_none")]
pub vlan: Option<VlanSpec>,
#[serde(skip_serializing_if = "Option::is_none")]
pub vxlan: Option<VxlanSpec>,
#[serde(skip_serializing_if = "Option::is_none")]
pub mac_vtap: Option<MacVtapSpec>,
#[serde(skip_serializing_if = "Option::is_none")]
pub mac_vlan: Option<MacVlanSpec>,
#[serde(skip_serializing_if = "Option::is_none")]
pub infiniband: Option<InfinibandSpec>,
#[serde(skip_serializing_if = "Option::is_none")]
pub linux_bridge: Option<LinuxBridgeSpec>,
#[serde(skip_serializing_if = "Option::is_none")]
pub ovs_bridge: Option<OvsBridgeSpec>,
#[serde(skip_serializing_if = "Option::is_none")]
pub ethtool: Option<EthtoolSpec>,
}
#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
#[serde(rename_all = "kebab-case")]
pub struct IpStackSpec {
#[serde(skip_serializing_if = "Option::is_none")]
pub enabled: Option<bool>,
#[serde(skip_serializing_if = "Option::is_none")]
pub dhcp: Option<bool>,
#[serde(skip_serializing_if = "Option::is_none")]
pub autoconf: Option<bool>,
#[serde(skip_serializing_if = "Option::is_none")]
pub address: Option<Vec<IpAddressSpec>>,
#[serde(skip_serializing_if = "Option::is_none")]
pub auto_dns: Option<bool>,
#[serde(skip_serializing_if = "Option::is_none")]
pub auto_gateway: Option<bool>,
#[serde(skip_serializing_if = "Option::is_none")]
pub auto_routes: Option<bool>,
#[serde(skip_serializing_if = "Option::is_none")]
pub dhcp_client_id: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub dhcp_duid: Option<String>,
}
#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
#[serde(rename_all = "kebab-case")]
pub struct IpAddressSpec {
pub ip: String,
pub prefix_length: u8,
}
#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
#[serde(rename_all = "kebab-case")]
pub struct EthernetSpec {
#[serde(skip_serializing_if = "Option::is_none")]
pub speed: Option<u32>,
#[serde(skip_serializing_if = "Option::is_none")]
pub duplex: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub auto_negotiation: Option<bool>,
}
#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
#[serde(rename_all = "kebab-case")]
pub struct BondSpec {
pub mode: String,
pub ports: Vec<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub options: Option<BTreeMap<String, Value>>,
}
#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
#[serde(rename_all = "kebab-case")]
pub struct VlanSpec {
pub base_iface: String,
pub id: u16,
#[serde(skip_serializing_if = "Option::is_none")]
pub protocol: Option<String>,
}
#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
#[serde(rename_all = "kebab-case")]
pub struct VxlanSpec {
pub base_iface: String,
pub id: u32,
pub remote: String,
#[serde(skip_serializing_if = "Option::is_none")]
pub local: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub learning: Option<bool>,
#[serde(skip_serializing_if = "Option::is_none")]
pub destination_port: Option<u16>,
}
#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
#[serde(rename_all = "kebab-case")]
pub struct MacVtapSpec {
pub base_iface: String,
pub mode: String,
#[serde(skip_serializing_if = "Option::is_none")]
pub promiscuous: Option<bool>,
}
#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
#[serde(rename_all = "kebab-case")]
pub struct MacVlanSpec {
pub base_iface: String,
pub mode: String,
#[serde(skip_serializing_if = "Option::is_none")]
pub promiscuous: Option<bool>,
}
#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
#[serde(rename_all = "kebab-case")]
pub struct InfinibandSpec {
pub base_iface: String,
pub pkey: String,
pub mode: String,
}
#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
#[serde(rename_all = "kebab-case")]
pub struct LinuxBridgeSpec {
#[serde(skip_serializing_if = "Option::is_none")]
pub options: Option<LinuxBridgeOptions>,
#[serde(skip_serializing_if = "Option::is_none")]
pub ports: Option<Vec<LinuxBridgePort>>,
}
#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
#[serde(rename_all = "kebab-case")]
pub struct LinuxBridgeOptions {
#[serde(skip_serializing_if = "Option::is_none")]
pub mac_ageing_time: Option<u32>,
#[serde(skip_serializing_if = "Option::is_none")]
pub multicast_snooping: Option<bool>,
#[serde(skip_serializing_if = "Option::is_none")]
pub stp: Option<StpOptions>,
}
#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
#[serde(rename_all = "kebab-case")]
pub struct StpOptions {
#[serde(skip_serializing_if = "Option::is_none")]
pub enabled: Option<bool>,
#[serde(skip_serializing_if = "Option::is_none")]
pub forward_delay: Option<u16>,
#[serde(skip_serializing_if = "Option::is_none")]
pub hello_time: Option<u16>,
#[serde(skip_serializing_if = "Option::is_none")]
pub max_age: Option<u16>,
#[serde(skip_serializing_if = "Option::is_none")]
pub priority: Option<u16>,
}
#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
#[serde(rename_all = "kebab-case")]
pub struct LinuxBridgePort {
pub name: String,
#[serde(skip_serializing_if = "Option::is_none")]
pub vlan: Option<LinuxBridgePortVlan>,
}
#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
#[serde(rename_all = "kebab-case")]
pub struct LinuxBridgePortVlan {
#[serde(skip_serializing_if = "Option::is_none")]
pub mode: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub trunk_tags: Option<Vec<VlanTag>>,
#[serde(skip_serializing_if = "Option::is_none")]
pub tag: Option<u16>,
#[serde(skip_serializing_if = "Option::is_none")]
pub enable_native: Option<bool>,
}
#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
#[serde(rename_all = "kebab-case")]
pub struct VlanTag {
pub id: u16,
#[serde(skip_serializing_if = "Option::is_none")]
pub id_range: Option<VlanIdRange>,
}
#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
#[serde(rename_all = "kebab-case")]
pub struct VlanIdRange {
pub min: u16,
pub max: u16,
}
#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
#[serde(rename_all = "kebab-case")]
pub struct OvsBridgeSpec {
#[serde(skip_serializing_if = "Option::is_none")]
pub options: Option<OvsBridgeOptions>,
#[serde(skip_serializing_if = "Option::is_none")]
pub ports: Option<Vec<OvsPortSpec>>,
}
#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
#[serde(rename_all = "kebab-case")]
pub struct OvsBridgeOptions {
#[serde(skip_serializing_if = "Option::is_none")]
pub stp: Option<bool>,
#[serde(skip_serializing_if = "Option::is_none")]
pub rstp: Option<bool>,
#[serde(skip_serializing_if = "Option::is_none")]
pub mcast_snooping_enable: Option<bool>,
}
#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
#[serde(rename_all = "kebab-case")]
pub struct OvsPortSpec {
pub name: String,
#[serde(skip_serializing_if = "Option::is_none")]
pub link_aggregation: Option<BondSpec>,
#[serde(skip_serializing_if = "Option::is_none")]
pub vlan: Option<LinuxBridgePortVlan>,
#[serde(skip_serializing_if = "Option::is_none")]
pub r#type: Option<String>,
}
#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
#[serde(rename_all = "kebab-case")]
pub struct EthtoolSpec {
// TODO: Properly describe this spec (https://nmstate.io/devel/yaml_api.html#ethtool)
}
#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
#[serde(rename_all = "kebab-case")]
pub struct EthtoolFecSpec {
#[serde(skip_serializing_if = "Option::is_none")]
pub auto: Option<bool>,
#[serde(skip_serializing_if = "Option::is_none")]
pub mode: Option<String>,
}
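As a sketch, the bond that persist_network_bond is meant to render maps onto these types roughly as follows (interface and port names are assumed):
let policy = NodeNetworkConfigurationPolicySpec {
    node_selector: None, // or a per-node selector such as kubernetes.io/hostname
    desired_state: DesiredStateSpec {
        interfaces: vec![InterfaceSpec {
            name: "bond0".to_string(),
            r#type: "bond".to_string(),
            state: "up".to_string(),
            ipv4: Some(IpStackSpec {
                enabled: Some(true),
                dhcp: Some(true),
                ..Default::default()
            }),
            link_aggregation: Some(BondSpec {
                mode: "802.3ad".to_string(),
                ports: vec!["eno1".to_string(), "eno2".to_string()],
                options: None,
            }),
            ..Default::default()
        }],
    },
};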

View File

@@ -1,489 +0,0 @@
use async_trait::async_trait;
use harmony_types::id::Id;
use log::{debug, info};
use serde::Serialize;
use crate::{
data::Version,
hardware::PhysicalHost,
interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
inventory::Inventory,
score::Score,
topology::{HostNetworkConfig, NetworkInterface, Switch, SwitchPort, Topology},
};
#[derive(Debug, Clone, Serialize)]
pub struct HostNetworkConfigurationScore {
pub hosts: Vec<PhysicalHost>,
}
impl<T: Topology + Switch> Score<T> for HostNetworkConfigurationScore {
fn name(&self) -> String {
"HostNetworkConfigurationScore".into()
}
fn create_interpret(&self) -> Box<dyn Interpret<T>> {
Box::new(HostNetworkConfigurationInterpret {
score: self.clone(),
})
}
}
#[derive(Debug)]
pub struct HostNetworkConfigurationInterpret {
    score: HostNetworkConfigurationScore,
}

impl HostNetworkConfigurationInterpret {
    async fn configure_network_for_host<T: Topology + Switch>(
        &self,
        topology: &T,
        host: &PhysicalHost,
        current_host: &usize,
        total_hosts: &usize,
    ) -> Result<HostNetworkConfig, InterpretError> {
        if host.network.is_empty() {
            info!("[Host {current_host}/{total_hosts}] No interfaces to configure, skipping");
            return Ok(HostNetworkConfig {
                host_id: host.id.clone(),
                switch_ports: vec![],
            });
        }

        let switch_ports = self
            .collect_switch_ports_for_host(topology, host, current_host, total_hosts)
            .await?;
        let config = HostNetworkConfig {
            host_id: host.id.clone(),
            switch_ports,
        };

        if !config.switch_ports.is_empty() {
            info!(
                "[Host {current_host}/{total_hosts}] Found {} ports for {} interfaces",
                config.switch_ports.len(),
                host.network.len()
            );
            info!("[Host {current_host}/{total_hosts}] Configuring host network...");
            topology
                .configure_host_network(&config)
                .await
                .map_err(|e| InterpretError::new(format!("Failed to configure host: {e}")))?;
        } else {
            info!(
                "[Host {current_host}/{total_hosts}] No ports found for {} interfaces, skipping",
                host.network.len()
            );
        }

        Ok(config)
    }

    async fn collect_switch_ports_for_host<T: Topology + Switch>(
        &self,
        topology: &T,
        host: &PhysicalHost,
        current_host: &usize,
        total_hosts: &usize,
    ) -> Result<Vec<SwitchPort>, InterpretError> {
        let mut switch_ports = vec![];
        if host.network.is_empty() {
            return Ok(switch_ports);
        }

        info!("[Host {current_host}/{total_hosts}] Collecting ports on switch...");
        for network_interface in &host.network {
            let mac_address = network_interface.mac_address;
            match topology.get_port_for_mac_address(&mac_address).await {
                Ok(Some(port)) => {
                    info!(
                        "[Host {current_host}/{total_hosts}] Found port '{port}' for '{mac_address}'"
                    );
                    switch_ports.push(SwitchPort {
                        interface: NetworkInterface {
                            name: network_interface.name.clone(),
                            mac_address,
                            speed_mbps: network_interface.speed_mbps,
                            mtu: network_interface.mtu,
                        },
                        port,
                    });
                }
                Ok(None) => debug!("No port found for '{mac_address}', skipping"),
                Err(e) => {
                    return Err(InterpretError::new(format!(
                        "Failed to get port for host '{}': {}",
                        host.id, e
                    )));
                }
            }
        }

        Ok(switch_ports)
    }

    fn format_host_configuration(&self, configs: Vec<HostNetworkConfig>) -> Vec<String> {
        let mut report = vec![
            "Network Configuration Report".to_string(),
            "------------------------------------------------------------------".to_string(),
        ];

        for config in configs {
            let host = self
                .score
                .hosts
                .iter()
                .find(|h| h.id == config.host_id)
                .unwrap();
            println!("[Host] {host}");

            if config.switch_ports.is_empty() {
                report.push(format!(
                    "⏭️ Host {}: SKIPPED (No matching switch ports found)",
                    config.host_id
                ));
            } else {
                let mappings: Vec<String> = config
                    .switch_ports
                    .iter()
                    .map(|p| format!("[{} -> {}]", p.interface.name, p.port))
                    .collect();
                report.push(format!(
                    "✅ Host {}: Bonded {} port(s) {}",
                    config.host_id,
                    config.switch_ports.len(),
                    mappings.join(", ")
                ));
            }
        }

        report
            .push("------------------------------------------------------------------".to_string());
        report
    }
}
#[async_trait]
impl<T: Topology + Switch> Interpret<T> for HostNetworkConfigurationInterpret {
    fn get_name(&self) -> InterpretName {
        InterpretName::Custom("HostNetworkConfigurationInterpret")
    }

    fn get_version(&self) -> Version {
        todo!()
    }

    fn get_status(&self) -> InterpretStatus {
        todo!()
    }

    fn get_children(&self) -> Vec<Id> {
        vec![]
    }

    async fn execute(
        &self,
        _inventory: &Inventory,
        topology: &T,
    ) -> Result<Outcome, InterpretError> {
        if self.score.hosts.is_empty() {
            return Ok(Outcome::noop("No hosts to configure".into()));
        }

        let host_count = self.score.hosts.len();
        info!("Started network configuration for {host_count} host(s)...");

        info!("Setting up switch with sane defaults...");
        topology
            .setup_switch()
            .await
            .map_err(|e| InterpretError::new(format!("Switch setup failed: {e}")))?;
        info!("Switch ready");

        let mut current_host = 1;
        let mut host_configurations = vec![];
        for host in &self.score.hosts {
            let host_configuration = self
                .configure_network_for_host(topology, host, &current_host, &host_count)
                .await?;
            host_configurations.push(host_configuration);
            current_host += 1;
        }

        if current_host > 1 {
            let details = self.format_host_configuration(host_configurations);
            Ok(Outcome::success_with_details(
                format!(
                    "Configured {}/{} host(s)",
                    current_host - 1,
                    self.score.hosts.len()
                ),
                details,
            ))
        } else {
            Ok(Outcome::noop("No hosts configured".into()))
        }
    }
}
#[cfg(test)]
mod tests {
    use assertor::*;
    use harmony_types::{net::MacAddress, switch::PortLocation};
    use lazy_static::lazy_static;

    use crate::{
        hardware::HostCategory,
        topology::{
            HostNetworkConfig, PreparationError, PreparationOutcome, SwitchError, SwitchPort,
        },
    };
    use std::{
        str::FromStr,
        sync::{Arc, Mutex},
    };

    use super::*;

    lazy_static! {
        pub static ref HOST_ID: Id = Id::from_str("host-1").unwrap();
        pub static ref ANOTHER_HOST_ID: Id = Id::from_str("host-2").unwrap();
        pub static ref EXISTING_INTERFACE: NetworkInterface = NetworkInterface {
            mac_address: MacAddress::try_from("AA:BB:CC:DD:EE:F1".to_string()).unwrap(),
            name: "interface-1".into(),
            speed_mbps: None,
            mtu: 1,
        };
        pub static ref ANOTHER_EXISTING_INTERFACE: NetworkInterface = NetworkInterface {
            mac_address: MacAddress::try_from("AA:BB:CC:DD:EE:F2".to_string()).unwrap(),
            name: "interface-2".into(),
            speed_mbps: None,
            mtu: 1,
        };
        pub static ref UNKNOWN_INTERFACE: NetworkInterface = NetworkInterface {
            mac_address: MacAddress::try_from("11:22:33:44:55:61".to_string()).unwrap(),
            name: "unknown-interface".into(),
            speed_mbps: None,
            mtu: 1,
        };
        pub static ref PORT: PortLocation = PortLocation(1, 0, 42);
        pub static ref ANOTHER_PORT: PortLocation = PortLocation(2, 0, 42);
    }

    #[tokio::test]
    async fn should_setup_switch() {
        let host = given_host(&HOST_ID, vec![EXISTING_INTERFACE.clone()]);
        let score = given_score(vec![host]);
        let topology = TopologyWithSwitch::new();

        let _ = score.interpret(&Inventory::empty(), &topology).await;

        let switch_setup = topology.switch_setup.lock().unwrap();
        assert_that!(*switch_setup).is_true();
    }

    #[tokio::test]
    async fn host_with_one_mac_address_should_create_bond_with_one_interface() {
        let host = given_host(&HOST_ID, vec![EXISTING_INTERFACE.clone()]);
        let score = given_score(vec![host]);
        let topology = TopologyWithSwitch::new();

        let _ = score.interpret(&Inventory::empty(), &topology).await;

        let configured_host_networks = topology.configured_host_networks.lock().unwrap();
        assert_that!(*configured_host_networks).contains_exactly(vec![(
            HOST_ID.clone(),
            HostNetworkConfig {
                host_id: HOST_ID.clone(),
                switch_ports: vec![SwitchPort {
                    interface: EXISTING_INTERFACE.clone(),
                    port: PORT.clone(),
                }],
            },
        )]);
    }

    #[tokio::test]
    async fn host_with_multiple_mac_addresses_should_create_one_bond_with_all_interfaces() {
        let score = given_score(vec![given_host(
            &HOST_ID,
            vec![
                EXISTING_INTERFACE.clone(),
                ANOTHER_EXISTING_INTERFACE.clone(),
            ],
        )]);
        let topology = TopologyWithSwitch::new();

        let _ = score.interpret(&Inventory::empty(), &topology).await;

        let configured_host_networks = topology.configured_host_networks.lock().unwrap();
        assert_that!(*configured_host_networks).contains_exactly(vec![(
            HOST_ID.clone(),
            HostNetworkConfig {
                host_id: HOST_ID.clone(),
                switch_ports: vec![
                    SwitchPort {
                        interface: EXISTING_INTERFACE.clone(),
                        port: PORT.clone(),
                    },
                    SwitchPort {
                        interface: ANOTHER_EXISTING_INTERFACE.clone(),
                        port: ANOTHER_PORT.clone(),
                    },
                ],
            },
        )]);
    }

    #[tokio::test]
    async fn multiple_hosts_should_create_one_bond_per_host() {
        let score = given_score(vec![
            given_host(&HOST_ID, vec![EXISTING_INTERFACE.clone()]),
            given_host(&ANOTHER_HOST_ID, vec![ANOTHER_EXISTING_INTERFACE.clone()]),
        ]);
        let topology = TopologyWithSwitch::new();

        let _ = score.interpret(&Inventory::empty(), &topology).await;

        let configured_host_networks = topology.configured_host_networks.lock().unwrap();
        assert_that!(*configured_host_networks).contains_exactly(vec![
            (
                HOST_ID.clone(),
                HostNetworkConfig {
                    host_id: HOST_ID.clone(),
                    switch_ports: vec![SwitchPort {
                        interface: EXISTING_INTERFACE.clone(),
                        port: PORT.clone(),
                    }],
                },
            ),
            (
                ANOTHER_HOST_ID.clone(),
                HostNetworkConfig {
                    host_id: ANOTHER_HOST_ID.clone(),
                    switch_ports: vec![SwitchPort {
                        interface: ANOTHER_EXISTING_INTERFACE.clone(),
                        port: ANOTHER_PORT.clone(),
                    }],
                },
            ),
        ]);
    }

    #[tokio::test]
    async fn port_not_found_for_mac_address_should_not_configure_interface() {
        let score = given_score(vec![given_host(&HOST_ID, vec![UNKNOWN_INTERFACE.clone()])]);
        let topology = TopologyWithSwitch::new_port_not_found();

        let _ = score.interpret(&Inventory::empty(), &topology).await;

        let configured_host_networks = topology.configured_host_networks.lock().unwrap();
        assert_that!(*configured_host_networks).is_empty();
    }

    fn given_score(hosts: Vec<PhysicalHost>) -> HostNetworkConfigurationScore {
        HostNetworkConfigurationScore { hosts }
    }

    fn given_host(id: &Id, network_interfaces: Vec<NetworkInterface>) -> PhysicalHost {
        let network = network_interfaces.iter().map(given_interface).collect();
        PhysicalHost {
            id: id.clone(),
            category: HostCategory::Server,
            network,
            storage: vec![],
            labels: vec![],
            memory_modules: vec![],
            cpus: vec![],
        }
    }

    fn given_interface(
        interface: &NetworkInterface,
    ) -> harmony_inventory_agent::hwinfo::NetworkInterface {
        harmony_inventory_agent::hwinfo::NetworkInterface {
            name: interface.name.clone(),
            mac_address: interface.mac_address,
            speed_mbps: interface.speed_mbps,
            is_up: true,
            mtu: interface.mtu,
            ipv4_addresses: vec![],
            ipv6_addresses: vec![],
            driver: "driver".into(),
            firmware_version: None,
        }
    }

    struct TopologyWithSwitch {
        available_ports: Arc<Mutex<Vec<PortLocation>>>,
        configured_host_networks: Arc<Mutex<Vec<(Id, HostNetworkConfig)>>>,
        switch_setup: Arc<Mutex<bool>>,
    }

    impl TopologyWithSwitch {
        fn new() -> Self {
            Self {
                available_ports: Arc::new(Mutex::new(vec![PORT.clone(), ANOTHER_PORT.clone()])),
                configured_host_networks: Arc::new(Mutex::new(vec![])),
                switch_setup: Arc::new(Mutex::new(false)),
            }
        }

        fn new_port_not_found() -> Self {
            Self {
                available_ports: Arc::new(Mutex::new(vec![])),
                configured_host_networks: Arc::new(Mutex::new(vec![])),
                switch_setup: Arc::new(Mutex::new(false)),
            }
        }
    }

    #[async_trait]
    impl Topology for TopologyWithSwitch {
        fn name(&self) -> &str {
            "SwitchWithPortTopology"
        }

        async fn ensure_ready(&self) -> Result<PreparationOutcome, PreparationError> {
            Ok(PreparationOutcome::Success { details: "".into() })
        }
    }

    #[async_trait]
    impl Switch for TopologyWithSwitch {
        async fn setup_switch(&self) -> Result<(), SwitchError> {
            let mut switch_configured = self.switch_setup.lock().unwrap();
            *switch_configured = true;
            Ok(())
        }

        async fn get_port_for_mac_address(
            &self,
            _mac_address: &MacAddress,
        ) -> Result<Option<PortLocation>, SwitchError> {
            let mut ports = self.available_ports.lock().unwrap();
            if ports.is_empty() {
                return Ok(None);
            }
            Ok(Some(ports.remove(0)))
        }

        async fn configure_host_network(
            &self,
            config: &HostNetworkConfig,
        ) -> Result<(), SwitchError> {
            let mut configured_host_networks = self.configured_host_networks.lock().unwrap();
            configured_host_networks.push((config.host_id.clone(), config.clone()));
            Ok(())
        }
    }
}
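
The tests above also document the intended entry point for this (now removed) score: build it from inventory hosts and interpret it against any topology that implements both Topology and Switch. A rough usage sketch, with `hosts` and `topology` standing in for real values:

// Illustrative only: `topology` is any Topology + Switch implementation,
// like the TopologyWithSwitch test double above.
let score = HostNetworkConfigurationScore { hosts };
let outcome = score.interpret(&Inventory::empty(), &topology).await;
// On success, the Outcome details carry the per-host port mapping report.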

View File

@@ -50,7 +50,7 @@
 use crate::{
     modules::okd::{
         OKDSetup01InventoryScore, OKDSetup02BootstrapScore, OKDSetup03ControlPlaneScore,
-        OKDSetup04WorkersScore, OKDSetup05SanityCheckScore, OKDSetupPersistNetworkBondScore,
+        OKDSetup04WorkersScore, OKDSetup05SanityCheckScore,
         bootstrap_06_installation_report::OKDSetup06InstallationReportScore,
     },
     score::Score,
@@ -65,7 +65,6 @@ impl OKDInstallationPipeline {
             Box::new(OKDSetup01InventoryScore::new()),
             Box::new(OKDSetup02BootstrapScore::new()),
             Box::new(OKDSetup03ControlPlaneScore::new()),
-            Box::new(OKDSetupPersistNetworkBondScore::new()),
             Box::new(OKDSetup04WorkersScore::new()),
             Box::new(OKDSetup05SanityCheckScore::new()),
             Box::new(OKDSetup06InstallationReportScore::new()),

View File

@@ -6,7 +6,6 @@ mod bootstrap_05_sanity_check;
 mod bootstrap_06_installation_report;
 pub mod bootstrap_dhcp;
 pub mod bootstrap_load_balancer;
-mod bootstrap_persist_network_bond;
 pub mod dhcp;
 pub mod dns;
 pub mod installation;
@@ -20,6 +19,3 @@ pub use bootstrap_03_control_plane::*;
 pub use bootstrap_04_workers::*;
 pub use bootstrap_05_sanity_check::*;
 pub use bootstrap_06_installation_report::*;
-pub use bootstrap_persist_network_bond::*;
-pub mod crd;
-pub mod host_network;

View File

@@ -1,4 +1,3 @@
-pub mod node_exporter;
 mod shell;
 mod upgrade;
 pub use shell::*;

View File

@@ -1,70 +0,0 @@
use async_trait::async_trait;
use harmony_types::id::Id;
use log::info;
use serde::Serialize;

use crate::{
    data::Version,
    interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
    inventory::Inventory,
    score::Score,
    topology::{Topology, node_exporter::NodeExporter},
};

#[derive(Debug, Clone, Serialize)]
pub struct NodeExporterScore {}

impl<T: Topology + NodeExporter> Score<T> for NodeExporterScore {
    fn name(&self) -> String {
        "NodeExporterScore".to_string()
    }

    fn create_interpret(&self) -> Box<dyn Interpret<T>> {
        Box::new(NodeExporterInterpret {})
    }
}

#[derive(Debug)]
pub struct NodeExporterInterpret {}

#[async_trait]
impl<T: Topology + NodeExporter> Interpret<T> for NodeExporterInterpret {
    async fn execute(
        &self,
        _inventory: &Inventory,
        node_exporter: &T,
    ) -> Result<Outcome, InterpretError> {
        info!(
            "Making sure node exporter is initialized: {:?}",
            node_exporter.ensure_initialized().await?
        );
        info!("Applying Node Exporter configuration");
        node_exporter.commit_config().await?;
        info!("Reloading and restarting Node Exporter");
        node_exporter.reload_restart().await?;
        Ok(Outcome::success(
            "NodeExporter successfully configured".to_string(),
        ))
    }

    fn get_name(&self) -> InterpretName {
        InterpretName::Custom("NodeExporter")
    }

    fn get_version(&self) -> Version {
        todo!()
    }

    fn get_status(&self) -> InterpretStatus {
        todo!()
    }

    fn get_children(&self) -> Vec<Id> {
        todo!()
    }
}
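
The NodeExporter trait this interpret depended on is not shown in the diff; the following is a plausible reconstruction inferred only from the call sites above, and the real signatures in topology::node_exporter may differ:

// Hypothetical reconstruction -- inferred from usage, not taken from the source tree.
#[async_trait]
pub trait NodeExporter {
    // The result is logged with {:?} above, so it must implement Debug;
    // the ? operator implies its error converts into InterpretError.
    async fn ensure_initialized(&self) -> Result<Outcome, InterpretError>;
    async fn commit_config(&self) -> Result<(), InterpretError>;
    async fn reload_restart(&self) -> Result<(), InterpretError>;
}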

View File

@@ -12,8 +12,7 @@ use crate::modules::monitoring::kube_prometheus::crd::crd_alertmanager_config::C
 use crate::modules::monitoring::kube_prometheus::crd::crd_default_rules::build_default_application_rules;
 use crate::modules::monitoring::kube_prometheus::crd::crd_grafana::{
     Grafana, GrafanaDashboard, GrafanaDashboardSpec, GrafanaDatasource, GrafanaDatasourceConfig,
-    GrafanaDatasourceJsonData, GrafanaDatasourceSpec, GrafanaSecretKeyRef, GrafanaSpec,
-    GrafanaValueFrom, GrafanaValueSource,
+    GrafanaDatasourceSpec, GrafanaSpec,
 };
 use crate::modules::monitoring::kube_prometheus::crd::crd_prometheus_rules::{
     PrometheusRule, PrometheusRuleSpec, RuleGroup,
@@ -40,7 +39,7 @@ use crate::{
 };
 use harmony_types::id::Id;
 
-use super::prometheus::PrometheusMonitoring;
+use super::prometheus::PrometheusApplicationMonitoring;
 
 #[derive(Clone, Debug, Serialize)]
 pub struct K8sPrometheusCRDAlertingScore {
@@ -50,7 +49,7 @@ pub struct K8sPrometheusCRDAlertingScore {
     pub prometheus_rules: Vec<RuleGroup>,
 }
 
-impl<T: Topology + K8sclient + PrometheusMonitoring<CRDPrometheus>> Score<T>
+impl<T: Topology + K8sclient + PrometheusApplicationMonitoring<CRDPrometheus>> Score<T>
     for K8sPrometheusCRDAlertingScore
 {
     fn create_interpret(&self) -> Box<dyn crate::interpret::Interpret<T>> {
@@ -76,7 +75,7 @@ pub struct K8sPrometheusCRDAlertingInterpret {
 }
 
 #[async_trait]
-impl<T: Topology + K8sclient + PrometheusMonitoring<CRDPrometheus>> Interpret<T>
+impl<T: Topology + K8sclient + PrometheusApplicationMonitoring<CRDPrometheus>> Interpret<T>
     for K8sPrometheusCRDAlertingInterpret
 {
     async fn execute(
@@ -467,13 +466,10 @@ impl K8sPrometheusCRDAlertingInterpret {
             match_labels: label.clone(),
             match_expressions: vec![],
         };
+        let mut json_data = BTreeMap::new();
+        json_data.insert("timeInterval".to_string(), "5s".to_string());
         let namespace = self.sender.namespace.clone();
-        let json_data = GrafanaDatasourceJsonData {
-            time_interval: Some("5s".to_string()),
-            http_header_name1: None,
-            tls_skip_verify: Some(true),
-            oauth_pass_thru: Some(true),
-        };
         let json = build_default_dashboard(&namespace);
 
         let graf_data_source = GrafanaDatasource {
@@ -499,11 +495,7 @@ impl K8sPrometheusCRDAlertingInterpret {
                         "http://prometheus-operated.{}.svc.cluster.local:9090",
                         self.sender.namespace.clone()
                     ),
-                    secure_json_data: None,
-                    is_default: None,
-                    editable: None,
                 },
-                values_from: None,
             },
         };
@@ -524,9 +516,7 @@ impl K8sPrometheusCRDAlertingInterpret {
             spec: GrafanaDashboardSpec {
                 resync_period: Some("30s".to_string()),
                 instance_selector: labels.clone(),
-                json: Some(json),
-                grafana_com: None,
-                datasources: None,
+                json,
             },
         };
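
Note what the datasource change above implies: the typed GrafanaDatasourceJsonData options are replaced by a plain string map, so only the scrape interval survives and the tls_skip_verify / oauth_pass_thru settings are dropped. The new construction, as it appears in the diff:

use std::collections::BTreeMap;

// jsonData is now an untyped map; any extra options would have to be
// inserted here as raw key/value strings.
let mut json_data = BTreeMap::new();
json_data.insert("timeInterval".to_string(), "5s".to_string());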

View File

@@ -9,17 +9,11 @@ use crate::{
 };
 
 #[async_trait]
-pub trait PrometheusMonitoring<S: AlertSender> {
+pub trait PrometheusApplicationMonitoring<S: AlertSender> {
     async fn install_prometheus(
         &self,
         sender: &S,
         inventory: &Inventory,
         receivers: Option<Vec<Box<dyn AlertReceiver<S>>>>,
     ) -> Result<PreparationOutcome, PreparationError>;
-
-    async fn ensure_prometheus_operator(
-        &self,
-        sender: &S,
-        inventory: &Inventory,
-    ) -> Result<PreparationOutcome, PreparationError>;
 }
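
For implementors, the rename plus the removal of ensure_prometheus_operator means operator bootstrap now has to happen inside install_prometheus itself. A hedged sketch of an implementation after this change (`MyTopology` is illustrative, not part of the diff):

#[async_trait]
impl PrometheusApplicationMonitoring<CRDPrometheus> for MyTopology {
    async fn install_prometheus(
        &self,
        sender: &CRDPrometheus,
        inventory: &Inventory,
        receivers: Option<Vec<Box<dyn AlertReceiver<CRDPrometheus>>>>,
    ) -> Result<PreparationOutcome, PreparationError> {
        // With ensure_prometheus_operator gone from the trait, any operator
        // installation must be handled here before applying resources.
        todo!()
    }
}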

View File

@@ -38,7 +38,7 @@ use crate::{
 };
 use harmony_types::id::Id;
 
-use super::prometheus::PrometheusMonitoring;
+use super::prometheus::PrometheusApplicationMonitoring;
 
 #[derive(Clone, Debug, Serialize)]
 pub struct RHOBAlertingScore {
@@ -48,8 +48,8 @@ pub struct RHOBAlertingScore {
     pub prometheus_rules: Vec<RuleGroup>,
 }
 
-impl<T: Topology + K8sclient + Ingress + PrometheusMonitoring<RHOBObservability>> Score<T>
-    for RHOBAlertingScore
+impl<T: Topology + K8sclient + Ingress + PrometheusApplicationMonitoring<RHOBObservability>>
+    Score<T> for RHOBAlertingScore
 {
     fn create_interpret(&self) -> Box<dyn crate::interpret::Interpret<T>> {
         Box::new(RHOBAlertingInterpret {
@@ -74,8 +74,8 @@ pub struct RHOBAlertingInterpret {
 }
 
 #[async_trait]
-impl<T: Topology + K8sclient + Ingress + PrometheusMonitoring<RHOBObservability>> Interpret<T>
-    for RHOBAlertingInterpret
+impl<T: Topology + K8sclient + Ingress + PrometheusApplicationMonitoring<RHOBObservability>>
+    Interpret<T> for RHOBAlertingInterpret
 {
     async fn execute(
         &self,

View File

@@ -4,7 +4,7 @@ use std::{
 };
 
 use async_trait::async_trait;
-use log::{debug, warn};
+use log::{info, warn};
 use serde::{Deserialize, Serialize};
 use tokio::time::sleep;
@@ -19,8 +19,8 @@ use harmony_types::id::Id;
 
 #[derive(Debug, Clone, Serialize)]
 pub struct CephRemoveOsd {
-    pub osd_deployment_name: String,
-    pub rook_ceph_namespace: String,
+    osd_deployment_name: String,
+    rook_ceph_namespace: String,
 }
 
 impl<T: Topology + K8sclient> Score<T> for CephRemoveOsd {
@@ -54,17 +54,18 @@ impl<T: Topology + K8sclient> Interpret<T> for CephRemoveOsdInterpret {
         self.verify_deployment_scaled(client.clone()).await?;
         self.delete_deployment(client.clone()).await?;
         self.verify_deployment_deleted(client.clone()).await?;
-        self.purge_ceph_osd(client.clone()).await?;
-        self.verify_ceph_osd_removal(client.clone()).await?;
 
         let osd_id_full = self.get_ceph_osd_id().unwrap();
+        self.purge_ceph_osd(client.clone(), &osd_id_full).await?;
+        self.verify_ceph_osd_removal(client.clone(), &osd_id_full)
+            .await?;
 
         Ok(Outcome::success(format!(
             "Successfully removed OSD {} from rook-ceph cluster by deleting deployment {}",
             osd_id_full, self.score.osd_deployment_name
         )))
     }
 
     fn get_name(&self) -> InterpretName {
-        InterpretName::CephRemoveOsd
+        todo!()
     }
 
     fn get_version(&self) -> Version {
@@ -81,7 +82,7 @@ impl<T: Topology + K8sclient> Interpret<T> for CephRemoveOsdInterpret {
 }
 
 impl CephRemoveOsdInterpret {
-    pub fn get_ceph_osd_id_numeric(&self) -> Result<String, InterpretError> {
+    pub fn get_ceph_osd_id(&self) -> Result<String, InterpretError> {
         let osd_id_numeric = self
             .score
             .osd_deployment_name
@@ -93,14 +94,9 @@ impl CephRemoveOsdInterpret {
                     self.score.osd_deployment_name
                 ))
             })?;
-        Ok(osd_id_numeric.to_string())
-    }
-
-    pub fn get_ceph_osd_id(&self) -> Result<String, InterpretError> {
-        let osd_id_numeric = self.get_ceph_osd_id_numeric().unwrap();
         let osd_id_full = format!("osd.{}", osd_id_numeric);
-        debug!(
+        info!(
             "Targeting Ceph OSD: {} (parsed from deployment {})",
             osd_id_full, self.score.osd_deployment_name
         );
@@ -112,7 +108,6 @@ impl CephRemoveOsdInterpret {
         &self,
         client: Arc<K8sClient>,
     ) -> Result<Outcome, InterpretError> {
-        debug!("verifying toolbox exists");
         let toolbox_dep = "rook-ceph-tools".to_string();
         match client
@@ -154,7 +149,7 @@
         &self,
         client: Arc<K8sClient>,
     ) -> Result<Outcome, InterpretError> {
-        debug!(
+        info!(
             "Scaling down OSD deployment: {}",
             self.score.osd_deployment_name
         );
@@ -177,7 +172,7 @@
     ) -> Result<Outcome, InterpretError> {
         let (timeout, interval, start) = self.build_timer();
 
-        debug!("Waiting for OSD deployment to scale down to 0 replicas");
+        info!("Waiting for OSD deployment to scale down to 0 replicas");
         loop {
             let dep = client
                 .get_deployment(
@@ -185,9 +180,11 @@
                     Some(&self.score.rook_ceph_namespace),
                 )
                 .await?;
 
             if let Some(deployment) = dep {
                 if let Some(status) = deployment.status {
-                    if status.replicas == None && status.ready_replicas == None {
+                    if status.replicas.unwrap_or(1) == 0 && status.ready_replicas.unwrap_or(1) == 0
+                    {
                         return Ok(Outcome::success(
                             "Deployment successfully scaled down.".to_string(),
                         ));
@@ -215,7 +212,7 @@
         &self,
         client: Arc<K8sClient>,
     ) -> Result<Outcome, InterpretError> {
-        debug!(
+        info!(
             "Deleting OSD deployment: {}",
             self.score.osd_deployment_name
         );
@@ -237,7 +234,7 @@
     ) -> Result<Outcome, InterpretError> {
         let (timeout, interval, start) = self.build_timer();
 
-        debug!("Verifying OSD deployment deleted");
+        info!("Waiting for OSD deployment to scale down to 0 replicas");
         loop {
             let dep = client
                 .get_deployment(
@@ -247,7 +244,7 @@
                 .await?;
 
             if dep.is_none() {
-                debug!(
+                info!(
                     "Deployment {} successfully deleted.",
                     self.score.osd_deployment_name
                 );
@@ -279,10 +276,12 @@
         Ok(tree)
     }
 
-    pub async fn purge_ceph_osd(&self, client: Arc<K8sClient>) -> Result<Outcome, InterpretError> {
-        let osd_id_numeric = self.get_ceph_osd_id_numeric().unwrap();
-        let osd_id_full = self.get_ceph_osd_id().unwrap();
-        debug!(
+    pub async fn purge_ceph_osd(
+        &self,
+        client: Arc<K8sClient>,
+        osd_id_full: &str,
+    ) -> Result<Outcome, InterpretError> {
+        info!(
            "Purging OSD {} from Ceph cluster and removing its auth key",
            osd_id_full
        );
@@ -292,9 +291,8 @@
             "app".to_string(),
             Some(&self.score.rook_ceph_namespace),
             vec![
-                "sh",
-                "-c",
-                format!("ceph osd purge {osd_id_numeric} --yes-i-really-mean-it && ceph auth del {osd_id_full}").as_str(),
+                format!("ceph osd purge {osd_id_full} --yes-i-really-mean-it").as_str(),
+                format!("ceph auth del osd.{osd_id_full}").as_str(),
             ],
         )
         .await?;
@@ -307,10 +305,10 @@
     pub async fn verify_ceph_osd_removal(
         &self,
         client: Arc<K8sClient>,
+        osd_id_full: &str,
     ) -> Result<Outcome, InterpretError> {
         let (timeout, interval, start) = self.build_timer();
-        let osd_id_full = self.get_ceph_osd_id().unwrap();
-        debug!(
+        info!(
             "Verifying OSD {} has been removed from the Ceph tree...",
             osd_id_full
         );
@@ -320,7 +318,7 @@
             "rook-ceph-tools".to_string(),
             "app".to_string(),
             Some(&self.score.rook_ceph_namespace),
-            vec!["sh", "-c", "ceph osd tree -f json"],
+            vec!["ceph osd tree -f json"],
         )
         .await?;
 
         let tree =
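
Two details worth flagging in the new purge arguments. First, dropping the `sh -c` wrapper means each command string is now passed as a single argv element, which only works if the exec helper itself splits or shells out. Second, get_ceph_osd_id already prefixes the numeric id with "osd." (osd_id_full is built as format!("osd.{}", osd_id_numeric)), so the auth deletion appears to double the prefix. A small sketch of the strings actually produced, with a hypothetical id of 3:

let osd_id_numeric = "3";
let osd_id_full = format!("osd.{osd_id_numeric}"); // "osd.3"

assert_eq!(
    format!("ceph osd purge {osd_id_full} --yes-i-really-mean-it"),
    "ceph osd purge osd.3 --yes-i-really-mean-it"
);
assert_eq!(
    format!("ceph auth del osd.{osd_id_full}"),
    "ceph auth del osd.osd.3" // likely unintended: "osd." is applied twice
);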

View File

@@ -1,2 +1,2 @@
-pub mod ceph_remove_osd_score;
+pub mod ceph_osd_replacement_score;
 pub mod ceph_validate_health_score;

View File

@@ -40,7 +40,7 @@ pub fn init() {
         HarmonyEvent::HarmonyFinished => {
             if !details.is_empty() {
                 println!(
-                    "\n{} All done! Here's a few info for you:",
+                    "\n{} All done! Here's what's next for you:",
                     theme::EMOJI_SUMMARY
                 );
                 for detail in details.iter() {

View File

@@ -54,9 +54,6 @@ struct DeployArgs {
     #[arg(long = "profile", short = 'p', default_value = "dev")]
     harmony_profile: HarmonyProfile,
-
-    #[arg(long = "dry-run", short = 'd', default_value = "false")]
-    dry_run: bool,
 }
 
 #[derive(Args, Clone, Debug)]
@@ -181,7 +178,6 @@ async fn main() {
     command
         .env("HARMONY_USE_LOCAL_K3D", format!("{use_local_k3d}"))
         .env("HARMONY_PROFILE", format!("{}", args.harmony_profile))
-        .env("HARMONY_DRY_RUN", format!("{}", args.dry_run))
        .arg("-y")
        .arg("-a");

View File

@@ -19,7 +19,7 @@ use serde::{Deserialize, Serialize};
 ///
 /// **It is not meant to be very secure or unique**, it is suitable to generate up to 10 000 items per
 /// second with a reasonable collision rate of 0,000014 % as calculated by this calculator : https://kevingal.com/apps/collision.html
-#[derive(Debug, Clone, Eq, PartialEq, PartialOrd, Ord, Serialize, Deserialize)]
+#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
 pub struct Id {
     value: String,
 }
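
Dropping Eq, PartialOrd, and Ord narrows where Id can be used: ordered collections and sorting no longer compile without manual impls. A short illustrative sketch of what the removed derives had enabled:

use std::collections::BTreeMap;

// Both of these require Ord (and Eq) on Id, i.e. the derives removed above:
let hosts_by_id: BTreeMap<Id, String> = BTreeMap::new();
let mut ids: Vec<Id> = Vec::new();
ids.sort();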

View File

@@ -1,3 +1,2 @@
 pub mod id;
 pub mod net;
-pub mod switch;

View File

@@ -1,6 +1,6 @@
 use serde::{Deserialize, Serialize};
 
-#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, Serialize, Deserialize, PartialOrd, Ord)]
+#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, Serialize, Deserialize)]
 pub struct MacAddress(pub [u8; 6]);
 
 impl MacAddress {
@@ -41,7 +41,7 @@ impl TryFrom<String> for MacAddress {
         bytes[i] = u8::from_str_radix(part, 16).map_err(|_| {
             std::io::Error::new(
                 std::io::ErrorKind::InvalidInput,
-                format!("Invalid hex value in part {i}: '{part}'"),
+                format!("Invalid hex value in part {}: '{}'", i, part),
             )
         })?;
     }
@@ -106,8 +106,8 @@ impl Serialize for Url {
 impl std::fmt::Display for Url {
     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
         match self {
-            Url::LocalFolder(path) => write!(f, "{path}"),
-            Url::Url(url) => write!(f, "{url}"),
+            Url::LocalFolder(path) => write!(f, "{}", path),
+            Url::Url(url) => write!(f, "{}", url),
         }
     }
 }
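
The write! changes above swap Rust 2021's inline captured identifiers for positional arguments, which is behavior-neutral, and MacAddress keeps its TryFrom<String> parser. A usage sketch mirroring the tests earlier in this diff:

// Parsing a MAC address from its colon-separated hex form; the inner
// [u8; 6] is public, so the bytes can be inspected directly.
let mac = MacAddress::try_from("AA:BB:CC:DD:EE:F1".to_string()).unwrap();
assert_eq!(mac.0, [0xAA, 0xBB, 0xCC, 0xDD, 0xEE, 0xF1]);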

Some files were not shown because too many files have changed in this diff.