Compare commits

..

15 Commits

Author SHA1 Message Date
05f4769ffe Merge branch 'master' into okd_enable_user_workload_monitoring
Some checks failed
Run Check Script / check (pull_request) Has been cancelled
2025-10-21 13:48:33 +00:00
ed7f81aa1f fix(opnsense-config): ensure load balancer service configuration is idempotent (#129)
Some checks are pending
Run Check Script / check (push) Waiting to run
Compile and package harmony_composer / package_harmony_composer (push) Waiting to run
The previous implementation blindly added HAProxy components without checking for existing configurations on the same port, which caused duplicate entries and errors when a service was updated.

This commit refactors the logic to a robust "remove-then-add" strategy. The configure_service method now finds and removes any existing frontend and its dependent components (backend, servers, health check) before adding the new, complete service definition.

This change makes the process fully idempotent, preventing configuration drift and ensuring a predictable state.
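A minimal sketch of the remove-then-add flow described above; the `add_*` calls mirror the ones this change replaces, while `remove_existing_service` and the exact type names are hypothetical:

    pub fn configure_service(
        &mut self,
        frontend: HAProxyFrontend,
        backend: HAProxyBackend,
        servers: Vec<HAProxyServer>,
        healthcheck: Option<HAProxyHealthCheck>,
    ) {
        // Drop any frontend already bound to the same port, together with its
        // dependent backend, servers and health check, so repeated runs converge
        // instead of piling up duplicate entries.
        self.remove_existing_service(&frontend.bind);
        // Then add the new, complete service definition.
        self.add_frontend(frontend);
        self.add_backend(backend);
        self.add_servers(servers);
        if let Some(healthcheck) = healthcheck {
            self.add_healthcheck(healthcheck);
        }
    }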

Co-authored-by: Ian Letourneau <letourneau.ian@gmail.com>
Reviewed-on: #129
2025-10-20 19:18:49 +00:00
2d891e4463 Merge pull request 'feat(host_network): configure bonds and port channels' (#169) from config-host-network into master
Some checks failed
Run Check Script / check (push) Has been cancelled
Compile and package harmony_composer / package_harmony_composer (push) Has been cancelled
Reviewed-on: #169
2025-10-16 18:24:58 +00:00
f66e58b9ca Merge branch 'master' into config-host-network
Some checks failed
Run Check Script / check (pull_request) Has been cancelled
2025-10-16 18:24:34 +00:00
ea39d93aa7 feat(host_network): configure bonds on the host and switch port channels
Some checks failed
Run Check Script / check (pull_request) Has been cancelled
2025-10-16 14:23:41 -04:00
6989d208cf Merge pull request 'feat(switch/brocade): Implement client to interact with Brocade switches' (#168) from brocade-switch-client into master
Some checks are pending
Run Check Script / check (push) Waiting to run
Compile and package harmony_composer / package_harmony_composer (push) Waiting to run
Reviewed-on: #168
2025-10-16 18:23:01 +00:00
c0bd8007c7 feat(switch/brocade): Implement client to interact with Brocade Switch
All checks were successful
Run Check Script / check (pull_request) Successful in 1m5s
* Expose a high-level `brocade::init()` function to connect to a Brocade switch and automatically pick the best implementation based on its OS and version
* Implement a client for Brocade switches running on Network Operating System (NOS)
* Implement a client for older Brocade switches running on FastIron (partial implementation)

The architecture for the library is based on 3 layers:
1. The `BrocadeClient` trait to describe the available capabilities to
   interact with a Brocade switch. It is partly opinionated in order to
   offer higher level features to group multiple commands into a single
   function (e.g. create a port channel). Its implementations are
   basically just the commands to run on the switch and the functions to
   parse the output.
2. The `BrocadeShell` struct to make it easier to authenticate, send commands, and interact with the switch.
3. The `ssh` module to actually connect to the switch over SSH and execute the commands.

With time, we will add support for more Brocade switches and their various OS/versions. If needed, shared behavior could be extracted into a separate module to make it easier to add new implementations.
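For illustration, the layering looks roughly like this from a caller's point of view (mirroring the example binary in this change set; the credentials are placeholders and method names are as they appear at the head of this compare):

    // Layer 1, consumer view: `brocade::init()` detects the switch OS/version and
    // returns the matching `Box<dyn BrocadeClient>` implementation.
    let switch_addresses = vec![IpAddr::V4(Ipv4Addr::new(192, 168, 55, 101))];
    let brocade = brocade::init(&switch_addresses, 22, "admin", "password", None)
        .await
        .expect("Brocade client failed to connect");

    // Layers 2 and 3 (`BrocadeShell` and the `ssh` module) stay internal; callers
    // only see the high-level trait methods.
    let info = brocade.version().await?;
    let macs = brocade.get_mac_address_table().await?;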
2025-10-15 15:28:24 -04:00
cf576192a8 Merge pull request 'feat: Add openbao example, open-source fork of vault' (#162) from feat/openbao into master
All checks were successful
Run Check Script / check (push) Successful in 1m33s
Compile and package harmony_composer / package_harmony_composer (push) Successful in 6m51s
Reviewed-on: #162
2025-10-03 00:28:50 +00:00
f7e9669009 Merge branch 'master' into feat/openbao
All checks were successful
Run Check Script / check (pull_request) Successful in 1m23s
2025-10-02 21:11:44 +00:00
f65e16df7b feat: Remove unused helm command, refactor url to use hurl in some more places 2025-09-30 11:18:08 -04:00
cbbaae2ac8 okd_enable_user_workload_monitoring (#160)
Reviewed-on: #160
Co-authored-by: Willem <wrolleman@nationtech.io>
Co-committed-by: Willem <wrolleman@nationtech.io>
2025-09-29 14:32:38 +00:00
36b909ee20 fix: moved wait_for_pod_ready to K8sClient, switched rust yaml string to k8s_openapi resource ConfigMap 2025-09-29 09:36:01 -04:00
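The monitoring score itself sits outside the files shown here; as a rough sketch of what "k8s_openapi resource ConfigMap" refers to, user-workload monitoring on OKD/OpenShift is enabled through the cluster-monitoring-config ConfigMap in openshift-monitoring (names taken from the upstream documentation, not from this diff):

    use k8s_openapi::api::core::v1::ConfigMap;
    use kube::api::ObjectMeta;
    use std::collections::BTreeMap;

    let cm = ConfigMap {
        metadata: ObjectMeta {
            name: Some("cluster-monitoring-config".into()),
            namespace: Some("openshift-monitoring".into()),
            ..Default::default()
        },
        data: Some(BTreeMap::from([(
            "config.yaml".to_string(),
            "enableUserWorkload: true\n".to_string(),
        )])),
        ..Default::default()
    };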
4a500e4eb7 feat: Add openbao example, open-source fork of vault
Some checks failed
Run Check Script / check (pull_request) Failing after 5s
2025-09-24 21:54:32 -04:00
cdf75faa8f feat(monitoring): tested and modified ensure pod ready to wait for pod ready, which prevents check from failing immediately and gives time for the resource to be created
All checks were successful
Run Check Script / check (pull_request) Successful in 49s
2025-09-19 15:11:28 -04:00
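A minimal usage sketch of the resulting helper (the pod name and namespace below are illustrative, not taken from this change):

    // Polls the pod until its phase is "Running", checking every 5 s and giving up
    // after the 120 s timeout built into wait_for_pod_ready.
    client
        .wait_for_pod_ready(
            "prometheus-user-workload-0",
            Some("openshift-user-workload-monitoring"),
        )
        .await?;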
21e51b8d80 feat(monitoring): score to enable user-workload-monitoring within okd 2025-09-19 10:15:53 -04:00
36 changed files with 1695 additions and 587 deletions

Cargo.lock generated
View File

@@ -680,11 +680,13 @@ version = "0.1.0"
dependencies = [
"async-trait",
"env_logger",
"harmony_secret",
"harmony_types",
"log",
"regex",
"russh",
"russh-keys",
"serde",
"tokio",
]
@@ -1818,6 +1820,18 @@ dependencies = [
"url",
]
[[package]]
name = "example-openbao"
version = "0.1.0"
dependencies = [
"harmony",
"harmony_cli",
"harmony_macros",
"harmony_types",
"tokio",
"url",
]
[[package]]
name = "example-opnsense"
version = "0.1.0"
@@ -1904,6 +1918,8 @@ dependencies = [
"env_logger",
"harmony",
"harmony_macros",
"harmony_secret",
"harmony_secret_derive",
"harmony_tui",
"harmony_types",
"log",
@@ -2363,6 +2379,7 @@ dependencies = [
"once_cell",
"opnsense-config",
"opnsense-config-xml",
"option-ext",
"pretty_assertions",
"reqwest 0.11.27",
"russh",
@@ -2429,17 +2446,6 @@ dependencies = [
"tokio",
]
[[package]]
name = "harmony_derive"
version = "0.1.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2d138bbb32bb346299c5f95fbb53532313f39927cb47c411c99c634ef8665ef7"
dependencies = [
"proc-macro2",
"quote",
"syn 1.0.109",
]
[[package]]
name = "harmony_inventory_agent"
version = "0.1.0"
@@ -3886,19 +3892,6 @@ dependencies = [
"web-time",
]
[[package]]
name = "okd_host_network"
version = "0.1.0"
dependencies = [
"harmony",
"harmony_cli",
"harmony_derive",
"harmony_inventory_agent",
"harmony_macros",
"harmony_types",
"tokio",
]
[[package]]
name = "once_cell"
version = "1.21.3"
@@ -3927,6 +3920,7 @@ checksum = "d05e27ee213611ffe7d6348b942e8f942b37114c00cc03cec254295a4a17852e"
name = "opnsense-config"
version = "0.1.0"
dependencies = [
"assertor",
"async-trait",
"chrono",
"env_logger",

View File

@@ -15,7 +15,8 @@ members = [
"harmony_inventory_agent",
"harmony_secret_derive",
"harmony_secret",
"adr/agent_discovery/mdns", "brocade",
"adr/agent_discovery/mdns",
"brocade",
]
[workspace.package]

View File

@@ -14,3 +14,5 @@ tokio.workspace = true
log.workspace = true
env_logger.workspace = true
regex = "1.11.3"
harmony_secret = { path = "../harmony_secret" }
serde.workspace = true

View File

@@ -1,54 +1,70 @@
use std::net::{IpAddr, Ipv4Addr};
use brocade::BrocadeOptions;
use harmony_secret::{Secret, SecretManager};
use harmony_types::switch::PortLocation;
use serde::{Deserialize, Serialize};
#[derive(Secret, Clone, Debug, Serialize, Deserialize)]
struct BrocadeSwitchAuth {
username: String,
password: String,
}
#[tokio::main]
async fn main() {
env_logger::init();
env_logger::Builder::from_env(env_logger::Env::default().default_filter_or("info")).init();
let ip = IpAddr::V4(Ipv4Addr::new(10, 0, 0, 250)); // old brocade @ ianlet
// let ip = IpAddr::V4(Ipv4Addr::new(192, 168, 55, 101)); // brocade @ sto1
// let ip = IpAddr::V4(Ipv4Addr::new(10, 0, 0, 250)); // old brocade @ ianlet
let ip = IpAddr::V4(Ipv4Addr::new(192, 168, 55, 101)); // brocade @ sto1
// let ip = IpAddr::V4(Ipv4Addr::new(192, 168, 4, 11)); // brocade @ st
let switch_addresses = vec![ip];
let brocade = brocade::init(&switch_addresses, 22, "admin", "password", None)
let config = SecretManager::get_or_prompt::<BrocadeSwitchAuth>()
.await
.expect("Brocade client failed to connect");
.unwrap();
let brocade = brocade::init(
&switch_addresses,
22,
&config.username,
&config.password,
Some(BrocadeOptions {
dry_run: true,
..Default::default()
}),
)
.await
.expect("Brocade client failed to connect");
let entries = brocade.get_stack_topology().await.unwrap();
println!("Stack topology: {entries:#?}");
let entries = brocade.get_interfaces().await.unwrap();
println!("Interfaces: {entries:#?}");
let version = brocade.version().await.unwrap();
println!("Version: {version:?}");
println!("--------------");
println!("Showing MAC Address table...");
let mac_adddresses = brocade.show_mac_address_table().await.unwrap();
let mac_adddresses = brocade.get_mac_address_table().await.unwrap();
println!("VLAN\tMAC\t\t\tPORT");
for mac in mac_adddresses {
println!("{}\t{}\t{}", mac.vlan, mac.mac_address, mac.port);
}
println!("--------------");
let channel_name = "HARMONY_LAG";
println!("Clearing port channel '{channel_name}'...");
let channel_name = "1";
brocade.clear_port_channel(channel_name).await.unwrap();
println!("Cleared");
println!("--------------");
println!("Finding next available channel...");
let channel_id = brocade.find_available_channel_id().await.unwrap();
println!("Channel id: {channel_id}");
println!("--------------");
let channel_name = "HARMONY_LAG";
let ports = [PortLocation(1, 1, 3), PortLocation(1, 1, 4)];
println!("Creating port channel '{channel_name}' with ports {ports:?}'...");
let ports = [PortLocation(2, 0, 35)];
brocade
.create_port_channel(channel_id, channel_name, &ports)
.await
.unwrap();
println!("Created");
}

View File

@@ -1,7 +1,7 @@
use super::BrocadeClient;
use crate::{
BrocadeInfo, Error, ExecutionMode, MacAddressEntry, PortChannelId, parse_brocade_mac_address,
shell::BrocadeShell,
BrocadeInfo, Error, ExecutionMode, InterSwitchLink, InterfaceInfo, MacAddressEntry,
PortChannelId, PortOperatingMode, parse_brocade_mac_address, shell::BrocadeShell,
};
use async_trait::async_trait;
@@ -11,12 +11,22 @@ use regex::Regex;
use std::{collections::HashSet, str::FromStr};
pub struct FastIronClient {
pub shell: BrocadeShell,
pub version: BrocadeInfo,
shell: BrocadeShell,
version: BrocadeInfo,
}
impl FastIronClient {
pub fn parse_mac_entry(&self, line: &str) -> Option<Result<MacAddressEntry, Error>> {
pub fn init(mut shell: BrocadeShell, version_info: BrocadeInfo) -> Self {
shell.before_all(vec!["skip-page-display".into()]);
shell.after_all(vec!["page".into()]);
Self {
shell,
version: version_info,
}
}
fn parse_mac_entry(&self, line: &str) -> Option<Result<MacAddressEntry, Error>> {
debug!("[Brocade] Parsing mac address entry: {line}");
let parts: Vec<&str> = line.split_whitespace().collect();
if parts.len() < 3 {
@@ -49,10 +59,25 @@ impl FastIronClient {
}
}
pub fn build_port_channel_commands(
fn parse_stack_port_entry(&self, line: &str) -> Option<Result<InterSwitchLink, Error>> {
debug!("[Brocade] Parsing stack port entry: {line}");
let parts: Vec<&str> = line.split_whitespace().collect();
if parts.len() < 10 {
return None;
}
let local_port = PortLocation::from_str(parts[0]).ok()?;
Some(Ok(InterSwitchLink {
local_port,
remote_port: None,
}))
}
fn build_port_channel_commands(
&self,
channel_id: PortChannelId,
channel_name: &str,
channel_id: u8,
ports: &[PortLocation],
) -> Vec<String> {
let mut commands = vec![
@@ -80,7 +105,7 @@ impl BrocadeClient for FastIronClient {
Ok(self.version.clone())
}
async fn show_mac_address_table(&self) -> Result<Vec<MacAddressEntry>, Error> {
async fn get_mac_address_table(&self) -> Result<Vec<MacAddressEntry>, Error> {
info!("[Brocade] Showing MAC address table...");
let output = self
@@ -95,6 +120,30 @@ impl BrocadeClient for FastIronClient {
.collect()
}
async fn get_stack_topology(&self) -> Result<Vec<InterSwitchLink>, Error> {
let output = self
.shell
.run_command("show interface stack-ports", crate::ExecutionMode::Regular)
.await?;
output
.lines()
.skip(1)
.filter_map(|line| self.parse_stack_port_entry(line))
.collect()
}
async fn get_interfaces(&self) -> Result<Vec<InterfaceInfo>, Error> {
todo!()
}
async fn configure_interfaces(
&self,
_interfaces: Vec<(String, PortOperatingMode)>,
) -> Result<(), Error> {
todo!()
}
async fn find_available_channel_id(&self) -> Result<PortChannelId, Error> {
info!("[Brocade] Finding next available channel id...");
@@ -135,7 +184,7 @@ impl BrocadeClient for FastIronClient {
"[Brocade] Configuring port-channel '{channel_name} {channel_id}' with ports: {ports:?}"
);
let commands = self.build_port_channel_commands(channel_name, channel_id, ports);
let commands = self.build_port_channel_commands(channel_id, channel_name, ports);
self.shell
.run_commands(commands, ExecutionMode::Privileged)
.await?;
@@ -145,7 +194,7 @@ impl BrocadeClient for FastIronClient {
}
async fn clear_port_channel(&self, channel_name: &str) -> Result<(), Error> {
debug!("[Brocade] Clearing port-channel: {channel_name}");
info!("[Brocade] Clearing port-channel: {channel_name}");
let commands = vec![
"configure terminal".to_string(),
@@ -156,6 +205,7 @@ impl BrocadeClient for FastIronClient {
.run_commands(commands, ExecutionMode::Privileged)
.await?;
info!("[Brocade] Port-channel '{channel_name}' cleared.");
Ok(())
}
}

View File

@@ -4,6 +4,7 @@ use std::{
time::Duration,
};
use crate::network_operating_system::NetworkOperatingSystemClient;
use crate::{
fast_iron::FastIronClient,
shell::{BrocadeSession, BrocadeShell},
@@ -15,6 +16,7 @@ use harmony_types::switch::{PortDeclaration, PortLocation};
use regex::Regex;
mod fast_iron;
mod network_operating_system;
mod shell;
mod ssh;
@@ -36,7 +38,7 @@ pub struct TimeoutConfig {
impl Default for TimeoutConfig {
fn default() -> Self {
Self {
shell_ready: Duration::from_secs(3),
shell_ready: Duration::from_secs(10),
command_execution: Duration::from_secs(60), // Commands like `deploy` (for a LAG) can take a while
cleanup: Duration::from_secs(10),
message_wait: Duration::from_millis(500),
@@ -71,6 +73,70 @@ pub struct MacAddressEntry {
pub type PortChannelId = u8;
/// Represents a single physical or logical link connecting two switches within a stack or fabric.
///
/// This structure provides a standardized view of the topology regardless of the
/// underlying Brocade OS configuration (stacking vs. fabric).
#[derive(Debug, PartialEq, Eq, Clone)]
pub struct InterSwitchLink {
/// The local port on the switch where the topology command was run.
pub local_port: PortLocation,
/// The port on the directly connected neighboring switch.
pub remote_port: Option<PortLocation>,
}
/// Represents the key running configuration status of a single switch interface.
#[derive(Debug, PartialEq, Eq, Clone)]
pub struct InterfaceInfo {
/// The full configuration name (e.g., "TenGigabitEthernet 1/0/1", "FortyGigabitEthernet 2/0/2").
pub name: String,
/// The physical location of the interface.
pub port_location: PortLocation,
/// The parsed type and name prefix of the interface.
pub interface_type: InterfaceType,
/// The primary configuration mode defining the interface's behavior (L2, L3, Fabric).
pub operating_mode: Option<PortOperatingMode>,
/// Indicates the current state of the interface.
pub status: InterfaceStatus,
}
/// Categorizes the functional type of a switch interface.
#[derive(Debug, PartialEq, Eq, Clone)]
pub enum InterfaceType {
/// Physical or virtual Ethernet interface (e.g., TenGigabitEthernet, FortyGigabitEthernet).
Ethernet(String),
}
impl fmt::Display for InterfaceType {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
InterfaceType::Ethernet(name) => write!(f, "{name}"),
}
}
}
/// Defines the primary configuration mode of a switch interface, representing mutually exclusive roles.
#[derive(Debug, PartialEq, Eq, Clone)]
pub enum PortOperatingMode {
/// The interface is explicitly configured for Brocade fabric roles (ISL or Trunk enabled).
Fabric,
/// The interface is configured for standard Layer 2 switching as a Trunk port (`switchport mode trunk`).
Trunk,
/// The interface is configured for standard Layer 2 switching as an Access port (`switchport` without trunk mode).
Access,
}
/// Defines the possible status of an interface.
#[derive(Debug, PartialEq, Eq, Clone)]
pub enum InterfaceStatus {
/// The interface is connected.
Connected,
/// The interface is not connected and is not expected to be.
NotConnected,
/// The interface is not connected but is expected to be (configured with `no shutdown`).
SfpAbsent,
}
pub async fn init(
ip_addresses: &[IpAddr],
port: u16,
@@ -87,23 +153,81 @@ pub async fn init(
.await?;
Ok(match version_info.os {
BrocadeOs::FastIron => Box::new(FastIronClient {
shell,
version: version_info,
}),
BrocadeOs::NetworkOperatingSystem => todo!(),
BrocadeOs::FastIron => Box::new(FastIronClient::init(shell, version_info)),
BrocadeOs::NetworkOperatingSystem => {
Box::new(NetworkOperatingSystemClient::init(shell, version_info))
}
BrocadeOs::Unknown => todo!(),
})
}
#[async_trait]
pub trait BrocadeClient {
/// Retrieves the operating system and version details from the connected Brocade switch.
///
/// This is typically the first call made after establishing a connection to determine
/// the switch OS family (e.g., FastIron, NOS) for feature compatibility.
///
/// # Returns
///
/// A `BrocadeInfo` structure containing parsed OS type and version string.
async fn version(&self) -> Result<BrocadeInfo, Error>;
async fn show_mac_address_table(&self) -> Result<Vec<MacAddressEntry>, Error>;
/// Retrieves the dynamically learned MAC address table from the switch.
///
/// This is crucial for discovering where specific network endpoints (MAC addresses)
/// are currently located on the physical ports.
///
/// # Returns
///
/// A vector of `MacAddressEntry`, where each entry typically contains VLAN, MAC address,
/// and the associated port name/index.
async fn get_mac_address_table(&self) -> Result<Vec<MacAddressEntry>, Error>;
/// Derives the physical connections used to link multiple switches together
/// to form a single logical entity (stack, fabric, etc.).
///
/// This abstracts the underlying configuration (e.g., stack ports, fabric ports)
/// to return a standardized view of the topology.
///
/// # Returns
///
/// A vector of `InterSwitchLink` structs detailing which ports are used for stacking/fabric.
/// If the switch is not stacked, returns an empty vector.
async fn get_stack_topology(&self) -> Result<Vec<InterSwitchLink>, Error>;
/// Retrieves the status for all interfaces
///
/// # Returns
///
/// A vector of `InterfaceInfo` structures.
async fn get_interfaces(&self) -> Result<Vec<InterfaceInfo>, Error>;
/// Configures a set of interfaces to be operated with a specified mode (access ports, ISL, etc.).
async fn configure_interfaces(
&self,
interfaces: Vec<(String, PortOperatingMode)>,
) -> Result<(), Error>;
/// Scans the existing configuration to find the next available (unused)
/// Port-Channel ID (`lag` or `trunk`) for assignment.
///
/// # Returns
///
/// The smallest, unassigned `PortChannelId` within the supported range.
async fn find_available_channel_id(&self) -> Result<PortChannelId, Error>;
/// Creates and configures a new Port-Channel (Link Aggregation Group or LAG)
/// using the specified channel ID and ports.
///
/// The resulting configuration must be persistent (saved to startup-config).
/// Assumes a static LAG configuration mode unless specified otherwise by the implementation.
///
/// # Parameters
///
/// * `channel_id`: The ID (e.g., 1-128) for the logical port channel.
/// * `channel_name`: A descriptive name for the LAG (used in configuration context).
/// * `ports`: A slice of `PortLocation` structs defining the physical member ports.
async fn create_port_channel(
&self,
channel_id: PortChannelId,
@@ -111,6 +235,15 @@ pub trait BrocadeClient {
ports: &[PortLocation],
) -> Result<(), Error>;
/// Removes all configuration associated with the specified Port-Channel name.
///
/// This operation should be idempotent; attempting to clear a non-existent
/// channel should succeed (or return a benign error).
///
/// # Parameters
///
/// * `channel_name`: The name of the Port-Channel (LAG) to delete.
///
async fn clear_port_channel(&self, channel_name: &str) -> Result<(), Error>;
}
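For instance, the port-channel methods above compose as follows (mirroring the example binary; the channel name and port locations are placeholders):

    // Pick the first unused port-channel id, then create a LAG on two member ports.
    // Per the trait contract, create_port_channel persists the result to startup-config.
    let channel_id = brocade.find_available_channel_id().await?;
    brocade
        .create_port_channel(
            channel_id,
            "HARMONY_LAG",
            &[PortLocation(1, 0, 3), PortLocation(1, 0, 4)],
        )
        .await?;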

View File

@@ -0,0 +1,306 @@
use std::str::FromStr;
use async_trait::async_trait;
use harmony_types::switch::{PortDeclaration, PortLocation};
use log::{debug, info};
use crate::{
BrocadeClient, BrocadeInfo, Error, ExecutionMode, InterSwitchLink, InterfaceInfo,
InterfaceStatus, InterfaceType, MacAddressEntry, PortChannelId, PortOperatingMode,
parse_brocade_mac_address, shell::BrocadeShell,
};
pub struct NetworkOperatingSystemClient {
shell: BrocadeShell,
version: BrocadeInfo,
}
impl NetworkOperatingSystemClient {
pub fn init(mut shell: BrocadeShell, version_info: BrocadeInfo) -> Self {
shell.before_all(vec!["terminal length 0".into()]);
Self {
shell,
version: version_info,
}
}
fn parse_mac_entry(&self, line: &str) -> Option<Result<MacAddressEntry, Error>> {
debug!("[Brocade] Parsing mac address entry: {line}");
let parts: Vec<&str> = line.split_whitespace().collect();
if parts.len() < 5 {
return None;
}
let (vlan, mac_address, port) = match parts.len() {
5 => (
u16::from_str(parts[0]).ok()?,
parse_brocade_mac_address(parts[1]).ok()?,
parts[4].to_string(),
),
_ => (
u16::from_str(parts[0]).ok()?,
parse_brocade_mac_address(parts[1]).ok()?,
parts[5].to_string(),
),
};
let port =
PortDeclaration::parse(&port).map_err(|e| Error::UnexpectedError(format!("{e}")));
match port {
Ok(p) => Some(Ok(MacAddressEntry {
vlan,
mac_address,
port: p,
})),
Err(e) => Some(Err(e)),
}
}
fn parse_inter_switch_link_entry(&self, line: &str) -> Option<Result<InterSwitchLink, Error>> {
debug!("[Brocade] Parsing inter switch link entry: {line}");
let parts: Vec<&str> = line.split_whitespace().collect();
if parts.len() < 10 {
return None;
}
let local_port = PortLocation::from_str(parts[2]).ok()?;
let remote_port = PortLocation::from_str(parts[5]).ok()?;
Some(Ok(InterSwitchLink {
local_port,
remote_port: Some(remote_port),
}))
}
fn parse_interface_status_entry(&self, line: &str) -> Option<Result<InterfaceInfo, Error>> {
debug!("[Brocade] Parsing interface status entry: {line}");
let parts: Vec<&str> = line.split_whitespace().collect();
if parts.len() < 6 {
return None;
}
let interface_type = match parts[0] {
"Fo" => InterfaceType::Ethernet("FortyGigabitEthernet".to_string()),
"Te" => InterfaceType::Ethernet("TenGigabitEthernet".to_string()),
_ => return None,
};
let port_location = PortLocation::from_str(parts[1]).ok()?;
let status = match parts[2] {
"connected" => InterfaceStatus::Connected,
"notconnected" => InterfaceStatus::NotConnected,
"sfpAbsent" => InterfaceStatus::SfpAbsent,
_ => return None,
};
let operating_mode = match parts[3] {
"ISL" => Some(PortOperatingMode::Fabric),
"Trunk" => Some(PortOperatingMode::Trunk),
"Access" => Some(PortOperatingMode::Access),
"--" => None,
_ => return None,
};
Some(Ok(InterfaceInfo {
name: format!("{} {}", interface_type, port_location),
port_location,
interface_type,
operating_mode,
status,
}))
}
}
#[async_trait]
impl BrocadeClient for NetworkOperatingSystemClient {
async fn version(&self) -> Result<BrocadeInfo, Error> {
Ok(self.version.clone())
}
async fn get_mac_address_table(&self) -> Result<Vec<MacAddressEntry>, Error> {
let output = self
.shell
.run_command("show mac-address-table", ExecutionMode::Regular)
.await?;
output
.lines()
.skip(1)
.filter_map(|line| self.parse_mac_entry(line))
.collect()
}
async fn get_stack_topology(&self) -> Result<Vec<InterSwitchLink>, Error> {
let output = self
.shell
.run_command("show fabric isl", ExecutionMode::Regular)
.await?;
output
.lines()
.skip(6)
.filter_map(|line| self.parse_inter_switch_link_entry(line))
.collect()
}
async fn get_interfaces(&self) -> Result<Vec<InterfaceInfo>, Error> {
let output = self
.shell
.run_command(
"show interface status rbridge-id all",
ExecutionMode::Regular,
)
.await?;
output
.lines()
.skip(2)
.filter_map(|line| self.parse_interface_status_entry(line))
.collect()
}
async fn configure_interfaces(
&self,
interfaces: Vec<(String, PortOperatingMode)>,
) -> Result<(), Error> {
info!("[Brocade] Configuring {} interface(s)...", interfaces.len());
let mut commands = vec!["configure terminal".to_string()];
for interface in interfaces {
commands.push(format!("interface {}", interface.0));
match interface.1 {
PortOperatingMode::Fabric => {
commands.push("fabric isl enable".into());
commands.push("fabric trunk enable".into());
}
PortOperatingMode::Trunk => {
commands.push("switchport".into());
commands.push("switchport mode trunk".into());
commands.push("no spanning-tree shutdown".into());
commands.push("no fabric isl enable".into());
commands.push("no fabric trunk enable".into());
}
PortOperatingMode::Access => {
commands.push("switchport".into());
commands.push("switchport mode access".into());
commands.push("switchport access vlan 1".into());
commands.push("no spanning-tree shutdown".into());
commands.push("no fabric isl enable".into());
commands.push("no fabric trunk enable".into());
}
}
commands.push("no shutdown".into());
commands.push("exit".into());
}
commands.push("write memory".into());
self.shell
.run_commands(commands, ExecutionMode::Regular)
.await?;
info!("[Brocade] Interfaces configured.");
Ok(())
}
async fn find_available_channel_id(&self) -> Result<PortChannelId, Error> {
info!("[Brocade] Finding next available channel id...");
let output = self
.shell
.run_command("show port-channel", ExecutionMode::Regular)
.await?;
let used_ids: Vec<u8> = output
.lines()
.skip(6)
.filter_map(|line| {
let parts: Vec<&str> = line.split_whitespace().collect();
if parts.len() < 8 {
return None;
}
u8::from_str(parts[0]).ok()
})
.collect();
let mut next_id: u8 = 1;
loop {
if !used_ids.contains(&next_id) {
break;
}
next_id += 1;
}
info!("[Brocade] Found channel id: {next_id}");
Ok(next_id)
}
async fn create_port_channel(
&self,
channel_id: PortChannelId,
channel_name: &str,
ports: &[PortLocation],
) -> Result<(), Error> {
info!(
"[Brocade] Configuring port-channel '{channel_name} {channel_id}' with ports: {ports:?}"
);
let interfaces = self.get_interfaces().await?;
let mut commands = vec![
"configure terminal".into(),
format!("interface port-channel {}", channel_id),
"no shutdown".into(),
"exit".into(),
];
for port in ports {
let interface = interfaces.iter().find(|i| i.port_location == *port);
let Some(interface) = interface else {
continue;
};
commands.push(format!("interface {}", interface.name));
commands.push("no switchport".into());
commands.push("no ip address".into());
commands.push("no fabric isl enable".into());
commands.push("no fabric trunk enable".into());
commands.push(format!("channel-group {channel_id} mode active"));
commands.push("no shutdown".into());
commands.push("exit".into());
}
commands.push("write memory".into());
self.shell
.run_commands(commands, ExecutionMode::Regular)
.await?;
info!("[Brocade] Port-channel '{channel_name}' configured.");
Ok(())
}
async fn clear_port_channel(&self, channel_name: &str) -> Result<(), Error> {
info!("[Brocade] Clearing port-channel: {channel_name}");
let commands = vec![
"configure terminal".into(),
format!("no interface port-channel {}", channel_name),
"exit".into(),
"write memory".into(),
];
self.shell
.run_commands(commands, ExecutionMode::Regular)
.await?;
info!("[Brocade] Port-channel '{channel_name}' cleared.");
Ok(())
}
}

View File

@@ -14,11 +14,13 @@ use russh::ChannelMsg;
use tokio::time::timeout;
pub struct BrocadeShell {
pub ip: IpAddr,
pub port: u16,
pub username: String,
pub password: String,
pub options: BrocadeOptions,
ip: IpAddr,
port: u16,
username: String,
password: String,
options: BrocadeOptions,
before_all_commands: Vec<String>,
after_all_commands: Vec<String>,
}
impl BrocadeShell {
@@ -41,6 +43,8 @@ impl BrocadeShell {
port,
username: username.to_string(),
password: password.to_string(),
before_all_commands: vec![],
after_all_commands: vec![],
options,
})
}
@@ -66,14 +70,22 @@ impl BrocadeShell {
>,
{
let mut session = self.open_session(mode).await?;
let _ = session.run_commands(self.before_all_commands.clone()).await;
let result = callback(&mut session).await;
let _ = session.run_commands(self.after_all_commands.clone()).await;
session.close().await?;
result
}
pub async fn run_command(&self, command: &str, mode: ExecutionMode) -> Result<String, Error> {
let mut session = self.open_session(mode).await?;
let _ = session.run_commands(self.before_all_commands.clone()).await;
let result = session.run_command(command).await;
let _ = session.run_commands(self.after_all_commands.clone()).await;
session.close().await?;
result
}
@@ -84,10 +96,22 @@ impl BrocadeShell {
mode: ExecutionMode,
) -> Result<(), Error> {
let mut session = self.open_session(mode).await?;
let _ = session.run_commands(self.before_all_commands.clone()).await;
let result = session.run_commands(commands).await;
let _ = session.run_commands(self.after_all_commands.clone()).await;
session.close().await?;
result
}
pub fn before_all(&mut self, commands: Vec<String>) {
self.before_all_commands = commands;
}
pub fn after_all(&mut self, commands: Vec<String>) {
self.after_all_commands = commands;
}
}
pub struct BrocadeSession {
@@ -148,6 +172,10 @@ impl BrocadeSession {
}
pub async fn run_command(&mut self, command: &str) -> Result<String, Error> {
if self.should_skip_command(command) {
return Ok(String::new());
}
debug!("[Brocade] Running command: '{command}'...");
self.channel
@@ -170,7 +198,15 @@ impl BrocadeSession {
Ok(())
}
pub async fn collect_command_output(&mut self) -> Result<Vec<u8>, Error> {
fn should_skip_command(&self, command: &str) -> bool {
if (command.starts_with("write") || command.starts_with("deploy")) && self.options.dry_run {
info!("[Brocade] Dry-run mode enabled, skipping command: {command}");
return true;
}
false
}
async fn collect_command_output(&mut self) -> Result<Vec<u8>, Error> {
let mut output = Vec::new();
let start = Instant::now();
let read_timeout = Duration::from_millis(500);
@@ -222,7 +258,7 @@ impl BrocadeSession {
Ok(output)
}
pub fn check_for_command_errors(&self, output: &str, command: &str) -> Result<(), Error> {
fn check_for_command_errors(&self, output: &str, command: &str) -> Result<(), Error> {
const ERROR_PATTERNS: &[&str] = &[
"invalid input",
"syntax error",
@@ -254,7 +290,7 @@ impl BrocadeSession {
}
}
pub async fn wait_for_shell_ready(
async fn wait_for_shell_ready(
channel: &mut russh::Channel<russh::client::Msg>,
timeouts: &TimeoutConfig,
) -> Result<(), Error> {
@@ -266,6 +302,7 @@ pub async fn wait_for_shell_ready(
Ok(Some(ChannelMsg::Data { data })) => {
buffer.extend_from_slice(&data);
let output = String::from_utf8_lossy(&buffer);
let output = output.trim();
if output.ends_with('>') || output.ends_with('#') {
debug!("[Brocade] Shell ready");
return Ok(());
@@ -279,7 +316,7 @@ pub async fn wait_for_shell_ready(
Ok(())
}
pub async fn try_elevate_session(
async fn try_elevate_session(
channel: &mut russh::Channel<russh::client::Msg>,
username: &str,
password: &str,

View File

@@ -0,0 +1,14 @@
[package]
name = "example-openbao"
edition = "2024"
version.workspace = true
readme.workspace = true
license.workspace = true
[dependencies]
harmony = { path = "../../harmony" }
harmony_cli = { path = "../../harmony_cli" }
harmony_macros = { path = "../../harmony_macros" }
harmony_types = { path = "../../harmony_types" }
tokio.workspace = true
url.workspace = true

View File

@@ -0,0 +1,7 @@
To install an OpenBao instance with Harmony, simply run `cargo run -p example-openbao`.
Depending on your environment configuration, it will either install a local k3d cluster and deploy onto it, or deploy to a remote cluster.
Then follow the OpenBao documentation to initialize and unseal the instance; this makes OpenBao usable.
https://openbao.org/docs/platform/k8s/helm/run/

View File

@@ -0,0 +1,67 @@
use std::{collections::HashMap, str::FromStr};
use harmony::{
inventory::Inventory,
modules::helm::chart::{HelmChartScore, HelmRepository, NonBlankString},
topology::K8sAnywhereTopology,
};
use harmony_macros::hurl;
#[tokio::main]
async fn main() {
let values_yaml = Some(
r#"server:
standalone:
enabled: true
config: |
listener "tcp" {
tls_disable = true
address = "[::]:8200"
cluster_address = "[::]:8201"
}
storage "file" {
path = "/openbao/data"
}
service:
enabled: true
dataStorage:
enabled: true
size: 10Gi
storageClass: null
accessMode: ReadWriteOnce
auditStorage:
enabled: true
size: 10Gi
storageClass: null
accessMode: ReadWriteOnce"#
.to_string(),
);
let openbao = HelmChartScore {
namespace: Some(NonBlankString::from_str("openbao").unwrap()),
release_name: NonBlankString::from_str("openbao").unwrap(),
chart_name: NonBlankString::from_str("openbao/openbao").unwrap(),
chart_version: None,
values_overrides: None,
values_yaml,
create_namespace: true,
install_only: true,
repository: Some(HelmRepository::new(
"openbao".to_string(),
hurl!("https://openbao.github.io/openbao-helm"),
true,
)),
};
harmony_cli::run(
Inventory::autoload(),
K8sAnywhereTopology::from_env(),
vec![Box::new(openbao)],
None,
)
.await
.unwrap();
}

View File

@@ -78,6 +78,7 @@ askama.workspace = true
sqlx.workspace = true
inquire.workspace = true
brocade = { path = "../brocade" }
option-ext = "0.2.0"
[dev-dependencies]
pretty_assertions.workspace = true

View File

@@ -12,11 +12,11 @@ pub type FirewallGroup = Vec<PhysicalHost>;
pub struct PhysicalHost {
pub id: Id,
pub category: HostCategory,
pub network: Vec<NetworkInterface>, // FIXME: Don't use harmony_inventory_agent::NetworkInterface
pub storage: Vec<StorageDrive>, // FIXME: Don't use harmony_inventory_agent::StorageDrive
pub network: Vec<NetworkInterface>,
pub storage: Vec<StorageDrive>,
pub labels: Vec<Label>,
pub memory_modules: Vec<MemoryModule>, // FIXME: Don't use harmony_inventory_agent::MemoryModule
pub cpus: Vec<CPU>, // FIXME: Don't use harmony_inventory_agent::CPU
pub memory_modules: Vec<MemoryModule>,
pub cpus: Vec<CPU>,
}
impl PhysicalHost {

View File

@@ -2,9 +2,10 @@ use async_trait::async_trait;
use brocade::BrocadeOptions;
use harmony_macros::ip;
use harmony_secret::SecretManager;
use harmony_types::net::MacAddress;
use harmony_types::net::Url;
use harmony_types::switch::PortLocation;
use harmony_types::{
net::{MacAddress, Url},
switch::PortLocation,
};
use k8s_openapi::api::core::v1::Namespace;
use kube::api::ObjectMeta;
use log::debug;
@@ -15,40 +16,19 @@ use crate::executors::ExecutorError;
use crate::hardware::PhysicalHost;
use crate::infra::brocade::BrocadeSwitchAuth;
use crate::infra::brocade::BrocadeSwitchClient;
use crate::modules::okd::crd::InstallPlanApproval;
use crate::modules::okd::crd::OperatorGroup;
use crate::modules::okd::crd::OperatorGroupSpec;
use crate::modules::okd::crd::Subscription;
use crate::modules::okd::crd::SubscriptionSpec;
use crate::modules::okd::crd::nmstate;
use crate::modules::okd::crd::nmstate::NMState;
use crate::modules::okd::crd::nmstate::NodeNetworkConfigurationPolicy;
use crate::modules::okd::crd::nmstate::NodeNetworkConfigurationPolicySpec;
use crate::modules::okd::crd::{
InstallPlanApproval, OperatorGroup, OperatorGroupSpec, Subscription, SubscriptionSpec,
nmstate::{self, NMState, NodeNetworkConfigurationPolicy, NodeNetworkConfigurationPolicySpec},
};
use crate::topology::PxeOptions;
use super::DHCPStaticEntry;
use super::DhcpServer;
use super::DnsRecord;
use super::DnsRecordType;
use super::DnsServer;
use super::Firewall;
use super::HostNetworkConfig;
use super::HttpServer;
use super::IpAddress;
use super::K8sclient;
use super::LoadBalancer;
use super::LoadBalancerService;
use super::LogicalHost;
use super::PreparationError;
use super::PreparationOutcome;
use super::Router;
use super::Switch;
use super::SwitchClient;
use super::SwitchError;
use super::TftpServer;
use super::{
DHCPStaticEntry, DhcpServer, DnsRecord, DnsRecordType, DnsServer, Firewall, HostNetworkConfig,
HttpServer, IpAddress, K8sclient, LoadBalancer, LoadBalancerService, LogicalHost,
PreparationError, PreparationOutcome, Router, Switch, SwitchClient, SwitchError, TftpServer,
Topology, k8s::K8sClient,
};
use super::Topology;
use super::k8s::K8sClient;
use std::collections::BTreeMap;
use std::net::IpAddr;
use std::sync::Arc;
@@ -513,6 +493,12 @@ impl HttpServer for HAClusterTopology {
#[async_trait]
impl Switch for HAClusterTopology {
async fn setup_switch(&self) -> Result<(), SwitchError> {
let client = self.get_switch_client().await?;
client.setup().await?;
Ok(())
}
async fn get_port_for_mac_address(
&self,
mac_address: &MacAddress,
@@ -527,7 +513,7 @@ impl Switch for HAClusterTopology {
host: &PhysicalHost,
config: HostNetworkConfig,
) -> Result<(), SwitchError> {
// self.configure_bond(host, &config).await?;
self.configure_bond(host, &config).await?;
self.configure_port_channel(host, &config).await
}
}

View File

@@ -1,13 +1,19 @@
use std::time::Duration;
use derive_new::new;
use k8s_openapi::{
ClusterResourceScope, NamespaceResourceScope,
api::{apps::v1::Deployment, core::v1::Pod},
api::{
apps::v1::Deployment,
core::v1::{Pod, PodStatus},
},
};
use kube::{
Client, Config, Error, Resource,
api::{Api, AttachParams, DeleteParams, ListParams, Patch, PatchParams, ResourceExt},
config::{KubeConfigOptions, Kubeconfig},
core::ErrorResponse,
error::DiscoveryError,
runtime::reflector::Lookup,
};
use kube::{api::DynamicObject, runtime::conditions};
@@ -19,7 +25,7 @@ use log::{debug, error, trace};
use serde::{Serialize, de::DeserializeOwned};
use serde_json::{Value, json};
use similar::TextDiff;
use tokio::io::AsyncReadExt;
use tokio::{io::AsyncReadExt, time::sleep};
#[derive(new, Clone)]
pub struct K8sClient {
@@ -153,6 +159,41 @@ impl K8sClient {
}
}
pub async fn wait_for_pod_ready(
&self,
pod_name: &str,
namespace: Option<&str>,
) -> Result<(), Error> {
let mut elapsed = 0;
let interval = 5; // seconds between checks
let timeout_secs = 120;
loop {
let pod = self.get_pod(pod_name, namespace).await?;
if let Some(p) = pod {
if let Some(status) = p.status {
if let Some(phase) = status.phase {
if phase.to_lowercase() == "running" {
return Ok(());
}
}
}
}
if elapsed >= timeout_secs {
return Err(Error::Discovery(DiscoveryError::MissingResource(format!(
"'{}' in ns '{}' did not become ready within {}s",
pod_name,
namespace.unwrap(),
timeout_secs
))));
}
sleep(Duration::from_secs(interval)).await;
elapsed += interval;
}
}
/// Will execute a command in the first pod found that matches the specified label
/// '{label}={name}'
pub async fn exec_app_capture_output(
@@ -419,9 +460,12 @@ impl K8sClient {
.as_str()
.expect("couldn't get kind as str");
let split: Vec<&str> = api_version.splitn(2, "/").collect();
let g = split[0];
let v = split[1];
let mut it = api_version.splitn(2, '/');
let first = it.next().unwrap();
let (g, v) = match it.next() {
Some(second) => (first, second),
None => ("", first),
};
let gvk = GroupVersionKind::gvk(g, v, kind);
let api_resource = ApiResource::from_gvk(&gvk);

View File

@@ -28,13 +28,7 @@ pub trait LoadBalancer: Send + Sync {
&self,
service: &LoadBalancerService,
) -> Result<(), ExecutorError> {
debug!(
"Listing LoadBalancer services {:?}",
self.list_services().await
);
if !self.list_services().await.contains(service) {
self.add_service(service).await?;
}
self.add_service(service).await?;
Ok(())
}
}

View File

@@ -178,6 +178,8 @@ impl FromStr for DnsRecordType {
#[async_trait]
pub trait Switch: Send + Sync {
async fn setup_switch(&self) -> Result<(), SwitchError>;
async fn get_port_for_mac_address(
&self,
mac_address: &MacAddress,
@@ -224,6 +226,18 @@ impl Error for SwitchError {}
#[async_trait]
pub trait SwitchClient: Send + Sync {
/// Executes essential, idempotent, one-time initial configuration steps.
///
/// This is an opinionated procedure that sets up a switch to provide high-availability
/// capabilities as decided by the NationTech team.
///
/// This includes tasks such as enabling switchport on all interfaces
/// except the ones intended for Fabric Networking.
///
/// The implementation must ensure the operation is **idempotent** (safe to run multiple times)
/// and that it doesn't break existing configurations.
async fn setup(&self) -> Result<(), SwitchError>;
async fn find_port(
&self,
mac_address: &MacAddress,

View File

@@ -1,10 +1,11 @@
use async_trait::async_trait;
use brocade::{BrocadeClient, BrocadeOptions};
use brocade::{BrocadeClient, BrocadeOptions, InterSwitchLink, InterfaceStatus, PortOperatingMode};
use harmony_secret::Secret;
use harmony_types::{
net::{IpAddress, MacAddress},
switch::{PortDeclaration, PortLocation},
};
use option_ext::OptionExt;
use serde::{Deserialize, Serialize};
use crate::topology::{SwitchClient, SwitchError};
@@ -27,13 +28,52 @@ impl BrocadeSwitchClient {
#[async_trait]
impl SwitchClient for BrocadeSwitchClient {
async fn setup(&self) -> Result<(), SwitchError> {
let stack_topology = self
.brocade
.get_stack_topology()
.await
.map_err(|e| SwitchError::new(e.to_string()))?;
let interfaces = self
.brocade
.get_interfaces()
.await
.map_err(|e| SwitchError::new(e.to_string()))?;
let interfaces: Vec<(String, PortOperatingMode)> = interfaces
.into_iter()
.filter(|interface| {
interface.operating_mode.is_none() && interface.status == InterfaceStatus::Connected
})
.filter(|interface| {
!stack_topology.iter().any(|link: &InterSwitchLink| {
link.local_port == interface.port_location
|| link.remote_port.contains(&interface.port_location)
})
})
.map(|interface| (interface.name.clone(), PortOperatingMode::Access))
.collect();
if interfaces.is_empty() {
return Ok(());
}
self.brocade
.configure_interfaces(interfaces)
.await
.map_err(|e| SwitchError::new(e.to_string()))?;
Ok(())
}
async fn find_port(
&self,
mac_address: &MacAddress,
) -> Result<Option<PortLocation>, SwitchError> {
let table = self
.brocade
.show_mac_address_table()
.get_mac_address_table()
.await
.map_err(|e| SwitchError::new(format!("{e}")))?;
@@ -79,3 +119,267 @@ pub struct BrocadeSwitchAuth {
pub username: String,
pub password: String,
}
#[cfg(test)]
mod tests {
use std::sync::{Arc, Mutex};
use assertor::*;
use async_trait::async_trait;
use brocade::{
BrocadeClient, BrocadeInfo, Error, InterSwitchLink, InterfaceInfo, InterfaceStatus,
InterfaceType, MacAddressEntry, PortChannelId, PortOperatingMode,
};
use harmony_types::switch::PortLocation;
use crate::{infra::brocade::BrocadeSwitchClient, topology::SwitchClient};
#[tokio::test]
async fn setup_should_configure_ethernet_interfaces_as_access_ports() {
let first_interface = given_interface()
.with_port_location(PortLocation(1, 0, 1))
.build();
let second_interface = given_interface()
.with_port_location(PortLocation(1, 0, 4))
.build();
let brocade = Box::new(FakeBrocadeClient::new(
vec![],
vec![first_interface.clone(), second_interface.clone()],
));
let client = BrocadeSwitchClient {
brocade: brocade.clone(),
};
client.setup().await.unwrap();
let configured_interfaces = brocade.configured_interfaces.lock().unwrap();
assert_that!(*configured_interfaces).contains_exactly(vec![
(first_interface.name.clone(), PortOperatingMode::Access),
(second_interface.name.clone(), PortOperatingMode::Access),
]);
}
#[tokio::test]
async fn setup_with_an_already_configured_interface_should_skip_configuration() {
let brocade = Box::new(FakeBrocadeClient::new(
vec![],
vec![
given_interface()
.with_operating_mode(Some(PortOperatingMode::Access))
.build(),
],
));
let client = BrocadeSwitchClient {
brocade: brocade.clone(),
};
client.setup().await.unwrap();
let configured_interfaces = brocade.configured_interfaces.lock().unwrap();
assert_that!(*configured_interfaces).is_empty();
}
#[tokio::test]
async fn setup_with_a_disconnected_interface_should_skip_configuration() {
let brocade = Box::new(FakeBrocadeClient::new(
vec![],
vec![
given_interface()
.with_status(InterfaceStatus::SfpAbsent)
.build(),
given_interface()
.with_status(InterfaceStatus::NotConnected)
.build(),
],
));
let client = BrocadeSwitchClient {
brocade: brocade.clone(),
};
client.setup().await.unwrap();
let configured_interfaces = brocade.configured_interfaces.lock().unwrap();
assert_that!(*configured_interfaces).is_empty();
}
#[tokio::test]
async fn setup_with_inter_switch_links_should_not_configure_interfaces_used_to_form_stack() {
let brocade = Box::new(FakeBrocadeClient::new(
vec![
given_inter_switch_link()
.between(PortLocation(1, 0, 1), PortLocation(2, 0, 1))
.build(),
given_inter_switch_link()
.between(PortLocation(2, 0, 2), PortLocation(3, 0, 1))
.build(),
],
vec![
given_interface()
.with_port_location(PortLocation(1, 0, 1))
.build(),
given_interface()
.with_port_location(PortLocation(2, 0, 1))
.build(),
given_interface()
.with_port_location(PortLocation(3, 0, 1))
.build(),
],
));
let client = BrocadeSwitchClient {
brocade: brocade.clone(),
};
client.setup().await.unwrap();
let configured_interfaces = brocade.configured_interfaces.lock().unwrap();
assert_that!(*configured_interfaces).is_empty();
}
#[derive(Clone)]
struct FakeBrocadeClient {
stack_topology: Vec<InterSwitchLink>,
interfaces: Vec<InterfaceInfo>,
configured_interfaces: Arc<Mutex<Vec<(String, PortOperatingMode)>>>,
}
#[async_trait]
impl BrocadeClient for FakeBrocadeClient {
async fn version(&self) -> Result<BrocadeInfo, Error> {
todo!()
}
async fn get_mac_address_table(&self) -> Result<Vec<MacAddressEntry>, Error> {
todo!()
}
async fn get_stack_topology(&self) -> Result<Vec<InterSwitchLink>, Error> {
Ok(self.stack_topology.clone())
}
async fn get_interfaces(&self) -> Result<Vec<InterfaceInfo>, Error> {
Ok(self.interfaces.clone())
}
async fn configure_interfaces(
&self,
interfaces: Vec<(String, PortOperatingMode)>,
) -> Result<(), Error> {
let mut configured_interfaces = self.configured_interfaces.lock().unwrap();
*configured_interfaces = interfaces;
Ok(())
}
async fn find_available_channel_id(&self) -> Result<PortChannelId, Error> {
todo!()
}
async fn create_port_channel(
&self,
_channel_id: PortChannelId,
_channel_name: &str,
_ports: &[PortLocation],
) -> Result<(), Error> {
todo!()
}
async fn clear_port_channel(&self, _channel_name: &str) -> Result<(), Error> {
todo!()
}
}
impl FakeBrocadeClient {
fn new(stack_topology: Vec<InterSwitchLink>, interfaces: Vec<InterfaceInfo>) -> Self {
Self {
stack_topology,
interfaces,
configured_interfaces: Arc::new(Mutex::new(vec![])),
}
}
}
struct InterfaceInfoBuilder {
port_location: Option<PortLocation>,
interface_type: Option<InterfaceType>,
operating_mode: Option<PortOperatingMode>,
status: Option<InterfaceStatus>,
}
impl InterfaceInfoBuilder {
fn build(&self) -> InterfaceInfo {
let interface_type = self
.interface_type
.clone()
.unwrap_or(InterfaceType::Ethernet("TenGigabitEthernet".into()));
let port_location = self.port_location.clone().unwrap_or(PortLocation(1, 0, 1));
let name = format!("{interface_type} {port_location}");
let status = self.status.clone().unwrap_or(InterfaceStatus::Connected);
InterfaceInfo {
name,
port_location,
interface_type,
operating_mode: self.operating_mode.clone(),
status,
}
}
fn with_port_location(self, port_location: PortLocation) -> Self {
Self {
port_location: Some(port_location),
..self
}
}
fn with_operating_mode(self, operating_mode: Option<PortOperatingMode>) -> Self {
Self {
operating_mode,
..self
}
}
fn with_status(self, status: InterfaceStatus) -> Self {
Self {
status: Some(status),
..self
}
}
}
struct InterSwitchLinkBuilder {
link: Option<(PortLocation, PortLocation)>,
}
impl InterSwitchLinkBuilder {
fn build(&self) -> InterSwitchLink {
let link = self
.link
.clone()
.unwrap_or((PortLocation(1, 0, 1), PortLocation(2, 0, 1)));
InterSwitchLink {
local_port: link.0,
remote_port: Some(link.1),
}
}
fn between(self, local_port: PortLocation, remote_port: PortLocation) -> Self {
Self {
link: Some((local_port, remote_port)),
}
}
}
fn given_interface() -> InterfaceInfoBuilder {
InterfaceInfoBuilder {
port_location: None,
interface_type: None,
operating_mode: None,
status: None,
}
}
fn given_inter_switch_link() -> InterSwitchLinkBuilder {
InterSwitchLinkBuilder { link: None }
}
}

View File

@@ -26,19 +26,13 @@ impl LoadBalancer for OPNSenseFirewall {
}
async fn add_service(&self, service: &LoadBalancerService) -> Result<(), ExecutorError> {
warn!(
"TODO : the current implementation does not check / cleanup / merge with existing haproxy services properly. Make sure to manually verify that the configuration is correct after executing any operation here"
);
let mut config = self.opnsense_config.write().await;
let mut load_balancer = config.load_balancer();
let (frontend, backend, servers, healthcheck) =
harmony_load_balancer_service_to_haproxy_xml(service);
let mut load_balancer = config.load_balancer();
load_balancer.add_backend(backend);
load_balancer.add_frontend(frontend);
load_balancer.add_servers(servers);
if let Some(healthcheck) = healthcheck {
load_balancer.add_healthcheck(healthcheck);
}
load_balancer.configure_service(frontend, backend, servers, healthcheck);
Ok(())
}
@@ -106,7 +100,7 @@ pub(crate) fn haproxy_xml_config_to_harmony_loadbalancer(
.backends
.backends
.iter()
.find(|b| b.uuid == frontend.default_backend);
.find(|b| Some(b.uuid.clone()) == frontend.default_backend);
let mut health_check = None;
match matching_backend {
@@ -116,8 +110,7 @@ pub(crate) fn haproxy_xml_config_to_harmony_loadbalancer(
}
None => {
warn!(
"HAProxy config could not find a matching backend for frontend {:?}",
frontend
"HAProxy config could not find a matching backend for frontend {frontend:?}"
);
}
}
@@ -152,11 +145,11 @@ pub(crate) fn get_servers_for_backend(
.servers
.iter()
.filter_map(|server| {
let address = server.address.clone()?;
let port = server.port?;
if backend_servers.contains(&server.uuid.as_str()) {
return Some(BackendServer {
address: server.address.clone(),
port: server.port,
});
return Some(BackendServer { address, port });
}
None
})
@@ -347,7 +340,7 @@ pub(crate) fn harmony_load_balancer_service_to_haproxy_xml(
name: format!("frontend_{}", service.listening_port),
bind: service.listening_port.to_string(),
mode: "tcp".to_string(), // TODO do not depend on health check here
default_backend: backend.uuid.clone(),
default_backend: Some(backend.uuid.clone()),
..Default::default()
};
info!("HAPRoxy frontend and backend mode currently hardcoded to tcp");
@@ -361,8 +354,8 @@ fn server_to_haproxy_server(server: &BackendServer) -> HAProxyServer {
uuid: Uuid::new_v4().to_string(),
name: format!("{}_{}", &server.address, &server.port),
enabled: 1,
address: server.address.clone(),
port: server.port,
address: Some(server.address.clone()),
port: Some(server.port),
mode: "active".to_string(),
server_type: "static".to_string(),
..Default::default()
@@ -385,8 +378,8 @@ mod tests {
let mut haproxy = HAProxy::default();
let server = HAProxyServer {
uuid: "server1".to_string(),
address: "192.168.1.1".to_string(),
port: 80,
address: Some("192.168.1.1".to_string()),
port: Some(80),
..Default::default()
};
haproxy.servers.servers.push(server);
@@ -411,8 +404,8 @@ mod tests {
let mut haproxy = HAProxy::default();
let server = HAProxyServer {
uuid: "server1".to_string(),
address: "192.168.1.1".to_string(),
port: 80,
address: Some("192.168.1.1".to_string()),
port: Some(80),
..Default::default()
};
haproxy.servers.servers.push(server);
@@ -431,8 +424,8 @@ mod tests {
let mut haproxy = HAProxy::default();
let server = HAProxyServer {
uuid: "server1".to_string(),
address: "192.168.1.1".to_string(),
port: 80,
address: Some("192.168.1.1".to_string()),
port: Some(80),
..Default::default()
};
haproxy.servers.servers.push(server);
@@ -453,16 +446,16 @@ mod tests {
let mut haproxy = HAProxy::default();
let server = HAProxyServer {
uuid: "server1".to_string(),
address: "some-hostname.test.mcd".to_string(),
port: 80,
address: Some("some-hostname.test.mcd".to_string()),
port: Some(80),
..Default::default()
};
haproxy.servers.servers.push(server);
let server = HAProxyServer {
uuid: "server2".to_string(),
address: "192.168.1.2".to_string(),
port: 8080,
address: Some("192.168.1.2".to_string()),
port: Some(8080),
..Default::default()
};
haproxy.servers.servers.push(server);

View File

@@ -1,4 +1,5 @@
use async_trait::async_trait;
use harmony_macros::hurl;
use kube::{Api, api::GroupVersionKind};
use log::{debug, warn};
use non_blank_string_rs::NonBlankString;
@@ -1051,7 +1052,7 @@ commitServer:
install_only: false,
repository: Some(HelmRepository::new(
"argo".to_string(),
url::Url::parse("https://argoproj.github.io/argo-helm").unwrap(),
hurl!("https://argoproj.github.io/argo-helm"),
true,
)),
}

View File

@@ -1,5 +1,6 @@
use std::{collections::HashMap, str::FromStr};
use harmony_macros::hurl;
use non_blank_string_rs::NonBlankString;
use serde::Serialize;
use url::Url;
@@ -33,7 +34,7 @@ impl<T: Topology + HelmCommand> Score<T> for CertManagerHelmScore {
install_only: true,
repository: Some(HelmRepository::new(
"jetstack".to_string(),
Url::parse("https://charts.jetstack.io").unwrap(),
hurl!("https://charts.jetstack.io"),
true,
)),
}

View File

@@ -5,6 +5,7 @@ use crate::score::Score;
use crate::topology::{HelmCommand, Topology};
use async_trait::async_trait;
use harmony_types::id::Id;
use harmony_types::net::Url;
use helm_wrapper_rs;
use helm_wrapper_rs::blocking::{DefaultHelmExecutor, HelmExecutor};
use log::{debug, info, warn};
@@ -15,7 +16,6 @@ use std::path::Path;
use std::process::{Command, Output, Stdio};
use std::str::FromStr;
use temp_file::TempFile;
use url::Url;
#[derive(Debug, Clone, Serialize)]
pub struct HelmRepository {
@@ -78,7 +78,8 @@ impl HelmChartInterpret {
repo.name, repo.url, repo.force_update
);
let mut add_args = vec!["repo", "add", &repo.name, repo.url.as_str()];
let repo_url = repo.url.to_string();
let mut add_args = vec!["repo", "add", &repo.name, &repo_url];
if repo.force_update {
add_args.push("--force-update");
}

View File

@@ -1,364 +0,0 @@
use async_trait::async_trait;
use log::debug;
use serde::Serialize;
use std::collections::HashMap;
use std::io::ErrorKind;
use std::path::PathBuf;
use std::process::{Command, Output};
use temp_dir::{self, TempDir};
use temp_file::TempFile;
use crate::data::Version;
use crate::interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome};
use crate::inventory::Inventory;
use crate::score::Score;
use crate::topology::{HelmCommand, K8sclient, Topology};
use harmony_types::id::Id;
#[derive(Clone)]
pub struct HelmCommandExecutor {
pub env: HashMap<String, String>,
pub path: Option<PathBuf>,
pub args: Vec<String>,
pub api_versions: Option<Vec<String>>,
pub kube_version: String,
pub debug: Option<bool>,
pub globals: HelmGlobals,
pub chart: HelmChart,
}
#[derive(Clone)]
pub struct HelmGlobals {
pub chart_home: Option<PathBuf>,
pub config_home: Option<PathBuf>,
}
#[derive(Debug, Clone, Serialize)]
pub struct HelmChart {
pub name: String,
pub version: Option<String>,
pub repo: Option<String>,
pub release_name: Option<String>,
pub namespace: Option<String>,
pub additional_values_files: Vec<PathBuf>,
pub values_file: Option<PathBuf>,
pub values_inline: Option<String>,
pub include_crds: Option<bool>,
pub skip_hooks: Option<bool>,
pub api_versions: Option<Vec<String>>,
pub kube_version: Option<String>,
pub name_template: String,
pub skip_tests: Option<bool>,
pub debug: Option<bool>,
}
impl HelmCommandExecutor {
pub fn generate(mut self) -> Result<String, std::io::Error> {
if self.globals.chart_home.is_none() {
self.globals.chart_home = Some(PathBuf::from("charts"));
}
if self
.clone()
.chart
.clone()
.chart_exists_locally(self.clone().globals.chart_home.unwrap())
.is_none()
{
if self.chart.repo.is_none() {
return Err(std::io::Error::new(
ErrorKind::Other,
"Chart doesn't exist locally and no repo specified",
));
}
self.clone().run_command(
self.chart
.clone()
.pull_command(self.globals.chart_home.clone().unwrap()),
)?;
}
let out = self.clone().run_command(
self.chart
.clone()
.helm_args(self.globals.chart_home.clone().unwrap()),
)?;
// TODO: don't use unwrap here
let s = String::from_utf8(out.stdout).unwrap();
debug!("helm stderr: {}", String::from_utf8(out.stderr).unwrap());
debug!("helm status: {}", out.status);
debug!("helm output: {s}");
let clean = s.split_once("---").unwrap().1;
Ok(clean.to_string())
}
pub fn version(self) -> Result<String, std::io::Error> {
let out = self.run_command(vec![
"version".to_string(),
"-c".to_string(),
"--short".to_string(),
])?;
// TODO: don't use unwrap
Ok(String::from_utf8(out.stdout).unwrap())
}
pub fn run_command(mut self, mut args: Vec<String>) -> Result<Output, std::io::Error> {
if let Some(d) = self.debug {
if d {
args.push("--debug".to_string());
}
}
let path = if let Some(p) = self.path {
p
} else {
PathBuf::from("helm")
};
let config_home = match self.globals.config_home {
Some(p) => p,
None => PathBuf::from(TempDir::new()?.path()),
};
if let Some(yaml_str) = self.chart.values_inline {
let tf: TempFile = temp_file::with_contents(yaml_str.as_bytes());
self.chart
.additional_values_files
.push(PathBuf::from(tf.path()));
};
self.env.insert(
"HELM_CONFIG_HOME".to_string(),
config_home.to_str().unwrap().to_string(),
);
self.env.insert(
"HELM_CACHE_HOME".to_string(),
config_home.to_str().unwrap().to_string(),
);
self.env.insert(
"HELM_DATA_HOME".to_string(),
config_home.to_str().unwrap().to_string(),
);
Command::new(path).envs(self.env).args(args).output()
}
}
impl HelmChart {
pub fn chart_exists_locally(self, chart_home: PathBuf) -> Option<PathBuf> {
let chart_path =
PathBuf::from(chart_home.to_str().unwrap().to_string() + "/" + &self.name.to_string());
if chart_path.exists() {
Some(chart_path)
} else {
None
}
}
pub fn pull_command(self, chart_home: PathBuf) -> Vec<String> {
let mut args = vec![
"pull".to_string(),
"--untar".to_string(),
"--untardir".to_string(),
chart_home.to_str().unwrap().to_string(),
];
match self.repo {
Some(r) => {
if r.starts_with("oci://") {
args.push(
r.trim_end_matches("/").to_string() + "/" + self.name.clone().as_str(),
);
} else {
args.push("--repo".to_string());
args.push(r.to_string());
args.push(self.name);
}
}
None => args.push(self.name),
};
if let Some(v) = self.version {
args.push("--version".to_string());
args.push(v.to_string());
}
args
}
pub fn helm_args(self, chart_home: PathBuf) -> Vec<String> {
let mut args: Vec<String> = vec!["template".to_string()];
match self.release_name {
Some(rn) => args.push(rn.to_string()),
None => args.push("--generate-name".to_string()),
}
args.push(
PathBuf::from(chart_home.to_str().unwrap().to_string() + "/" + self.name.as_str())
.to_str()
.unwrap()
.to_string(),
);
if let Some(n) = self.namespace {
args.push("--namespace".to_string());
args.push(n.to_string());
}
if let Some(f) = self.values_file {
args.push("-f".to_string());
args.push(f.to_str().unwrap().to_string());
}
for f in self.additional_values_files {
args.push("-f".to_string());
args.push(f.to_str().unwrap().to_string());
}
if let Some(vv) = self.api_versions {
for v in vv {
args.push("--api-versions".to_string());
args.push(v);
}
}
if let Some(kv) = self.kube_version {
args.push("--kube-version".to_string());
args.push(kv);
}
if let Some(crd) = self.include_crds {
if crd {
args.push("--include-crds".to_string());
}
}
if let Some(st) = self.skip_tests {
if st {
args.push("--skip-tests".to_string());
}
}
if let Some(sh) = self.skip_hooks {
if sh {
args.push("--no-hooks".to_string());
}
}
if let Some(d) = self.debug {
if d {
args.push("--debug".to_string());
}
}
args
}
}
#[derive(Debug, Clone, Serialize)]
pub struct HelmChartScoreV2 {
pub chart: HelmChart,
}
impl<T: Topology + K8sclient + HelmCommand> Score<T> for HelmChartScoreV2 {
fn create_interpret(&self) -> Box<dyn Interpret<T>> {
Box::new(HelmChartInterpretV2 {
score: self.clone(),
})
}
fn name(&self) -> String {
format!(
"{} {} HelmChartScoreV2",
self.chart
.release_name
.clone()
.unwrap_or("Unknown".to_string()),
self.chart.name
)
}
}
#[derive(Debug, Serialize)]
pub struct HelmChartInterpretV2 {
pub score: HelmChartScoreV2,
}
impl HelmChartInterpretV2 {}
#[async_trait]
impl<T: Topology + K8sclient + HelmCommand> Interpret<T> for HelmChartInterpretV2 {
async fn execute(
&self,
_inventory: &Inventory,
_topology: &T,
) -> Result<Outcome, InterpretError> {
let _ns = self
.score
.chart
.namespace
.as_ref()
.unwrap_or_else(|| todo!("Get namespace from active kubernetes cluster"));
let helm_executor = HelmCommandExecutor {
env: HashMap::new(),
path: None,
args: vec![],
api_versions: None,
kube_version: "v1.33.0".to_string(),
debug: Some(false),
globals: HelmGlobals {
chart_home: None,
config_home: None,
},
chart: self.score.chart.clone(),
};
// let mut helm_options = Vec::new();
// if self.score.create_namespace {
// helm_options.push(NonBlankString::from_str("--create-namespace").unwrap());
// }
let res = helm_executor.generate();
let _output = match res {
Ok(output) => output,
Err(err) => return Err(InterpretError::new(err.to_string())),
};
// TODO: apply the YAML produced by generate() to the k8s cluster; passing raw YAML straight to the k8s client is currently problematic
// let k8s_resource = k8s_openapi::serde_json::from_str(output.as_str()).unwrap();
// let client = topology
// .k8s_client()
// .await
// .expect("Environment should provide enough information to instanciate a client")
// .apply_namespaced(&vec![output], Some(ns.to_string().as_str()));
// match client.apply_yaml(output) {
// Ok(_) => return Ok(Outcome::success("Helm chart deployed".to_string())),
// Err(e) => return Err(InterpretError::new(e)),
// }
Ok(Outcome::success("Helm chart deployed".to_string()))
}
fn get_name(&self) -> InterpretName {
InterpretName::HelmCommand
}
fn get_version(&self) -> Version {
todo!()
}
fn get_status(&self) -> InterpretStatus {
todo!()
}
fn get_children(&self) -> Vec<Id> {
todo!()
}
}

View File

@@ -1,2 +1 @@
pub mod chart;
pub mod command;

View File

@@ -4,4 +4,5 @@ pub mod application_monitoring;
pub mod grafana;
pub mod kube_prometheus;
pub mod ntfy;
pub mod okd;
pub mod prometheus;

View File

@@ -0,0 +1,149 @@
use std::{collections::BTreeMap, sync::Arc};
use crate::{
data::Version,
interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
inventory::Inventory,
score::Score,
topology::{K8sclient, Topology, k8s::K8sClient},
};
use async_trait::async_trait;
use harmony_types::id::Id;
use k8s_openapi::api::core::v1::ConfigMap;
use kube::api::ObjectMeta;
use serde::Serialize;
#[derive(Clone, Debug, Serialize)]
pub struct OpenshiftUserWorkloadMonitoring {}
impl<T: Topology + K8sclient> Score<T> for OpenshiftUserWorkloadMonitoring {
fn name(&self) -> String {
"OpenshiftUserWorkloadMonitoringScore".to_string()
}
fn create_interpret(&self) -> Box<dyn Interpret<T>> {
Box::new(OpenshiftUserWorkloadMonitoringInterpret {})
}
}
#[derive(Clone, Debug, Serialize)]
pub struct OpenshiftUserWorkloadMonitoringInterpret {}
#[async_trait]
impl<T: Topology + K8sclient> Interpret<T> for OpenshiftUserWorkloadMonitoringInterpret {
async fn execute(
&self,
_inventory: &Inventory,
topology: &T,
) -> Result<Outcome, InterpretError> {
let client = topology.k8s_client().await.unwrap();
self.update_cluster_monitoring_config_cm(&client).await?;
self.update_user_workload_monitoring_config_cm(&client)
.await?;
self.verify_user_workload(&client).await?;
Ok(Outcome::success(
"successfully enabled user-workload-monitoring".to_string(),
))
}
fn get_name(&self) -> InterpretName {
InterpretName::Custom("OpenshiftUserWorkloadMonitoring")
}
fn get_version(&self) -> Version {
todo!()
}
fn get_status(&self) -> InterpretStatus {
todo!()
}
fn get_children(&self) -> Vec<Id> {
todo!()
}
}
impl OpenshiftUserWorkloadMonitoringInterpret {
pub async fn update_cluster_monitoring_config_cm(
&self,
client: &Arc<K8sClient>,
) -> Result<Outcome, InterpretError> {
let mut data = BTreeMap::new();
data.insert(
"config.yaml".to_string(),
r#"
enableUserWorkload: true
alertmanagerMain:
enableUserAlertmanagerConfig: true
"#
.to_string(),
);
let cm = ConfigMap {
metadata: ObjectMeta {
name: Some("cluster-monitoring-config".to_string()),
namespace: Some("openshift-monitoring".to_string()),
..Default::default()
},
data: Some(data),
..Default::default()
};
client.apply(&cm, Some("openshift-monitoring")).await?;
Ok(Outcome::success(
"updated cluster-monitoring-config-map".to_string(),
))
}
pub async fn update_user_workload_monitoring_config_cm(
&self,
client: &Arc<K8sClient>,
) -> Result<Outcome, InterpretError> {
let mut data = BTreeMap::new();
data.insert(
"config.yaml".to_string(),
r#"
alertmanager:
enabled: true
enableAlertmanagerConfig: true
"#
.to_string(),
);
let cm = ConfigMap {
metadata: ObjectMeta {
name: Some("user-workload-monitoring-config".to_string()),
namespace: Some("openshift-user-workload-monitoring".to_string()),
..Default::default()
},
data: Some(data),
..Default::default()
};
client
.apply(&cm, Some("openshift-user-workload-monitoring"))
.await?;
Ok(Outcome::success(
"updated openshift-user-monitoring-config-map".to_string(),
))
}
pub async fn verify_user_workload(
&self,
client: &Arc<K8sClient>,
) -> Result<Outcome, InterpretError> {
let namespace = "openshift-user-workload-monitoring";
let alertmanager_name = "alertmanager-user-workload-0";
let prometheus_name = "prometheus-user-workload-0";
client
.wait_for_pod_ready(alertmanager_name, Some(namespace))
.await?;
client
.wait_for_pod_ready(prometheus_name, Some(namespace))
.await?;
Ok(Outcome::success(format!(
"pods: {}, {} ready in ns: {}",
alertmanager_name, prometheus_name, namespace
)))
}
}
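
A minimal usage sketch for the new score, mirroring how other scores in this changeset are driven (e.g. `score.interpret(inventory, topology).await`). The surrounding function, the `Score::interpret` convenience, and its exact return type are assumptions, not code from this changeset:

```rust
use crate::{
    interpret::{InterpretError, Outcome},
    inventory::Inventory,
    score::Score, // assumed to provide the `interpret` convenience used elsewhere in this diff
    topology::{K8sclient, Topology},
};

// Hypothetical caller: enable user-workload monitoring on whatever topology
// the caller already has in hand.
async fn enable_user_workload_monitoring<T: Topology + K8sclient>(
    inventory: &Inventory,
    topology: &T,
) -> Result<Outcome, InterpretError> {
    let score = OpenshiftUserWorkloadMonitoring {};
    score.interpret(inventory, topology).await
}
```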

View File

@@ -0,0 +1 @@
pub mod enable_user_workload;

View File

@@ -215,7 +215,7 @@ impl OKDSetup03ControlPlaneInterpret {
) -> Result<(), InterpretError> {
info!("[ControlPlane] Ensuring persistent bonding");
let score = HostNetworkConfigurationScore {
hosts: hosts.clone(), // FIXME: Avoid clone if possible
hosts: hosts.clone(),
};
score.interpret(inventory, topology).await?;

View File

@@ -77,6 +77,8 @@ impl OKDBootstrapLoadBalancerScore {
address: topology.bootstrap_host.ip.to_string(),
port,
});
backend.dedup();
backend
}
}
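
Note that `Vec::dedup` only removes *consecutive* equal elements (it relies on `PartialEq`), which is sufficient as long as duplicate backend entries are produced back-to-back; duplicates separated by other entries would survive. A small illustration, using a tuple with made-up addresses as a stand-in for the real backend-server entries:

```rust
fn main() {
    // Stand-in for the (address, port) entries built above; values are illustrative.
    let mut backend = vec![
        ("192.168.1.10".to_string(), 6443u16),
        ("192.168.1.10".to_string(), 6443u16), // consecutive duplicate: removed
        ("192.168.1.11".to_string(), 6443u16),
    ];
    backend.dedup();
    assert_eq!(backend.len(), 2);

    // A duplicate that is *not* adjacent would remain; sort first (or collect
    // into a set) if that case ever becomes possible.
}
```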

View File

@@ -240,7 +240,7 @@ pub struct OvsPortSpec {
#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
#[serde(rename_all = "kebab-case")]
pub struct EthtoolSpec {
// FIXME: Properly describe this spec (https://nmstate.io/devel/yaml_api.html#ethtool)
// TODO: Properly describe this spec (https://nmstate.io/devel/yaml_api.html#ethtool)
}
#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]

View File

@@ -1,6 +1,6 @@
use async_trait::async_trait;
use harmony_types::id::Id;
use log::{debug, info, warn};
use log::{debug, info};
use serde::Serialize;
use crate::{
@@ -34,6 +34,59 @@ pub struct HostNetworkConfigurationInterpret {
score: HostNetworkConfigurationScore,
}
impl HostNetworkConfigurationInterpret {
async fn configure_network_for_host<T: Topology + Switch>(
&self,
topology: &T,
host: &PhysicalHost,
) -> Result<(), InterpretError> {
let switch_ports = self.collect_switch_ports_for_host(topology, host).await?;
if !switch_ports.is_empty() {
topology
.configure_host_network(host, HostNetworkConfig { switch_ports })
.await
.map_err(|e| InterpretError::new(format!("Failed to configure host: {e}")))?;
}
Ok(())
}
async fn collect_switch_ports_for_host<T: Topology + Switch>(
&self,
topology: &T,
host: &PhysicalHost,
) -> Result<Vec<SwitchPort>, InterpretError> {
let mut switch_ports = vec![];
for network_interface in &host.network {
let mac_address = network_interface.mac_address;
match topology.get_port_for_mac_address(&mac_address).await {
Ok(Some(port)) => {
switch_ports.push(SwitchPort {
interface: NetworkInterface {
name: network_interface.name.clone(),
mac_address,
speed_mbps: network_interface.speed_mbps,
mtu: network_interface.mtu,
},
port,
});
}
Ok(None) => debug!("No switch port found for interface '{}' on host '{}', skipping", network_interface.name, host.id),
Err(e) => {
return Err(InterpretError::new(format!(
"Failed to get port for host '{}': {}",
host.id, e
)));
}
}
}
Ok(switch_ports)
}
}
#[async_trait]
impl<T: Topology + Switch> Interpret<T> for HostNetworkConfigurationInterpret {
fn get_name(&self) -> InterpretName {
@@ -57,43 +110,24 @@ impl<T: Topology + Switch> Interpret<T> for HostNetworkConfigurationInterpret {
_inventory: &Inventory,
topology: &T,
) -> Result<Outcome, InterpretError> {
if self.score.hosts.is_empty() {
return Ok(Outcome::noop("No hosts to configure".into()));
}
info!(
"Started network configuration for {} host(s)...",
self.score.hosts.len()
);
topology
.setup_switch()
.await
.map_err(|e| InterpretError::new(format!("Switch setup failed: {e}")))?;
let mut configured_host_count = 0;
for host in &self.score.hosts {
let mut switch_ports = vec![];
for network_interface in &host.network {
let mac_address = network_interface.mac_address;
match topology.get_port_for_mac_address(&mac_address).await {
Ok(Some(port)) => {
switch_ports.push(SwitchPort {
interface: NetworkInterface {
name: network_interface.name.clone(),
mac_address,
speed_mbps: network_interface.speed_mbps,
mtu: network_interface.mtu,
},
port,
});
}
Ok(None) => debug!("No port found for host '{}', skipping", host.id),
Err(e) => warn!("Failed to get port for host '{}': {}", host.id, e),
}
}
if !switch_ports.is_empty() {
configured_host_count += 1;
topology
.configure_host_network(host, HostNetworkConfig { switch_ports })
.await
.map_err(|e| InterpretError::new(format!("Failed to configure host: {e}")))?;
}
self.configure_network_for_host(topology, host).await?;
configured_host_count += 1;
}
if configured_host_count > 0 {
@@ -151,6 +185,18 @@ mod tests {
pub static ref ANOTHER_PORT: PortLocation = PortLocation(2, 0, 42);
}
#[tokio::test]
async fn should_setup_switch() {
let host = given_host(&HOST_ID, vec![EXISTING_INTERFACE.clone()]);
let score = given_score(vec![host]);
let topology = TopologyWithSwitch::new();
let _ = score.interpret(&Inventory::empty(), &topology).await;
let switch_setup = topology.switch_setup.lock().unwrap();
assert_that!(*switch_setup).is_true();
}
#[tokio::test]
async fn host_with_one_mac_address_should_create_bond_with_one_interface() {
let host = given_host(&HOST_ID, vec![EXISTING_INTERFACE.clone()]);
@@ -237,7 +283,6 @@ mod tests {
#[tokio::test]
async fn port_not_found_for_mac_address_should_not_configure_interface() {
// FIXME: Should it still configure an empty bond/port channel?
let score = given_score(vec![given_host(&HOST_ID, vec![UNKNOWN_INTERFACE.clone()])]);
let topology = TopologyWithSwitch::new_port_not_found();
@@ -284,6 +329,7 @@ mod tests {
struct TopologyWithSwitch {
available_ports: Arc<Mutex<Vec<PortLocation>>>,
configured_host_networks: Arc<Mutex<Vec<(Id, HostNetworkConfig)>>>,
switch_setup: Arc<Mutex<bool>>,
}
impl TopologyWithSwitch {
@@ -291,6 +337,7 @@ mod tests {
Self {
available_ports: Arc::new(Mutex::new(vec![PORT.clone(), ANOTHER_PORT.clone()])),
configured_host_networks: Arc::new(Mutex::new(vec![])),
switch_setup: Arc::new(Mutex::new(false)),
}
}
@@ -298,6 +345,7 @@ mod tests {
Self {
available_ports: Arc::new(Mutex::new(vec![])),
configured_host_networks: Arc::new(Mutex::new(vec![])),
switch_setup: Arc::new(Mutex::new(false)),
}
}
}
@@ -315,6 +363,12 @@ mod tests {
#[async_trait]
impl Switch for TopologyWithSwitch {
async fn setup_switch(&self) -> Result<(), SwitchError> {
let mut switch_configured = self.switch_setup.lock().unwrap();
*switch_configured = true;
Ok(())
}
async fn get_port_for_mac_address(
&self,
_mac_address: &MacAddress,

View File

@@ -77,7 +77,7 @@ impl YaSerializeTrait for HAProxyId {
}
}
#[derive(PartialEq, Debug)]
#[derive(PartialEq, Debug, Clone)]
pub struct HAProxyId(String);
impl Default for HAProxyId {
@@ -297,7 +297,7 @@ pub struct HAProxyFrontends {
pub frontend: Vec<Frontend>,
}
#[derive(Default, PartialEq, Debug, YaSerialize, YaDeserialize)]
#[derive(Clone, Default, PartialEq, Debug, YaSerialize, YaDeserialize)]
pub struct Frontend {
#[yaserde(attribute = true)]
pub uuid: String,
@@ -310,7 +310,7 @@ pub struct Frontend {
pub bind_options: MaybeString,
pub mode: String,
#[yaserde(rename = "defaultBackend")]
pub default_backend: String,
pub default_backend: Option<String>,
pub ssl_enabled: i32,
pub ssl_certificates: MaybeString,
pub ssl_default_certificate: MaybeString,
@@ -416,7 +416,7 @@ pub struct HAProxyBackends {
pub backends: Vec<HAProxyBackend>,
}
#[derive(Default, PartialEq, Debug, YaSerialize, YaDeserialize)]
#[derive(Clone, Default, PartialEq, Debug, YaSerialize, YaDeserialize)]
pub struct HAProxyBackend {
#[yaserde(attribute = true, rename = "uuid")]
pub uuid: String,
@@ -535,7 +535,7 @@ pub struct HAProxyServers {
pub servers: Vec<HAProxyServer>,
}
#[derive(Default, PartialEq, Debug, YaSerialize, YaDeserialize)]
#[derive(Clone, Default, PartialEq, Debug, YaSerialize, YaDeserialize)]
pub struct HAProxyServer {
#[yaserde(attribute = true, rename = "uuid")]
pub uuid: String,
@@ -543,8 +543,8 @@ pub struct HAProxyServer {
pub enabled: u8,
pub name: String,
pub description: MaybeString,
pub address: String,
pub port: u16,
pub address: Option<String>,
pub port: Option<u16>,
pub checkport: MaybeString,
pub mode: String,
pub multiplexer_protocol: MaybeString,
@@ -589,7 +589,7 @@ pub struct HAProxyHealthChecks {
pub healthchecks: Vec<HAProxyHealthCheck>,
}
#[derive(Default, PartialEq, Debug, YaSerialize, YaDeserialize)]
#[derive(Clone, Default, PartialEq, Debug, YaSerialize, YaDeserialize)]
pub struct HAProxyHealthCheck {
#[yaserde(attribute = true)]
pub uuid: String,

View File

@@ -25,6 +25,7 @@ sha2 = "0.10.9"
[dev-dependencies]
pretty_assertions.workspace = true
assertor.workspace = true
[lints.rust]
unexpected_cfgs = { level = "warn", check-cfg = ['cfg(e2e_test)'] }

View File

@@ -30,8 +30,7 @@ impl SshConfigManager {
self.opnsense_shell
.exec(&format!(
"cp /conf/config.xml /conf/backup/{}",
backup_filename
"cp /conf/config.xml /conf/backup/{backup_filename}"
))
.await
}

View File

@@ -1,9 +1,7 @@
mod ssh;
pub use ssh::*;
use async_trait::async_trait;
use crate::Error;
use async_trait::async_trait;
pub use ssh::*;
#[async_trait]
pub trait OPNsenseShell: std::fmt::Debug + Send + Sync {

View File

@@ -1,11 +1,8 @@
use std::sync::Arc;
use log::warn;
use crate::{config::OPNsenseShell, Error};
use opnsense_config_xml::{
Frontend, HAProxy, HAProxyBackend, HAProxyHealthCheck, HAProxyServer, OPNsense,
};
use crate::{config::OPNsenseShell, Error};
use std::{collections::HashSet, sync::Arc};
pub struct LoadBalancerConfig<'a> {
opnsense: &'a mut OPNsense,
@@ -31,7 +28,7 @@ impl<'a> LoadBalancerConfig<'a> {
match &mut self.opnsense.opnsense.haproxy.as_mut() {
Some(haproxy) => f(haproxy),
None => unimplemented!(
"Adding a backend is not supported when haproxy config does not exist yet"
"Cannot configure load balancer when haproxy config does not exist yet"
),
}
}
@@ -40,21 +37,67 @@ impl<'a> LoadBalancerConfig<'a> {
self.with_haproxy(|haproxy| haproxy.general.enabled = enabled as i32);
}
pub fn add_backend(&mut self, backend: HAProxyBackend) {
warn!("TODO make sure this new backend does not refer non-existing entities like servers or health checks");
self.with_haproxy(|haproxy| haproxy.backends.backends.push(backend));
/// Configures a service by removing any existing service bound to the same address
/// and port, then adding the new definition. This makes the operation idempotent.
pub fn configure_service(
&mut self,
frontend: Frontend,
backend: HAProxyBackend,
servers: Vec<HAProxyServer>,
healthcheck: Option<HAProxyHealthCheck>,
) {
self.remove_service_by_bind_address(&frontend.bind);
self.remove_servers(&servers);
self.add_new_service(frontend, backend, servers, healthcheck);
}
pub fn add_frontend(&mut self, frontend: Frontend) {
self.with_haproxy(|haproxy| haproxy.frontends.frontend.push(frontend));
// Remove any existing real servers whose names match the new ones.
fn remove_servers(&mut self, servers: &[HAProxyServer]) {
let server_names: HashSet<_> = servers.iter().map(|s| s.name.clone()).collect();
self.with_haproxy(|haproxy| {
haproxy
.servers
.servers
.retain(|s| !server_names.contains(&s.name));
});
}
pub fn add_healthcheck(&mut self, healthcheck: HAProxyHealthCheck) {
self.with_haproxy(|haproxy| haproxy.healthchecks.healthchecks.push(healthcheck));
/// Removes a service and its dependent components based on the frontend's bind address.
/// This performs a cascading delete of the frontend, backend, servers, and health check.
fn remove_service_by_bind_address(&mut self, bind_address: &str) {
self.with_haproxy(|haproxy| {
let Some(old_frontend) = remove_frontend_by_bind_address(haproxy, bind_address) else {
return;
};
let Some(old_backend) = remove_backend(haproxy, old_frontend) else {
return;
};
remove_healthcheck(haproxy, &old_backend);
remove_linked_servers(haproxy, &old_backend);
});
}
pub fn add_servers(&mut self, mut servers: Vec<HAProxyServer>) {
self.with_haproxy(|haproxy| haproxy.servers.servers.append(&mut servers));
/// Adds the components of a new service to the HAProxy configuration.
/// Servers with the same names are removed beforehand (see `remove_servers`),
/// so pushing them here cannot create duplicate entries.
fn add_new_service(
&mut self,
frontend: Frontend,
backend: HAProxyBackend,
servers: Vec<HAProxyServer>,
healthcheck: Option<HAProxyHealthCheck>,
) {
self.with_haproxy(|haproxy| {
if let Some(check) = healthcheck {
haproxy.healthchecks.healthchecks.push(check);
}
haproxy.servers.servers.extend(servers);
haproxy.backends.backends.push(backend);
haproxy.frontends.frontend.push(frontend);
});
}
pub async fn reload_restart(&self) -> Result<(), Error> {
@@ -82,3 +125,262 @@ impl<'a> LoadBalancerConfig<'a> {
Ok(())
}
}
fn remove_frontend_by_bind_address(haproxy: &mut HAProxy, bind_address: &str) -> Option<Frontend> {
haproxy
.frontends
.frontend
.iter()
.position(|f| f.bind == bind_address)
.map(|pos| haproxy.frontends.frontend.remove(pos))
}
fn remove_backend(haproxy: &mut HAProxy, old_frontend: Frontend) -> Option<HAProxyBackend> {
let default_backend = old_frontend.default_backend?;
// A missing backend would mean an orphaned frontend, which shouldn't happen.
haproxy
.backends
.backends
.iter()
.position(|b| b.uuid == default_backend)
.map(|pos| haproxy.backends.backends.remove(pos))
}
fn remove_healthcheck(haproxy: &mut HAProxy, backend: &HAProxyBackend) {
if let Some(uuid) = &backend.health_check.content {
haproxy
.healthchecks
.healthchecks
.retain(|h| h.uuid != *uuid);
}
}
/// Remove the backend's servers. This assumes servers are not shared between services.
fn remove_linked_servers(haproxy: &mut HAProxy, backend: &HAProxyBackend) {
if let Some(server_uuids_str) = &backend.linked_servers.content {
let server_uuids_to_remove: HashSet<_> = server_uuids_str.split(',').collect();
haproxy
.servers
.servers
.retain(|s| !server_uuids_to_remove.contains(s.uuid.as_str()));
}
}
#[cfg(test)]
mod tests {
use crate::config::DummyOPNSenseShell;
use assertor::*;
use opnsense_config_xml::{
Frontend, HAProxy, HAProxyBackend, HAProxyBackends, HAProxyFrontends, HAProxyHealthCheck,
HAProxyHealthChecks, HAProxyId, HAProxyServer, HAProxyServers, MaybeString, OPNsense,
};
use std::sync::Arc;
use super::LoadBalancerConfig;
static SERVICE_BIND_ADDRESS: &str = "192.168.1.1:80";
static OTHER_SERVICE_BIND_ADDRESS: &str = "192.168.1.1:443";
static SERVER_ADDRESS: &str = "1.1.1.1:80";
static OTHER_SERVER_ADDRESS: &str = "1.1.1.1:443";
#[test]
fn configure_service_should_add_all_service_components_to_haproxy() {
let mut opnsense = given_opnsense();
let mut load_balancer = given_load_balancer(&mut opnsense);
let (healthcheck, servers, backend, frontend) =
given_service(SERVICE_BIND_ADDRESS, SERVER_ADDRESS);
load_balancer.configure_service(
frontend.clone(),
backend.clone(),
servers.clone(),
Some(healthcheck.clone()),
);
assert_haproxy_configured_with(
opnsense,
vec![frontend],
vec![backend],
servers,
vec![healthcheck],
);
}
#[test]
fn configure_service_should_replace_service_on_same_bind_address() {
let (healthcheck, servers, backend, frontend) =
given_service(SERVICE_BIND_ADDRESS, SERVER_ADDRESS);
let mut opnsense = given_opnsense_with(given_haproxy(
vec![frontend.clone()],
vec![backend.clone()],
servers.clone(),
vec![healthcheck.clone()],
));
let mut load_balancer = given_load_balancer(&mut opnsense);
let (updated_healthcheck, updated_servers, updated_backend, updated_frontend) =
given_service(SERVICE_BIND_ADDRESS, OTHER_SERVER_ADDRESS);
load_balancer.configure_service(
updated_frontend.clone(),
updated_backend.clone(),
updated_servers.clone(),
Some(updated_healthcheck.clone()),
);
assert_haproxy_configured_with(
opnsense,
vec![updated_frontend],
vec![updated_backend],
updated_servers,
vec![updated_healthcheck],
);
}
#[test]
fn configure_service_should_keep_existing_service_on_different_bind_addresses() {
let (healthcheck, servers, backend, frontend) =
given_service(SERVICE_BIND_ADDRESS, SERVER_ADDRESS);
let (other_healthcheck, other_servers, other_backend, other_frontend) =
given_service(OTHER_SERVICE_BIND_ADDRESS, OTHER_SERVER_ADDRESS);
let mut opnsense = given_opnsense_with(given_haproxy(
vec![frontend.clone()],
vec![backend.clone()],
servers.clone(),
vec![healthcheck.clone()],
));
let mut load_balancer = given_load_balancer(&mut opnsense);
load_balancer.configure_service(
other_frontend.clone(),
other_backend.clone(),
other_servers.clone(),
Some(other_healthcheck.clone()),
);
assert_haproxy_configured_with(
opnsense,
vec![frontend, other_frontend],
vec![backend, other_backend],
[servers, other_servers].concat(),
vec![healthcheck, other_healthcheck],
);
}
fn assert_haproxy_configured_with(
opnsense: OPNsense,
frontends: Vec<Frontend>,
backends: Vec<HAProxyBackend>,
servers: Vec<HAProxyServer>,
healthchecks: Vec<HAProxyHealthCheck>,
) {
let haproxy = opnsense.opnsense.haproxy.as_ref().unwrap();
assert_that!(haproxy.frontends.frontend).contains_exactly(frontends);
assert_that!(haproxy.backends.backends).contains_exactly(backends);
assert_that!(haproxy.servers.servers).is_equal_to(servers);
assert_that!(haproxy.healthchecks.healthchecks).contains_exactly(healthchecks);
}
fn given_opnsense() -> OPNsense {
OPNsense::default()
}
fn given_opnsense_with(haproxy: HAProxy) -> OPNsense {
let mut opnsense = OPNsense::default();
opnsense.opnsense.haproxy = Some(haproxy);
opnsense
}
fn given_load_balancer<'a>(opnsense: &'a mut OPNsense) -> LoadBalancerConfig<'a> {
let opnsense_shell = Arc::new(DummyOPNSenseShell {});
if opnsense.opnsense.haproxy.is_none() {
opnsense.opnsense.haproxy = Some(HAProxy::default());
}
LoadBalancerConfig::new(opnsense, opnsense_shell)
}
fn given_service(
bind_address: &str,
server_address: &str,
) -> (
HAProxyHealthCheck,
Vec<HAProxyServer>,
HAProxyBackend,
Frontend,
) {
let healthcheck = given_healthcheck();
let servers = vec![given_server(server_address)];
let backend = given_backend();
let frontend = given_frontend(bind_address);
(healthcheck, servers, backend, frontend)
}
fn given_haproxy(
frontends: Vec<Frontend>,
backends: Vec<HAProxyBackend>,
servers: Vec<HAProxyServer>,
healthchecks: Vec<HAProxyHealthCheck>,
) -> HAProxy {
HAProxy {
frontends: HAProxyFrontends {
frontend: frontends,
},
backends: HAProxyBackends { backends },
servers: HAProxyServers { servers },
healthchecks: HAProxyHealthChecks { healthchecks },
..Default::default()
}
}
fn given_frontend(bind_address: &str) -> Frontend {
Frontend {
uuid: "uuid".into(),
id: HAProxyId::default(),
enabled: 1,
name: format!("frontend_{bind_address}"),
bind: bind_address.into(),
default_backend: Some("backend-uuid".into()),
..Default::default()
}
}
fn given_backend() -> HAProxyBackend {
HAProxyBackend {
uuid: "backend-uuid".into(),
id: HAProxyId::default(),
enabled: 1,
name: "backend_192.168.1.1:80".into(),
linked_servers: MaybeString::from("server-uuid"),
health_check_enabled: 1,
health_check: MaybeString::from("healthcheck-uuid"),
..Default::default()
}
}
fn given_server(address: &str) -> HAProxyServer {
HAProxyServer {
uuid: "server-uuid".into(),
id: HAProxyId::default(),
name: address.into(),
address: Some(address.into()),
..Default::default()
}
}
fn given_healthcheck() -> HAProxyHealthCheck {
HAProxyHealthCheck {
uuid: "healthcheck-uuid".into(),
name: "healthcheck".into(),
..Default::default()
}
}
}
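
To tie the pieces together, a hedged end-to-end sketch of how a caller might use `configure_service` followed by the async `reload_restart` step, which the synchronous tests above do not exercise. The field values are illustrative, and the `Arc<dyn OPNsenseShell>` parameter type is an assumption based on how the tests construct the config:

```rust
use std::sync::Arc;

use opnsense_config_xml::{Frontend, HAProxyBackend, HAProxyServer, MaybeString, OPNsense};

use crate::{config::OPNsenseShell, Error};

// Hypothetical caller; LoadBalancerConfig is assumed to be in scope (same module).
async fn configure_api_service(
    opnsense: &mut OPNsense,
    shell: Arc<dyn OPNsenseShell>,
) -> Result<(), Error> {
    let mut lb = LoadBalancerConfig::new(opnsense, shell);

    let frontend = Frontend {
        uuid: "frontend-uuid".into(),
        name: "frontend_192.168.1.1:6443".into(),
        bind: "192.168.1.1:6443".into(),
        default_backend: Some("backend-uuid".into()),
        enabled: 1,
        ..Default::default()
    };
    let backend = HAProxyBackend {
        uuid: "backend-uuid".into(),
        name: "backend_192.168.1.1:6443".into(),
        linked_servers: MaybeString::from("server-uuid"),
        enabled: 1,
        ..Default::default()
    };
    let servers = vec![HAProxyServer {
        uuid: "server-uuid".into(),
        name: "api-server-1".into(),
        address: Some("10.0.0.10".into()),
        port: Some(6443),
        enabled: 1,
        ..Default::default()
    }];

    // Calling this again for the same bind address replaces the previous
    // definition instead of duplicating it (remove-then-add).
    lb.configure_service(frontend, backend, servers, None);

    // Push the updated config to OPNsense and reload/restart HAProxy.
    lb.reload_restart().await
}
```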