Compare commits
13 Commits
fix/pxe_in
...
77e09436a9
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
77e09436a9 | ||
|
|
45e0de2097 | ||
|
|
731dc5f404 | ||
|
|
1199564122 | ||
|
|
f2f55d98d4 | ||
| 7b6ac6641a | |||
| 58c1fd4a96 | |||
| 2388f585f5 | |||
| ffe3c09907 | |||
| 0de52aedbf | |||
| 427009bbfe | |||
| fe0501b784 | |||
| 61b02e7a28 |
51
Cargo.lock
generated
51
Cargo.lock
generated
@@ -680,13 +680,11 @@ version = "0.1.0"
|
|||||||
dependencies = [
|
dependencies = [
|
||||||
"async-trait",
|
"async-trait",
|
||||||
"env_logger",
|
"env_logger",
|
||||||
"harmony_secret",
|
|
||||||
"harmony_types",
|
"harmony_types",
|
||||||
"log",
|
"log",
|
||||||
"regex",
|
"regex",
|
||||||
"russh",
|
"russh",
|
||||||
"russh-keys",
|
"russh-keys",
|
||||||
"serde",
|
|
||||||
"tokio",
|
"tokio",
|
||||||
]
|
]
|
||||||
|
|
||||||
@@ -1820,18 +1818,6 @@ dependencies = [
|
|||||||
"url",
|
"url",
|
||||||
]
|
]
|
||||||
|
|
||||||
[[package]]
|
|
||||||
name = "example-openbao"
|
|
||||||
version = "0.1.0"
|
|
||||||
dependencies = [
|
|
||||||
"harmony",
|
|
||||||
"harmony_cli",
|
|
||||||
"harmony_macros",
|
|
||||||
"harmony_types",
|
|
||||||
"tokio",
|
|
||||||
"url",
|
|
||||||
]
|
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "example-opnsense"
|
name = "example-opnsense"
|
||||||
version = "0.1.0"
|
version = "0.1.0"
|
||||||
@@ -1918,8 +1904,6 @@ dependencies = [
|
|||||||
"env_logger",
|
"env_logger",
|
||||||
"harmony",
|
"harmony",
|
||||||
"harmony_macros",
|
"harmony_macros",
|
||||||
"harmony_secret",
|
|
||||||
"harmony_secret_derive",
|
|
||||||
"harmony_tui",
|
"harmony_tui",
|
||||||
"harmony_types",
|
"harmony_types",
|
||||||
"log",
|
"log",
|
||||||
@@ -2379,7 +2363,6 @@ dependencies = [
|
|||||||
"once_cell",
|
"once_cell",
|
||||||
"opnsense-config",
|
"opnsense-config",
|
||||||
"opnsense-config-xml",
|
"opnsense-config-xml",
|
||||||
"option-ext",
|
|
||||||
"pretty_assertions",
|
"pretty_assertions",
|
||||||
"reqwest 0.11.27",
|
"reqwest 0.11.27",
|
||||||
"russh",
|
"russh",
|
||||||
@@ -2446,6 +2429,17 @@ dependencies = [
|
|||||||
"tokio",
|
"tokio",
|
||||||
]
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "harmony_derive"
|
||||||
|
version = "0.1.3"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "2d138bbb32bb346299c5f95fbb53532313f39927cb47c411c99c634ef8665ef7"
|
||||||
|
dependencies = [
|
||||||
|
"proc-macro2",
|
||||||
|
"quote",
|
||||||
|
"syn 1.0.109",
|
||||||
|
]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "harmony_inventory_agent"
|
name = "harmony_inventory_agent"
|
||||||
version = "0.1.0"
|
version = "0.1.0"
|
||||||
@@ -3892,6 +3886,19 @@ dependencies = [
|
|||||||
"web-time",
|
"web-time",
|
||||||
]
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "okd_host_network"
|
||||||
|
version = "0.1.0"
|
||||||
|
dependencies = [
|
||||||
|
"harmony",
|
||||||
|
"harmony_cli",
|
||||||
|
"harmony_derive",
|
||||||
|
"harmony_inventory_agent",
|
||||||
|
"harmony_macros",
|
||||||
|
"harmony_types",
|
||||||
|
"tokio",
|
||||||
|
]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "once_cell"
|
name = "once_cell"
|
||||||
version = "1.21.3"
|
version = "1.21.3"
|
||||||
@@ -3920,7 +3927,6 @@ checksum = "d05e27ee213611ffe7d6348b942e8f942b37114c00cc03cec254295a4a17852e"
|
|||||||
name = "opnsense-config"
|
name = "opnsense-config"
|
||||||
version = "0.1.0"
|
version = "0.1.0"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"assertor",
|
|
||||||
"async-trait",
|
"async-trait",
|
||||||
"chrono",
|
"chrono",
|
||||||
"env_logger",
|
"env_logger",
|
||||||
@@ -4613,15 +4619,6 @@ version = "0.8.6"
|
|||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "caf4aa5b0f434c91fe5c7f1ecb6a5ece2130b02ad2a590589dda5146df959001"
|
checksum = "caf4aa5b0f434c91fe5c7f1ecb6a5ece2130b02ad2a590589dda5146df959001"
|
||||||
|
|
||||||
[[package]]
|
|
||||||
name = "remove_rook_osd"
|
|
||||||
version = "0.1.0"
|
|
||||||
dependencies = [
|
|
||||||
"harmony",
|
|
||||||
"harmony_cli",
|
|
||||||
"tokio",
|
|
||||||
]
|
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "reqwest"
|
name = "reqwest"
|
||||||
version = "0.11.27"
|
version = "0.11.27"
|
||||||
|
|||||||
@@ -15,8 +15,7 @@ members = [
|
|||||||
"harmony_inventory_agent",
|
"harmony_inventory_agent",
|
||||||
"harmony_secret_derive",
|
"harmony_secret_derive",
|
||||||
"harmony_secret",
|
"harmony_secret",
|
||||||
"adr/agent_discovery/mdns",
|
"adr/agent_discovery/mdns", "brocade",
|
||||||
"brocade",
|
|
||||||
]
|
]
|
||||||
|
|
||||||
[workspace.package]
|
[workspace.package]
|
||||||
|
|||||||
@@ -14,5 +14,3 @@ tokio.workspace = true
|
|||||||
log.workspace = true
|
log.workspace = true
|
||||||
env_logger.workspace = true
|
env_logger.workspace = true
|
||||||
regex = "1.11.3"
|
regex = "1.11.3"
|
||||||
harmony_secret = { path = "../harmony_secret" }
|
|
||||||
serde.workspace = true
|
|
||||||
|
|||||||
@@ -1,70 +1,54 @@
|
|||||||
use std::net::{IpAddr, Ipv4Addr};
|
use std::net::{IpAddr, Ipv4Addr};
|
||||||
|
|
||||||
use brocade::BrocadeOptions;
|
|
||||||
use harmony_secret::{Secret, SecretManager};
|
|
||||||
use harmony_types::switch::PortLocation;
|
use harmony_types::switch::PortLocation;
|
||||||
use serde::{Deserialize, Serialize};
|
|
||||||
|
|
||||||
#[derive(Secret, Clone, Debug, Serialize, Deserialize)]
|
|
||||||
struct BrocadeSwitchAuth {
|
|
||||||
username: String,
|
|
||||||
password: String,
|
|
||||||
}
|
|
||||||
|
|
||||||
#[tokio::main]
|
#[tokio::main]
|
||||||
async fn main() {
|
async fn main() {
|
||||||
env_logger::Builder::from_env(env_logger::Env::default().default_filter_or("info")).init();
|
env_logger::init();
|
||||||
|
|
||||||
// let ip = IpAddr::V4(Ipv4Addr::new(10, 0, 0, 250)); // old brocade @ ianlet
|
let ip = IpAddr::V4(Ipv4Addr::new(10, 0, 0, 250)); // old brocade @ ianlet
|
||||||
let ip = IpAddr::V4(Ipv4Addr::new(192, 168, 55, 101)); // brocade @ sto1
|
// let ip = IpAddr::V4(Ipv4Addr::new(192, 168, 55, 101)); // brocade @ sto1
|
||||||
// let ip = IpAddr::V4(Ipv4Addr::new(192, 168, 4, 11)); // brocade @ st
|
|
||||||
let switch_addresses = vec![ip];
|
let switch_addresses = vec![ip];
|
||||||
|
|
||||||
let config = SecretManager::get_or_prompt::<BrocadeSwitchAuth>()
|
let brocade = brocade::init(&switch_addresses, 22, "admin", "password", None)
|
||||||
.await
|
.await
|
||||||
.unwrap();
|
.expect("Brocade client failed to connect");
|
||||||
|
|
||||||
let brocade = brocade::init(
|
|
||||||
&switch_addresses,
|
|
||||||
22,
|
|
||||||
&config.username,
|
|
||||||
&config.password,
|
|
||||||
Some(BrocadeOptions {
|
|
||||||
dry_run: true,
|
|
||||||
..Default::default()
|
|
||||||
}),
|
|
||||||
)
|
|
||||||
.await
|
|
||||||
.expect("Brocade client failed to connect");
|
|
||||||
|
|
||||||
let entries = brocade.get_stack_topology().await.unwrap();
|
|
||||||
println!("Stack topology: {entries:#?}");
|
|
||||||
|
|
||||||
let entries = brocade.get_interfaces().await.unwrap();
|
|
||||||
println!("Interfaces: {entries:#?}");
|
|
||||||
|
|
||||||
let version = brocade.version().await.unwrap();
|
let version = brocade.version().await.unwrap();
|
||||||
println!("Version: {version:?}");
|
println!("Version: {version:?}");
|
||||||
|
|
||||||
println!("--------------");
|
println!("--------------");
|
||||||
let mac_adddresses = brocade.get_mac_address_table().await.unwrap();
|
println!("Showing MAC Address table...");
|
||||||
|
|
||||||
|
let mac_adddresses = brocade.show_mac_address_table().await.unwrap();
|
||||||
println!("VLAN\tMAC\t\t\tPORT");
|
println!("VLAN\tMAC\t\t\tPORT");
|
||||||
for mac in mac_adddresses {
|
for mac in mac_adddresses {
|
||||||
println!("{}\t{}\t{}", mac.vlan, mac.mac_address, mac.port);
|
println!("{}\t{}\t{}", mac.vlan, mac.mac_address, mac.port);
|
||||||
}
|
}
|
||||||
|
|
||||||
println!("--------------");
|
println!("--------------");
|
||||||
let channel_name = "1";
|
let channel_name = "HARMONY_LAG";
|
||||||
|
println!("Clearing port channel '{channel_name}'...");
|
||||||
|
|
||||||
brocade.clear_port_channel(channel_name).await.unwrap();
|
brocade.clear_port_channel(channel_name).await.unwrap();
|
||||||
|
|
||||||
|
println!("Cleared");
|
||||||
|
|
||||||
println!("--------------");
|
println!("--------------");
|
||||||
|
println!("Finding next available channel...");
|
||||||
|
|
||||||
let channel_id = brocade.find_available_channel_id().await.unwrap();
|
let channel_id = brocade.find_available_channel_id().await.unwrap();
|
||||||
|
println!("Channel id: {channel_id}");
|
||||||
|
|
||||||
println!("--------------");
|
println!("--------------");
|
||||||
let channel_name = "HARMONY_LAG";
|
let channel_name = "HARMONY_LAG";
|
||||||
let ports = [PortLocation(2, 0, 35)];
|
let ports = [PortLocation(1, 1, 3), PortLocation(1, 1, 4)];
|
||||||
|
println!("Creating port channel '{channel_name}' with ports {ports:?}'...");
|
||||||
|
|
||||||
brocade
|
brocade
|
||||||
.create_port_channel(channel_id, channel_name, &ports)
|
.create_port_channel(channel_id, channel_name, &ports)
|
||||||
.await
|
.await
|
||||||
.unwrap();
|
.unwrap();
|
||||||
|
|
||||||
|
println!("Created");
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,7 +1,7 @@
|
|||||||
use super::BrocadeClient;
|
use super::BrocadeClient;
|
||||||
use crate::{
|
use crate::{
|
||||||
BrocadeInfo, Error, ExecutionMode, InterSwitchLink, InterfaceInfo, MacAddressEntry,
|
BrocadeInfo, Error, ExecutionMode, MacAddressEntry, PortChannelId, parse_brocade_mac_address,
|
||||||
PortChannelId, PortOperatingMode, parse_brocade_mac_address, shell::BrocadeShell,
|
shell::BrocadeShell,
|
||||||
};
|
};
|
||||||
|
|
||||||
use async_trait::async_trait;
|
use async_trait::async_trait;
|
||||||
@@ -11,22 +11,12 @@ use regex::Regex;
|
|||||||
use std::{collections::HashSet, str::FromStr};
|
use std::{collections::HashSet, str::FromStr};
|
||||||
|
|
||||||
pub struct FastIronClient {
|
pub struct FastIronClient {
|
||||||
shell: BrocadeShell,
|
pub shell: BrocadeShell,
|
||||||
version: BrocadeInfo,
|
pub version: BrocadeInfo,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl FastIronClient {
|
impl FastIronClient {
|
||||||
pub fn init(mut shell: BrocadeShell, version_info: BrocadeInfo) -> Self {
|
pub fn parse_mac_entry(&self, line: &str) -> Option<Result<MacAddressEntry, Error>> {
|
||||||
shell.before_all(vec!["skip-page-display".into()]);
|
|
||||||
shell.after_all(vec!["page".into()]);
|
|
||||||
|
|
||||||
Self {
|
|
||||||
shell,
|
|
||||||
version: version_info,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fn parse_mac_entry(&self, line: &str) -> Option<Result<MacAddressEntry, Error>> {
|
|
||||||
debug!("[Brocade] Parsing mac address entry: {line}");
|
debug!("[Brocade] Parsing mac address entry: {line}");
|
||||||
let parts: Vec<&str> = line.split_whitespace().collect();
|
let parts: Vec<&str> = line.split_whitespace().collect();
|
||||||
if parts.len() < 3 {
|
if parts.len() < 3 {
|
||||||
@@ -59,25 +49,10 @@ impl FastIronClient {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
fn parse_stack_port_entry(&self, line: &str) -> Option<Result<InterSwitchLink, Error>> {
|
pub fn build_port_channel_commands(
|
||||||
debug!("[Brocade] Parsing stack port entry: {line}");
|
|
||||||
let parts: Vec<&str> = line.split_whitespace().collect();
|
|
||||||
if parts.len() < 10 {
|
|
||||||
return None;
|
|
||||||
}
|
|
||||||
|
|
||||||
let local_port = PortLocation::from_str(parts[0]).ok()?;
|
|
||||||
|
|
||||||
Some(Ok(InterSwitchLink {
|
|
||||||
local_port,
|
|
||||||
remote_port: None,
|
|
||||||
}))
|
|
||||||
}
|
|
||||||
|
|
||||||
fn build_port_channel_commands(
|
|
||||||
&self,
|
&self,
|
||||||
channel_id: PortChannelId,
|
|
||||||
channel_name: &str,
|
channel_name: &str,
|
||||||
|
channel_id: u8,
|
||||||
ports: &[PortLocation],
|
ports: &[PortLocation],
|
||||||
) -> Vec<String> {
|
) -> Vec<String> {
|
||||||
let mut commands = vec![
|
let mut commands = vec![
|
||||||
@@ -105,7 +80,7 @@ impl BrocadeClient for FastIronClient {
|
|||||||
Ok(self.version.clone())
|
Ok(self.version.clone())
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn get_mac_address_table(&self) -> Result<Vec<MacAddressEntry>, Error> {
|
async fn show_mac_address_table(&self) -> Result<Vec<MacAddressEntry>, Error> {
|
||||||
info!("[Brocade] Showing MAC address table...");
|
info!("[Brocade] Showing MAC address table...");
|
||||||
|
|
||||||
let output = self
|
let output = self
|
||||||
@@ -120,30 +95,6 @@ impl BrocadeClient for FastIronClient {
|
|||||||
.collect()
|
.collect()
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn get_stack_topology(&self) -> Result<Vec<InterSwitchLink>, Error> {
|
|
||||||
let output = self
|
|
||||||
.shell
|
|
||||||
.run_command("show interface stack-ports", crate::ExecutionMode::Regular)
|
|
||||||
.await?;
|
|
||||||
|
|
||||||
output
|
|
||||||
.lines()
|
|
||||||
.skip(1)
|
|
||||||
.filter_map(|line| self.parse_stack_port_entry(line))
|
|
||||||
.collect()
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn get_interfaces(&self) -> Result<Vec<InterfaceInfo>, Error> {
|
|
||||||
todo!()
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn configure_interfaces(
|
|
||||||
&self,
|
|
||||||
_interfaces: Vec<(String, PortOperatingMode)>,
|
|
||||||
) -> Result<(), Error> {
|
|
||||||
todo!()
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn find_available_channel_id(&self) -> Result<PortChannelId, Error> {
|
async fn find_available_channel_id(&self) -> Result<PortChannelId, Error> {
|
||||||
info!("[Brocade] Finding next available channel id...");
|
info!("[Brocade] Finding next available channel id...");
|
||||||
|
|
||||||
@@ -184,7 +135,7 @@ impl BrocadeClient for FastIronClient {
|
|||||||
"[Brocade] Configuring port-channel '{channel_name} {channel_id}' with ports: {ports:?}"
|
"[Brocade] Configuring port-channel '{channel_name} {channel_id}' with ports: {ports:?}"
|
||||||
);
|
);
|
||||||
|
|
||||||
let commands = self.build_port_channel_commands(channel_id, channel_name, ports);
|
let commands = self.build_port_channel_commands(channel_name, channel_id, ports);
|
||||||
self.shell
|
self.shell
|
||||||
.run_commands(commands, ExecutionMode::Privileged)
|
.run_commands(commands, ExecutionMode::Privileged)
|
||||||
.await?;
|
.await?;
|
||||||
@@ -194,7 +145,7 @@ impl BrocadeClient for FastIronClient {
|
|||||||
}
|
}
|
||||||
|
|
||||||
async fn clear_port_channel(&self, channel_name: &str) -> Result<(), Error> {
|
async fn clear_port_channel(&self, channel_name: &str) -> Result<(), Error> {
|
||||||
info!("[Brocade] Clearing port-channel: {channel_name}");
|
debug!("[Brocade] Clearing port-channel: {channel_name}");
|
||||||
|
|
||||||
let commands = vec![
|
let commands = vec![
|
||||||
"configure terminal".to_string(),
|
"configure terminal".to_string(),
|
||||||
@@ -205,7 +156,6 @@ impl BrocadeClient for FastIronClient {
|
|||||||
.run_commands(commands, ExecutionMode::Privileged)
|
.run_commands(commands, ExecutionMode::Privileged)
|
||||||
.await?;
|
.await?;
|
||||||
|
|
||||||
info!("[Brocade] Port-channel '{channel_name}' cleared.");
|
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -4,7 +4,6 @@ use std::{
|
|||||||
time::Duration,
|
time::Duration,
|
||||||
};
|
};
|
||||||
|
|
||||||
use crate::network_operating_system::NetworkOperatingSystemClient;
|
|
||||||
use crate::{
|
use crate::{
|
||||||
fast_iron::FastIronClient,
|
fast_iron::FastIronClient,
|
||||||
shell::{BrocadeSession, BrocadeShell},
|
shell::{BrocadeSession, BrocadeShell},
|
||||||
@@ -16,7 +15,6 @@ use harmony_types::switch::{PortDeclaration, PortLocation};
|
|||||||
use regex::Regex;
|
use regex::Regex;
|
||||||
|
|
||||||
mod fast_iron;
|
mod fast_iron;
|
||||||
mod network_operating_system;
|
|
||||||
mod shell;
|
mod shell;
|
||||||
mod ssh;
|
mod ssh;
|
||||||
|
|
||||||
@@ -38,7 +36,7 @@ pub struct TimeoutConfig {
|
|||||||
impl Default for TimeoutConfig {
|
impl Default for TimeoutConfig {
|
||||||
fn default() -> Self {
|
fn default() -> Self {
|
||||||
Self {
|
Self {
|
||||||
shell_ready: Duration::from_secs(10),
|
shell_ready: Duration::from_secs(3),
|
||||||
command_execution: Duration::from_secs(60), // Commands like `deploy` (for a LAG) can take a while
|
command_execution: Duration::from_secs(60), // Commands like `deploy` (for a LAG) can take a while
|
||||||
cleanup: Duration::from_secs(10),
|
cleanup: Duration::from_secs(10),
|
||||||
message_wait: Duration::from_millis(500),
|
message_wait: Duration::from_millis(500),
|
||||||
@@ -73,70 +71,6 @@ pub struct MacAddressEntry {
|
|||||||
|
|
||||||
pub type PortChannelId = u8;
|
pub type PortChannelId = u8;
|
||||||
|
|
||||||
/// Represents a single physical or logical link connecting two switches within a stack or fabric.
|
|
||||||
///
|
|
||||||
/// This structure provides a standardized view of the topology regardless of the
|
|
||||||
/// underlying Brocade OS configuration (stacking vs. fabric).
|
|
||||||
#[derive(Debug, PartialEq, Eq, Clone)]
|
|
||||||
pub struct InterSwitchLink {
|
|
||||||
/// The local port on the switch where the topology command was run.
|
|
||||||
pub local_port: PortLocation,
|
|
||||||
/// The port on the directly connected neighboring switch.
|
|
||||||
pub remote_port: Option<PortLocation>,
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Represents the key running configuration status of a single switch interface.
|
|
||||||
#[derive(Debug, PartialEq, Eq, Clone)]
|
|
||||||
pub struct InterfaceInfo {
|
|
||||||
/// The full configuration name (e.g., "TenGigabitEthernet 1/0/1", "FortyGigabitEthernet 2/0/2").
|
|
||||||
pub name: String,
|
|
||||||
/// The physical location of the interface.
|
|
||||||
pub port_location: PortLocation,
|
|
||||||
/// The parsed type and name prefix of the interface.
|
|
||||||
pub interface_type: InterfaceType,
|
|
||||||
/// The primary configuration mode defining the interface's behavior (L2, L3, Fabric).
|
|
||||||
pub operating_mode: Option<PortOperatingMode>,
|
|
||||||
/// Indicates the current state of the interface.
|
|
||||||
pub status: InterfaceStatus,
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Categorizes the functional type of a switch interface.
|
|
||||||
#[derive(Debug, PartialEq, Eq, Clone)]
|
|
||||||
pub enum InterfaceType {
|
|
||||||
/// Physical or virtual Ethernet interface (e.g., TenGigabitEthernet, FortyGigabitEthernet).
|
|
||||||
Ethernet(String),
|
|
||||||
}
|
|
||||||
|
|
||||||
impl fmt::Display for InterfaceType {
|
|
||||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
|
||||||
match self {
|
|
||||||
InterfaceType::Ethernet(name) => write!(f, "{name}"),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Defines the primary configuration mode of a switch interface, representing mutually exclusive roles.
|
|
||||||
#[derive(Debug, PartialEq, Eq, Clone)]
|
|
||||||
pub enum PortOperatingMode {
|
|
||||||
/// The interface is explicitly configured for Brocade fabric roles (ISL or Trunk enabled).
|
|
||||||
Fabric,
|
|
||||||
/// The interface is configured for standard Layer 2 switching as Trunk port (`switchport mode trunk`).
|
|
||||||
Trunk,
|
|
||||||
/// The interface is configured for standard Layer 2 switching as Access port (`switchport` without trunk mode).
|
|
||||||
Access,
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Defines the possible status of an interface.
|
|
||||||
#[derive(Debug, PartialEq, Eq, Clone)]
|
|
||||||
pub enum InterfaceStatus {
|
|
||||||
/// The interface is connected.
|
|
||||||
Connected,
|
|
||||||
/// The interface is not connected and is not expected to be.
|
|
||||||
NotConnected,
|
|
||||||
/// The interface is not connected but is expected to be (configured with `no shutdown`).
|
|
||||||
SfpAbsent,
|
|
||||||
}
|
|
||||||
|
|
||||||
pub async fn init(
|
pub async fn init(
|
||||||
ip_addresses: &[IpAddr],
|
ip_addresses: &[IpAddr],
|
||||||
port: u16,
|
port: u16,
|
||||||
@@ -153,81 +87,23 @@ pub async fn init(
|
|||||||
.await?;
|
.await?;
|
||||||
|
|
||||||
Ok(match version_info.os {
|
Ok(match version_info.os {
|
||||||
BrocadeOs::FastIron => Box::new(FastIronClient::init(shell, version_info)),
|
BrocadeOs::FastIron => Box::new(FastIronClient {
|
||||||
BrocadeOs::NetworkOperatingSystem => {
|
shell,
|
||||||
Box::new(NetworkOperatingSystemClient::init(shell, version_info))
|
version: version_info,
|
||||||
}
|
}),
|
||||||
|
BrocadeOs::NetworkOperatingSystem => todo!(),
|
||||||
BrocadeOs::Unknown => todo!(),
|
BrocadeOs::Unknown => todo!(),
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
#[async_trait]
|
#[async_trait]
|
||||||
pub trait BrocadeClient {
|
pub trait BrocadeClient {
|
||||||
/// Retrieves the operating system and version details from the connected Brocade switch.
|
|
||||||
///
|
|
||||||
/// This is typically the first call made after establishing a connection to determine
|
|
||||||
/// the switch OS family (e.g., FastIron, NOS) for feature compatibility.
|
|
||||||
///
|
|
||||||
/// # Returns
|
|
||||||
///
|
|
||||||
/// A `BrocadeInfo` structure containing parsed OS type and version string.
|
|
||||||
async fn version(&self) -> Result<BrocadeInfo, Error>;
|
async fn version(&self) -> Result<BrocadeInfo, Error>;
|
||||||
|
|
||||||
/// Retrieves the dynamically learned MAC address table from the switch.
|
async fn show_mac_address_table(&self) -> Result<Vec<MacAddressEntry>, Error>;
|
||||||
///
|
|
||||||
/// This is crucial for discovering where specific network endpoints (MAC addresses)
|
|
||||||
/// are currently located on the physical ports.
|
|
||||||
///
|
|
||||||
/// # Returns
|
|
||||||
///
|
|
||||||
/// A vector of `MacAddressEntry`, where each entry typically contains VLAN, MAC address,
|
|
||||||
/// and the associated port name/index.
|
|
||||||
async fn get_mac_address_table(&self) -> Result<Vec<MacAddressEntry>, Error>;
|
|
||||||
|
|
||||||
/// Derives the physical connections used to link multiple switches together
|
|
||||||
/// to form a single logical entity (stack, fabric, etc.).
|
|
||||||
///
|
|
||||||
/// This abstracts the underlying configuration (e.g., stack ports, fabric ports)
|
|
||||||
/// to return a standardized view of the topology.
|
|
||||||
///
|
|
||||||
/// # Returns
|
|
||||||
///
|
|
||||||
/// A vector of `InterSwitchLink` structs detailing which ports are used for stacking/fabric.
|
|
||||||
/// If the switch is not stacked, returns an empty vector.
|
|
||||||
async fn get_stack_topology(&self) -> Result<Vec<InterSwitchLink>, Error>;
|
|
||||||
|
|
||||||
/// Retrieves the status for all interfaces
|
|
||||||
///
|
|
||||||
/// # Returns
|
|
||||||
///
|
|
||||||
/// A vector of `InterfaceInfo` structures.
|
|
||||||
async fn get_interfaces(&self) -> Result<Vec<InterfaceInfo>, Error>;
|
|
||||||
|
|
||||||
/// Configures a set of interfaces to be operated with a specified mode (access ports, ISL, etc.).
|
|
||||||
async fn configure_interfaces(
|
|
||||||
&self,
|
|
||||||
interfaces: Vec<(String, PortOperatingMode)>,
|
|
||||||
) -> Result<(), Error>;
|
|
||||||
|
|
||||||
/// Scans the existing configuration to find the next available (unused)
|
|
||||||
/// Port-Channel ID (`lag` or `trunk`) for assignment.
|
|
||||||
///
|
|
||||||
/// # Returns
|
|
||||||
///
|
|
||||||
/// The smallest, unassigned `PortChannelId` within the supported range.
|
|
||||||
async fn find_available_channel_id(&self) -> Result<PortChannelId, Error>;
|
async fn find_available_channel_id(&self) -> Result<PortChannelId, Error>;
|
||||||
|
|
||||||
/// Creates and configures a new Port-Channel (Link Aggregation Group or LAG)
|
|
||||||
/// using the specified channel ID and ports.
|
|
||||||
///
|
|
||||||
/// The resulting configuration must be persistent (saved to startup-config).
|
|
||||||
/// Assumes a static LAG configuration mode unless specified otherwise by the implementation.
|
|
||||||
///
|
|
||||||
/// # Parameters
|
|
||||||
///
|
|
||||||
/// * `channel_id`: The ID (e.g., 1-128) for the logical port channel.
|
|
||||||
/// * `channel_name`: A descriptive name for the LAG (used in configuration context).
|
|
||||||
/// * `ports`: A slice of `PortLocation` structs defining the physical member ports.
|
|
||||||
async fn create_port_channel(
|
async fn create_port_channel(
|
||||||
&self,
|
&self,
|
||||||
channel_id: PortChannelId,
|
channel_id: PortChannelId,
|
||||||
@@ -235,15 +111,6 @@ pub trait BrocadeClient {
|
|||||||
ports: &[PortLocation],
|
ports: &[PortLocation],
|
||||||
) -> Result<(), Error>;
|
) -> Result<(), Error>;
|
||||||
|
|
||||||
/// Removes all configuration associated with the specified Port-Channel name.
|
|
||||||
///
|
|
||||||
/// This operation should be idempotent; attempting to clear a non-existent
|
|
||||||
/// channel should succeed (or return a benign error).
|
|
||||||
///
|
|
||||||
/// # Parameters
|
|
||||||
///
|
|
||||||
/// * `channel_name`: The name of the Port-Channel (LAG) to delete.
|
|
||||||
///
|
|
||||||
async fn clear_port_channel(&self, channel_name: &str) -> Result<(), Error>;
|
async fn clear_port_channel(&self, channel_name: &str) -> Result<(), Error>;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -1,306 +0,0 @@
|
|||||||
use std::str::FromStr;
|
|
||||||
|
|
||||||
use async_trait::async_trait;
|
|
||||||
use harmony_types::switch::{PortDeclaration, PortLocation};
|
|
||||||
use log::{debug, info};
|
|
||||||
|
|
||||||
use crate::{
|
|
||||||
BrocadeClient, BrocadeInfo, Error, ExecutionMode, InterSwitchLink, InterfaceInfo,
|
|
||||||
InterfaceStatus, InterfaceType, MacAddressEntry, PortChannelId, PortOperatingMode,
|
|
||||||
parse_brocade_mac_address, shell::BrocadeShell,
|
|
||||||
};
|
|
||||||
|
|
||||||
pub struct NetworkOperatingSystemClient {
|
|
||||||
shell: BrocadeShell,
|
|
||||||
version: BrocadeInfo,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl NetworkOperatingSystemClient {
|
|
||||||
pub fn init(mut shell: BrocadeShell, version_info: BrocadeInfo) -> Self {
|
|
||||||
shell.before_all(vec!["terminal length 0".into()]);
|
|
||||||
|
|
||||||
Self {
|
|
||||||
shell,
|
|
||||||
version: version_info,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fn parse_mac_entry(&self, line: &str) -> Option<Result<MacAddressEntry, Error>> {
|
|
||||||
debug!("[Brocade] Parsing mac address entry: {line}");
|
|
||||||
let parts: Vec<&str> = line.split_whitespace().collect();
|
|
||||||
if parts.len() < 5 {
|
|
||||||
return None;
|
|
||||||
}
|
|
||||||
|
|
||||||
let (vlan, mac_address, port) = match parts.len() {
|
|
||||||
5 => (
|
|
||||||
u16::from_str(parts[0]).ok()?,
|
|
||||||
parse_brocade_mac_address(parts[1]).ok()?,
|
|
||||||
parts[4].to_string(),
|
|
||||||
),
|
|
||||||
_ => (
|
|
||||||
u16::from_str(parts[0]).ok()?,
|
|
||||||
parse_brocade_mac_address(parts[1]).ok()?,
|
|
||||||
parts[5].to_string(),
|
|
||||||
),
|
|
||||||
};
|
|
||||||
|
|
||||||
let port =
|
|
||||||
PortDeclaration::parse(&port).map_err(|e| Error::UnexpectedError(format!("{e}")));
|
|
||||||
|
|
||||||
match port {
|
|
||||||
Ok(p) => Some(Ok(MacAddressEntry {
|
|
||||||
vlan,
|
|
||||||
mac_address,
|
|
||||||
port: p,
|
|
||||||
})),
|
|
||||||
Err(e) => Some(Err(e)),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fn parse_inter_switch_link_entry(&self, line: &str) -> Option<Result<InterSwitchLink, Error>> {
|
|
||||||
debug!("[Brocade] Parsing inter switch link entry: {line}");
|
|
||||||
let parts: Vec<&str> = line.split_whitespace().collect();
|
|
||||||
if parts.len() < 10 {
|
|
||||||
return None;
|
|
||||||
}
|
|
||||||
|
|
||||||
let local_port = PortLocation::from_str(parts[2]).ok()?;
|
|
||||||
let remote_port = PortLocation::from_str(parts[5]).ok()?;
|
|
||||||
|
|
||||||
Some(Ok(InterSwitchLink {
|
|
||||||
local_port,
|
|
||||||
remote_port: Some(remote_port),
|
|
||||||
}))
|
|
||||||
}
|
|
||||||
|
|
||||||
fn parse_interface_status_entry(&self, line: &str) -> Option<Result<InterfaceInfo, Error>> {
|
|
||||||
debug!("[Brocade] Parsing interface status entry: {line}");
|
|
||||||
let parts: Vec<&str> = line.split_whitespace().collect();
|
|
||||||
if parts.len() < 6 {
|
|
||||||
return None;
|
|
||||||
}
|
|
||||||
|
|
||||||
let interface_type = match parts[0] {
|
|
||||||
"Fo" => InterfaceType::Ethernet("FortyGigabitEthernet".to_string()),
|
|
||||||
"Te" => InterfaceType::Ethernet("TenGigabitEthernet".to_string()),
|
|
||||||
_ => return None,
|
|
||||||
};
|
|
||||||
let port_location = PortLocation::from_str(parts[1]).ok()?;
|
|
||||||
let status = match parts[2] {
|
|
||||||
"connected" => InterfaceStatus::Connected,
|
|
||||||
"notconnected" => InterfaceStatus::NotConnected,
|
|
||||||
"sfpAbsent" => InterfaceStatus::SfpAbsent,
|
|
||||||
_ => return None,
|
|
||||||
};
|
|
||||||
let operating_mode = match parts[3] {
|
|
||||||
"ISL" => Some(PortOperatingMode::Fabric),
|
|
||||||
"Trunk" => Some(PortOperatingMode::Trunk),
|
|
||||||
"Access" => Some(PortOperatingMode::Access),
|
|
||||||
"--" => None,
|
|
||||||
_ => return None,
|
|
||||||
};
|
|
||||||
|
|
||||||
Some(Ok(InterfaceInfo {
|
|
||||||
name: format!("{} {}", interface_type, port_location),
|
|
||||||
port_location,
|
|
||||||
interface_type,
|
|
||||||
operating_mode,
|
|
||||||
status,
|
|
||||||
}))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[async_trait]
|
|
||||||
impl BrocadeClient for NetworkOperatingSystemClient {
|
|
||||||
async fn version(&self) -> Result<BrocadeInfo, Error> {
|
|
||||||
Ok(self.version.clone())
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn get_mac_address_table(&self) -> Result<Vec<MacAddressEntry>, Error> {
|
|
||||||
let output = self
|
|
||||||
.shell
|
|
||||||
.run_command("show mac-address-table", ExecutionMode::Regular)
|
|
||||||
.await?;
|
|
||||||
|
|
||||||
output
|
|
||||||
.lines()
|
|
||||||
.skip(1)
|
|
||||||
.filter_map(|line| self.parse_mac_entry(line))
|
|
||||||
.collect()
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn get_stack_topology(&self) -> Result<Vec<InterSwitchLink>, Error> {
|
|
||||||
let output = self
|
|
||||||
.shell
|
|
||||||
.run_command("show fabric isl", ExecutionMode::Regular)
|
|
||||||
.await?;
|
|
||||||
|
|
||||||
output
|
|
||||||
.lines()
|
|
||||||
.skip(6)
|
|
||||||
.filter_map(|line| self.parse_inter_switch_link_entry(line))
|
|
||||||
.collect()
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn get_interfaces(&self) -> Result<Vec<InterfaceInfo>, Error> {
|
|
||||||
let output = self
|
|
||||||
.shell
|
|
||||||
.run_command(
|
|
||||||
"show interface status rbridge-id all",
|
|
||||||
ExecutionMode::Regular,
|
|
||||||
)
|
|
||||||
.await?;
|
|
||||||
|
|
||||||
output
|
|
||||||
.lines()
|
|
||||||
.skip(2)
|
|
||||||
.filter_map(|line| self.parse_interface_status_entry(line))
|
|
||||||
.collect()
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn configure_interfaces(
|
|
||||||
&self,
|
|
||||||
interfaces: Vec<(String, PortOperatingMode)>,
|
|
||||||
) -> Result<(), Error> {
|
|
||||||
info!("[Brocade] Configuring {} interface(s)...", interfaces.len());
|
|
||||||
|
|
||||||
let mut commands = vec!["configure terminal".to_string()];
|
|
||||||
|
|
||||||
for interface in interfaces {
|
|
||||||
commands.push(format!("interface {}", interface.0));
|
|
||||||
|
|
||||||
match interface.1 {
|
|
||||||
PortOperatingMode::Fabric => {
|
|
||||||
commands.push("fabric isl enable".into());
|
|
||||||
commands.push("fabric trunk enable".into());
|
|
||||||
}
|
|
||||||
PortOperatingMode::Trunk => {
|
|
||||||
commands.push("switchport".into());
|
|
||||||
commands.push("switchport mode trunk".into());
|
|
||||||
commands.push("no spanning-tree shutdown".into());
|
|
||||||
commands.push("no fabric isl enable".into());
|
|
||||||
commands.push("no fabric trunk enable".into());
|
|
||||||
}
|
|
||||||
PortOperatingMode::Access => {
|
|
||||||
commands.push("switchport".into());
|
|
||||||
commands.push("switchport mode access".into());
|
|
||||||
commands.push("switchport access vlan 1".into());
|
|
||||||
commands.push("no spanning-tree shutdown".into());
|
|
||||||
commands.push("no fabric isl enable".into());
|
|
||||||
commands.push("no fabric trunk enable".into());
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
commands.push("no shutdown".into());
|
|
||||||
commands.push("exit".into());
|
|
||||||
}
|
|
||||||
|
|
||||||
commands.push("write memory".into());
|
|
||||||
|
|
||||||
self.shell
|
|
||||||
.run_commands(commands, ExecutionMode::Regular)
|
|
||||||
.await?;
|
|
||||||
|
|
||||||
info!("[Brocade] Interfaces configured.");
|
|
||||||
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn find_available_channel_id(&self) -> Result<PortChannelId, Error> {
|
|
||||||
info!("[Brocade] Finding next available channel id...");
|
|
||||||
|
|
||||||
let output = self
|
|
||||||
.shell
|
|
||||||
.run_command("show port-channel", ExecutionMode::Regular)
|
|
||||||
.await?;
|
|
||||||
|
|
||||||
let used_ids: Vec<u8> = output
|
|
||||||
.lines()
|
|
||||||
.skip(6)
|
|
||||||
.filter_map(|line| {
|
|
||||||
let parts: Vec<&str> = line.split_whitespace().collect();
|
|
||||||
if parts.len() < 8 {
|
|
||||||
return None;
|
|
||||||
}
|
|
||||||
|
|
||||||
u8::from_str(parts[0]).ok()
|
|
||||||
})
|
|
||||||
.collect();
|
|
||||||
|
|
||||||
let mut next_id: u8 = 1;
|
|
||||||
loop {
|
|
||||||
if !used_ids.contains(&next_id) {
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
next_id += 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
info!("[Brocade] Found channel id: {next_id}");
|
|
||||||
Ok(next_id)
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn create_port_channel(
|
|
||||||
&self,
|
|
||||||
channel_id: PortChannelId,
|
|
||||||
channel_name: &str,
|
|
||||||
ports: &[PortLocation],
|
|
||||||
) -> Result<(), Error> {
|
|
||||||
info!(
|
|
||||||
"[Brocade] Configuring port-channel '{channel_name} {channel_id}' with ports: {ports:?}"
|
|
||||||
);
|
|
||||||
|
|
||||||
let interfaces = self.get_interfaces().await?;
|
|
||||||
|
|
||||||
let mut commands = vec![
|
|
||||||
"configure terminal".into(),
|
|
||||||
format!("interface port-channel {}", channel_id),
|
|
||||||
"no shutdown".into(),
|
|
||||||
"exit".into(),
|
|
||||||
];
|
|
||||||
|
|
||||||
for port in ports {
|
|
||||||
let interface = interfaces.iter().find(|i| i.port_location == *port);
|
|
||||||
let Some(interface) = interface else {
|
|
||||||
continue;
|
|
||||||
};
|
|
||||||
|
|
||||||
commands.push(format!("interface {}", interface.name));
|
|
||||||
commands.push("no switchport".into());
|
|
||||||
commands.push("no ip address".into());
|
|
||||||
commands.push("no fabric isl enable".into());
|
|
||||||
commands.push("no fabric trunk enable".into());
|
|
||||||
commands.push(format!("channel-group {channel_id} mode active"));
|
|
||||||
commands.push("no shutdown".into());
|
|
||||||
commands.push("exit".into());
|
|
||||||
}
|
|
||||||
|
|
||||||
commands.push("write memory".into());
|
|
||||||
|
|
||||||
self.shell
|
|
||||||
.run_commands(commands, ExecutionMode::Regular)
|
|
||||||
.await?;
|
|
||||||
|
|
||||||
info!("[Brocade] Port-channel '{channel_name}' configured.");
|
|
||||||
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn clear_port_channel(&self, channel_name: &str) -> Result<(), Error> {
|
|
||||||
info!("[Brocade] Clearing port-channel: {channel_name}");
|
|
||||||
|
|
||||||
let commands = vec![
|
|
||||||
"configure terminal".into(),
|
|
||||||
format!("no interface port-channel {}", channel_name),
|
|
||||||
"exit".into(),
|
|
||||||
"write memory".into(),
|
|
||||||
];
|
|
||||||
|
|
||||||
self.shell
|
|
||||||
.run_commands(commands, ExecutionMode::Regular)
|
|
||||||
.await?;
|
|
||||||
|
|
||||||
info!("[Brocade] Port-channel '{channel_name}' cleared.");
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -14,13 +14,11 @@ use russh::ChannelMsg;
|
|||||||
use tokio::time::timeout;
|
use tokio::time::timeout;
|
||||||
|
|
||||||
pub struct BrocadeShell {
|
pub struct BrocadeShell {
|
||||||
ip: IpAddr,
|
pub ip: IpAddr,
|
||||||
port: u16,
|
pub port: u16,
|
||||||
username: String,
|
pub username: String,
|
||||||
password: String,
|
pub password: String,
|
||||||
options: BrocadeOptions,
|
pub options: BrocadeOptions,
|
||||||
before_all_commands: Vec<String>,
|
|
||||||
after_all_commands: Vec<String>,
|
|
||||||
}
|
}
|
||||||
|
|
||||||
impl BrocadeShell {
|
impl BrocadeShell {
|
||||||
@@ -43,8 +41,6 @@ impl BrocadeShell {
|
|||||||
port,
|
port,
|
||||||
username: username.to_string(),
|
username: username.to_string(),
|
||||||
password: password.to_string(),
|
password: password.to_string(),
|
||||||
before_all_commands: vec![],
|
|
||||||
after_all_commands: vec![],
|
|
||||||
options,
|
options,
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
@@ -70,22 +66,14 @@ impl BrocadeShell {
|
|||||||
>,
|
>,
|
||||||
{
|
{
|
||||||
let mut session = self.open_session(mode).await?;
|
let mut session = self.open_session(mode).await?;
|
||||||
|
|
||||||
let _ = session.run_commands(self.before_all_commands.clone()).await;
|
|
||||||
let result = callback(&mut session).await;
|
let result = callback(&mut session).await;
|
||||||
let _ = session.run_commands(self.after_all_commands.clone()).await;
|
|
||||||
|
|
||||||
session.close().await?;
|
session.close().await?;
|
||||||
result
|
result
|
||||||
}
|
}
|
||||||
|
|
||||||
pub async fn run_command(&self, command: &str, mode: ExecutionMode) -> Result<String, Error> {
|
pub async fn run_command(&self, command: &str, mode: ExecutionMode) -> Result<String, Error> {
|
||||||
let mut session = self.open_session(mode).await?;
|
let mut session = self.open_session(mode).await?;
|
||||||
|
|
||||||
let _ = session.run_commands(self.before_all_commands.clone()).await;
|
|
||||||
let result = session.run_command(command).await;
|
let result = session.run_command(command).await;
|
||||||
let _ = session.run_commands(self.after_all_commands.clone()).await;
|
|
||||||
|
|
||||||
session.close().await?;
|
session.close().await?;
|
||||||
result
|
result
|
||||||
}
|
}
|
||||||
@@ -96,22 +84,10 @@ impl BrocadeShell {
|
|||||||
mode: ExecutionMode,
|
mode: ExecutionMode,
|
||||||
) -> Result<(), Error> {
|
) -> Result<(), Error> {
|
||||||
let mut session = self.open_session(mode).await?;
|
let mut session = self.open_session(mode).await?;
|
||||||
|
|
||||||
let _ = session.run_commands(self.before_all_commands.clone()).await;
|
|
||||||
let result = session.run_commands(commands).await;
|
let result = session.run_commands(commands).await;
|
||||||
let _ = session.run_commands(self.after_all_commands.clone()).await;
|
|
||||||
|
|
||||||
session.close().await?;
|
session.close().await?;
|
||||||
result
|
result
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn before_all(&mut self, commands: Vec<String>) {
|
|
||||||
self.before_all_commands = commands;
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn after_all(&mut self, commands: Vec<String>) {
|
|
||||||
self.after_all_commands = commands;
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
pub struct BrocadeSession {
|
pub struct BrocadeSession {
|
||||||
@@ -172,10 +148,6 @@ impl BrocadeSession {
|
|||||||
}
|
}
|
||||||
|
|
||||||
pub async fn run_command(&mut self, command: &str) -> Result<String, Error> {
|
pub async fn run_command(&mut self, command: &str) -> Result<String, Error> {
|
||||||
if self.should_skip_command(command) {
|
|
||||||
return Ok(String::new());
|
|
||||||
}
|
|
||||||
|
|
||||||
debug!("[Brocade] Running command: '{command}'...");
|
debug!("[Brocade] Running command: '{command}'...");
|
||||||
|
|
||||||
self.channel
|
self.channel
|
||||||
@@ -198,15 +170,7 @@ impl BrocadeSession {
|
|||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
fn should_skip_command(&self, command: &str) -> bool {
|
pub async fn collect_command_output(&mut self) -> Result<Vec<u8>, Error> {
|
||||||
if (command.starts_with("write") || command.starts_with("deploy")) && self.options.dry_run {
|
|
||||||
info!("[Brocade] Dry-run mode enabled, skipping command: {command}");
|
|
||||||
return true;
|
|
||||||
}
|
|
||||||
false
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn collect_command_output(&mut self) -> Result<Vec<u8>, Error> {
|
|
||||||
let mut output = Vec::new();
|
let mut output = Vec::new();
|
||||||
let start = Instant::now();
|
let start = Instant::now();
|
||||||
let read_timeout = Duration::from_millis(500);
|
let read_timeout = Duration::from_millis(500);
|
||||||
@@ -258,7 +222,7 @@ impl BrocadeSession {
|
|||||||
Ok(output)
|
Ok(output)
|
||||||
}
|
}
|
||||||
|
|
||||||
fn check_for_command_errors(&self, output: &str, command: &str) -> Result<(), Error> {
|
pub fn check_for_command_errors(&self, output: &str, command: &str) -> Result<(), Error> {
|
||||||
const ERROR_PATTERNS: &[&str] = &[
|
const ERROR_PATTERNS: &[&str] = &[
|
||||||
"invalid input",
|
"invalid input",
|
||||||
"syntax error",
|
"syntax error",
|
||||||
@@ -290,7 +254,7 @@ impl BrocadeSession {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn wait_for_shell_ready(
|
pub async fn wait_for_shell_ready(
|
||||||
channel: &mut russh::Channel<russh::client::Msg>,
|
channel: &mut russh::Channel<russh::client::Msg>,
|
||||||
timeouts: &TimeoutConfig,
|
timeouts: &TimeoutConfig,
|
||||||
) -> Result<(), Error> {
|
) -> Result<(), Error> {
|
||||||
@@ -302,7 +266,6 @@ async fn wait_for_shell_ready(
|
|||||||
Ok(Some(ChannelMsg::Data { data })) => {
|
Ok(Some(ChannelMsg::Data { data })) => {
|
||||||
buffer.extend_from_slice(&data);
|
buffer.extend_from_slice(&data);
|
||||||
let output = String::from_utf8_lossy(&buffer);
|
let output = String::from_utf8_lossy(&buffer);
|
||||||
let output = output.trim();
|
|
||||||
if output.ends_with('>') || output.ends_with('#') {
|
if output.ends_with('>') || output.ends_with('#') {
|
||||||
debug!("[Brocade] Shell ready");
|
debug!("[Brocade] Shell ready");
|
||||||
return Ok(());
|
return Ok(());
|
||||||
@@ -316,7 +279,7 @@ async fn wait_for_shell_ready(
|
|||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn try_elevate_session(
|
pub async fn try_elevate_session(
|
||||||
channel: &mut russh::Channel<russh::client::Msg>,
|
channel: &mut russh::Channel<russh::client::Msg>,
|
||||||
username: &str,
|
username: &str,
|
||||||
password: &str,
|
password: &str,
|
||||||
|
|||||||
@@ -122,7 +122,6 @@ EOF
|
|||||||
fi
|
fi
|
||||||
|
|
||||||
echo "Creating OPNsense VM using serial image..."
|
echo "Creating OPNsense VM using serial image..."
|
||||||
|
|
||||||
virt-install \
|
virt-install \
|
||||||
--connect "${CONNECT_URI}" \
|
--connect "${CONNECT_URI}" \
|
||||||
--name "${VM_OPN}" \
|
--name "${VM_OPN}" \
|
||||||
@@ -132,12 +131,11 @@ EOF
|
|||||||
--os-variant "${OS_VARIANT_OPN}" \
|
--os-variant "${OS_VARIANT_OPN}" \
|
||||||
--graphics none \
|
--graphics none \
|
||||||
--noautoconsole \
|
--noautoconsole \
|
||||||
--import \
|
--disk path="${disk_opn}",device=disk,bus=virtio,boot.order=1 \
|
||||||
--disk path="${OPN_IMG_PATH}",device=disk,bus=sata,boot.order=1 \
|
--disk path="${OPN_IMG_PATH}",device=disk,bus=usb,readonly=on,boot.order=2 \
|
||||||
--disk path="${disk_opn}",device=disk,bus=virtio,boot.order=2 \
|
|
||||||
--network network=default,model=virtio \
|
--network network=default,model=virtio \
|
||||||
--network network="${NET_HARMONYLAN}",model=virtio \
|
--network network="${NET_HARMONYLAN}",model=virtio \
|
||||||
--boot hd,menu=on
|
--boot uefi,menu=on
|
||||||
|
|
||||||
echo "OPNsense VM created. Connect with: sudo virsh console ${VM_OPN}"
|
echo "OPNsense VM created. Connect with: sudo virsh console ${VM_OPN}"
|
||||||
echo "The VM will boot from the serial installation image."
|
echo "The VM will boot from the serial installation image."
|
||||||
|
|||||||
@@ -1,14 +0,0 @@
|
|||||||
[package]
|
|
||||||
name = "example-openbao"
|
|
||||||
edition = "2024"
|
|
||||||
version.workspace = true
|
|
||||||
readme.workspace = true
|
|
||||||
license.workspace = true
|
|
||||||
|
|
||||||
[dependencies]
|
|
||||||
harmony = { path = "../../harmony" }
|
|
||||||
harmony_cli = { path = "../../harmony_cli" }
|
|
||||||
harmony_macros = { path = "../../harmony_macros" }
|
|
||||||
harmony_types = { path = "../../harmony_types" }
|
|
||||||
tokio.workspace = true
|
|
||||||
url.workspace = true
|
|
||||||
@@ -1,7 +0,0 @@
|
|||||||
To install an OpenBao instance with Harmony, simply run `cargo run -p example-openbao`.
|
|
||||||
|
|
||||||
Depending on your environment configuration, it will either install a k3d cluster locally and deploy on it, or install to a remote cluster.
|
|
||||||
|
|
||||||
Then follow the openbao documentation to initialize and unseal, this will make openbao usable.
|
|
||||||
|
|
||||||
https://openbao.org/docs/platform/k8s/helm/run/
|
|
||||||
@@ -1,67 +0,0 @@
|
|||||||
use std::{collections::HashMap, str::FromStr};
|
|
||||||
|
|
||||||
use harmony::{
|
|
||||||
inventory::Inventory,
|
|
||||||
modules::helm::chart::{HelmChartScore, HelmRepository, NonBlankString},
|
|
||||||
topology::K8sAnywhereTopology,
|
|
||||||
};
|
|
||||||
use harmony_macros::hurl;
|
|
||||||
|
|
||||||
#[tokio::main]
|
|
||||||
async fn main() {
|
|
||||||
let values_yaml = Some(
|
|
||||||
r#"server:
|
|
||||||
standalone:
|
|
||||||
enabled: true
|
|
||||||
config: |
|
|
||||||
listener "tcp" {
|
|
||||||
tls_disable = true
|
|
||||||
address = "[::]:8200"
|
|
||||||
cluster_address = "[::]:8201"
|
|
||||||
}
|
|
||||||
|
|
||||||
storage "file" {
|
|
||||||
path = "/openbao/data"
|
|
||||||
}
|
|
||||||
|
|
||||||
service:
|
|
||||||
enabled: true
|
|
||||||
|
|
||||||
dataStorage:
|
|
||||||
enabled: true
|
|
||||||
size: 10Gi
|
|
||||||
storageClass: null
|
|
||||||
accessMode: ReadWriteOnce
|
|
||||||
|
|
||||||
auditStorage:
|
|
||||||
enabled: true
|
|
||||||
size: 10Gi
|
|
||||||
storageClass: null
|
|
||||||
accessMode: ReadWriteOnce"#
|
|
||||||
.to_string(),
|
|
||||||
);
|
|
||||||
let openbao = HelmChartScore {
|
|
||||||
namespace: Some(NonBlankString::from_str("openbao").unwrap()),
|
|
||||||
release_name: NonBlankString::from_str("openbao").unwrap(),
|
|
||||||
chart_name: NonBlankString::from_str("openbao/openbao").unwrap(),
|
|
||||||
chart_version: None,
|
|
||||||
values_overrides: None,
|
|
||||||
values_yaml,
|
|
||||||
create_namespace: true,
|
|
||||||
install_only: true,
|
|
||||||
repository: Some(HelmRepository::new(
|
|
||||||
"openbao".to_string(),
|
|
||||||
hurl!("https://openbao.github.io/openbao-helm"),
|
|
||||||
true,
|
|
||||||
)),
|
|
||||||
};
|
|
||||||
|
|
||||||
harmony_cli::run(
|
|
||||||
Inventory::autoload(),
|
|
||||||
K8sAnywhereTopology::from_env(),
|
|
||||||
vec![Box::new(openbao)],
|
|
||||||
None,
|
|
||||||
)
|
|
||||||
.await
|
|
||||||
.unwrap();
|
|
||||||
}
|
|
||||||
@@ -1,11 +0,0 @@
|
|||||||
[package]
|
|
||||||
name = "example-remove-rook-osd"
|
|
||||||
edition = "2024"
|
|
||||||
version.workspace = true
|
|
||||||
readme.workspace = true
|
|
||||||
license.workspace = true
|
|
||||||
|
|
||||||
[dependencies]
|
|
||||||
harmony = { version = "0.1.0", path = "../../harmony" }
|
|
||||||
harmony_cli = { version = "0.1.0", path = "../../harmony_cli" }
|
|
||||||
tokio.workspace = true
|
|
||||||
@@ -1,18 +0,0 @@
|
|||||||
use harmony::{
|
|
||||||
inventory::Inventory, modules::storage::ceph::ceph_remove_osd_score::CephRemoveOsd,
|
|
||||||
topology::K8sAnywhereTopology,
|
|
||||||
};
|
|
||||||
|
|
||||||
#[tokio::main]
|
|
||||||
async fn main() {
|
|
||||||
let ceph_score = CephRemoveOsd {
|
|
||||||
osd_deployment_name: "rook-ceph-osd-2".to_string(),
|
|
||||||
rook_ceph_namespace: "rook-ceph".to_string(),
|
|
||||||
};
|
|
||||||
|
|
||||||
let topology = K8sAnywhereTopology::from_env();
|
|
||||||
let inventory = Inventory::autoload();
|
|
||||||
harmony_cli::run(inventory, topology, vec![Box::new(ceph_score)], None)
|
|
||||||
.await
|
|
||||||
.unwrap();
|
|
||||||
}
|
|
||||||
@@ -78,7 +78,6 @@ askama.workspace = true
|
|||||||
sqlx.workspace = true
|
sqlx.workspace = true
|
||||||
inquire.workspace = true
|
inquire.workspace = true
|
||||||
brocade = { path = "../brocade" }
|
brocade = { path = "../brocade" }
|
||||||
option-ext = "0.2.0"
|
|
||||||
|
|
||||||
[dev-dependencies]
|
[dev-dependencies]
|
||||||
pretty_assertions.workspace = true
|
pretty_assertions.workspace = true
|
||||||
|
|||||||
@@ -12,11 +12,11 @@ pub type FirewallGroup = Vec<PhysicalHost>;
|
|||||||
pub struct PhysicalHost {
|
pub struct PhysicalHost {
|
||||||
pub id: Id,
|
pub id: Id,
|
||||||
pub category: HostCategory,
|
pub category: HostCategory,
|
||||||
pub network: Vec<NetworkInterface>,
|
pub network: Vec<NetworkInterface>, // FIXME: Don't use harmony_inventory_agent::NetworkInterface
|
||||||
pub storage: Vec<StorageDrive>,
|
pub storage: Vec<StorageDrive>, // FIXME: Don't use harmony_inventory_agent::StorageDrive
|
||||||
pub labels: Vec<Label>,
|
pub labels: Vec<Label>,
|
||||||
pub memory_modules: Vec<MemoryModule>,
|
pub memory_modules: Vec<MemoryModule>, // FIXME: Don't use harmony_inventory_agent::MemoryModule
|
||||||
pub cpus: Vec<CPU>,
|
pub cpus: Vec<CPU>, // FIXME: Don't use harmony_inventory_agent::CPU
|
||||||
}
|
}
|
||||||
|
|
||||||
impl PhysicalHost {
|
impl PhysicalHost {
|
||||||
|
|||||||
@@ -30,7 +30,6 @@ pub enum InterpretName {
|
|||||||
Lamp,
|
Lamp,
|
||||||
ApplicationMonitoring,
|
ApplicationMonitoring,
|
||||||
K8sPrometheusCrdAlerting,
|
K8sPrometheusCrdAlerting,
|
||||||
CephRemoveOsd,
|
|
||||||
DiscoverInventoryAgent,
|
DiscoverInventoryAgent,
|
||||||
CephClusterHealth,
|
CephClusterHealth,
|
||||||
Custom(&'static str),
|
Custom(&'static str),
|
||||||
@@ -62,7 +61,6 @@ impl std::fmt::Display for InterpretName {
|
|||||||
InterpretName::Lamp => f.write_str("LAMP"),
|
InterpretName::Lamp => f.write_str("LAMP"),
|
||||||
InterpretName::ApplicationMonitoring => f.write_str("ApplicationMonitoring"),
|
InterpretName::ApplicationMonitoring => f.write_str("ApplicationMonitoring"),
|
||||||
InterpretName::K8sPrometheusCrdAlerting => f.write_str("K8sPrometheusCrdAlerting"),
|
InterpretName::K8sPrometheusCrdAlerting => f.write_str("K8sPrometheusCrdAlerting"),
|
||||||
InterpretName::CephRemoveOsd => f.write_str("CephRemoveOsd"),
|
|
||||||
InterpretName::DiscoverInventoryAgent => f.write_str("DiscoverInventoryAgent"),
|
InterpretName::DiscoverInventoryAgent => f.write_str("DiscoverInventoryAgent"),
|
||||||
InterpretName::CephClusterHealth => f.write_str("CephClusterHealth"),
|
InterpretName::CephClusterHealth => f.write_str("CephClusterHealth"),
|
||||||
InterpretName::Custom(name) => f.write_str(name),
|
InterpretName::Custom(name) => f.write_str(name),
|
||||||
|
|||||||
@@ -2,10 +2,9 @@ use async_trait::async_trait;
|
|||||||
use brocade::BrocadeOptions;
|
use brocade::BrocadeOptions;
|
||||||
use harmony_macros::ip;
|
use harmony_macros::ip;
|
||||||
use harmony_secret::SecretManager;
|
use harmony_secret::SecretManager;
|
||||||
use harmony_types::{
|
use harmony_types::net::MacAddress;
|
||||||
net::{MacAddress, Url},
|
use harmony_types::net::Url;
|
||||||
switch::PortLocation,
|
use harmony_types::switch::PortLocation;
|
||||||
};
|
|
||||||
use k8s_openapi::api::core::v1::Namespace;
|
use k8s_openapi::api::core::v1::Namespace;
|
||||||
use kube::api::ObjectMeta;
|
use kube::api::ObjectMeta;
|
||||||
use log::debug;
|
use log::debug;
|
||||||
@@ -16,19 +15,40 @@ use crate::executors::ExecutorError;
|
|||||||
use crate::hardware::PhysicalHost;
|
use crate::hardware::PhysicalHost;
|
||||||
use crate::infra::brocade::BrocadeSwitchAuth;
|
use crate::infra::brocade::BrocadeSwitchAuth;
|
||||||
use crate::infra::brocade::BrocadeSwitchClient;
|
use crate::infra::brocade::BrocadeSwitchClient;
|
||||||
use crate::modules::okd::crd::{
|
use crate::modules::okd::crd::InstallPlanApproval;
|
||||||
InstallPlanApproval, OperatorGroup, OperatorGroupSpec, Subscription, SubscriptionSpec,
|
use crate::modules::okd::crd::OperatorGroup;
|
||||||
nmstate::{self, NMState, NodeNetworkConfigurationPolicy, NodeNetworkConfigurationPolicySpec},
|
use crate::modules::okd::crd::OperatorGroupSpec;
|
||||||
};
|
use crate::modules::okd::crd::Subscription;
|
||||||
|
use crate::modules::okd::crd::SubscriptionSpec;
|
||||||
|
use crate::modules::okd::crd::nmstate;
|
||||||
|
use crate::modules::okd::crd::nmstate::NMState;
|
||||||
|
use crate::modules::okd::crd::nmstate::NodeNetworkConfigurationPolicy;
|
||||||
|
use crate::modules::okd::crd::nmstate::NodeNetworkConfigurationPolicySpec;
|
||||||
use crate::topology::PxeOptions;
|
use crate::topology::PxeOptions;
|
||||||
|
|
||||||
use super::{
|
use super::DHCPStaticEntry;
|
||||||
DHCPStaticEntry, DhcpServer, DnsRecord, DnsRecordType, DnsServer, Firewall, HostNetworkConfig,
|
use super::DhcpServer;
|
||||||
HttpServer, IpAddress, K8sclient, LoadBalancer, LoadBalancerService, LogicalHost,
|
use super::DnsRecord;
|
||||||
PreparationError, PreparationOutcome, Router, Switch, SwitchClient, SwitchError, TftpServer,
|
use super::DnsRecordType;
|
||||||
Topology, k8s::K8sClient,
|
use super::DnsServer;
|
||||||
};
|
use super::Firewall;
|
||||||
|
use super::HostNetworkConfig;
|
||||||
|
use super::HttpServer;
|
||||||
|
use super::IpAddress;
|
||||||
|
use super::K8sclient;
|
||||||
|
use super::LoadBalancer;
|
||||||
|
use super::LoadBalancerService;
|
||||||
|
use super::LogicalHost;
|
||||||
|
use super::PreparationError;
|
||||||
|
use super::PreparationOutcome;
|
||||||
|
use super::Router;
|
||||||
|
use super::Switch;
|
||||||
|
use super::SwitchClient;
|
||||||
|
use super::SwitchError;
|
||||||
|
use super::TftpServer;
|
||||||
|
|
||||||
|
use super::Topology;
|
||||||
|
use super::k8s::K8sClient;
|
||||||
use std::collections::BTreeMap;
|
use std::collections::BTreeMap;
|
||||||
use std::net::IpAddr;
|
use std::net::IpAddr;
|
||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
@@ -493,12 +513,6 @@ impl HttpServer for HAClusterTopology {
|
|||||||
|
|
||||||
#[async_trait]
|
#[async_trait]
|
||||||
impl Switch for HAClusterTopology {
|
impl Switch for HAClusterTopology {
|
||||||
async fn setup_switch(&self) -> Result<(), SwitchError> {
|
|
||||||
let client = self.get_switch_client().await?;
|
|
||||||
client.setup().await?;
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn get_port_for_mac_address(
|
async fn get_port_for_mac_address(
|
||||||
&self,
|
&self,
|
||||||
mac_address: &MacAddress,
|
mac_address: &MacAddress,
|
||||||
@@ -513,7 +527,7 @@ impl Switch for HAClusterTopology {
|
|||||||
host: &PhysicalHost,
|
host: &PhysicalHost,
|
||||||
config: HostNetworkConfig,
|
config: HostNetworkConfig,
|
||||||
) -> Result<(), SwitchError> {
|
) -> Result<(), SwitchError> {
|
||||||
self.configure_bond(host, &config).await?;
|
// self.configure_bond(host, &config).await?;
|
||||||
self.configure_port_channel(host, &config).await
|
self.configure_port_channel(host, &config).await
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,17 +1,13 @@
|
|||||||
use std::time::Duration;
|
|
||||||
|
|
||||||
use derive_new::new;
|
use derive_new::new;
|
||||||
use k8s_openapi::{
|
use k8s_openapi::{
|
||||||
ClusterResourceScope, NamespaceResourceScope,
|
ClusterResourceScope, NamespaceResourceScope,
|
||||||
api::{apps::v1::Deployment, core::v1::Pod},
|
api::{apps::v1::Deployment, core::v1::Pod},
|
||||||
apimachinery::pkg::version::Info,
|
|
||||||
};
|
};
|
||||||
use kube::{
|
use kube::{
|
||||||
Client, Config, Discovery, Error, Resource,
|
Client, Config, Error, Resource,
|
||||||
api::{Api, AttachParams, DeleteParams, ListParams, Patch, PatchParams, ResourceExt},
|
api::{Api, AttachParams, DeleteParams, ListParams, Patch, PatchParams, ResourceExt},
|
||||||
config::{KubeConfigOptions, Kubeconfig},
|
config::{KubeConfigOptions, Kubeconfig},
|
||||||
core::ErrorResponse,
|
core::ErrorResponse,
|
||||||
error::DiscoveryError,
|
|
||||||
runtime::reflector::Lookup,
|
runtime::reflector::Lookup,
|
||||||
};
|
};
|
||||||
use kube::{api::DynamicObject, runtime::conditions};
|
use kube::{api::DynamicObject, runtime::conditions};
|
||||||
@@ -19,11 +15,11 @@ use kube::{
|
|||||||
api::{ApiResource, GroupVersionKind},
|
api::{ApiResource, GroupVersionKind},
|
||||||
runtime::wait::await_condition,
|
runtime::wait::await_condition,
|
||||||
};
|
};
|
||||||
use log::{debug, error, info, trace};
|
use log::{debug, error, trace};
|
||||||
use serde::{Serialize, de::DeserializeOwned};
|
use serde::{Serialize, de::DeserializeOwned};
|
||||||
use serde_json::{Value, json};
|
use serde_json::{Value, json};
|
||||||
use similar::TextDiff;
|
use similar::TextDiff;
|
||||||
use tokio::{io::AsyncReadExt, time::sleep};
|
use tokio::io::AsyncReadExt;
|
||||||
|
|
||||||
#[derive(new, Clone)]
|
#[derive(new, Clone)]
|
||||||
pub struct K8sClient {
|
pub struct K8sClient {
|
||||||
@@ -57,17 +53,6 @@ impl K8sClient {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
pub async fn get_apiserver_version(&self) -> Result<Info, Error> {
|
|
||||||
let client: Client = self.client.clone();
|
|
||||||
let version_info: Info = client.apiserver_version().await?;
|
|
||||||
Ok(version_info)
|
|
||||||
}
|
|
||||||
|
|
||||||
pub async fn discovery(&self) -> Result<Discovery, Error> {
|
|
||||||
let discovery: Discovery = Discovery::new(self.client.clone()).run().await?;
|
|
||||||
Ok(discovery)
|
|
||||||
}
|
|
||||||
|
|
||||||
pub async fn get_resource_json_value(
|
pub async fn get_resource_json_value(
|
||||||
&self,
|
&self,
|
||||||
name: &str,
|
name: &str,
|
||||||
@@ -89,13 +74,10 @@ impl K8sClient {
|
|||||||
namespace: Option<&str>,
|
namespace: Option<&str>,
|
||||||
) -> Result<Option<Deployment>, Error> {
|
) -> Result<Option<Deployment>, Error> {
|
||||||
let deps: Api<Deployment> = if let Some(ns) = namespace {
|
let deps: Api<Deployment> = if let Some(ns) = namespace {
|
||||||
debug!("getting namespaced deployment");
|
|
||||||
Api::namespaced(self.client.clone(), ns)
|
Api::namespaced(self.client.clone(), ns)
|
||||||
} else {
|
} else {
|
||||||
debug!("getting default namespace deployment");
|
|
||||||
Api::default_namespaced(self.client.clone())
|
Api::default_namespaced(self.client.clone())
|
||||||
};
|
};
|
||||||
debug!("getting deployment {} in ns {}", name, namespace.unwrap());
|
|
||||||
Ok(deps.get_opt(name).await?)
|
Ok(deps.get_opt(name).await?)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -126,7 +108,7 @@ impl K8sClient {
|
|||||||
}
|
}
|
||||||
});
|
});
|
||||||
let pp = PatchParams::default();
|
let pp = PatchParams::default();
|
||||||
let scale = Patch::Merge(&patch);
|
let scale = Patch::Apply(&patch);
|
||||||
deployments.patch_scale(name, &pp, &scale).await?;
|
deployments.patch_scale(name, &pp, &scale).await?;
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
@@ -171,41 +153,6 @@ impl K8sClient {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
pub async fn wait_for_pod_ready(
|
|
||||||
&self,
|
|
||||||
pod_name: &str,
|
|
||||||
namespace: Option<&str>,
|
|
||||||
) -> Result<(), Error> {
|
|
||||||
let mut elapsed = 0;
|
|
||||||
let interval = 5; // seconds between checks
|
|
||||||
let timeout_secs = 120;
|
|
||||||
loop {
|
|
||||||
let pod = self.get_pod(pod_name, namespace).await?;
|
|
||||||
|
|
||||||
if let Some(p) = pod {
|
|
||||||
if let Some(status) = p.status {
|
|
||||||
if let Some(phase) = status.phase {
|
|
||||||
if phase.to_lowercase() == "running" {
|
|
||||||
return Ok(());
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if elapsed >= timeout_secs {
|
|
||||||
return Err(Error::Discovery(DiscoveryError::MissingResource(format!(
|
|
||||||
"'{}' in ns '{}' did not become ready within {}s",
|
|
||||||
pod_name,
|
|
||||||
namespace.unwrap(),
|
|
||||||
timeout_secs
|
|
||||||
))));
|
|
||||||
}
|
|
||||||
|
|
||||||
sleep(Duration::from_secs(interval)).await;
|
|
||||||
elapsed += interval;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Will execute a command in the first pod found that matches the specified label
|
/// Will execute a command in the first pod found that matches the specified label
|
||||||
/// '{label}={name}'
|
/// '{label}={name}'
|
||||||
pub async fn exec_app_capture_output(
|
pub async fn exec_app_capture_output(
|
||||||
@@ -472,12 +419,9 @@ impl K8sClient {
|
|||||||
.as_str()
|
.as_str()
|
||||||
.expect("couldn't get kind as str");
|
.expect("couldn't get kind as str");
|
||||||
|
|
||||||
let mut it = api_version.splitn(2, '/');
|
let split: Vec<&str> = api_version.splitn(2, "/").collect();
|
||||||
let first = it.next().unwrap();
|
let g = split[0];
|
||||||
let (g, v) = match it.next() {
|
let v = split[1];
|
||||||
Some(second) => (first, second),
|
|
||||||
None => ("", first),
|
|
||||||
};
|
|
||||||
|
|
||||||
let gvk = GroupVersionKind::gvk(g, v, kind);
|
let gvk = GroupVersionKind::gvk(g, v, kind);
|
||||||
let api_resource = ApiResource::from_gvk(&gvk);
|
let api_resource = ApiResource::from_gvk(&gvk);
|
||||||
|
|||||||
@@ -47,13 +47,6 @@ struct K8sState {
|
|||||||
message: String,
|
message: String,
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Debug, Clone)]
|
|
||||||
pub enum KubernetesDistribution {
|
|
||||||
OpenshiftFamily,
|
|
||||||
K3sFamily,
|
|
||||||
Default,
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Debug, Clone)]
|
#[derive(Debug, Clone)]
|
||||||
enum K8sSource {
|
enum K8sSource {
|
||||||
LocalK3d,
|
LocalK3d,
|
||||||
@@ -64,7 +57,6 @@ enum K8sSource {
|
|||||||
pub struct K8sAnywhereTopology {
|
pub struct K8sAnywhereTopology {
|
||||||
k8s_state: Arc<OnceCell<Option<K8sState>>>,
|
k8s_state: Arc<OnceCell<Option<K8sState>>>,
|
||||||
tenant_manager: Arc<OnceCell<K8sTenantManager>>,
|
tenant_manager: Arc<OnceCell<K8sTenantManager>>,
|
||||||
k8s_distribution: Arc<OnceCell<KubernetesDistribution>>,
|
|
||||||
config: Arc<K8sAnywhereConfig>,
|
config: Arc<K8sAnywhereConfig>,
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -170,7 +162,6 @@ impl K8sAnywhereTopology {
|
|||||||
Self {
|
Self {
|
||||||
k8s_state: Arc::new(OnceCell::new()),
|
k8s_state: Arc::new(OnceCell::new()),
|
||||||
tenant_manager: Arc::new(OnceCell::new()),
|
tenant_manager: Arc::new(OnceCell::new()),
|
||||||
k8s_distribution: Arc::new(OnceCell::new()),
|
|
||||||
config: Arc::new(K8sAnywhereConfig::from_env()),
|
config: Arc::new(K8sAnywhereConfig::from_env()),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -179,42 +170,10 @@ impl K8sAnywhereTopology {
|
|||||||
Self {
|
Self {
|
||||||
k8s_state: Arc::new(OnceCell::new()),
|
k8s_state: Arc::new(OnceCell::new()),
|
||||||
tenant_manager: Arc::new(OnceCell::new()),
|
tenant_manager: Arc::new(OnceCell::new()),
|
||||||
k8s_distribution: Arc::new(OnceCell::new()),
|
|
||||||
config: Arc::new(config),
|
config: Arc::new(config),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
pub async fn get_k8s_distribution(&self) -> Result<&KubernetesDistribution, PreparationError> {
|
|
||||||
self.k8s_distribution
|
|
||||||
.get_or_try_init(async || {
|
|
||||||
let client = self.k8s_client().await.unwrap();
|
|
||||||
|
|
||||||
let discovery = client.discovery().await.map_err(|e| {
|
|
||||||
PreparationError::new(format!("Could not discover API groups: {}", e))
|
|
||||||
})?;
|
|
||||||
|
|
||||||
let version = client.get_apiserver_version().await.map_err(|e| {
|
|
||||||
PreparationError::new(format!("Could not get server version: {}", e))
|
|
||||||
})?;
|
|
||||||
|
|
||||||
// OpenShift / OKD
|
|
||||||
if discovery
|
|
||||||
.groups()
|
|
||||||
.any(|g| g.name() == "project.openshift.io")
|
|
||||||
{
|
|
||||||
return Ok(KubernetesDistribution::OpenshiftFamily);
|
|
||||||
}
|
|
||||||
|
|
||||||
// K3d / K3s
|
|
||||||
if version.git_version.contains("k3s") {
|
|
||||||
return Ok(KubernetesDistribution::K3sFamily);
|
|
||||||
}
|
|
||||||
|
|
||||||
return Ok(KubernetesDistribution::Default);
|
|
||||||
})
|
|
||||||
.await
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn get_cluster_observability_operator_prometheus_application_score(
|
async fn get_cluster_observability_operator_prometheus_application_score(
|
||||||
&self,
|
&self,
|
||||||
sender: RHOBObservability,
|
sender: RHOBObservability,
|
||||||
|
|||||||
@@ -28,7 +28,13 @@ pub trait LoadBalancer: Send + Sync {
|
|||||||
&self,
|
&self,
|
||||||
service: &LoadBalancerService,
|
service: &LoadBalancerService,
|
||||||
) -> Result<(), ExecutorError> {
|
) -> Result<(), ExecutorError> {
|
||||||
self.add_service(service).await?;
|
debug!(
|
||||||
|
"Listing LoadBalancer services {:?}",
|
||||||
|
self.list_services().await
|
||||||
|
);
|
||||||
|
if !self.list_services().await.contains(service) {
|
||||||
|
self.add_service(service).await?;
|
||||||
|
}
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -178,8 +178,6 @@ impl FromStr for DnsRecordType {
|
|||||||
|
|
||||||
#[async_trait]
|
#[async_trait]
|
||||||
pub trait Switch: Send + Sync {
|
pub trait Switch: Send + Sync {
|
||||||
async fn setup_switch(&self) -> Result<(), SwitchError>;
|
|
||||||
|
|
||||||
async fn get_port_for_mac_address(
|
async fn get_port_for_mac_address(
|
||||||
&self,
|
&self,
|
||||||
mac_address: &MacAddress,
|
mac_address: &MacAddress,
|
||||||
@@ -226,18 +224,6 @@ impl Error for SwitchError {}
|
|||||||
|
|
||||||
#[async_trait]
|
#[async_trait]
|
||||||
pub trait SwitchClient: Send + Sync {
|
pub trait SwitchClient: Send + Sync {
|
||||||
/// Executes essential, idempotent, one-time initial configuration steps.
|
|
||||||
///
|
|
||||||
/// This is an opiniated procedure that setups a switch to provide high availability
|
|
||||||
/// capabilities as decided by the NationTech team.
|
|
||||||
///
|
|
||||||
/// This includes tasks like enabling switchport for all interfaces
|
|
||||||
/// except the ones intended for Fabric Networking, etc.
|
|
||||||
///
|
|
||||||
/// The implementation must ensure the operation is **idempotent** (safe to run multiple times)
|
|
||||||
/// and that it doesn't break existing configurations.
|
|
||||||
async fn setup(&self) -> Result<(), SwitchError>;
|
|
||||||
|
|
||||||
async fn find_port(
|
async fn find_port(
|
||||||
&self,
|
&self,
|
||||||
mac_address: &MacAddress,
|
mac_address: &MacAddress,
|
||||||
|
|||||||
@@ -1,11 +1,10 @@
|
|||||||
use async_trait::async_trait;
|
use async_trait::async_trait;
|
||||||
use brocade::{BrocadeClient, BrocadeOptions, InterSwitchLink, InterfaceStatus, PortOperatingMode};
|
use brocade::{BrocadeClient, BrocadeOptions};
|
||||||
use harmony_secret::Secret;
|
use harmony_secret::Secret;
|
||||||
use harmony_types::{
|
use harmony_types::{
|
||||||
net::{IpAddress, MacAddress},
|
net::{IpAddress, MacAddress},
|
||||||
switch::{PortDeclaration, PortLocation},
|
switch::{PortDeclaration, PortLocation},
|
||||||
};
|
};
|
||||||
use option_ext::OptionExt;
|
|
||||||
use serde::{Deserialize, Serialize};
|
use serde::{Deserialize, Serialize};
|
||||||
|
|
||||||
use crate::topology::{SwitchClient, SwitchError};
|
use crate::topology::{SwitchClient, SwitchError};
|
||||||
@@ -28,52 +27,13 @@ impl BrocadeSwitchClient {
|
|||||||
|
|
||||||
#[async_trait]
|
#[async_trait]
|
||||||
impl SwitchClient for BrocadeSwitchClient {
|
impl SwitchClient for BrocadeSwitchClient {
|
||||||
async fn setup(&self) -> Result<(), SwitchError> {
|
|
||||||
let stack_topology = self
|
|
||||||
.brocade
|
|
||||||
.get_stack_topology()
|
|
||||||
.await
|
|
||||||
.map_err(|e| SwitchError::new(e.to_string()))?;
|
|
||||||
|
|
||||||
let interfaces = self
|
|
||||||
.brocade
|
|
||||||
.get_interfaces()
|
|
||||||
.await
|
|
||||||
.map_err(|e| SwitchError::new(e.to_string()))?;
|
|
||||||
|
|
||||||
let interfaces: Vec<(String, PortOperatingMode)> = interfaces
|
|
||||||
.into_iter()
|
|
||||||
.filter(|interface| {
|
|
||||||
interface.operating_mode.is_none() && interface.status == InterfaceStatus::Connected
|
|
||||||
})
|
|
||||||
.filter(|interface| {
|
|
||||||
!stack_topology.iter().any(|link: &InterSwitchLink| {
|
|
||||||
link.local_port == interface.port_location
|
|
||||||
|| link.remote_port.contains(&interface.port_location)
|
|
||||||
})
|
|
||||||
})
|
|
||||||
.map(|interface| (interface.name.clone(), PortOperatingMode::Access))
|
|
||||||
.collect();
|
|
||||||
|
|
||||||
if interfaces.is_empty() {
|
|
||||||
return Ok(());
|
|
||||||
}
|
|
||||||
|
|
||||||
self.brocade
|
|
||||||
.configure_interfaces(interfaces)
|
|
||||||
.await
|
|
||||||
.map_err(|e| SwitchError::new(e.to_string()))?;
|
|
||||||
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn find_port(
|
async fn find_port(
|
||||||
&self,
|
&self,
|
||||||
mac_address: &MacAddress,
|
mac_address: &MacAddress,
|
||||||
) -> Result<Option<PortLocation>, SwitchError> {
|
) -> Result<Option<PortLocation>, SwitchError> {
|
||||||
let table = self
|
let table = self
|
||||||
.brocade
|
.brocade
|
||||||
.get_mac_address_table()
|
.show_mac_address_table()
|
||||||
.await
|
.await
|
||||||
.map_err(|e| SwitchError::new(format!("{e}")))?;
|
.map_err(|e| SwitchError::new(format!("{e}")))?;
|
||||||
|
|
||||||
@@ -119,267 +79,3 @@ pub struct BrocadeSwitchAuth {
|
|||||||
pub username: String,
|
pub username: String,
|
||||||
pub password: String,
|
pub password: String,
|
||||||
}
|
}
|
||||||
|
|
||||||
#[cfg(test)]
|
|
||||||
mod tests {
|
|
||||||
use std::sync::{Arc, Mutex};
|
|
||||||
|
|
||||||
use assertor::*;
|
|
||||||
use async_trait::async_trait;
|
|
||||||
use brocade::{
|
|
||||||
BrocadeClient, BrocadeInfo, Error, InterSwitchLink, InterfaceInfo, InterfaceStatus,
|
|
||||||
InterfaceType, MacAddressEntry, PortChannelId, PortOperatingMode,
|
|
||||||
};
|
|
||||||
use harmony_types::switch::PortLocation;
|
|
||||||
|
|
||||||
use crate::{infra::brocade::BrocadeSwitchClient, topology::SwitchClient};
|
|
||||||
|
|
||||||
#[tokio::test]
|
|
||||||
async fn setup_should_configure_ethernet_interfaces_as_access_ports() {
|
|
||||||
let first_interface = given_interface()
|
|
||||||
.with_port_location(PortLocation(1, 0, 1))
|
|
||||||
.build();
|
|
||||||
let second_interface = given_interface()
|
|
||||||
.with_port_location(PortLocation(1, 0, 4))
|
|
||||||
.build();
|
|
||||||
let brocade = Box::new(FakeBrocadeClient::new(
|
|
||||||
vec![],
|
|
||||||
vec![first_interface.clone(), second_interface.clone()],
|
|
||||||
));
|
|
||||||
let client = BrocadeSwitchClient {
|
|
||||||
brocade: brocade.clone(),
|
|
||||||
};
|
|
||||||
|
|
||||||
client.setup().await.unwrap();
|
|
||||||
|
|
||||||
let configured_interfaces = brocade.configured_interfaces.lock().unwrap();
|
|
||||||
assert_that!(*configured_interfaces).contains_exactly(vec![
|
|
||||||
(first_interface.name.clone(), PortOperatingMode::Access),
|
|
||||||
(second_interface.name.clone(), PortOperatingMode::Access),
|
|
||||||
]);
|
|
||||||
}
|
|
||||||
|
|
||||||
#[tokio::test]
|
|
||||||
async fn setup_with_an_already_configured_interface_should_skip_configuration() {
|
|
||||||
let brocade = Box::new(FakeBrocadeClient::new(
|
|
||||||
vec![],
|
|
||||||
vec![
|
|
||||||
given_interface()
|
|
||||||
.with_operating_mode(Some(PortOperatingMode::Access))
|
|
||||||
.build(),
|
|
||||||
],
|
|
||||||
));
|
|
||||||
let client = BrocadeSwitchClient {
|
|
||||||
brocade: brocade.clone(),
|
|
||||||
};
|
|
||||||
|
|
||||||
client.setup().await.unwrap();
|
|
||||||
|
|
||||||
let configured_interfaces = brocade.configured_interfaces.lock().unwrap();
|
|
||||||
assert_that!(*configured_interfaces).is_empty();
|
|
||||||
}
|
|
||||||
|
|
||||||
#[tokio::test]
|
|
||||||
async fn setup_with_a_disconnected_interface_should_skip_configuration() {
|
|
||||||
let brocade = Box::new(FakeBrocadeClient::new(
|
|
||||||
vec![],
|
|
||||||
vec![
|
|
||||||
given_interface()
|
|
||||||
.with_status(InterfaceStatus::SfpAbsent)
|
|
||||||
.build(),
|
|
||||||
given_interface()
|
|
||||||
.with_status(InterfaceStatus::NotConnected)
|
|
||||||
.build(),
|
|
||||||
],
|
|
||||||
));
|
|
||||||
let client = BrocadeSwitchClient {
|
|
||||||
brocade: brocade.clone(),
|
|
||||||
};
|
|
||||||
|
|
||||||
client.setup().await.unwrap();
|
|
||||||
|
|
||||||
let configured_interfaces = brocade.configured_interfaces.lock().unwrap();
|
|
||||||
assert_that!(*configured_interfaces).is_empty();
|
|
||||||
}
|
|
||||||
|
|
||||||
#[tokio::test]
|
|
||||||
async fn setup_with_inter_switch_links_should_not_configure_interfaces_used_to_form_stack() {
|
|
||||||
let brocade = Box::new(FakeBrocadeClient::new(
|
|
||||||
vec![
|
|
||||||
given_inter_switch_link()
|
|
||||||
.between(PortLocation(1, 0, 1), PortLocation(2, 0, 1))
|
|
||||||
.build(),
|
|
||||||
given_inter_switch_link()
|
|
||||||
.between(PortLocation(2, 0, 2), PortLocation(3, 0, 1))
|
|
||||||
.build(),
|
|
||||||
],
|
|
||||||
vec![
|
|
||||||
given_interface()
|
|
||||||
.with_port_location(PortLocation(1, 0, 1))
|
|
||||||
.build(),
|
|
||||||
given_interface()
|
|
||||||
.with_port_location(PortLocation(2, 0, 1))
|
|
||||||
.build(),
|
|
||||||
given_interface()
|
|
||||||
.with_port_location(PortLocation(3, 0, 1))
|
|
||||||
.build(),
|
|
||||||
],
|
|
||||||
));
|
|
||||||
let client = BrocadeSwitchClient {
|
|
||||||
brocade: brocade.clone(),
|
|
||||||
};
|
|
||||||
|
|
||||||
client.setup().await.unwrap();
|
|
||||||
|
|
||||||
let configured_interfaces = brocade.configured_interfaces.lock().unwrap();
|
|
||||||
assert_that!(*configured_interfaces).is_empty();
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Clone)]
|
|
||||||
struct FakeBrocadeClient {
|
|
||||||
stack_topology: Vec<InterSwitchLink>,
|
|
||||||
interfaces: Vec<InterfaceInfo>,
|
|
||||||
configured_interfaces: Arc<Mutex<Vec<(String, PortOperatingMode)>>>,
|
|
||||||
}
|
|
||||||
|
|
||||||
#[async_trait]
|
|
||||||
impl BrocadeClient for FakeBrocadeClient {
|
|
||||||
async fn version(&self) -> Result<BrocadeInfo, Error> {
|
|
||||||
todo!()
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn get_mac_address_table(&self) -> Result<Vec<MacAddressEntry>, Error> {
|
|
||||||
todo!()
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn get_stack_topology(&self) -> Result<Vec<InterSwitchLink>, Error> {
|
|
||||||
Ok(self.stack_topology.clone())
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn get_interfaces(&self) -> Result<Vec<InterfaceInfo>, Error> {
|
|
||||||
Ok(self.interfaces.clone())
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn configure_interfaces(
|
|
||||||
&self,
|
|
||||||
interfaces: Vec<(String, PortOperatingMode)>,
|
|
||||||
) -> Result<(), Error> {
|
|
||||||
let mut configured_interfaces = self.configured_interfaces.lock().unwrap();
|
|
||||||
*configured_interfaces = interfaces;
|
|
||||||
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn find_available_channel_id(&self) -> Result<PortChannelId, Error> {
|
|
||||||
todo!()
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn create_port_channel(
|
|
||||||
&self,
|
|
||||||
_channel_id: PortChannelId,
|
|
||||||
_channel_name: &str,
|
|
||||||
_ports: &[PortLocation],
|
|
||||||
) -> Result<(), Error> {
|
|
||||||
todo!()
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn clear_port_channel(&self, _channel_name: &str) -> Result<(), Error> {
|
|
||||||
todo!()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl FakeBrocadeClient {
|
|
||||||
fn new(stack_topology: Vec<InterSwitchLink>, interfaces: Vec<InterfaceInfo>) -> Self {
|
|
||||||
Self {
|
|
||||||
stack_topology,
|
|
||||||
interfaces,
|
|
||||||
configured_interfaces: Arc::new(Mutex::new(vec![])),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
struct InterfaceInfoBuilder {
|
|
||||||
port_location: Option<PortLocation>,
|
|
||||||
interface_type: Option<InterfaceType>,
|
|
||||||
operating_mode: Option<PortOperatingMode>,
|
|
||||||
status: Option<InterfaceStatus>,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl InterfaceInfoBuilder {
|
|
||||||
fn build(&self) -> InterfaceInfo {
|
|
||||||
let interface_type = self
|
|
||||||
.interface_type
|
|
||||||
.clone()
|
|
||||||
.unwrap_or(InterfaceType::Ethernet("TenGigabitEthernet".into()));
|
|
||||||
let port_location = self.port_location.clone().unwrap_or(PortLocation(1, 0, 1));
|
|
||||||
let name = format!("{interface_type} {port_location}");
|
|
||||||
let status = self.status.clone().unwrap_or(InterfaceStatus::Connected);
|
|
||||||
|
|
||||||
InterfaceInfo {
|
|
||||||
name,
|
|
||||||
port_location,
|
|
||||||
interface_type,
|
|
||||||
operating_mode: self.operating_mode.clone(),
|
|
||||||
status,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fn with_port_location(self, port_location: PortLocation) -> Self {
|
|
||||||
Self {
|
|
||||||
port_location: Some(port_location),
|
|
||||||
..self
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fn with_operating_mode(self, operating_mode: Option<PortOperatingMode>) -> Self {
|
|
||||||
Self {
|
|
||||||
operating_mode,
|
|
||||||
..self
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fn with_status(self, status: InterfaceStatus) -> Self {
|
|
||||||
Self {
|
|
||||||
status: Some(status),
|
|
||||||
..self
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
struct InterSwitchLinkBuilder {
|
|
||||||
link: Option<(PortLocation, PortLocation)>,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl InterSwitchLinkBuilder {
|
|
||||||
fn build(&self) -> InterSwitchLink {
|
|
||||||
let link = self
|
|
||||||
.link
|
|
||||||
.clone()
|
|
||||||
.unwrap_or((PortLocation(1, 0, 1), PortLocation(2, 0, 1)));
|
|
||||||
|
|
||||||
InterSwitchLink {
|
|
||||||
local_port: link.0,
|
|
||||||
remote_port: Some(link.1),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fn between(self, local_port: PortLocation, remote_port: PortLocation) -> Self {
|
|
||||||
Self {
|
|
||||||
link: Some((local_port, remote_port)),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fn given_interface() -> InterfaceInfoBuilder {
|
|
||||||
InterfaceInfoBuilder {
|
|
||||||
port_location: None,
|
|
||||||
interface_type: None,
|
|
||||||
operating_mode: None,
|
|
||||||
status: None,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fn given_inter_switch_link() -> InterSwitchLinkBuilder {
|
|
||||||
InterSwitchLinkBuilder { link: None }
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|||||||
@@ -26,13 +26,19 @@ impl LoadBalancer for OPNSenseFirewall {
|
|||||||
}
|
}
|
||||||
|
|
||||||
async fn add_service(&self, service: &LoadBalancerService) -> Result<(), ExecutorError> {
|
async fn add_service(&self, service: &LoadBalancerService) -> Result<(), ExecutorError> {
|
||||||
|
warn!(
|
||||||
|
"TODO : the current implementation does not check / cleanup / merge with existing haproxy services properly. Make sure to manually verify that the configuration is correct after executing any operation here"
|
||||||
|
);
|
||||||
let mut config = self.opnsense_config.write().await;
|
let mut config = self.opnsense_config.write().await;
|
||||||
let mut load_balancer = config.load_balancer();
|
|
||||||
|
|
||||||
let (frontend, backend, servers, healthcheck) =
|
let (frontend, backend, servers, healthcheck) =
|
||||||
harmony_load_balancer_service_to_haproxy_xml(service);
|
harmony_load_balancer_service_to_haproxy_xml(service);
|
||||||
|
let mut load_balancer = config.load_balancer();
|
||||||
load_balancer.configure_service(frontend, backend, servers, healthcheck);
|
load_balancer.add_backend(backend);
|
||||||
|
load_balancer.add_frontend(frontend);
|
||||||
|
load_balancer.add_servers(servers);
|
||||||
|
if let Some(healthcheck) = healthcheck {
|
||||||
|
load_balancer.add_healthcheck(healthcheck);
|
||||||
|
}
|
||||||
|
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
@@ -100,7 +106,7 @@ pub(crate) fn haproxy_xml_config_to_harmony_loadbalancer(
|
|||||||
.backends
|
.backends
|
||||||
.backends
|
.backends
|
||||||
.iter()
|
.iter()
|
||||||
.find(|b| Some(b.uuid.clone()) == frontend.default_backend);
|
.find(|b| b.uuid == frontend.default_backend);
|
||||||
|
|
||||||
let mut health_check = None;
|
let mut health_check = None;
|
||||||
match matching_backend {
|
match matching_backend {
|
||||||
@@ -110,7 +116,8 @@ pub(crate) fn haproxy_xml_config_to_harmony_loadbalancer(
|
|||||||
}
|
}
|
||||||
None => {
|
None => {
|
||||||
warn!(
|
warn!(
|
||||||
"HAProxy config could not find a matching backend for frontend {frontend:?}"
|
"HAProxy config could not find a matching backend for frontend {:?}",
|
||||||
|
frontend
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -145,11 +152,11 @@ pub(crate) fn get_servers_for_backend(
|
|||||||
.servers
|
.servers
|
||||||
.iter()
|
.iter()
|
||||||
.filter_map(|server| {
|
.filter_map(|server| {
|
||||||
let address = server.address.clone()?;
|
|
||||||
let port = server.port?;
|
|
||||||
|
|
||||||
if backend_servers.contains(&server.uuid.as_str()) {
|
if backend_servers.contains(&server.uuid.as_str()) {
|
||||||
return Some(BackendServer { address, port });
|
return Some(BackendServer {
|
||||||
|
address: server.address.clone(),
|
||||||
|
port: server.port,
|
||||||
|
});
|
||||||
}
|
}
|
||||||
None
|
None
|
||||||
})
|
})
|
||||||
@@ -340,7 +347,7 @@ pub(crate) fn harmony_load_balancer_service_to_haproxy_xml(
|
|||||||
name: format!("frontend_{}", service.listening_port),
|
name: format!("frontend_{}", service.listening_port),
|
||||||
bind: service.listening_port.to_string(),
|
bind: service.listening_port.to_string(),
|
||||||
mode: "tcp".to_string(), // TODO do not depend on health check here
|
mode: "tcp".to_string(), // TODO do not depend on health check here
|
||||||
default_backend: Some(backend.uuid.clone()),
|
default_backend: backend.uuid.clone(),
|
||||||
..Default::default()
|
..Default::default()
|
||||||
};
|
};
|
||||||
info!("HAPRoxy frontend and backend mode currently hardcoded to tcp");
|
info!("HAPRoxy frontend and backend mode currently hardcoded to tcp");
|
||||||
@@ -354,8 +361,8 @@ fn server_to_haproxy_server(server: &BackendServer) -> HAProxyServer {
|
|||||||
uuid: Uuid::new_v4().to_string(),
|
uuid: Uuid::new_v4().to_string(),
|
||||||
name: format!("{}_{}", &server.address, &server.port),
|
name: format!("{}_{}", &server.address, &server.port),
|
||||||
enabled: 1,
|
enabled: 1,
|
||||||
address: Some(server.address.clone()),
|
address: server.address.clone(),
|
||||||
port: Some(server.port),
|
port: server.port,
|
||||||
mode: "active".to_string(),
|
mode: "active".to_string(),
|
||||||
server_type: "static".to_string(),
|
server_type: "static".to_string(),
|
||||||
..Default::default()
|
..Default::default()
|
||||||
@@ -378,8 +385,8 @@ mod tests {
|
|||||||
let mut haproxy = HAProxy::default();
|
let mut haproxy = HAProxy::default();
|
||||||
let server = HAProxyServer {
|
let server = HAProxyServer {
|
||||||
uuid: "server1".to_string(),
|
uuid: "server1".to_string(),
|
||||||
address: Some("192.168.1.1".to_string()),
|
address: "192.168.1.1".to_string(),
|
||||||
port: Some(80),
|
port: 80,
|
||||||
..Default::default()
|
..Default::default()
|
||||||
};
|
};
|
||||||
haproxy.servers.servers.push(server);
|
haproxy.servers.servers.push(server);
|
||||||
@@ -404,8 +411,8 @@ mod tests {
|
|||||||
let mut haproxy = HAProxy::default();
|
let mut haproxy = HAProxy::default();
|
||||||
let server = HAProxyServer {
|
let server = HAProxyServer {
|
||||||
uuid: "server1".to_string(),
|
uuid: "server1".to_string(),
|
||||||
address: Some("192.168.1.1".to_string()),
|
address: "192.168.1.1".to_string(),
|
||||||
port: Some(80),
|
port: 80,
|
||||||
..Default::default()
|
..Default::default()
|
||||||
};
|
};
|
||||||
haproxy.servers.servers.push(server);
|
haproxy.servers.servers.push(server);
|
||||||
@@ -424,8 +431,8 @@ mod tests {
|
|||||||
let mut haproxy = HAProxy::default();
|
let mut haproxy = HAProxy::default();
|
||||||
let server = HAProxyServer {
|
let server = HAProxyServer {
|
||||||
uuid: "server1".to_string(),
|
uuid: "server1".to_string(),
|
||||||
address: Some("192.168.1.1".to_string()),
|
address: "192.168.1.1".to_string(),
|
||||||
port: Some(80),
|
port: 80,
|
||||||
..Default::default()
|
..Default::default()
|
||||||
};
|
};
|
||||||
haproxy.servers.servers.push(server);
|
haproxy.servers.servers.push(server);
|
||||||
@@ -446,16 +453,16 @@ mod tests {
|
|||||||
let mut haproxy = HAProxy::default();
|
let mut haproxy = HAProxy::default();
|
||||||
let server = HAProxyServer {
|
let server = HAProxyServer {
|
||||||
uuid: "server1".to_string(),
|
uuid: "server1".to_string(),
|
||||||
address: Some("some-hostname.test.mcd".to_string()),
|
address: "some-hostname.test.mcd".to_string(),
|
||||||
port: Some(80),
|
port: 80,
|
||||||
..Default::default()
|
..Default::default()
|
||||||
};
|
};
|
||||||
haproxy.servers.servers.push(server);
|
haproxy.servers.servers.push(server);
|
||||||
|
|
||||||
let server = HAProxyServer {
|
let server = HAProxyServer {
|
||||||
uuid: "server2".to_string(),
|
uuid: "server2".to_string(),
|
||||||
address: Some("192.168.1.2".to_string()),
|
address: "192.168.1.2".to_string(),
|
||||||
port: Some(8080),
|
port: 8080,
|
||||||
..Default::default()
|
..Default::default()
|
||||||
};
|
};
|
||||||
haproxy.servers.servers.push(server);
|
haproxy.servers.servers.push(server);
|
||||||
|
|||||||
@@ -1,5 +1,4 @@
|
|||||||
use async_trait::async_trait;
|
use async_trait::async_trait;
|
||||||
use harmony_macros::hurl;
|
|
||||||
use kube::{Api, api::GroupVersionKind};
|
use kube::{Api, api::GroupVersionKind};
|
||||||
use log::{debug, warn};
|
use log::{debug, warn};
|
||||||
use non_blank_string_rs::NonBlankString;
|
use non_blank_string_rs::NonBlankString;
|
||||||
@@ -1052,7 +1051,7 @@ commitServer:
|
|||||||
install_only: false,
|
install_only: false,
|
||||||
repository: Some(HelmRepository::new(
|
repository: Some(HelmRepository::new(
|
||||||
"argo".to_string(),
|
"argo".to_string(),
|
||||||
hurl!("https://argoproj.github.io/argo-helm"),
|
url::Url::parse("https://argoproj.github.io/argo-helm").unwrap(),
|
||||||
true,
|
true,
|
||||||
)),
|
)),
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,209 +0,0 @@
|
|||||||
use std::sync::Arc;
|
|
||||||
|
|
||||||
use async_trait::async_trait;
|
|
||||||
use harmony_types::id::Id;
|
|
||||||
use kube::{CustomResource, api::ObjectMeta};
|
|
||||||
use schemars::JsonSchema;
|
|
||||||
use serde::{Deserialize, Serialize};
|
|
||||||
|
|
||||||
use crate::{
|
|
||||||
data::Version,
|
|
||||||
interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
|
|
||||||
inventory::Inventory,
|
|
||||||
score::Score,
|
|
||||||
topology::{K8sclient, Topology, k8s::K8sClient},
|
|
||||||
};
|
|
||||||
|
|
||||||
#[derive(Clone, Debug, Serialize)]
|
|
||||||
pub struct ClusterIssuerScore {
|
|
||||||
email: String,
|
|
||||||
server: String,
|
|
||||||
issuer_name: String,
|
|
||||||
namespace: String,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<T: Topology + K8sclient> Score<T> for ClusterIssuerScore {
|
|
||||||
fn name(&self) -> String {
|
|
||||||
"ClusterIssuerScore".to_string()
|
|
||||||
}
|
|
||||||
|
|
||||||
#[doc(hidden)]
|
|
||||||
fn create_interpret(&self) -> Box<dyn Interpret<T>> {
|
|
||||||
Box::new(ClusterIssuerInterpret {
|
|
||||||
score: self.clone(),
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Debug, Clone)]
|
|
||||||
pub struct ClusterIssuerInterpret {
|
|
||||||
score: ClusterIssuerScore,
|
|
||||||
}
|
|
||||||
|
|
||||||
#[async_trait]
|
|
||||||
impl<T: Topology + K8sclient> Interpret<T> for ClusterIssuerInterpret {
|
|
||||||
async fn execute(
|
|
||||||
&self,
|
|
||||||
_inventory: &Inventory,
|
|
||||||
topology: &T,
|
|
||||||
) -> Result<Outcome, InterpretError> {
|
|
||||||
self.apply_cluster_issuer(topology.k8s_client().await.unwrap())
|
|
||||||
.await
|
|
||||||
}
|
|
||||||
|
|
||||||
fn get_name(&self) -> InterpretName {
|
|
||||||
InterpretName::Custom("ClusterIssuer")
|
|
||||||
}
|
|
||||||
|
|
||||||
fn get_version(&self) -> Version {
|
|
||||||
todo!()
|
|
||||||
}
|
|
||||||
|
|
||||||
fn get_status(&self) -> InterpretStatus {
|
|
||||||
todo!()
|
|
||||||
}
|
|
||||||
|
|
||||||
fn get_children(&self) -> Vec<Id> {
|
|
||||||
todo!()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl ClusterIssuerInterpret {
|
|
||||||
async fn validate_cert_manager(
|
|
||||||
&self,
|
|
||||||
client: &Arc<K8sClient>,
|
|
||||||
) -> Result<Outcome, InterpretError> {
|
|
||||||
let cert_manager = "cert-manager".to_string();
|
|
||||||
let operator_namespace = "openshift-operators".to_string();
|
|
||||||
match client
|
|
||||||
.get_deployment(&cert_manager, Some(&operator_namespace))
|
|
||||||
.await
|
|
||||||
{
|
|
||||||
Ok(Some(deployment)) => {
|
|
||||||
if let Some(status) = deployment.status {
|
|
||||||
let ready_count = status.ready_replicas.unwrap_or(0);
|
|
||||||
if ready_count >= 1 {
|
|
||||||
return Ok(Outcome::success(format!(
|
|
||||||
"'{}' is ready with {} replica(s).",
|
|
||||||
&cert_manager, ready_count
|
|
||||||
)));
|
|
||||||
} else {
|
|
||||||
return Err(InterpretError::new(
|
|
||||||
"cert-manager operator not ready in cluster".to_string(),
|
|
||||||
));
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
Err(InterpretError::new(format!(
|
|
||||||
"failed to get deployment status {} in ns {}",
|
|
||||||
&cert_manager, &operator_namespace
|
|
||||||
)))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
Ok(None) => Err(InterpretError::new(format!(
|
|
||||||
"Deployment '{}' not found in namespace '{}'.",
|
|
||||||
&cert_manager, &operator_namespace
|
|
||||||
))),
|
|
||||||
Err(e) => Err(InterpretError::new(format!(
|
|
||||||
"Failed to query for deployment '{}': {}",
|
|
||||||
&cert_manager, e
|
|
||||||
))),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fn build_cluster_issuer(&self) -> Result<ClusterIssuer, InterpretError> {
|
|
||||||
let issuer_name = &self.score.issuer_name;
|
|
||||||
let email = &self.score.email;
|
|
||||||
let server = &self.score.server;
|
|
||||||
let namespace = &self.score.namespace;
|
|
||||||
let cluster_issuer = ClusterIssuer {
|
|
||||||
metadata: ObjectMeta {
|
|
||||||
name: Some(issuer_name.to_string()),
|
|
||||||
namespace: Some(namespace.to_string()),
|
|
||||||
..Default::default()
|
|
||||||
},
|
|
||||||
spec: ClusterIssuerSpec {
|
|
||||||
acme: AcmeSpec {
|
|
||||||
email: email.to_string(),
|
|
||||||
private_key_secret_ref: PrivateKeySecretRef {
|
|
||||||
name: issuer_name.to_string(),
|
|
||||||
},
|
|
||||||
server: server.to_string(),
|
|
||||||
solvers: vec![SolverSpec {
|
|
||||||
http01: Some(Http01Solver {
|
|
||||||
ingress: Http01Ingress {
|
|
||||||
class: "nginx".to_string(),
|
|
||||||
},
|
|
||||||
}),
|
|
||||||
}],
|
|
||||||
},
|
|
||||||
},
|
|
||||||
};
|
|
||||||
|
|
||||||
Ok(cluster_issuer)
|
|
||||||
}
|
|
||||||
|
|
||||||
pub async fn apply_cluster_issuer(
|
|
||||||
&self,
|
|
||||||
client: Arc<K8sClient>,
|
|
||||||
) -> Result<Outcome, InterpretError> {
|
|
||||||
let namespace = self.score.namespace.clone();
|
|
||||||
self.validate_cert_manager(&client).await?;
|
|
||||||
let cluster_issuer = self.build_cluster_issuer().unwrap();
|
|
||||||
client
|
|
||||||
.apply_yaml(
|
|
||||||
&serde_yaml::to_value(cluster_issuer).unwrap(),
|
|
||||||
Some(&namespace),
|
|
||||||
)
|
|
||||||
.await?;
|
|
||||||
Ok(Outcome::success(format!(
|
|
||||||
"successfully deployed cluster operator: {} in namespace: {}",
|
|
||||||
self.score.issuer_name, self.score.namespace
|
|
||||||
)))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(CustomResource, Deserialize, Serialize, Clone, Debug, JsonSchema)]
|
|
||||||
#[kube(
|
|
||||||
group = "cert-manager.io",
|
|
||||||
version = "v1",
|
|
||||||
kind = "ClusterIssuer",
|
|
||||||
plural = "clusterissuers"
|
|
||||||
)]
|
|
||||||
#[serde(rename_all = "camelCase")]
|
|
||||||
pub struct ClusterIssuerSpec {
|
|
||||||
pub acme: AcmeSpec,
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Deserialize, Serialize, Clone, Debug, JsonSchema)]
|
|
||||||
#[serde(rename_all = "camelCase")]
|
|
||||||
pub struct AcmeSpec {
|
|
||||||
pub email: String,
|
|
||||||
pub private_key_secret_ref: PrivateKeySecretRef,
|
|
||||||
pub server: String,
|
|
||||||
pub solvers: Vec<SolverSpec>,
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Deserialize, Serialize, Clone, Debug, JsonSchema)]
|
|
||||||
#[serde(rename_all = "camelCase")]
|
|
||||||
pub struct PrivateKeySecretRef {
|
|
||||||
pub name: String,
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Deserialize, Serialize, Clone, Debug, JsonSchema)]
|
|
||||||
#[serde(rename_all = "camelCase")]
|
|
||||||
pub struct SolverSpec {
|
|
||||||
pub http01: Option<Http01Solver>,
|
|
||||||
// Other solver types (e.g., dns01) would go here as Options
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Deserialize, Serialize, Clone, Debug, JsonSchema)]
|
|
||||||
#[serde(rename_all = "camelCase")]
|
|
||||||
pub struct Http01Solver {
|
|
||||||
pub ingress: Http01Ingress,
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Deserialize, Serialize, Clone, Debug, JsonSchema)]
|
|
||||||
#[serde(rename_all = "camelCase")]
|
|
||||||
pub struct Http01Ingress {
|
|
||||||
pub class: String,
|
|
||||||
}
|
|
||||||
@@ -1,6 +1,5 @@
|
|||||||
use std::{collections::HashMap, str::FromStr};
|
use std::{collections::HashMap, str::FromStr};
|
||||||
|
|
||||||
use harmony_macros::hurl;
|
|
||||||
use non_blank_string_rs::NonBlankString;
|
use non_blank_string_rs::NonBlankString;
|
||||||
use serde::Serialize;
|
use serde::Serialize;
|
||||||
use url::Url;
|
use url::Url;
|
||||||
@@ -34,7 +33,7 @@ impl<T: Topology + HelmCommand> Score<T> for CertManagerHelmScore {
|
|||||||
install_only: true,
|
install_only: true,
|
||||||
repository: Some(HelmRepository::new(
|
repository: Some(HelmRepository::new(
|
||||||
"jetstack".to_string(),
|
"jetstack".to_string(),
|
||||||
hurl!("https://charts.jetstack.io"),
|
Url::parse("https://charts.jetstack.io").unwrap(),
|
||||||
true,
|
true,
|
||||||
)),
|
)),
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,3 +1,2 @@
|
|||||||
pub mod cluster_issuer;
|
|
||||||
mod helm;
|
mod helm;
|
||||||
pub use helm::*;
|
pub use helm::*;
|
||||||
|
|||||||
@@ -5,7 +5,6 @@ use crate::score::Score;
|
|||||||
use crate::topology::{HelmCommand, Topology};
|
use crate::topology::{HelmCommand, Topology};
|
||||||
use async_trait::async_trait;
|
use async_trait::async_trait;
|
||||||
use harmony_types::id::Id;
|
use harmony_types::id::Id;
|
||||||
use harmony_types::net::Url;
|
|
||||||
use helm_wrapper_rs;
|
use helm_wrapper_rs;
|
||||||
use helm_wrapper_rs::blocking::{DefaultHelmExecutor, HelmExecutor};
|
use helm_wrapper_rs::blocking::{DefaultHelmExecutor, HelmExecutor};
|
||||||
use log::{debug, info, warn};
|
use log::{debug, info, warn};
|
||||||
@@ -16,6 +15,7 @@ use std::path::Path;
|
|||||||
use std::process::{Command, Output, Stdio};
|
use std::process::{Command, Output, Stdio};
|
||||||
use std::str::FromStr;
|
use std::str::FromStr;
|
||||||
use temp_file::TempFile;
|
use temp_file::TempFile;
|
||||||
|
use url::Url;
|
||||||
|
|
||||||
#[derive(Debug, Clone, Serialize)]
|
#[derive(Debug, Clone, Serialize)]
|
||||||
pub struct HelmRepository {
|
pub struct HelmRepository {
|
||||||
@@ -78,8 +78,7 @@ impl HelmChartInterpret {
|
|||||||
repo.name, repo.url, repo.force_update
|
repo.name, repo.url, repo.force_update
|
||||||
);
|
);
|
||||||
|
|
||||||
let repo_url = repo.url.to_string();
|
let mut add_args = vec!["repo", "add", &repo.name, repo.url.as_str()];
|
||||||
let mut add_args = vec!["repo", "add", &repo.name, &repo_url];
|
|
||||||
if repo.force_update {
|
if repo.force_update {
|
||||||
add_args.push("--force-update");
|
add_args.push("--force-update");
|
||||||
}
|
}
|
||||||
|
|||||||
364
harmony/src/modules/helm/command.rs
Normal file
364
harmony/src/modules/helm/command.rs
Normal file
@@ -0,0 +1,364 @@
|
|||||||
|
use async_trait::async_trait;
|
||||||
|
use log::debug;
|
||||||
|
use serde::Serialize;
|
||||||
|
use std::collections::HashMap;
|
||||||
|
use std::io::ErrorKind;
|
||||||
|
use std::path::PathBuf;
|
||||||
|
use std::process::{Command, Output};
|
||||||
|
use temp_dir::{self, TempDir};
|
||||||
|
use temp_file::TempFile;
|
||||||
|
|
||||||
|
use crate::data::Version;
|
||||||
|
use crate::interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome};
|
||||||
|
use crate::inventory::Inventory;
|
||||||
|
use crate::score::Score;
|
||||||
|
use crate::topology::{HelmCommand, K8sclient, Topology};
|
||||||
|
use harmony_types::id::Id;
|
||||||
|
|
||||||
|
#[derive(Clone)]
|
||||||
|
pub struct HelmCommandExecutor {
|
||||||
|
pub env: HashMap<String, String>,
|
||||||
|
pub path: Option<PathBuf>,
|
||||||
|
pub args: Vec<String>,
|
||||||
|
pub api_versions: Option<Vec<String>>,
|
||||||
|
pub kube_version: String,
|
||||||
|
pub debug: Option<bool>,
|
||||||
|
pub globals: HelmGlobals,
|
||||||
|
pub chart: HelmChart,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Clone)]
|
||||||
|
pub struct HelmGlobals {
|
||||||
|
pub chart_home: Option<PathBuf>,
|
||||||
|
pub config_home: Option<PathBuf>,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Debug, Clone, Serialize)]
|
||||||
|
pub struct HelmChart {
|
||||||
|
pub name: String,
|
||||||
|
pub version: Option<String>,
|
||||||
|
pub repo: Option<String>,
|
||||||
|
pub release_name: Option<String>,
|
||||||
|
pub namespace: Option<String>,
|
||||||
|
pub additional_values_files: Vec<PathBuf>,
|
||||||
|
pub values_file: Option<PathBuf>,
|
||||||
|
pub values_inline: Option<String>,
|
||||||
|
pub include_crds: Option<bool>,
|
||||||
|
pub skip_hooks: Option<bool>,
|
||||||
|
pub api_versions: Option<Vec<String>>,
|
||||||
|
pub kube_version: Option<String>,
|
||||||
|
pub name_template: String,
|
||||||
|
pub skip_tests: Option<bool>,
|
||||||
|
pub debug: Option<bool>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl HelmCommandExecutor {
|
||||||
|
pub fn generate(mut self) -> Result<String, std::io::Error> {
|
||||||
|
if self.globals.chart_home.is_none() {
|
||||||
|
self.globals.chart_home = Some(PathBuf::from("charts"));
|
||||||
|
}
|
||||||
|
|
||||||
|
if self
|
||||||
|
.clone()
|
||||||
|
.chart
|
||||||
|
.clone()
|
||||||
|
.chart_exists_locally(self.clone().globals.chart_home.unwrap())
|
||||||
|
.is_none()
|
||||||
|
{
|
||||||
|
if self.chart.repo.is_none() {
|
||||||
|
return Err(std::io::Error::new(
|
||||||
|
ErrorKind::Other,
|
||||||
|
"Chart doesn't exist locally and no repo specified",
|
||||||
|
));
|
||||||
|
}
|
||||||
|
self.clone().run_command(
|
||||||
|
self.chart
|
||||||
|
.clone()
|
||||||
|
.pull_command(self.globals.chart_home.clone().unwrap()),
|
||||||
|
)?;
|
||||||
|
}
|
||||||
|
|
||||||
|
let out = self.clone().run_command(
|
||||||
|
self.chart
|
||||||
|
.clone()
|
||||||
|
.helm_args(self.globals.chart_home.clone().unwrap()),
|
||||||
|
)?;
|
||||||
|
|
||||||
|
// TODO: don't use unwrap here
|
||||||
|
let s = String::from_utf8(out.stdout).unwrap();
|
||||||
|
debug!("helm stderr: {}", String::from_utf8(out.stderr).unwrap());
|
||||||
|
debug!("helm status: {}", out.status);
|
||||||
|
debug!("helm output: {s}");
|
||||||
|
|
||||||
|
let clean = s.split_once("---").unwrap().1;
|
||||||
|
|
||||||
|
Ok(clean.to_string())
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn version(self) -> Result<String, std::io::Error> {
|
||||||
|
let out = self.run_command(vec![
|
||||||
|
"version".to_string(),
|
||||||
|
"-c".to_string(),
|
||||||
|
"--short".to_string(),
|
||||||
|
])?;
|
||||||
|
|
||||||
|
// TODO: don't use unwrap
|
||||||
|
Ok(String::from_utf8(out.stdout).unwrap())
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn run_command(mut self, mut args: Vec<String>) -> Result<Output, std::io::Error> {
|
||||||
|
if let Some(d) = self.debug {
|
||||||
|
if d {
|
||||||
|
args.push("--debug".to_string());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
let path = if let Some(p) = self.path {
|
||||||
|
p
|
||||||
|
} else {
|
||||||
|
PathBuf::from("helm")
|
||||||
|
};
|
||||||
|
|
||||||
|
let config_home = match self.globals.config_home {
|
||||||
|
Some(p) => p,
|
||||||
|
None => PathBuf::from(TempDir::new()?.path()),
|
||||||
|
};
|
||||||
|
|
||||||
|
if let Some(yaml_str) = self.chart.values_inline {
|
||||||
|
let tf: TempFile = temp_file::with_contents(yaml_str.as_bytes());
|
||||||
|
self.chart
|
||||||
|
.additional_values_files
|
||||||
|
.push(PathBuf::from(tf.path()));
|
||||||
|
};
|
||||||
|
|
||||||
|
self.env.insert(
|
||||||
|
"HELM_CONFIG_HOME".to_string(),
|
||||||
|
config_home.to_str().unwrap().to_string(),
|
||||||
|
);
|
||||||
|
self.env.insert(
|
||||||
|
"HELM_CACHE_HOME".to_string(),
|
||||||
|
config_home.to_str().unwrap().to_string(),
|
||||||
|
);
|
||||||
|
self.env.insert(
|
||||||
|
"HELM_DATA_HOME".to_string(),
|
||||||
|
config_home.to_str().unwrap().to_string(),
|
||||||
|
);
|
||||||
|
|
||||||
|
Command::new(path).envs(self.env).args(args).output()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl HelmChart {
|
||||||
|
pub fn chart_exists_locally(self, chart_home: PathBuf) -> Option<PathBuf> {
|
||||||
|
let chart_path =
|
||||||
|
PathBuf::from(chart_home.to_str().unwrap().to_string() + "/" + &self.name.to_string());
|
||||||
|
|
||||||
|
if chart_path.exists() {
|
||||||
|
Some(chart_path)
|
||||||
|
} else {
|
||||||
|
None
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn pull_command(self, chart_home: PathBuf) -> Vec<String> {
|
||||||
|
let mut args = vec![
|
||||||
|
"pull".to_string(),
|
||||||
|
"--untar".to_string(),
|
||||||
|
"--untardir".to_string(),
|
||||||
|
chart_home.to_str().unwrap().to_string(),
|
||||||
|
];
|
||||||
|
|
||||||
|
match self.repo {
|
||||||
|
Some(r) => {
|
||||||
|
if r.starts_with("oci://") {
|
||||||
|
args.push(
|
||||||
|
r.trim_end_matches("/").to_string() + "/" + self.name.clone().as_str(),
|
||||||
|
);
|
||||||
|
} else {
|
||||||
|
args.push("--repo".to_string());
|
||||||
|
args.push(r.to_string());
|
||||||
|
|
||||||
|
args.push(self.name);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
None => args.push(self.name),
|
||||||
|
};
|
||||||
|
|
||||||
|
if let Some(v) = self.version {
|
||||||
|
args.push("--version".to_string());
|
||||||
|
args.push(v.to_string());
|
||||||
|
}
|
||||||
|
|
||||||
|
args
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn helm_args(self, chart_home: PathBuf) -> Vec<String> {
|
||||||
|
let mut args: Vec<String> = vec!["template".to_string()];
|
||||||
|
|
||||||
|
match self.release_name {
|
||||||
|
Some(rn) => args.push(rn.to_string()),
|
||||||
|
None => args.push("--generate-name".to_string()),
|
||||||
|
}
|
||||||
|
|
||||||
|
args.push(
|
||||||
|
PathBuf::from(chart_home.to_str().unwrap().to_string() + "/" + self.name.as_str())
|
||||||
|
.to_str()
|
||||||
|
.unwrap()
|
||||||
|
.to_string(),
|
||||||
|
);
|
||||||
|
|
||||||
|
if let Some(n) = self.namespace {
|
||||||
|
args.push("--namespace".to_string());
|
||||||
|
args.push(n.to_string());
|
||||||
|
}
|
||||||
|
|
||||||
|
if let Some(f) = self.values_file {
|
||||||
|
args.push("-f".to_string());
|
||||||
|
args.push(f.to_str().unwrap().to_string());
|
||||||
|
}
|
||||||
|
|
||||||
|
for f in self.additional_values_files {
|
||||||
|
args.push("-f".to_string());
|
||||||
|
args.push(f.to_str().unwrap().to_string());
|
||||||
|
}
|
||||||
|
|
||||||
|
if let Some(vv) = self.api_versions {
|
||||||
|
for v in vv {
|
||||||
|
args.push("--api-versions".to_string());
|
||||||
|
args.push(v);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if let Some(kv) = self.kube_version {
|
||||||
|
args.push("--kube-version".to_string());
|
||||||
|
args.push(kv);
|
||||||
|
}
|
||||||
|
|
||||||
|
if let Some(crd) = self.include_crds {
|
||||||
|
if crd {
|
||||||
|
args.push("--include-crds".to_string());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if let Some(st) = self.skip_tests {
|
||||||
|
if st {
|
||||||
|
args.push("--skip-tests".to_string());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if let Some(sh) = self.skip_hooks {
|
||||||
|
if sh {
|
||||||
|
args.push("--no-hooks".to_string());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if let Some(d) = self.debug {
|
||||||
|
if d {
|
||||||
|
args.push("--debug".to_string());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
args
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Debug, Clone, Serialize)]
|
||||||
|
pub struct HelmChartScoreV2 {
|
||||||
|
pub chart: HelmChart,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<T: Topology + K8sclient + HelmCommand> Score<T> for HelmChartScoreV2 {
|
||||||
|
fn create_interpret(&self) -> Box<dyn Interpret<T>> {
|
||||||
|
Box::new(HelmChartInterpretV2 {
|
||||||
|
score: self.clone(),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
fn name(&self) -> String {
|
||||||
|
format!(
|
||||||
|
"{} {} HelmChartScoreV2",
|
||||||
|
self.chart
|
||||||
|
.release_name
|
||||||
|
.clone()
|
||||||
|
.unwrap_or("Unknown".to_string()),
|
||||||
|
self.chart.name
|
||||||
|
)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Debug, Serialize)]
|
||||||
|
pub struct HelmChartInterpretV2 {
|
||||||
|
pub score: HelmChartScoreV2,
|
||||||
|
}
|
||||||
|
impl HelmChartInterpretV2 {}
|
||||||
|
|
||||||
|
#[async_trait]
|
||||||
|
impl<T: Topology + K8sclient + HelmCommand> Interpret<T> for HelmChartInterpretV2 {
|
||||||
|
async fn execute(
|
||||||
|
&self,
|
||||||
|
_inventory: &Inventory,
|
||||||
|
_topology: &T,
|
||||||
|
) -> Result<Outcome, InterpretError> {
|
||||||
|
let _ns = self
|
||||||
|
.score
|
||||||
|
.chart
|
||||||
|
.namespace
|
||||||
|
.as_ref()
|
||||||
|
.unwrap_or_else(|| todo!("Get namespace from active kubernetes cluster"));
|
||||||
|
|
||||||
|
let helm_executor = HelmCommandExecutor {
|
||||||
|
env: HashMap::new(),
|
||||||
|
path: None,
|
||||||
|
args: vec![],
|
||||||
|
api_versions: None,
|
||||||
|
kube_version: "v1.33.0".to_string(),
|
||||||
|
debug: Some(false),
|
||||||
|
globals: HelmGlobals {
|
||||||
|
chart_home: None,
|
||||||
|
config_home: None,
|
||||||
|
},
|
||||||
|
chart: self.score.chart.clone(),
|
||||||
|
};
|
||||||
|
|
||||||
|
// let mut helm_options = Vec::new();
|
||||||
|
// if self.score.create_namespace {
|
||||||
|
// helm_options.push(NonBlankString::from_str("--create-namespace").unwrap());
|
||||||
|
// }
|
||||||
|
|
||||||
|
let res = helm_executor.generate();
|
||||||
|
|
||||||
|
let _output = match res {
|
||||||
|
Ok(output) => output,
|
||||||
|
Err(err) => return Err(InterpretError::new(err.to_string())),
|
||||||
|
};
|
||||||
|
|
||||||
|
// TODO: implement actually applying the YAML from the templating in the generate function to a k8s cluster, having trouble passing in straight YAML into the k8s client
|
||||||
|
|
||||||
|
// let k8s_resource = k8s_openapi::serde_json::from_str(output.as_str()).unwrap();
|
||||||
|
|
||||||
|
// let client = topology
|
||||||
|
// .k8s_client()
|
||||||
|
// .await
|
||||||
|
// .expect("Environment should provide enough information to instanciate a client")
|
||||||
|
// .apply_namespaced(&vec![output], Some(ns.to_string().as_str()));
|
||||||
|
// match client.apply_yaml(output) {
|
||||||
|
// Ok(_) => return Ok(Outcome::success("Helm chart deployed".to_string())),
|
||||||
|
// Err(e) => return Err(InterpretError::new(e)),
|
||||||
|
// }
|
||||||
|
|
||||||
|
Ok(Outcome::success("Helm chart deployed".to_string()))
|
||||||
|
}
|
||||||
|
|
||||||
|
fn get_name(&self) -> InterpretName {
|
||||||
|
InterpretName::HelmCommand
|
||||||
|
}
|
||||||
|
fn get_version(&self) -> Version {
|
||||||
|
todo!()
|
||||||
|
}
|
||||||
|
fn get_status(&self) -> InterpretStatus {
|
||||||
|
todo!()
|
||||||
|
}
|
||||||
|
fn get_children(&self) -> Vec<Id> {
|
||||||
|
todo!()
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -1 +1,2 @@
|
|||||||
pub mod chart;
|
pub mod chart;
|
||||||
|
pub mod command;
|
||||||
|
|||||||
@@ -4,5 +4,4 @@ pub mod application_monitoring;
|
|||||||
pub mod grafana;
|
pub mod grafana;
|
||||||
pub mod kube_prometheus;
|
pub mod kube_prometheus;
|
||||||
pub mod ntfy;
|
pub mod ntfy;
|
||||||
pub mod okd;
|
|
||||||
pub mod prometheus;
|
pub mod prometheus;
|
||||||
|
|||||||
@@ -1,149 +0,0 @@
|
|||||||
use std::{collections::BTreeMap, sync::Arc};
|
|
||||||
|
|
||||||
use crate::{
|
|
||||||
data::Version,
|
|
||||||
interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
|
|
||||||
inventory::Inventory,
|
|
||||||
score::Score,
|
|
||||||
topology::{K8sclient, Topology, k8s::K8sClient},
|
|
||||||
};
|
|
||||||
use async_trait::async_trait;
|
|
||||||
use harmony_types::id::Id;
|
|
||||||
use k8s_openapi::api::core::v1::ConfigMap;
|
|
||||||
use kube::api::ObjectMeta;
|
|
||||||
use serde::Serialize;
|
|
||||||
|
|
||||||
#[derive(Clone, Debug, Serialize)]
|
|
||||||
pub struct OpenshiftUserWorkloadMonitoring {}
|
|
||||||
|
|
||||||
impl<T: Topology + K8sclient> Score<T> for OpenshiftUserWorkloadMonitoring {
|
|
||||||
fn name(&self) -> String {
|
|
||||||
"OpenshiftUserWorkloadMonitoringScore".to_string()
|
|
||||||
}
|
|
||||||
|
|
||||||
fn create_interpret(&self) -> Box<dyn Interpret<T>> {
|
|
||||||
Box::new(OpenshiftUserWorkloadMonitoringInterpret {})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Clone, Debug, Serialize)]
|
|
||||||
pub struct OpenshiftUserWorkloadMonitoringInterpret {}
|
|
||||||
|
|
||||||
#[async_trait]
|
|
||||||
impl<T: Topology + K8sclient> Interpret<T> for OpenshiftUserWorkloadMonitoringInterpret {
|
|
||||||
async fn execute(
|
|
||||||
&self,
|
|
||||||
_inventory: &Inventory,
|
|
||||||
topology: &T,
|
|
||||||
) -> Result<Outcome, InterpretError> {
|
|
||||||
let client = topology.k8s_client().await.unwrap();
|
|
||||||
self.update_cluster_monitoring_config_cm(&client).await?;
|
|
||||||
self.update_user_workload_monitoring_config_cm(&client)
|
|
||||||
.await?;
|
|
||||||
self.verify_user_workload(&client).await?;
|
|
||||||
Ok(Outcome::success(
|
|
||||||
"successfully enabled user-workload-monitoring".to_string(),
|
|
||||||
))
|
|
||||||
}
|
|
||||||
|
|
||||||
fn get_name(&self) -> InterpretName {
|
|
||||||
InterpretName::Custom("OpenshiftUserWorkloadMonitoring")
|
|
||||||
}
|
|
||||||
|
|
||||||
fn get_version(&self) -> Version {
|
|
||||||
todo!()
|
|
||||||
}
|
|
||||||
|
|
||||||
fn get_status(&self) -> InterpretStatus {
|
|
||||||
todo!()
|
|
||||||
}
|
|
||||||
|
|
||||||
fn get_children(&self) -> Vec<Id> {
|
|
||||||
todo!()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl OpenshiftUserWorkloadMonitoringInterpret {
|
|
||||||
pub async fn update_cluster_monitoring_config_cm(
|
|
||||||
&self,
|
|
||||||
client: &Arc<K8sClient>,
|
|
||||||
) -> Result<Outcome, InterpretError> {
|
|
||||||
let mut data = BTreeMap::new();
|
|
||||||
data.insert(
|
|
||||||
"config.yaml".to_string(),
|
|
||||||
r#"
|
|
||||||
enableUserWorkload: true
|
|
||||||
alertmanagerMain:
|
|
||||||
enableUserAlertmanagerConfig: true
|
|
||||||
"#
|
|
||||||
.to_string(),
|
|
||||||
);
|
|
||||||
|
|
||||||
let cm = ConfigMap {
|
|
||||||
metadata: ObjectMeta {
|
|
||||||
name: Some("cluster-monitoring-config".to_string()),
|
|
||||||
namespace: Some("openshift-monitoring".to_string()),
|
|
||||||
..Default::default()
|
|
||||||
},
|
|
||||||
data: Some(data),
|
|
||||||
..Default::default()
|
|
||||||
};
|
|
||||||
client.apply(&cm, Some("openshift-monitoring")).await?;
|
|
||||||
|
|
||||||
Ok(Outcome::success(
|
|
||||||
"updated cluster-monitoring-config-map".to_string(),
|
|
||||||
))
|
|
||||||
}
|
|
||||||
|
|
||||||
pub async fn update_user_workload_monitoring_config_cm(
|
|
||||||
&self,
|
|
||||||
client: &Arc<K8sClient>,
|
|
||||||
) -> Result<Outcome, InterpretError> {
|
|
||||||
let mut data = BTreeMap::new();
|
|
||||||
data.insert(
|
|
||||||
"config.yaml".to_string(),
|
|
||||||
r#"
|
|
||||||
alertmanager:
|
|
||||||
enabled: true
|
|
||||||
enableAlertmanagerConfig: true
|
|
||||||
"#
|
|
||||||
.to_string(),
|
|
||||||
);
|
|
||||||
let cm = ConfigMap {
|
|
||||||
metadata: ObjectMeta {
|
|
||||||
name: Some("user-workload-monitoring-config".to_string()),
|
|
||||||
namespace: Some("openshift-user-workload-monitoring".to_string()),
|
|
||||||
..Default::default()
|
|
||||||
},
|
|
||||||
data: Some(data),
|
|
||||||
..Default::default()
|
|
||||||
};
|
|
||||||
client
|
|
||||||
.apply(&cm, Some("openshift-user-workload-monitoring"))
|
|
||||||
.await?;
|
|
||||||
|
|
||||||
Ok(Outcome::success(
|
|
||||||
"updated openshift-user-monitoring-config-map".to_string(),
|
|
||||||
))
|
|
||||||
}
|
|
||||||
|
|
||||||
pub async fn verify_user_workload(
|
|
||||||
&self,
|
|
||||||
client: &Arc<K8sClient>,
|
|
||||||
) -> Result<Outcome, InterpretError> {
|
|
||||||
let namespace = "openshift-user-workload-monitoring";
|
|
||||||
let alertmanager_name = "alertmanager-user-workload-0";
|
|
||||||
let prometheus_name = "prometheus-user-workload-0";
|
|
||||||
client
|
|
||||||
.wait_for_pod_ready(alertmanager_name, Some(namespace))
|
|
||||||
.await?;
|
|
||||||
client
|
|
||||||
.wait_for_pod_ready(prometheus_name, Some(namespace))
|
|
||||||
.await?;
|
|
||||||
|
|
||||||
Ok(Outcome::success(format!(
|
|
||||||
"pods: {}, {} ready in ns: {}",
|
|
||||||
alertmanager_name, prometheus_name, namespace
|
|
||||||
)))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -1 +0,0 @@
|
|||||||
pub mod enable_user_workload;
|
|
||||||
@@ -215,7 +215,7 @@ impl OKDSetup03ControlPlaneInterpret {
|
|||||||
) -> Result<(), InterpretError> {
|
) -> Result<(), InterpretError> {
|
||||||
info!("[ControlPlane] Ensuring persistent bonding");
|
info!("[ControlPlane] Ensuring persistent bonding");
|
||||||
let score = HostNetworkConfigurationScore {
|
let score = HostNetworkConfigurationScore {
|
||||||
hosts: hosts.clone(),
|
hosts: hosts.clone(), // FIXME: Avoid clone if possible
|
||||||
};
|
};
|
||||||
score.interpret(inventory, topology).await?;
|
score.interpret(inventory, topology).await?;
|
||||||
|
|
||||||
|
|||||||
@@ -77,8 +77,6 @@ impl OKDBootstrapLoadBalancerScore {
|
|||||||
address: topology.bootstrap_host.ip.to_string(),
|
address: topology.bootstrap_host.ip.to_string(),
|
||||||
port,
|
port,
|
||||||
});
|
});
|
||||||
|
|
||||||
backend.dedup();
|
|
||||||
backend
|
backend
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -240,7 +240,7 @@ pub struct OvsPortSpec {
|
|||||||
#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
|
#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
|
||||||
#[serde(rename_all = "kebab-case")]
|
#[serde(rename_all = "kebab-case")]
|
||||||
pub struct EthtoolSpec {
|
pub struct EthtoolSpec {
|
||||||
// TODO: Properly describe this spec (https://nmstate.io/devel/yaml_api.html#ethtool)
|
// FIXME: Properly describe this spec (https://nmstate.io/devel/yaml_api.html#ethtool)
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
|
#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
|
||||||
|
|||||||
@@ -1,6 +1,6 @@
|
|||||||
use async_trait::async_trait;
|
use async_trait::async_trait;
|
||||||
use harmony_types::id::Id;
|
use harmony_types::id::Id;
|
||||||
use log::{debug, info};
|
use log::{debug, info, warn};
|
||||||
use serde::Serialize;
|
use serde::Serialize;
|
||||||
|
|
||||||
use crate::{
|
use crate::{
|
||||||
@@ -34,59 +34,6 @@ pub struct HostNetworkConfigurationInterpret {
|
|||||||
score: HostNetworkConfigurationScore,
|
score: HostNetworkConfigurationScore,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl HostNetworkConfigurationInterpret {
|
|
||||||
async fn configure_network_for_host<T: Topology + Switch>(
|
|
||||||
&self,
|
|
||||||
topology: &T,
|
|
||||||
host: &PhysicalHost,
|
|
||||||
) -> Result<(), InterpretError> {
|
|
||||||
let switch_ports = self.collect_switch_ports_for_host(topology, host).await?;
|
|
||||||
if !switch_ports.is_empty() {
|
|
||||||
topology
|
|
||||||
.configure_host_network(host, HostNetworkConfig { switch_ports })
|
|
||||||
.await
|
|
||||||
.map_err(|e| InterpretError::new(format!("Failed to configure host: {e}")))?;
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn collect_switch_ports_for_host<T: Topology + Switch>(
|
|
||||||
&self,
|
|
||||||
topology: &T,
|
|
||||||
host: &PhysicalHost,
|
|
||||||
) -> Result<Vec<SwitchPort>, InterpretError> {
|
|
||||||
let mut switch_ports = vec![];
|
|
||||||
|
|
||||||
for network_interface in &host.network {
|
|
||||||
let mac_address = network_interface.mac_address;
|
|
||||||
|
|
||||||
match topology.get_port_for_mac_address(&mac_address).await {
|
|
||||||
Ok(Some(port)) => {
|
|
||||||
switch_ports.push(SwitchPort {
|
|
||||||
interface: NetworkInterface {
|
|
||||||
name: network_interface.name.clone(),
|
|
||||||
mac_address,
|
|
||||||
speed_mbps: network_interface.speed_mbps,
|
|
||||||
mtu: network_interface.mtu,
|
|
||||||
},
|
|
||||||
port,
|
|
||||||
});
|
|
||||||
}
|
|
||||||
Ok(None) => debug!("No port found for host '{}', skipping", host.id),
|
|
||||||
Err(e) => {
|
|
||||||
return Err(InterpretError::new(format!(
|
|
||||||
"Failed to get port for host '{}': {}",
|
|
||||||
host.id, e
|
|
||||||
)));
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(switch_ports)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[async_trait]
|
#[async_trait]
|
||||||
impl<T: Topology + Switch> Interpret<T> for HostNetworkConfigurationInterpret {
|
impl<T: Topology + Switch> Interpret<T> for HostNetworkConfigurationInterpret {
|
||||||
fn get_name(&self) -> InterpretName {
|
fn get_name(&self) -> InterpretName {
|
||||||
@@ -110,24 +57,43 @@ impl<T: Topology + Switch> Interpret<T> for HostNetworkConfigurationInterpret {
|
|||||||
_inventory: &Inventory,
|
_inventory: &Inventory,
|
||||||
topology: &T,
|
topology: &T,
|
||||||
) -> Result<Outcome, InterpretError> {
|
) -> Result<Outcome, InterpretError> {
|
||||||
if self.score.hosts.is_empty() {
|
|
||||||
return Ok(Outcome::noop("No hosts to configure".into()));
|
|
||||||
}
|
|
||||||
|
|
||||||
info!(
|
info!(
|
||||||
"Started network configuration for {} host(s)...",
|
"Started network configuration for {} host(s)...",
|
||||||
self.score.hosts.len()
|
self.score.hosts.len()
|
||||||
);
|
);
|
||||||
|
|
||||||
topology
|
|
||||||
.setup_switch()
|
|
||||||
.await
|
|
||||||
.map_err(|e| InterpretError::new(format!("Switch setup failed: {e}")))?;
|
|
||||||
|
|
||||||
let mut configured_host_count = 0;
|
let mut configured_host_count = 0;
|
||||||
|
|
||||||
for host in &self.score.hosts {
|
for host in &self.score.hosts {
|
||||||
self.configure_network_for_host(topology, host).await?;
|
let mut switch_ports = vec![];
|
||||||
configured_host_count += 1;
|
|
||||||
|
for network_interface in &host.network {
|
||||||
|
let mac_address = network_interface.mac_address;
|
||||||
|
|
||||||
|
match topology.get_port_for_mac_address(&mac_address).await {
|
||||||
|
Ok(Some(port)) => {
|
||||||
|
switch_ports.push(SwitchPort {
|
||||||
|
interface: NetworkInterface {
|
||||||
|
name: network_interface.name.clone(),
|
||||||
|
mac_address,
|
||||||
|
speed_mbps: network_interface.speed_mbps,
|
||||||
|
mtu: network_interface.mtu,
|
||||||
|
},
|
||||||
|
port,
|
||||||
|
});
|
||||||
|
}
|
||||||
|
Ok(None) => debug!("No port found for host '{}', skipping", host.id),
|
||||||
|
Err(e) => warn!("Failed to get port for host '{}': {}", host.id, e),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if !switch_ports.is_empty() {
|
||||||
|
configured_host_count += 1;
|
||||||
|
topology
|
||||||
|
.configure_host_network(host, HostNetworkConfig { switch_ports })
|
||||||
|
.await
|
||||||
|
.map_err(|e| InterpretError::new(format!("Failed to configure host: {e}")))?;
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if configured_host_count > 0 {
|
if configured_host_count > 0 {
|
||||||
@@ -185,18 +151,6 @@ mod tests {
|
|||||||
pub static ref ANOTHER_PORT: PortLocation = PortLocation(2, 0, 42);
|
pub static ref ANOTHER_PORT: PortLocation = PortLocation(2, 0, 42);
|
||||||
}
|
}
|
||||||
|
|
||||||
#[tokio::test]
|
|
||||||
async fn should_setup_switch() {
|
|
||||||
let host = given_host(&HOST_ID, vec![EXISTING_INTERFACE.clone()]);
|
|
||||||
let score = given_score(vec![host]);
|
|
||||||
let topology = TopologyWithSwitch::new();
|
|
||||||
|
|
||||||
let _ = score.interpret(&Inventory::empty(), &topology).await;
|
|
||||||
|
|
||||||
let switch_setup = topology.switch_setup.lock().unwrap();
|
|
||||||
assert_that!(*switch_setup).is_true();
|
|
||||||
}
|
|
||||||
|
|
||||||
#[tokio::test]
|
#[tokio::test]
|
||||||
async fn host_with_one_mac_address_should_create_bond_with_one_interface() {
|
async fn host_with_one_mac_address_should_create_bond_with_one_interface() {
|
||||||
let host = given_host(&HOST_ID, vec![EXISTING_INTERFACE.clone()]);
|
let host = given_host(&HOST_ID, vec![EXISTING_INTERFACE.clone()]);
|
||||||
@@ -283,6 +237,7 @@ mod tests {
|
|||||||
|
|
||||||
#[tokio::test]
|
#[tokio::test]
|
||||||
async fn port_not_found_for_mac_address_should_not_configure_interface() {
|
async fn port_not_found_for_mac_address_should_not_configure_interface() {
|
||||||
|
// FIXME: Should it still configure an empty bond/port channel?
|
||||||
let score = given_score(vec![given_host(&HOST_ID, vec![UNKNOWN_INTERFACE.clone()])]);
|
let score = given_score(vec![given_host(&HOST_ID, vec![UNKNOWN_INTERFACE.clone()])]);
|
||||||
let topology = TopologyWithSwitch::new_port_not_found();
|
let topology = TopologyWithSwitch::new_port_not_found();
|
||||||
|
|
||||||
@@ -329,7 +284,6 @@ mod tests {
|
|||||||
struct TopologyWithSwitch {
|
struct TopologyWithSwitch {
|
||||||
available_ports: Arc<Mutex<Vec<PortLocation>>>,
|
available_ports: Arc<Mutex<Vec<PortLocation>>>,
|
||||||
configured_host_networks: Arc<Mutex<Vec<(Id, HostNetworkConfig)>>>,
|
configured_host_networks: Arc<Mutex<Vec<(Id, HostNetworkConfig)>>>,
|
||||||
switch_setup: Arc<Mutex<bool>>,
|
|
||||||
}
|
}
|
||||||
|
|
||||||
impl TopologyWithSwitch {
|
impl TopologyWithSwitch {
|
||||||
@@ -337,7 +291,6 @@ mod tests {
|
|||||||
Self {
|
Self {
|
||||||
available_ports: Arc::new(Mutex::new(vec![PORT.clone(), ANOTHER_PORT.clone()])),
|
available_ports: Arc::new(Mutex::new(vec![PORT.clone(), ANOTHER_PORT.clone()])),
|
||||||
configured_host_networks: Arc::new(Mutex::new(vec![])),
|
configured_host_networks: Arc::new(Mutex::new(vec![])),
|
||||||
switch_setup: Arc::new(Mutex::new(false)),
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -345,7 +298,6 @@ mod tests {
|
|||||||
Self {
|
Self {
|
||||||
available_ports: Arc::new(Mutex::new(vec![])),
|
available_ports: Arc::new(Mutex::new(vec![])),
|
||||||
configured_host_networks: Arc::new(Mutex::new(vec![])),
|
configured_host_networks: Arc::new(Mutex::new(vec![])),
|
||||||
switch_setup: Arc::new(Mutex::new(false)),
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -363,12 +315,6 @@ mod tests {
|
|||||||
|
|
||||||
#[async_trait]
|
#[async_trait]
|
||||||
impl Switch for TopologyWithSwitch {
|
impl Switch for TopologyWithSwitch {
|
||||||
async fn setup_switch(&self) -> Result<(), SwitchError> {
|
|
||||||
let mut switch_configured = self.switch_setup.lock().unwrap();
|
|
||||||
*switch_configured = true;
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn get_port_for_mac_address(
|
async fn get_port_for_mac_address(
|
||||||
&self,
|
&self,
|
||||||
_mac_address: &MacAddress,
|
_mac_address: &MacAddress,
|
||||||
|
|||||||
@@ -4,7 +4,7 @@ use std::{
|
|||||||
};
|
};
|
||||||
|
|
||||||
use async_trait::async_trait;
|
use async_trait::async_trait;
|
||||||
use log::{debug, warn};
|
use log::{info, warn};
|
||||||
use serde::{Deserialize, Serialize};
|
use serde::{Deserialize, Serialize};
|
||||||
use tokio::time::sleep;
|
use tokio::time::sleep;
|
||||||
|
|
||||||
@@ -19,8 +19,8 @@ use harmony_types::id::Id;
|
|||||||
|
|
||||||
#[derive(Debug, Clone, Serialize)]
|
#[derive(Debug, Clone, Serialize)]
|
||||||
pub struct CephRemoveOsd {
|
pub struct CephRemoveOsd {
|
||||||
pub osd_deployment_name: String,
|
osd_deployment_name: String,
|
||||||
pub rook_ceph_namespace: String,
|
rook_ceph_namespace: String,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<T: Topology + K8sclient> Score<T> for CephRemoveOsd {
|
impl<T: Topology + K8sclient> Score<T> for CephRemoveOsd {
|
||||||
@@ -54,17 +54,18 @@ impl<T: Topology + K8sclient> Interpret<T> for CephRemoveOsdInterpret {
|
|||||||
self.verify_deployment_scaled(client.clone()).await?;
|
self.verify_deployment_scaled(client.clone()).await?;
|
||||||
self.delete_deployment(client.clone()).await?;
|
self.delete_deployment(client.clone()).await?;
|
||||||
self.verify_deployment_deleted(client.clone()).await?;
|
self.verify_deployment_deleted(client.clone()).await?;
|
||||||
self.purge_ceph_osd(client.clone()).await?;
|
|
||||||
self.verify_ceph_osd_removal(client.clone()).await?;
|
|
||||||
|
|
||||||
let osd_id_full = self.get_ceph_osd_id().unwrap();
|
let osd_id_full = self.get_ceph_osd_id().unwrap();
|
||||||
|
self.purge_ceph_osd(client.clone(), &osd_id_full).await?;
|
||||||
|
self.verify_ceph_osd_removal(client.clone(), &osd_id_full)
|
||||||
|
.await?;
|
||||||
|
|
||||||
Ok(Outcome::success(format!(
|
Ok(Outcome::success(format!(
|
||||||
"Successfully removed OSD {} from rook-ceph cluster by deleting deployment {}",
|
"Successfully removed OSD {} from rook-ceph cluster by deleting deployment {}",
|
||||||
osd_id_full, self.score.osd_deployment_name
|
osd_id_full, self.score.osd_deployment_name
|
||||||
)))
|
)))
|
||||||
}
|
}
|
||||||
fn get_name(&self) -> InterpretName {
|
fn get_name(&self) -> InterpretName {
|
||||||
InterpretName::CephRemoveOsd
|
todo!()
|
||||||
}
|
}
|
||||||
|
|
||||||
fn get_version(&self) -> Version {
|
fn get_version(&self) -> Version {
|
||||||
@@ -81,7 +82,7 @@ impl<T: Topology + K8sclient> Interpret<T> for CephRemoveOsdInterpret {
|
|||||||
}
|
}
|
||||||
|
|
||||||
impl CephRemoveOsdInterpret {
|
impl CephRemoveOsdInterpret {
|
||||||
pub fn get_ceph_osd_id_numeric(&self) -> Result<String, InterpretError> {
|
pub fn get_ceph_osd_id(&self) -> Result<String, InterpretError> {
|
||||||
let osd_id_numeric = self
|
let osd_id_numeric = self
|
||||||
.score
|
.score
|
||||||
.osd_deployment_name
|
.osd_deployment_name
|
||||||
@@ -93,14 +94,9 @@ impl CephRemoveOsdInterpret {
|
|||||||
self.score.osd_deployment_name
|
self.score.osd_deployment_name
|
||||||
))
|
))
|
||||||
})?;
|
})?;
|
||||||
Ok(osd_id_numeric.to_string())
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn get_ceph_osd_id(&self) -> Result<String, InterpretError> {
|
|
||||||
let osd_id_numeric = self.get_ceph_osd_id_numeric().unwrap();
|
|
||||||
let osd_id_full = format!("osd.{}", osd_id_numeric);
|
let osd_id_full = format!("osd.{}", osd_id_numeric);
|
||||||
|
|
||||||
debug!(
|
info!(
|
||||||
"Targeting Ceph OSD: {} (parsed from deployment {})",
|
"Targeting Ceph OSD: {} (parsed from deployment {})",
|
||||||
osd_id_full, self.score.osd_deployment_name
|
osd_id_full, self.score.osd_deployment_name
|
||||||
);
|
);
|
||||||
@@ -112,7 +108,6 @@ impl CephRemoveOsdInterpret {
|
|||||||
&self,
|
&self,
|
||||||
client: Arc<K8sClient>,
|
client: Arc<K8sClient>,
|
||||||
) -> Result<Outcome, InterpretError> {
|
) -> Result<Outcome, InterpretError> {
|
||||||
debug!("verifying toolbox exists");
|
|
||||||
let toolbox_dep = "rook-ceph-tools".to_string();
|
let toolbox_dep = "rook-ceph-tools".to_string();
|
||||||
|
|
||||||
match client
|
match client
|
||||||
@@ -154,7 +149,7 @@ impl CephRemoveOsdInterpret {
|
|||||||
&self,
|
&self,
|
||||||
client: Arc<K8sClient>,
|
client: Arc<K8sClient>,
|
||||||
) -> Result<Outcome, InterpretError> {
|
) -> Result<Outcome, InterpretError> {
|
||||||
debug!(
|
info!(
|
||||||
"Scaling down OSD deployment: {}",
|
"Scaling down OSD deployment: {}",
|
||||||
self.score.osd_deployment_name
|
self.score.osd_deployment_name
|
||||||
);
|
);
|
||||||
@@ -177,7 +172,7 @@ impl CephRemoveOsdInterpret {
|
|||||||
) -> Result<Outcome, InterpretError> {
|
) -> Result<Outcome, InterpretError> {
|
||||||
let (timeout, interval, start) = self.build_timer();
|
let (timeout, interval, start) = self.build_timer();
|
||||||
|
|
||||||
debug!("Waiting for OSD deployment to scale down to 0 replicas");
|
info!("Waiting for OSD deployment to scale down to 0 replicas");
|
||||||
loop {
|
loop {
|
||||||
let dep = client
|
let dep = client
|
||||||
.get_deployment(
|
.get_deployment(
|
||||||
@@ -185,9 +180,11 @@ impl CephRemoveOsdInterpret {
|
|||||||
Some(&self.score.rook_ceph_namespace),
|
Some(&self.score.rook_ceph_namespace),
|
||||||
)
|
)
|
||||||
.await?;
|
.await?;
|
||||||
|
|
||||||
if let Some(deployment) = dep {
|
if let Some(deployment) = dep {
|
||||||
if let Some(status) = deployment.status {
|
if let Some(status) = deployment.status {
|
||||||
if status.replicas == None && status.ready_replicas == None {
|
if status.replicas.unwrap_or(1) == 0 && status.ready_replicas.unwrap_or(1) == 0
|
||||||
|
{
|
||||||
return Ok(Outcome::success(
|
return Ok(Outcome::success(
|
||||||
"Deployment successfully scaled down.".to_string(),
|
"Deployment successfully scaled down.".to_string(),
|
||||||
));
|
));
|
||||||
@@ -215,7 +212,7 @@ impl CephRemoveOsdInterpret {
|
|||||||
&self,
|
&self,
|
||||||
client: Arc<K8sClient>,
|
client: Arc<K8sClient>,
|
||||||
) -> Result<Outcome, InterpretError> {
|
) -> Result<Outcome, InterpretError> {
|
||||||
debug!(
|
info!(
|
||||||
"Deleting OSD deployment: {}",
|
"Deleting OSD deployment: {}",
|
||||||
self.score.osd_deployment_name
|
self.score.osd_deployment_name
|
||||||
);
|
);
|
||||||
@@ -237,7 +234,7 @@ impl CephRemoveOsdInterpret {
|
|||||||
) -> Result<Outcome, InterpretError> {
|
) -> Result<Outcome, InterpretError> {
|
||||||
let (timeout, interval, start) = self.build_timer();
|
let (timeout, interval, start) = self.build_timer();
|
||||||
|
|
||||||
debug!("Verifying OSD deployment deleted");
|
info!("Waiting for OSD deployment to scale down to 0 replicas");
|
||||||
loop {
|
loop {
|
||||||
let dep = client
|
let dep = client
|
||||||
.get_deployment(
|
.get_deployment(
|
||||||
@@ -247,7 +244,7 @@ impl CephRemoveOsdInterpret {
|
|||||||
.await?;
|
.await?;
|
||||||
|
|
||||||
if dep.is_none() {
|
if dep.is_none() {
|
||||||
debug!(
|
info!(
|
||||||
"Deployment {} successfully deleted.",
|
"Deployment {} successfully deleted.",
|
||||||
self.score.osd_deployment_name
|
self.score.osd_deployment_name
|
||||||
);
|
);
|
||||||
@@ -279,10 +276,12 @@ impl CephRemoveOsdInterpret {
|
|||||||
Ok(tree)
|
Ok(tree)
|
||||||
}
|
}
|
||||||
|
|
||||||
pub async fn purge_ceph_osd(&self, client: Arc<K8sClient>) -> Result<Outcome, InterpretError> {
|
pub async fn purge_ceph_osd(
|
||||||
let osd_id_numeric = self.get_ceph_osd_id_numeric().unwrap();
|
&self,
|
||||||
let osd_id_full = self.get_ceph_osd_id().unwrap();
|
client: Arc<K8sClient>,
|
||||||
debug!(
|
osd_id_full: &str,
|
||||||
|
) -> Result<Outcome, InterpretError> {
|
||||||
|
info!(
|
||||||
"Purging OSD {} from Ceph cluster and removing its auth key",
|
"Purging OSD {} from Ceph cluster and removing its auth key",
|
||||||
osd_id_full
|
osd_id_full
|
||||||
);
|
);
|
||||||
@@ -292,9 +291,8 @@ impl CephRemoveOsdInterpret {
|
|||||||
"app".to_string(),
|
"app".to_string(),
|
||||||
Some(&self.score.rook_ceph_namespace),
|
Some(&self.score.rook_ceph_namespace),
|
||||||
vec![
|
vec![
|
||||||
"sh",
|
format!("ceph osd purge {osd_id_full} --yes-i-really-mean-it").as_str(),
|
||||||
"-c",
|
format!("ceph auth del osd.{osd_id_full}").as_str(),
|
||||||
format!("ceph osd purge {osd_id_numeric} --yes-i-really-mean-it && ceph auth del {osd_id_full}").as_str(),
|
|
||||||
],
|
],
|
||||||
)
|
)
|
||||||
.await?;
|
.await?;
|
||||||
@@ -307,10 +305,10 @@ impl CephRemoveOsdInterpret {
|
|||||||
pub async fn verify_ceph_osd_removal(
|
pub async fn verify_ceph_osd_removal(
|
||||||
&self,
|
&self,
|
||||||
client: Arc<K8sClient>,
|
client: Arc<K8sClient>,
|
||||||
|
osd_id_full: &str,
|
||||||
) -> Result<Outcome, InterpretError> {
|
) -> Result<Outcome, InterpretError> {
|
||||||
let (timeout, interval, start) = self.build_timer();
|
let (timeout, interval, start) = self.build_timer();
|
||||||
let osd_id_full = self.get_ceph_osd_id().unwrap();
|
info!(
|
||||||
debug!(
|
|
||||||
"Verifying OSD {} has been removed from the Ceph tree...",
|
"Verifying OSD {} has been removed from the Ceph tree...",
|
||||||
osd_id_full
|
osd_id_full
|
||||||
);
|
);
|
||||||
@@ -320,7 +318,7 @@ impl CephRemoveOsdInterpret {
|
|||||||
"rook-ceph-tools".to_string(),
|
"rook-ceph-tools".to_string(),
|
||||||
"app".to_string(),
|
"app".to_string(),
|
||||||
Some(&self.score.rook_ceph_namespace),
|
Some(&self.score.rook_ceph_namespace),
|
||||||
vec!["sh", "-c", "ceph osd tree -f json"],
|
vec!["ceph osd tree -f json"],
|
||||||
)
|
)
|
||||||
.await?;
|
.await?;
|
||||||
let tree =
|
let tree =
|
||||||
@@ -1,2 +1,2 @@
|
|||||||
pub mod ceph_remove_osd_score;
|
pub mod ceph_osd_replacement_score;
|
||||||
pub mod ceph_validate_health_score;
|
pub mod ceph_validate_health_score;
|
||||||
|
|||||||
@@ -77,7 +77,7 @@ impl YaSerializeTrait for HAProxyId {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(PartialEq, Debug, Clone)]
|
#[derive(PartialEq, Debug)]
|
||||||
pub struct HAProxyId(String);
|
pub struct HAProxyId(String);
|
||||||
|
|
||||||
impl Default for HAProxyId {
|
impl Default for HAProxyId {
|
||||||
@@ -297,7 +297,7 @@ pub struct HAProxyFrontends {
|
|||||||
pub frontend: Vec<Frontend>,
|
pub frontend: Vec<Frontend>,
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Clone, Default, PartialEq, Debug, YaSerialize, YaDeserialize)]
|
#[derive(Default, PartialEq, Debug, YaSerialize, YaDeserialize)]
|
||||||
pub struct Frontend {
|
pub struct Frontend {
|
||||||
#[yaserde(attribute = true)]
|
#[yaserde(attribute = true)]
|
||||||
pub uuid: String,
|
pub uuid: String,
|
||||||
@@ -310,7 +310,7 @@ pub struct Frontend {
|
|||||||
pub bind_options: MaybeString,
|
pub bind_options: MaybeString,
|
||||||
pub mode: String,
|
pub mode: String,
|
||||||
#[yaserde(rename = "defaultBackend")]
|
#[yaserde(rename = "defaultBackend")]
|
||||||
pub default_backend: Option<String>,
|
pub default_backend: String,
|
||||||
pub ssl_enabled: i32,
|
pub ssl_enabled: i32,
|
||||||
pub ssl_certificates: MaybeString,
|
pub ssl_certificates: MaybeString,
|
||||||
pub ssl_default_certificate: MaybeString,
|
pub ssl_default_certificate: MaybeString,
|
||||||
@@ -416,7 +416,7 @@ pub struct HAProxyBackends {
|
|||||||
pub backends: Vec<HAProxyBackend>,
|
pub backends: Vec<HAProxyBackend>,
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Clone, Default, PartialEq, Debug, YaSerialize, YaDeserialize)]
|
#[derive(Default, PartialEq, Debug, YaSerialize, YaDeserialize)]
|
||||||
pub struct HAProxyBackend {
|
pub struct HAProxyBackend {
|
||||||
#[yaserde(attribute = true, rename = "uuid")]
|
#[yaserde(attribute = true, rename = "uuid")]
|
||||||
pub uuid: String,
|
pub uuid: String,
|
||||||
@@ -535,7 +535,7 @@ pub struct HAProxyServers {
|
|||||||
pub servers: Vec<HAProxyServer>,
|
pub servers: Vec<HAProxyServer>,
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Clone, Default, PartialEq, Debug, YaSerialize, YaDeserialize)]
|
#[derive(Default, PartialEq, Debug, YaSerialize, YaDeserialize)]
|
||||||
pub struct HAProxyServer {
|
pub struct HAProxyServer {
|
||||||
#[yaserde(attribute = true, rename = "uuid")]
|
#[yaserde(attribute = true, rename = "uuid")]
|
||||||
pub uuid: String,
|
pub uuid: String,
|
||||||
@@ -543,8 +543,8 @@ pub struct HAProxyServer {
|
|||||||
pub enabled: u8,
|
pub enabled: u8,
|
||||||
pub name: String,
|
pub name: String,
|
||||||
pub description: MaybeString,
|
pub description: MaybeString,
|
||||||
pub address: Option<String>,
|
pub address: String,
|
||||||
pub port: Option<u16>,
|
pub port: u16,
|
||||||
pub checkport: MaybeString,
|
pub checkport: MaybeString,
|
||||||
pub mode: String,
|
pub mode: String,
|
||||||
pub multiplexer_protocol: MaybeString,
|
pub multiplexer_protocol: MaybeString,
|
||||||
@@ -589,7 +589,7 @@ pub struct HAProxyHealthChecks {
|
|||||||
pub healthchecks: Vec<HAProxyHealthCheck>,
|
pub healthchecks: Vec<HAProxyHealthCheck>,
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Clone, Default, PartialEq, Debug, YaSerialize, YaDeserialize)]
|
#[derive(Default, PartialEq, Debug, YaSerialize, YaDeserialize)]
|
||||||
pub struct HAProxyHealthCheck {
|
pub struct HAProxyHealthCheck {
|
||||||
#[yaserde(attribute = true)]
|
#[yaserde(attribute = true)]
|
||||||
pub uuid: String,
|
pub uuid: String,
|
||||||
|
|||||||
@@ -25,7 +25,6 @@ sha2 = "0.10.9"
|
|||||||
|
|
||||||
[dev-dependencies]
|
[dev-dependencies]
|
||||||
pretty_assertions.workspace = true
|
pretty_assertions.workspace = true
|
||||||
assertor.workspace = true
|
|
||||||
|
|
||||||
[lints.rust]
|
[lints.rust]
|
||||||
unexpected_cfgs = { level = "warn", check-cfg = ['cfg(e2e_test)'] }
|
unexpected_cfgs = { level = "warn", check-cfg = ['cfg(e2e_test)'] }
|
||||||
|
|||||||
@@ -30,7 +30,8 @@ impl SshConfigManager {
|
|||||||
|
|
||||||
self.opnsense_shell
|
self.opnsense_shell
|
||||||
.exec(&format!(
|
.exec(&format!(
|
||||||
"cp /conf/config.xml /conf/backup/{backup_filename}"
|
"cp /conf/config.xml /conf/backup/{}",
|
||||||
|
backup_filename
|
||||||
))
|
))
|
||||||
.await
|
.await
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,8 +1,10 @@
|
|||||||
mod ssh;
|
mod ssh;
|
||||||
use crate::Error;
|
|
||||||
use async_trait::async_trait;
|
|
||||||
pub use ssh::*;
|
pub use ssh::*;
|
||||||
|
|
||||||
|
use async_trait::async_trait;
|
||||||
|
|
||||||
|
use crate::Error;
|
||||||
|
|
||||||
#[async_trait]
|
#[async_trait]
|
||||||
pub trait OPNsenseShell: std::fmt::Debug + Send + Sync {
|
pub trait OPNsenseShell: std::fmt::Debug + Send + Sync {
|
||||||
async fn exec(&self, command: &str) -> Result<String, Error>;
|
async fn exec(&self, command: &str) -> Result<String, Error>;
|
||||||
|
|||||||
@@ -1,8 +1,11 @@
|
|||||||
use crate::{config::OPNsenseShell, Error};
|
use std::sync::Arc;
|
||||||
|
|
||||||
|
use log::warn;
|
||||||
use opnsense_config_xml::{
|
use opnsense_config_xml::{
|
||||||
Frontend, HAProxy, HAProxyBackend, HAProxyHealthCheck, HAProxyServer, OPNsense,
|
Frontend, HAProxy, HAProxyBackend, HAProxyHealthCheck, HAProxyServer, OPNsense,
|
||||||
};
|
};
|
||||||
use std::{collections::HashSet, sync::Arc};
|
|
||||||
|
use crate::{config::OPNsenseShell, Error};
|
||||||
|
|
||||||
pub struct LoadBalancerConfig<'a> {
|
pub struct LoadBalancerConfig<'a> {
|
||||||
opnsense: &'a mut OPNsense,
|
opnsense: &'a mut OPNsense,
|
||||||
@@ -28,7 +31,7 @@ impl<'a> LoadBalancerConfig<'a> {
|
|||||||
match &mut self.opnsense.opnsense.haproxy.as_mut() {
|
match &mut self.opnsense.opnsense.haproxy.as_mut() {
|
||||||
Some(haproxy) => f(haproxy),
|
Some(haproxy) => f(haproxy),
|
||||||
None => unimplemented!(
|
None => unimplemented!(
|
||||||
"Cannot configure load balancer when haproxy config does not exist yet"
|
"Adding a backend is not supported when haproxy config does not exist yet"
|
||||||
),
|
),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -37,67 +40,21 @@ impl<'a> LoadBalancerConfig<'a> {
|
|||||||
self.with_haproxy(|haproxy| haproxy.general.enabled = enabled as i32);
|
self.with_haproxy(|haproxy| haproxy.general.enabled = enabled as i32);
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Configures a service by removing any existing service on the same port
|
pub fn add_backend(&mut self, backend: HAProxyBackend) {
|
||||||
/// and then adding the new definition. This ensures idempotency.
|
warn!("TODO make sure this new backend does not refer non-existing entities like servers or health checks");
|
||||||
pub fn configure_service(
|
self.with_haproxy(|haproxy| haproxy.backends.backends.push(backend));
|
||||||
&mut self,
|
|
||||||
frontend: Frontend,
|
|
||||||
backend: HAProxyBackend,
|
|
||||||
servers: Vec<HAProxyServer>,
|
|
||||||
healthcheck: Option<HAProxyHealthCheck>,
|
|
||||||
) {
|
|
||||||
self.remove_service_by_bind_address(&frontend.bind);
|
|
||||||
self.remove_servers(&servers);
|
|
||||||
|
|
||||||
self.add_new_service(frontend, backend, servers, healthcheck);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Remove the corresponding real servers based on their name if they already exist.
|
pub fn add_frontend(&mut self, frontend: Frontend) {
|
||||||
fn remove_servers(&mut self, servers: &[HAProxyServer]) {
|
self.with_haproxy(|haproxy| haproxy.frontends.frontend.push(frontend));
|
||||||
let server_names: HashSet<_> = servers.iter().map(|s| s.name.clone()).collect();
|
|
||||||
self.with_haproxy(|haproxy| {
|
|
||||||
haproxy
|
|
||||||
.servers
|
|
||||||
.servers
|
|
||||||
.retain(|s| !server_names.contains(&s.name));
|
|
||||||
});
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Removes a service and its dependent components based on the frontend's bind address.
|
pub fn add_healthcheck(&mut self, healthcheck: HAProxyHealthCheck) {
|
||||||
/// This performs a cascading delete of the frontend, backend, servers, and health check.
|
self.with_haproxy(|haproxy| haproxy.healthchecks.healthchecks.push(healthcheck));
|
||||||
fn remove_service_by_bind_address(&mut self, bind_address: &str) {
|
|
||||||
self.with_haproxy(|haproxy| {
|
|
||||||
let Some(old_frontend) = remove_frontend_by_bind_address(haproxy, bind_address) else {
|
|
||||||
return;
|
|
||||||
};
|
|
||||||
|
|
||||||
let Some(old_backend) = remove_backend(haproxy, old_frontend) else {
|
|
||||||
return;
|
|
||||||
};
|
|
||||||
|
|
||||||
remove_healthcheck(haproxy, &old_backend);
|
|
||||||
remove_linked_servers(haproxy, &old_backend);
|
|
||||||
});
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Adds the components of a new service to the HAProxy configuration.
|
pub fn add_servers(&mut self, mut servers: Vec<HAProxyServer>) {
|
||||||
/// This function de-duplicates servers by name to prevent configuration errors.
|
self.with_haproxy(|haproxy| haproxy.servers.servers.append(&mut servers));
|
||||||
fn add_new_service(
|
|
||||||
&mut self,
|
|
||||||
frontend: Frontend,
|
|
||||||
backend: HAProxyBackend,
|
|
||||||
servers: Vec<HAProxyServer>,
|
|
||||||
healthcheck: Option<HAProxyHealthCheck>,
|
|
||||||
) {
|
|
||||||
self.with_haproxy(|haproxy| {
|
|
||||||
if let Some(check) = healthcheck {
|
|
||||||
haproxy.healthchecks.healthchecks.push(check);
|
|
||||||
}
|
|
||||||
|
|
||||||
haproxy.servers.servers.extend(servers);
|
|
||||||
haproxy.backends.backends.push(backend);
|
|
||||||
haproxy.frontends.frontend.push(frontend);
|
|
||||||
});
|
|
||||||
}
|
}
|
||||||
|
|
||||||
pub async fn reload_restart(&self) -> Result<(), Error> {
|
pub async fn reload_restart(&self) -> Result<(), Error> {
|
||||||
@@ -125,262 +82,3 @@ impl<'a> LoadBalancerConfig<'a> {
|
|||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
fn remove_frontend_by_bind_address(haproxy: &mut HAProxy, bind_address: &str) -> Option<Frontend> {
|
|
||||||
let pos = haproxy
|
|
||||||
.frontends
|
|
||||||
.frontend
|
|
||||||
.iter()
|
|
||||||
.position(|f| f.bind == bind_address);
|
|
||||||
|
|
||||||
match pos {
|
|
||||||
Some(pos) => Some(haproxy.frontends.frontend.remove(pos)),
|
|
||||||
None => None,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fn remove_backend(haproxy: &mut HAProxy, old_frontend: Frontend) -> Option<HAProxyBackend> {
|
|
||||||
let default_backend = old_frontend.default_backend?;
|
|
||||||
let pos = haproxy
|
|
||||||
.backends
|
|
||||||
.backends
|
|
||||||
.iter()
|
|
||||||
.position(|b| b.uuid == default_backend);
|
|
||||||
|
|
||||||
match pos {
|
|
||||||
Some(pos) => Some(haproxy.backends.backends.remove(pos)),
|
|
||||||
None => None, // orphaned frontend, shouldn't happen
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fn remove_healthcheck(haproxy: &mut HAProxy, backend: &HAProxyBackend) {
|
|
||||||
if let Some(uuid) = &backend.health_check.content {
|
|
||||||
haproxy
|
|
||||||
.healthchecks
|
|
||||||
.healthchecks
|
|
||||||
.retain(|h| h.uuid != *uuid);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Remove the backend's servers. This assumes servers are not shared between services.
|
|
||||||
fn remove_linked_servers(haproxy: &mut HAProxy, backend: &HAProxyBackend) {
|
|
||||||
if let Some(server_uuids_str) = &backend.linked_servers.content {
|
|
||||||
let server_uuids_to_remove: HashSet<_> = server_uuids_str.split(',').collect();
|
|
||||||
haproxy
|
|
||||||
.servers
|
|
||||||
.servers
|
|
||||||
.retain(|s| !server_uuids_to_remove.contains(s.uuid.as_str()));
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[cfg(test)]
|
|
||||||
mod tests {
|
|
||||||
use crate::config::DummyOPNSenseShell;
|
|
||||||
use assertor::*;
|
|
||||||
use opnsense_config_xml::{
|
|
||||||
Frontend, HAProxy, HAProxyBackend, HAProxyBackends, HAProxyFrontends, HAProxyHealthCheck,
|
|
||||||
HAProxyHealthChecks, HAProxyId, HAProxyServer, HAProxyServers, MaybeString, OPNsense,
|
|
||||||
};
|
|
||||||
use std::sync::Arc;
|
|
||||||
|
|
||||||
use super::LoadBalancerConfig;
|
|
||||||
|
|
||||||
static SERVICE_BIND_ADDRESS: &str = "192.168.1.1:80";
|
|
||||||
static OTHER_SERVICE_BIND_ADDRESS: &str = "192.168.1.1:443";
|
|
||||||
|
|
||||||
static SERVER_ADDRESS: &str = "1.1.1.1:80";
|
|
||||||
static OTHER_SERVER_ADDRESS: &str = "1.1.1.1:443";
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn configure_service_should_add_all_service_components_to_haproxy() {
|
|
||||||
let mut opnsense = given_opnsense();
|
|
||||||
let mut load_balancer = given_load_balancer(&mut opnsense);
|
|
||||||
let (healthcheck, servers, backend, frontend) =
|
|
||||||
given_service(SERVICE_BIND_ADDRESS, SERVER_ADDRESS);
|
|
||||||
|
|
||||||
load_balancer.configure_service(
|
|
||||||
frontend.clone(),
|
|
||||||
backend.clone(),
|
|
||||||
servers.clone(),
|
|
||||||
Some(healthcheck.clone()),
|
|
||||||
);
|
|
||||||
|
|
||||||
assert_haproxy_configured_with(
|
|
||||||
opnsense,
|
|
||||||
vec![frontend],
|
|
||||||
vec![backend],
|
|
||||||
servers,
|
|
||||||
vec![healthcheck],
|
|
||||||
);
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn configure_service_should_replace_service_on_same_bind_address() {
|
|
||||||
let (healthcheck, servers, backend, frontend) =
|
|
||||||
given_service(SERVICE_BIND_ADDRESS, SERVER_ADDRESS);
|
|
||||||
let mut opnsense = given_opnsense_with(given_haproxy(
|
|
||||||
vec![frontend.clone()],
|
|
||||||
vec![backend.clone()],
|
|
||||||
servers.clone(),
|
|
||||||
vec![healthcheck.clone()],
|
|
||||||
));
|
|
||||||
let mut load_balancer = given_load_balancer(&mut opnsense);
|
|
||||||
|
|
||||||
let (updated_healthcheck, updated_servers, updated_backend, updated_frontend) =
|
|
||||||
given_service(SERVICE_BIND_ADDRESS, OTHER_SERVER_ADDRESS);
|
|
||||||
|
|
||||||
load_balancer.configure_service(
|
|
||||||
updated_frontend.clone(),
|
|
||||||
updated_backend.clone(),
|
|
||||||
updated_servers.clone(),
|
|
||||||
Some(updated_healthcheck.clone()),
|
|
||||||
);
|
|
||||||
|
|
||||||
assert_haproxy_configured_with(
|
|
||||||
opnsense,
|
|
||||||
vec![updated_frontend],
|
|
||||||
vec![updated_backend],
|
|
||||||
updated_servers,
|
|
||||||
vec![updated_healthcheck],
|
|
||||||
);
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn configure_service_should_keep_existing_service_on_different_bind_addresses() {
|
|
||||||
let (healthcheck, servers, backend, frontend) =
|
|
||||||
given_service(SERVICE_BIND_ADDRESS, SERVER_ADDRESS);
|
|
||||||
let (other_healthcheck, other_servers, other_backend, other_frontend) =
|
|
||||||
given_service(OTHER_SERVICE_BIND_ADDRESS, OTHER_SERVER_ADDRESS);
|
|
||||||
let mut opnsense = given_opnsense_with(given_haproxy(
|
|
||||||
vec![frontend.clone()],
|
|
||||||
vec![backend.clone()],
|
|
||||||
servers.clone(),
|
|
||||||
vec![healthcheck.clone()],
|
|
||||||
));
|
|
||||||
let mut load_balancer = given_load_balancer(&mut opnsense);
|
|
||||||
|
|
||||||
load_balancer.configure_service(
|
|
||||||
other_frontend.clone(),
|
|
||||||
other_backend.clone(),
|
|
||||||
other_servers.clone(),
|
|
||||||
Some(other_healthcheck.clone()),
|
|
||||||
);
|
|
||||||
|
|
||||||
assert_haproxy_configured_with(
|
|
||||||
opnsense,
|
|
||||||
vec![frontend, other_frontend],
|
|
||||||
vec![backend, other_backend],
|
|
||||||
[servers, other_servers].concat(),
|
|
||||||
vec![healthcheck, other_healthcheck],
|
|
||||||
);
|
|
||||||
}
|
|
||||||
|
|
||||||
fn assert_haproxy_configured_with(
|
|
||||||
opnsense: OPNsense,
|
|
||||||
frontends: Vec<Frontend>,
|
|
||||||
backends: Vec<HAProxyBackend>,
|
|
||||||
servers: Vec<HAProxyServer>,
|
|
||||||
healthchecks: Vec<HAProxyHealthCheck>,
|
|
||||||
) {
|
|
||||||
let haproxy = opnsense.opnsense.haproxy.as_ref().unwrap();
|
|
||||||
assert_that!(haproxy.frontends.frontend).contains_exactly(frontends);
|
|
||||||
assert_that!(haproxy.backends.backends).contains_exactly(backends);
|
|
||||||
assert_that!(haproxy.servers.servers).is_equal_to(servers);
|
|
||||||
assert_that!(haproxy.healthchecks.healthchecks).contains_exactly(healthchecks);
|
|
||||||
}
|
|
||||||
|
|
||||||
fn given_opnsense() -> OPNsense {
|
|
||||||
OPNsense::default()
|
|
||||||
}
|
|
||||||
|
|
||||||
fn given_opnsense_with(haproxy: HAProxy) -> OPNsense {
|
|
||||||
let mut opnsense = OPNsense::default();
|
|
||||||
opnsense.opnsense.haproxy = Some(haproxy);
|
|
||||||
|
|
||||||
opnsense
|
|
||||||
}
|
|
||||||
|
|
||||||
fn given_load_balancer<'a>(opnsense: &'a mut OPNsense) -> LoadBalancerConfig<'a> {
|
|
||||||
let opnsense_shell = Arc::new(DummyOPNSenseShell {});
|
|
||||||
if opnsense.opnsense.haproxy.is_none() {
|
|
||||||
opnsense.opnsense.haproxy = Some(HAProxy::default());
|
|
||||||
}
|
|
||||||
LoadBalancerConfig::new(opnsense, opnsense_shell)
|
|
||||||
}
|
|
||||||
|
|
||||||
fn given_service(
|
|
||||||
bind_address: &str,
|
|
||||||
server_address: &str,
|
|
||||||
) -> (
|
|
||||||
HAProxyHealthCheck,
|
|
||||||
Vec<HAProxyServer>,
|
|
||||||
HAProxyBackend,
|
|
||||||
Frontend,
|
|
||||||
) {
|
|
||||||
let healthcheck = given_healthcheck();
|
|
||||||
let servers = vec![given_server(server_address)];
|
|
||||||
let backend = given_backend();
|
|
||||||
let frontend = given_frontend(bind_address);
|
|
||||||
(healthcheck, servers, backend, frontend)
|
|
||||||
}
|
|
||||||
|
|
||||||
fn given_haproxy(
|
|
||||||
frontends: Vec<Frontend>,
|
|
||||||
backends: Vec<HAProxyBackend>,
|
|
||||||
servers: Vec<HAProxyServer>,
|
|
||||||
healthchecks: Vec<HAProxyHealthCheck>,
|
|
||||||
) -> HAProxy {
|
|
||||||
HAProxy {
|
|
||||||
frontends: HAProxyFrontends {
|
|
||||||
frontend: frontends,
|
|
||||||
},
|
|
||||||
backends: HAProxyBackends { backends },
|
|
||||||
servers: HAProxyServers { servers },
|
|
||||||
healthchecks: HAProxyHealthChecks { healthchecks },
|
|
||||||
..Default::default()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fn given_frontend(bind_address: &str) -> Frontend {
|
|
||||||
Frontend {
|
|
||||||
uuid: "uuid".into(),
|
|
||||||
id: HAProxyId::default(),
|
|
||||||
enabled: 1,
|
|
||||||
name: format!("frontend_{bind_address}"),
|
|
||||||
bind: bind_address.into(),
|
|
||||||
default_backend: Some("backend-uuid".into()),
|
|
||||||
..Default::default()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fn given_backend() -> HAProxyBackend {
|
|
||||||
HAProxyBackend {
|
|
||||||
uuid: "backend-uuid".into(),
|
|
||||||
id: HAProxyId::default(),
|
|
||||||
enabled: 1,
|
|
||||||
name: "backend_192.168.1.1:80".into(),
|
|
||||||
linked_servers: MaybeString::from("server-uuid"),
|
|
||||||
health_check_enabled: 1,
|
|
||||||
health_check: MaybeString::from("healthcheck-uuid"),
|
|
||||||
..Default::default()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fn given_server(address: &str) -> HAProxyServer {
|
|
||||||
HAProxyServer {
|
|
||||||
uuid: "server-uuid".into(),
|
|
||||||
id: HAProxyId::default(),
|
|
||||||
name: address.into(),
|
|
||||||
address: Some(address.into()),
|
|
||||||
..Default::default()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fn given_healthcheck() -> HAProxyHealthCheck {
|
|
||||||
HAProxyHealthCheck {
|
|
||||||
uuid: "healthcheck-uuid".into(),
|
|
||||||
name: "healthcheck".into(),
|
|
||||||
..Default::default()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|||||||
Reference in New Issue
Block a user