Compare commits
18 Commits
78e595e696...configure-

| Author | SHA1 | Date |
|---|---|---|
| | ea7322f38c | |
| | 2edd24753a | |
| | da5be17cb6 | |
| | 1265cebfa7 | |
| | 073cccde2f | |
| | 77e09436a9 | |
| | 45e0de2097 | |
| | 731dc5f404 | |
| | 1199564122 | |
| | f2f55d98d4 | |
| | 7b6ac6641a | |
| | 58c1fd4a96 | |
| | 2388f585f5 | |
| | ffe3c09907 | |
| | 0de52aedbf | |
| | 427009bbfe | |
| | fe0501b784 | |
| | 61b02e7a28 | |
Cargo.lock (72 changes, generated)

@@ -1780,7 +1780,6 @@ dependencies = [
 name = "example-nanodc"
 version = "0.1.0"
 dependencies = [
- "brocade",
  "cidr",
  "env_logger",
  "harmony",
@@ -1789,7 +1788,6 @@ dependencies = [
  "harmony_tui",
  "harmony_types",
  "log",
- "serde",
  "tokio",
  "url",
 ]
@@ -1804,30 +1802,10 @@ dependencies = [
  "url",
 ]

-[[package]]
-name = "example-okd-cluster-alerts"
-version = "0.1.0"
-dependencies = [
- "brocade",
- "cidr",
- "env_logger",
- "harmony",
- "harmony_cli",
- "harmony_macros",
- "harmony_secret",
- "harmony_secret_derive",
- "harmony_types",
- "log",
- "serde",
- "tokio",
- "url",
-]
-
 [[package]]
 name = "example-okd-install"
 version = "0.1.0"
 dependencies = [
- "brocade",
  "cidr",
  "env_logger",
  "harmony",
@@ -1858,16 +1836,25 @@ dependencies = [
 name = "example-opnsense"
 version = "0.1.0"
 dependencies = [
- "brocade",
  "cidr",
  "env_logger",
  "harmony",
  "harmony_macros",
- "harmony_secret",
  "harmony_tui",
  "harmony_types",
  "log",
- "serde",
+ "tokio",
+ "url",
+]
+
+[[package]]
+name = "example-penpot"
+version = "0.1.0"
+dependencies = [
+ "harmony",
+ "harmony_cli",
+ "harmony_macros",
+ "harmony_types",
  "tokio",
  "url",
 ]
@@ -1876,7 +1863,6 @@ dependencies = [
 name = "example-pxe"
 version = "0.1.0"
 dependencies = [
- "brocade",
  "cidr",
  "env_logger",
  "harmony",
@@ -1891,15 +1877,6 @@ dependencies = [
  "url",
 ]

-[[package]]
-name = "example-remove-rook-osd"
-version = "0.1.0"
-dependencies = [
- "harmony",
- "harmony_cli",
- "tokio",
-]
-
 [[package]]
 name = "example-rust"
 version = "0.1.0"
@@ -2479,6 +2456,17 @@ dependencies = [
  "tokio",
 ]

+[[package]]
+name = "harmony_derive"
+version = "0.1.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2d138bbb32bb346299c5f95fbb53532313f39927cb47c411c99c634ef8665ef7"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn 1.0.109",
+]
+
 [[package]]
 name = "harmony_inventory_agent"
 version = "0.1.0"
@@ -3925,6 +3913,19 @@ dependencies = [
  "web-time",
 ]

+[[package]]
+name = "okd_host_network"
+version = "0.1.0"
+dependencies = [
+ "harmony",
+ "harmony_cli",
+ "harmony_derive",
+ "harmony_inventory_agent",
+ "harmony_macros",
+ "harmony_types",
+ "tokio",
+]
+
 [[package]]
 name = "once_cell"
 version = "1.21.3"
@@ -3953,7 +3954,6 @@ checksum = "d05e27ee213611ffe7d6348b942e8f942b37114c00cc03cec254295a4a17852e"
 name = "opnsense-config"
 version = "0.1.0"
 dependencies = [
- "assertor",
  "async-trait",
  "chrono",
  "env_logger",

Cargo.toml

@@ -15,8 +15,7 @@ members = [
     "harmony_inventory_agent",
     "harmony_secret_derive",
     "harmony_secret",
-    "adr/agent_discovery/mdns",
-    "brocade",
+    "adr/agent_discovery/mdns", "brocade",
 ]

 [workspace.package]

@@ -10,7 +10,6 @@ use log::{debug, info};
 use regex::Regex;
 use std::{collections::HashSet, str::FromStr};

-#[derive(Debug)]
 pub struct FastIronClient {
     shell: BrocadeShell,
     version: BrocadeInfo,
@@ -71,7 +70,7 @@ impl FastIronClient {

         Some(Ok(InterSwitchLink {
             local_port,
-            remote_port: None,
+            remote_port: None, // FIXME: Map the remote port as well
         }))
     }


@@ -31,7 +31,6 @@ pub struct BrocadeOptions {
 pub struct TimeoutConfig {
     pub shell_ready: Duration,
     pub command_execution: Duration,
-    pub command_output: Duration,
     pub cleanup: Duration,
     pub message_wait: Duration,
 }
@@ -41,7 +40,6 @@ impl Default for TimeoutConfig {
         Self {
             shell_ready: Duration::from_secs(10),
             command_execution: Duration::from_secs(60), // Commands like `deploy` (for a LAG) can take a while
-            command_output: Duration::from_secs(5), // Delay to start logging "waiting for command output"
             cleanup: Duration::from_secs(10),
             message_wait: Duration::from_millis(500),
         }
@@ -164,7 +162,7 @@ pub async fn init(
 }

 #[async_trait]
-pub trait BrocadeClient: std::fmt::Debug {
+pub trait BrocadeClient {
     /// Retrieves the operating system and version details from the connected Brocade switch.
     ///
     /// This is typically the first call made after establishing a connection to determine

@@ -3,7 +3,6 @@ use std::str::FromStr;
 use async_trait::async_trait;
 use harmony_types::switch::{PortDeclaration, PortLocation};
 use log::{debug, info};
-use regex::Regex;

 use crate::{
     BrocadeClient, BrocadeInfo, Error, ExecutionMode, InterSwitchLink, InterfaceInfo,
@@ -11,7 +10,6 @@ use crate::{
     parse_brocade_mac_address, shell::BrocadeShell,
 };

-#[derive(Debug)]
 pub struct NetworkOperatingSystemClient {
     shell: BrocadeShell,
     version: BrocadeInfo,
@@ -104,37 +102,13 @@ impl NetworkOperatingSystemClient {
         };

         Some(Ok(InterfaceInfo {
-            name: format!("{interface_type} {port_location}"),
+            name: format!("{} {}", interface_type, port_location),
             port_location,
             interface_type,
             operating_mode,
             status,
         }))
     }
-
-    fn map_configure_interfaces_error(&self, err: Error) -> Error {
-        debug!("[Brocade] {err}");
-
-        if let Error::CommandError(message) = &err {
-            if message.contains("switchport")
-                && message.contains("Cannot configure aggregator member")
-            {
-                let re = Regex::new(r"\(conf-if-([a-zA-Z]+)-([\d/]+)\)#").unwrap();
-
-                if let Some(caps) = re.captures(message) {
-                    let interface_type = &caps[1];
-                    let port_location = &caps[2];
-                    let interface = format!("{interface_type} {port_location}");
-
-                    return Error::CommandError(format!(
-                        "Cannot configure interface '{interface}', it is a member of a port-channel (LAG)"
-                    ));
-                }
-            }
-        }
-
-        err
-    }
 }

 #[async_trait]
@@ -222,10 +196,11 @@ impl BrocadeClient for NetworkOperatingSystemClient {
             commands.push("exit".into());
         }

+        commands.push("write memory".into());
+
         self.shell
             .run_commands(commands, ExecutionMode::Regular)
-            .await
-            .map_err(|err| self.map_configure_interfaces_error(err))?;
+            .await?;

         info!("[Brocade] Interfaces configured.");

@@ -237,7 +212,7 @@ impl BrocadeClient for NetworkOperatingSystemClient {

         let output = self
             .shell
-            .run_command("show port-channel summary", ExecutionMode::Regular)
+            .run_command("show port-channel", ExecutionMode::Regular)
             .await?;

         let used_ids: Vec<u8> = output
@@ -272,12 +247,7 @@ impl BrocadeClient for NetworkOperatingSystemClient {
         ports: &[PortLocation],
     ) -> Result<(), Error> {
         info!(
-            "[Brocade] Configuring port-channel '{channel_id} {channel_name}' with ports: {}",
-            ports
-                .iter()
-                .map(|p| format!("{p}"))
-                .collect::<Vec<String>>()
-                .join(", ")
+            "[Brocade] Configuring port-channel '{channel_name} {channel_id}' with ports: {ports:?}"
         );

         let interfaces = self.get_interfaces().await?;
@@ -300,11 +270,13 @@ impl BrocadeClient for NetworkOperatingSystemClient {
             commands.push("no ip address".into());
             commands.push("no fabric isl enable".into());
             commands.push("no fabric trunk enable".into());
-            commands.push(format!("channel-group {channel_id} mode active"));
+            commands.push(format!("channel-group {} mode active", channel_id));
             commands.push("no shutdown".into());
             commands.push("exit".into());
         }

+        commands.push("write memory".into());
+
         self.shell
             .run_commands(commands, ExecutionMode::Regular)
             .await?;
@@ -321,6 +293,7 @@ impl BrocadeClient for NetworkOperatingSystemClient {
             "configure terminal".into(),
             format!("no interface port-channel {}", channel_name),
             "exit".into(),
+            "write memory".into(),
         ];

         self.shell

@@ -13,7 +13,6 @@ use log::info;
 use russh::ChannelMsg;
 use tokio::time::timeout;

-#[derive(Debug)]
 pub struct BrocadeShell {
     ip: IpAddr,
     port: u16,
@@ -211,7 +210,7 @@ impl BrocadeSession {
         let mut output = Vec::new();
         let start = Instant::now();
         let read_timeout = Duration::from_millis(500);
-        let log_interval = Duration::from_secs(5);
+        let log_interval = Duration::from_secs(3);
         let mut last_log = Instant::now();

         loop {
@@ -221,9 +220,7 @@ impl BrocadeSession {
                 ));
             }

-            if start.elapsed() > self.options.timeouts.command_output
-                && last_log.elapsed() > log_interval
-            {
+            if start.elapsed() > Duration::from_secs(5) && last_log.elapsed() > log_interval {
                 info!("[Brocade] Waiting for command output...");
                 last_log = Instant::now();
             }
@@ -278,7 +275,7 @@ impl BrocadeSession {
         let output_lower = output.to_lowercase();
         if ERROR_PATTERNS.iter().any(|&p| output_lower.contains(p)) {
             return Err(Error::CommandError(format!(
-                "Command error: {}",
+                "Command '{command}' failed: {}",
                 output.trim()
             )));
         }

@@ -24,14 +24,13 @@ use harmony::{
     },
     topology::K8sAnywhereTopology,
 };
-use harmony_types::{k8s_name::K8sName, net::Url};
+use harmony_types::net::Url;

 #[tokio::main]
 async fn main() {
     let discord_receiver = DiscordWebhook {
-        name: K8sName("test-discord".to_string()),
+        name: "test-discord".to_string(),
         url: Url::Url(url::Url::parse("https://discord.doesnt.exist.com").unwrap()),
-        selectors: vec![],
     };

     let high_pvc_fill_rate_over_two_days_alert = high_pvc_fill_rate_over_two_days();

@@ -22,8 +22,8 @@ use harmony::{
         tenant::{ResourceLimits, TenantConfig, TenantNetworkPolicy},
     },
 };
+use harmony_types::id::Id;
 use harmony_types::net::Url;
-use harmony_types::{id::Id, k8s_name::K8sName};

 #[tokio::main]
 async fn main() {
@@ -43,9 +43,8 @@ async fn main() {
     };

     let discord_receiver = DiscordWebhook {
-        name: K8sName("test-discord".to_string()),
+        name: "test-discord".to_string(),
         url: Url::Url(url::Url::parse("https://discord.doesnt.exist.com").unwrap()),
-        selectors: vec![],
     };

     let high_pvc_fill_rate_over_two_days_alert = high_pvc_fill_rate_over_two_days();

@@ -17,5 +17,3 @@ harmony_secret = { path = "../../harmony_secret" }
 log = { workspace = true }
 env_logger = { workspace = true }
 url = { workspace = true }
-serde = { workspace = true }
-brocade = { path = "../../brocade" }

@@ -3,13 +3,12 @@ use std::{
     sync::Arc,
 };

-use brocade::BrocadeOptions;
 use cidr::Ipv4Cidr;
 use harmony::{
     config::secret::SshKeyPair,
     data::{FileContent, FilePath},
     hardware::{HostCategory, Location, PhysicalHost, SwitchGroup},
-    infra::{brocade::BrocadeSwitchClient, opnsense::OPNSenseManagementInterface},
+    infra::opnsense::OPNSenseManagementInterface,
     inventory::Inventory,
     modules::{
         http::StaticFilesHttpScore,
@@ -23,9 +22,8 @@ use harmony::{
     topology::{LogicalHost, UnmanagedRouter},
 };
 use harmony_macros::{ip, mac_address};
-use harmony_secret::{Secret, SecretManager};
+use harmony_secret::SecretManager;
 use harmony_types::net::Url;
-use serde::{Deserialize, Serialize};

 #[tokio::main]
 async fn main() {
@@ -34,26 +32,6 @@ async fn main() {
         name: String::from("fw0"),
     };

-    let switch_auth = SecretManager::get_or_prompt::<BrocadeSwitchAuth>()
-        .await
-        .expect("Failed to get credentials");
-
-    let switches: Vec<IpAddr> = vec![ip!("192.168.33.101")];
-    let brocade_options = Some(BrocadeOptions {
-        dry_run: *harmony::config::DRY_RUN,
-        ..Default::default()
-    });
-    let switch_client = BrocadeSwitchClient::init(
-        &switches,
-        &switch_auth.username,
-        &switch_auth.password,
-        brocade_options,
-    )
-    .await
-    .expect("Failed to connect to switch");
-
-    let switch_client = Arc::new(switch_client);
-
     let opnsense = Arc::new(
         harmony::infra::opnsense::OPNSenseFirewall::new(firewall, None, "root", "opnsense").await,
     );
@@ -61,7 +39,6 @@ async fn main() {
     let gateway_ipv4 = Ipv4Addr::new(192, 168, 33, 1);
     let gateway_ip = IpAddr::V4(gateway_ipv4);
     let topology = harmony::topology::HAClusterTopology {
-        kubeconfig: None,
         domain_name: "ncd0.harmony.mcd".to_string(), // TODO this must be set manually correctly
         // when setting up the opnsense firewall
         router: Arc::new(UnmanagedRouter::new(
@@ -106,7 +83,7 @@ async fn main() {
                 name: "wk2".to_string(),
             },
         ],
-        switch_client: switch_client.clone(),
+        switch: vec![],
     };

     let inventory = Inventory {
@@ -189,9 +166,3 @@ async fn main() {
     .await
     .unwrap();
 }
-
-#[derive(Secret, Serialize, Deserialize, Debug)]
-pub struct BrocadeSwitchAuth {
-    pub username: String,
-    pub password: String,
-}

@@ -1,22 +0,0 @@
-[package]
-name = "example-okd-cluster-alerts"
-edition = "2024"
-version.workspace = true
-readme.workspace = true
-license.workspace = true
-publish = false
-
-[dependencies]
-harmony = { path = "../../harmony" }
-harmony_cli = { path = "../../harmony_cli" }
-harmony_types = { path = "../../harmony_types" }
-harmony_secret = { path = "../../harmony_secret" }
-harmony_secret_derive = { path = "../../harmony_secret_derive" }
-cidr = { workspace = true }
-tokio = { workspace = true }
-harmony_macros = { path = "../../harmony_macros" }
-log = { workspace = true }
-env_logger = { workspace = true }
-url = { workspace = true }
-serde.workspace = true
-brocade = { path = "../../brocade" }

@@ -1,38 +0,0 @@
-use std::collections::HashMap;
-
-use harmony::{
-    inventory::Inventory,
-    modules::monitoring::{
-        alert_channel::discord_alert_channel::DiscordWebhook,
-        okd::cluster_monitoring::OpenshiftClusterAlertScore,
-    },
-    topology::K8sAnywhereTopology,
-};
-use harmony_macros::hurl;
-use harmony_types::k8s_name::K8sName;
-
-#[tokio::main]
-async fn main() {
-    let mut sel = HashMap::new();
-    sel.insert(
-        "openshift_io_alert_source".to_string(),
-        "platform".to_string(),
-    );
-    let mut sel2 = HashMap::new();
-    sel2.insert("openshift_io_alert_source".to_string(), "".to_string());
-    let selectors = vec![sel, sel2];
-    harmony_cli::run(
-        Inventory::autoload(),
-        K8sAnywhereTopology::from_env(),
-        vec![Box::new(OpenshiftClusterAlertScore {
-            receivers: vec![Box::new(DiscordWebhook {
-                name: K8sName("wills-discord-webhook-example".to_string()),
-                url: hurl!("https://something.io"),
-                selectors: selectors,
-            })],
-        })],
-        None,
-    )
-    .await
-    .unwrap();
-}

@@ -19,4 +19,3 @@ log = { workspace = true }
 env_logger = { workspace = true }
 url = { workspace = true }
 serde.workspace = true
-brocade = { path = "../../brocade" }

@@ -1,8 +1,7 @@
-use brocade::BrocadeOptions;
 use cidr::Ipv4Cidr;
 use harmony::{
     hardware::{Location, SwitchGroup},
-    infra::{brocade::BrocadeSwitchClient, opnsense::OPNSenseManagementInterface},
+    infra::opnsense::OPNSenseManagementInterface,
     inventory::Inventory,
     topology::{HAClusterTopology, LogicalHost, UnmanagedRouter},
 };
@@ -23,26 +22,6 @@ pub async fn get_topology() -> HAClusterTopology {
         name: String::from("opnsense-1"),
     };

-    let switch_auth = SecretManager::get_or_prompt::<BrocadeSwitchAuth>()
-        .await
-        .expect("Failed to get credentials");
-
-    let switches: Vec<IpAddr> = vec![ip!("192.168.1.101")]; // TODO: Adjust me
-    let brocade_options = Some(BrocadeOptions {
-        dry_run: *harmony::config::DRY_RUN,
-        ..Default::default()
-    });
-    let switch_client = BrocadeSwitchClient::init(
-        &switches,
-        &switch_auth.username,
-        &switch_auth.password,
-        brocade_options,
-    )
-    .await
-    .expect("Failed to connect to switch");
-
-    let switch_client = Arc::new(switch_client);
-
     let config = SecretManager::get_or_prompt::<OPNSenseFirewallConfig>().await;
     let config = config.unwrap();

@@ -59,7 +38,6 @@ pub async fn get_topology() -> HAClusterTopology {
     let gateway_ipv4 = ipv4!("192.168.1.1");
     let gateway_ip = IpAddr::V4(gateway_ipv4);
     harmony::topology::HAClusterTopology {
-        kubeconfig: None,
         domain_name: "demo.harmony.mcd".to_string(),
         router: Arc::new(UnmanagedRouter::new(
             gateway_ip,
@@ -80,7 +58,7 @@ pub async fn get_topology() -> HAClusterTopology {
             name: "bootstrap".to_string(),
         },
         workers: vec![],
-        switch_client: switch_client.clone(),
+        switch: vec![],
     }
 }

@@ -97,9 +75,3 @@ pub fn get_inventory() -> Inventory {
         control_plane_host: vec![],
     }
 }
-
-#[derive(Secret, Serialize, Deserialize, Debug)]
-pub struct BrocadeSwitchAuth {
-    pub username: String,
-    pub password: String,
-}

@@ -19,4 +19,3 @@ log = { workspace = true }
 env_logger = { workspace = true }
 url = { workspace = true }
 serde.workspace = true
-brocade = { path = "../../brocade" }

@@ -1,15 +1,13 @@
-use brocade::BrocadeOptions;
 use cidr::Ipv4Cidr;
 use harmony::{
     config::secret::OPNSenseFirewallCredentials,
     hardware::{Location, SwitchGroup},
-    infra::{brocade::BrocadeSwitchClient, opnsense::OPNSenseManagementInterface},
+    infra::opnsense::OPNSenseManagementInterface,
     inventory::Inventory,
     topology::{HAClusterTopology, LogicalHost, UnmanagedRouter},
 };
 use harmony_macros::{ip, ipv4};
-use harmony_secret::{Secret, SecretManager};
-use serde::{Deserialize, Serialize};
+use harmony_secret::SecretManager;
 use std::{net::IpAddr, sync::Arc};

 pub async fn get_topology() -> HAClusterTopology {
@@ -18,26 +16,6 @@ pub async fn get_topology() -> HAClusterTopology {
         name: String::from("opnsense-1"),
     };

-    let switch_auth = SecretManager::get_or_prompt::<BrocadeSwitchAuth>()
-        .await
-        .expect("Failed to get credentials");
-
-    let switches: Vec<IpAddr> = vec![ip!("192.168.1.101")]; // TODO: Adjust me
-    let brocade_options = Some(BrocadeOptions {
-        dry_run: *harmony::config::DRY_RUN,
-        ..Default::default()
-    });
-    let switch_client = BrocadeSwitchClient::init(
-        &switches,
-        &switch_auth.username,
-        &switch_auth.password,
-        brocade_options,
-    )
-    .await
-    .expect("Failed to connect to switch");
-
-    let switch_client = Arc::new(switch_client);
-
     let config = SecretManager::get_or_prompt::<OPNSenseFirewallCredentials>().await;
     let config = config.unwrap();

@@ -54,7 +32,6 @@ pub async fn get_topology() -> HAClusterTopology {
     let gateway_ipv4 = ipv4!("192.168.1.1");
     let gateway_ip = IpAddr::V4(gateway_ipv4);
     harmony::topology::HAClusterTopology {
-        kubeconfig: None,
         domain_name: "demo.harmony.mcd".to_string(),
         router: Arc::new(UnmanagedRouter::new(
             gateway_ip,
@@ -75,7 +52,7 @@ pub async fn get_topology() -> HAClusterTopology {
             name: "cp0".to_string(),
         },
         workers: vec![],
-        switch_client: switch_client.clone(),
+        switch: vec![],
     }
 }

@@ -92,9 +69,3 @@ pub fn get_inventory() -> Inventory {
         control_plane_host: vec![],
     }
 }
-
-#[derive(Secret, Serialize, Deserialize, Debug)]
-pub struct BrocadeSwitchAuth {
-    pub username: String,
-    pub password: String,
-}

@@ -16,6 +16,3 @@ harmony_macros = { path = "../../harmony_macros" }
 log = { workspace = true }
 env_logger = { workspace = true }
 url = { workspace = true }
-harmony_secret = { path = "../../harmony_secret" }
-brocade = { path = "../../brocade" }
-serde = { workspace = true }

@@ -3,11 +3,10 @@ use std::{
     sync::Arc,
 };

-use brocade::BrocadeOptions;
 use cidr::Ipv4Cidr;
 use harmony::{
     hardware::{HostCategory, Location, PhysicalHost, SwitchGroup},
-    infra::{brocade::BrocadeSwitchClient, opnsense::OPNSenseManagementInterface},
+    infra::opnsense::OPNSenseManagementInterface,
     inventory::Inventory,
     modules::{
         dummy::{ErrorScore, PanicScore, SuccessScore},
@@ -19,9 +18,7 @@ use harmony::{
     topology::{LogicalHost, UnmanagedRouter},
 };
 use harmony_macros::{ip, mac_address};
-use harmony_secret::{Secret, SecretManager};
 use harmony_types::net::Url;
-use serde::{Deserialize, Serialize};

 #[tokio::main]
 async fn main() {
@@ -30,26 +27,6 @@ async fn main() {
         name: String::from("opnsense-1"),
     };

-    let switch_auth = SecretManager::get_or_prompt::<BrocadeSwitchAuth>()
-        .await
-        .expect("Failed to get credentials");
-
-    let switches: Vec<IpAddr> = vec![ip!("192.168.5.101")]; // TODO: Adjust me
-    let brocade_options = Some(BrocadeOptions {
-        dry_run: *harmony::config::DRY_RUN,
-        ..Default::default()
-    });
-    let switch_client = BrocadeSwitchClient::init(
-        &switches,
-        &switch_auth.username,
-        &switch_auth.password,
-        brocade_options,
-    )
-    .await
-    .expect("Failed to connect to switch");
-
-    let switch_client = Arc::new(switch_client);
-
     let opnsense = Arc::new(
         harmony::infra::opnsense::OPNSenseFirewall::new(firewall, None, "root", "opnsense").await,
     );
@@ -57,7 +34,6 @@ async fn main() {
     let gateway_ipv4 = Ipv4Addr::new(10, 100, 8, 1);
     let gateway_ip = IpAddr::V4(gateway_ipv4);
     let topology = harmony::topology::HAClusterTopology {
-        kubeconfig: None,
         domain_name: "demo.harmony.mcd".to_string(),
         router: Arc::new(UnmanagedRouter::new(
             gateway_ip,
@@ -78,7 +54,7 @@ async fn main() {
             name: "cp0".to_string(),
         },
         workers: vec![],
-        switch_client: switch_client.clone(),
+        switch: vec![],
     };

     let inventory = Inventory {
@@ -133,9 +109,3 @@ async fn main() {
     .await
     .unwrap();
 }
-
-#[derive(Secret, Serialize, Deserialize, Debug)]
-pub struct BrocadeSwitchAuth {
-    pub username: String,
-    pub password: String,
-}

@@ -1,11 +0,0 @@
-[package]
-name = "example-remove-rook-osd"
-edition = "2024"
-version.workspace = true
-readme.workspace = true
-license.workspace = true
-
-[dependencies]
-harmony = { version = "0.1.0", path = "../../harmony" }
-harmony_cli = { version = "0.1.0", path = "../../harmony_cli" }
-tokio.workspace = true

@@ -1,18 +0,0 @@
-use harmony::{
-    inventory::Inventory, modules::storage::ceph::ceph_remove_osd_score::CephRemoveOsd,
-    topology::K8sAnywhereTopology,
-};
-
-#[tokio::main]
-async fn main() {
-    let ceph_score = CephRemoveOsd {
-        osd_deployment_name: "rook-ceph-osd-2".to_string(),
-        rook_ceph_namespace: "rook-ceph".to_string(),
-    };
-
-    let topology = K8sAnywhereTopology::from_env();
-    let inventory = Inventory::autoload();
-    harmony_cli::run(inventory, topology, vec![Box::new(ceph_score)], None)
-        .await
-        .unwrap();
-}
@@ -1,4 +1,4 @@
-use std::{collections::HashMap, path::PathBuf, sync::Arc};
+use std::{path::PathBuf, sync::Arc};

 use harmony::{
     inventory::Inventory,
@@ -10,7 +10,7 @@ use harmony::{
     },
     topology::K8sAnywhereTopology,
 };
-use harmony_types::{k8s_name::K8sName, net::Url};
+use harmony_types::net::Url;

 #[tokio::main]
 async fn main() {
@@ -22,9 +22,8 @@ async fn main() {
     });

     let discord_receiver = DiscordWebhook {
-        name: K8sName("test-discord".to_string()),
+        name: "test-discord".to_string(),
         url: Url::Url(url::Url::parse("https://discord.doesnt.exist.com").unwrap()),
-        selectors: vec![],
     };

     let app = ApplicationScore {

@@ -1,4 +1,4 @@
-use std::{collections::HashMap, path::PathBuf, sync::Arc};
+use std::{path::PathBuf, sync::Arc};

 use harmony::{
     inventory::Inventory,
@@ -14,7 +14,6 @@ use harmony::{
     topology::K8sAnywhereTopology,
 };
 use harmony_macros::hurl;
-use harmony_types::k8s_name::K8sName;

 #[tokio::main]
 async fn main() {
@@ -26,9 +25,8 @@ async fn main() {
     });

     let discord_receiver = DiscordWebhook {
-        name: K8sName("test-discord".to_string()),
+        name: "test-discord".to_string(),
         url: hurl!("https://discord.doesnt.exist.com"),
-        selectors: vec![],
     };

     let webhook_receiver = WebhookReceiver {
Binary file not shown.
@@ -1,7 +0,0 @@
-
-apiVersion: v2
-name: harmony-example-rust-webapp-chart
-description: A Helm chart for the harmony-example-rust-webapp web application.
-type: application
-version: 0.1.0
-appVersion: "latest"

@@ -1,16 +0,0 @@
-
-{{/*
-Expand the name of the chart.
-*/}}
-{{- define "chart.name" -}}
-{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
-{{- end }}
-
-{{/*
-Create a default fully qualified app name.
-We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
-*/}}
-{{- define "chart.fullname" -}}
-{{- $name := default .Chart.Name .Values.nameOverride }}
-{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
-{{- end }}

@@ -1,23 +0,0 @@
-
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: {{ include "chart.fullname" . }}
-spec:
-  replicas: {{ .Values.replicaCount }}
-  selector:
-    matchLabels:
-      app: {{ include "chart.name" . }}
-  template:
-    metadata:
-      labels:
-        app: {{ include "chart.name" . }}
-    spec:
-      containers:
-        - name: {{ .Chart.Name }}
-          image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
-          imagePullPolicy: {{ .Values.image.pullPolicy }}
-          ports:
-            - name: http
-              containerPort: 3000
-              protocol: TCP

@@ -1,35 +0,0 @@
-
-{{- if .Values.ingress.enabled -}}
-apiVersion: networking.k8s.io/v1
-kind: Ingress
-metadata:
-  name: {{ include "chart.fullname" . }}
-  annotations:
-    {{- toYaml .Values.ingress.annotations | nindent 4 }}
-spec:
-  {{- if .Values.ingress.tls }}
-  tls:
-    {{- range .Values.ingress.tls }}
-    - hosts:
-        {{- range .hosts }}
-        - {{ . | quote }}
-        {{- end }}
-      secretName: {{ .secretName }}
-    {{- end }}
-  {{- end }}
-  rules:
-    {{- range .Values.ingress.hosts }}
-    - host: {{ .host | quote }}
-      http:
-        paths:
-          {{- range .paths }}
-          - path: {{ .path }}
-            pathType: {{ .pathType }}
-            backend:
-              service:
-                name: {{ include "chart.fullname" $ }}
-                port:
-                  number: 3000
-          {{- end }}
-    {{- end }}
-{{- end }}

@@ -1,14 +0,0 @@
-
-apiVersion: v1
-kind: Service
-metadata:
-  name: {{ include "chart.fullname" . }}
-spec:
-  type: {{ .Values.service.type }}
-  ports:
-    - port: {{ .Values.service.port }}
-      targetPort: 3000
-      protocol: TCP
-      name: http
-  selector:
-    app: {{ include "chart.name" . }}

@@ -1,34 +0,0 @@
-
-# Default values for harmony-example-rust-webapp-chart.
-# This is a YAML-formatted file.
-# Declare variables to be passed into your templates.
-
-replicaCount: 1
-
-image:
-  repository: hub.nationtech.io/harmony/harmony-example-rust-webapp
-  pullPolicy: IfNotPresent
-  # Overridden by the chart's appVersion
-  tag: "latest"
-
-service:
-  type: ClusterIP
-  port: 3000
-
-ingress:
-  enabled: true
-  # Annotations for cert-manager to handle SSL.
-  annotations:
-    cert-manager.io/cluster-issuer: "letsencrypt-prod"
-    # Add other annotations like nginx ingress class if needed
-    # kubernetes.io/ingress.class: nginx
-  hosts:
-    - host: chart-example.local
-      paths:
-        - path: /
-          pathType: ImplementationSpecific
-  tls:
-    - secretName: harmony-example-rust-webapp-tls
-      hosts:
-        - chart-example.local
-
@@ -3,14 +3,13 @@ use harmony::{
     modules::{
         application::{
             ApplicationScore, RustWebFramework, RustWebapp,
-            features::{Monitoring, PackagingDeployment},
+            features::{PackagingDeployment, rhob_monitoring::Monitoring},
         },
         monitoring::alert_channel::discord_alert_channel::DiscordWebhook,
     },
     topology::K8sAnywhereTopology,
 };
 use harmony_macros::hurl;
-use harmony_types::k8s_name::K8sName;
 use std::{path::PathBuf, sync::Arc};

 #[tokio::main]
@@ -32,9 +31,8 @@ async fn main() {
         Box::new(Monitoring {
             application: application.clone(),
             alert_receiver: vec![Box::new(DiscordWebhook {
-                name: K8sName("test-discord".to_string()),
+                name: "test-discord".to_string(),
                 url: hurl!("https://discord.doesnt.exist.com"),
-                selectors: vec![],
             })],
         }),
     ],

@@ -12,11 +12,11 @@ pub type FirewallGroup = Vec<PhysicalHost>;
 pub struct PhysicalHost {
     pub id: Id,
     pub category: HostCategory,
-    pub network: Vec<NetworkInterface>,
+    pub network: Vec<NetworkInterface>, // FIXME: Don't use harmony_inventory_agent::NetworkInterface
-    pub storage: Vec<StorageDrive>,
+    pub storage: Vec<StorageDrive>, // FIXME: Don't use harmony_inventory_agent::StorageDrive
     pub labels: Vec<Label>,
-    pub memory_modules: Vec<MemoryModule>,
+    pub memory_modules: Vec<MemoryModule>, // FIXME: Don't use harmony_inventory_agent::MemoryModule
-    pub cpus: Vec<CPU>,
+    pub cpus: Vec<CPU>, // FIXME: Don't use harmony_inventory_agent::CPU
 }

 impl PhysicalHost {
|
|||||||
Lamp,
|
Lamp,
|
||||||
ApplicationMonitoring,
|
ApplicationMonitoring,
|
||||||
K8sPrometheusCrdAlerting,
|
K8sPrometheusCrdAlerting,
|
||||||
CephRemoveOsd,
|
|
||||||
DiscoverInventoryAgent,
|
DiscoverInventoryAgent,
|
||||||
CephClusterHealth,
|
CephClusterHealth,
|
||||||
Custom(&'static str),
|
Custom(&'static str),
|
||||||
@@ -62,7 +61,6 @@ impl std::fmt::Display for InterpretName {
|
|||||||
InterpretName::Lamp => f.write_str("LAMP"),
|
InterpretName::Lamp => f.write_str("LAMP"),
|
||||||
InterpretName::ApplicationMonitoring => f.write_str("ApplicationMonitoring"),
|
InterpretName::ApplicationMonitoring => f.write_str("ApplicationMonitoring"),
|
||||||
InterpretName::K8sPrometheusCrdAlerting => f.write_str("K8sPrometheusCrdAlerting"),
|
InterpretName::K8sPrometheusCrdAlerting => f.write_str("K8sPrometheusCrdAlerting"),
|
||||||
InterpretName::CephRemoveOsd => f.write_str("CephRemoveOsd"),
|
|
||||||
InterpretName::DiscoverInventoryAgent => f.write_str("DiscoverInventoryAgent"),
|
InterpretName::DiscoverInventoryAgent => f.write_str("DiscoverInventoryAgent"),
|
||||||
InterpretName::CephClusterHealth => f.write_str("CephClusterHealth"),
|
InterpretName::CephClusterHealth => f.write_str("CephClusterHealth"),
|
||||||
InterpretName::Custom(name) => f.write_str(name),
|
InterpretName::Custom(name) => f.write_str(name),
|
||||||
|
|||||||

@@ -1,28 +1,56 @@
 use async_trait::async_trait;
+use brocade::BrocadeOptions;
 use harmony_macros::ip;
-use harmony_types::{
-    net::{MacAddress, Url},
-    switch::PortLocation,
-};
+use harmony_secret::SecretManager;
+use harmony_types::net::MacAddress;
+use harmony_types::net::Url;
+use harmony_types::switch::PortLocation;
+use k8s_openapi::api::core::v1::Namespace;
 use kube::api::ObjectMeta;
 use log::debug;
 use log::info;

-use crate::modules::okd::crd::nmstate::{self, NodeNetworkConfigurationPolicy};
+use crate::data::FileContent;
+use crate::executors::ExecutorError;
+use crate::hardware::PhysicalHost;
+use crate::infra::brocade::BrocadeSwitchAuth;
+use crate::infra::brocade::BrocadeSwitchClient;
+use crate::modules::okd::crd::InstallPlanApproval;
+use crate::modules::okd::crd::OperatorGroup;
+use crate::modules::okd::crd::OperatorGroupSpec;
+use crate::modules::okd::crd::Subscription;
+use crate::modules::okd::crd::SubscriptionSpec;
+use crate::modules::okd::crd::nmstate;
+use crate::modules::okd::crd::nmstate::NMState;
+use crate::modules::okd::crd::nmstate::NodeNetworkConfigurationPolicy;
+use crate::modules::okd::crd::nmstate::NodeNetworkConfigurationPolicySpec;
 use crate::topology::PxeOptions;
-use crate::{data::FileContent, modules::okd::crd::nmstate::NMState};
-use crate::{
-    executors::ExecutorError, modules::okd::crd::nmstate::NodeNetworkConfigurationPolicySpec,
-};

-use super::{
-    DHCPStaticEntry, DhcpServer, DnsRecord, DnsRecordType, DnsServer, Firewall, HostNetworkConfig,
-    HttpServer, IpAddress, K8sclient, LoadBalancer, LoadBalancerService, LogicalHost,
-    PreparationError, PreparationOutcome, Router, Switch, SwitchClient, SwitchError, TftpServer,
-    Topology, k8s::K8sClient,
-};
+use super::DHCPStaticEntry;
+use super::DhcpServer;
+use super::DnsRecord;
+use super::DnsRecordType;
+use super::DnsServer;
+use super::Firewall;
+use super::HostNetworkConfig;
+use super::HttpServer;
+use super::IpAddress;
+use super::K8sclient;
+use super::LoadBalancer;
+use super::LoadBalancerService;
+use super::LogicalHost;
+use super::PreparationError;
+use super::PreparationOutcome;
+use super::Router;
+use super::Switch;
+use super::SwitchClient;
+use super::SwitchError;
+use super::TftpServer;

+use super::Topology;
+use super::k8s::K8sClient;
 use std::collections::BTreeMap;
+use std::net::IpAddr;
 use std::sync::Arc;

 #[derive(Debug, Clone)]
@@ -35,11 +63,10 @@ pub struct HAClusterTopology {
     pub tftp_server: Arc<dyn TftpServer>,
     pub http_server: Arc<dyn HttpServer>,
     pub dns_server: Arc<dyn DnsServer>,
-    pub switch_client: Arc<dyn SwitchClient>,
     pub bootstrap_host: LogicalHost,
     pub control_plane: Vec<LogicalHost>,
     pub workers: Vec<LogicalHost>,
-    pub kubeconfig: Option<String>,
+    pub switch: Vec<LogicalHost>,
 }

 #[async_trait]
@@ -58,17 +85,9 @@ impl Topology for HAClusterTopology {
 #[async_trait]
 impl K8sclient for HAClusterTopology {
     async fn k8s_client(&self) -> Result<Arc<K8sClient>, String> {
-        match &self.kubeconfig {
-            None => Ok(Arc::new(
-                K8sClient::try_default().await.map_err(|e| e.to_string())?,
-            )),
-            Some(kubeconfig) => {
-                let Some(client) = K8sClient::from_kubeconfig(&kubeconfig).await else {
-                    return Err("Failed to create k8s client".to_string());
-                };
-                Ok(Arc::new(client))
-            }
-        }
+        Ok(Arc::new(
+            K8sClient::try_default().await.map_err(|e| e.to_string())?,
+        ))
     }
 }

@@ -94,47 +113,59 @@ impl HAClusterTopology {
     }

     async fn ensure_nmstate_operator_installed(&self) -> Result<(), String> {
+        // FIXME: Find a way to check nmstate is already available (get pod -n openshift-nmstate)
+        debug!("Installing NMState operator...");
         let k8s_client = self.k8s_client().await?;

-        debug!("Installing NMState controller...");
-        k8s_client.apply_url(url::Url::parse("https://github.com/nmstate/kubernetes-nmstate/releases/download/v0.84.0/nmstate.io_nmstates.yaml
-").unwrap(), Some("nmstate"))
-        .await
-        .map_err(|e| e.to_string())?;
-
-        debug!("Creating NMState namespace...");
-        k8s_client.apply_url(url::Url::parse("https://github.com/nmstate/kubernetes-nmstate/releases/download/v0.84.0/namespace.yaml
-").unwrap(), Some("nmstate"))
-        .await
-        .map_err(|e| e.to_string())?;
-
-        debug!("Creating NMState service account...");
-        k8s_client.apply_url(url::Url::parse("https://github.com/nmstate/kubernetes-nmstate/releases/download/v0.84.0/service_account.yaml
-").unwrap(), Some("nmstate"))
-        .await
-        .map_err(|e| e.to_string())?;
-
-        debug!("Creating NMState role...");
-        k8s_client.apply_url(url::Url::parse("https://github.com/nmstate/kubernetes-nmstate/releases/download/v0.84.0/role.yaml
-").unwrap(), Some("nmstate"))
-        .await
-        .map_err(|e| e.to_string())?;
-
-        debug!("Creating NMState role binding...");
-        k8s_client.apply_url(url::Url::parse("https://github.com/nmstate/kubernetes-nmstate/releases/download/v0.84.0/role_binding.yaml
-").unwrap(), Some("nmstate"))
-        .await
-        .map_err(|e| e.to_string())?;
-
-        debug!("Creating NMState operator...");
-        k8s_client.apply_url(url::Url::parse("https://github.com/nmstate/kubernetes-nmstate/releases/download/v0.84.0/operator.yaml
-").unwrap(), Some("nmstate"))
-        .await
-        .map_err(|e| e.to_string())?;
+        let nmstate_namespace = Namespace {
+            metadata: ObjectMeta {
+                name: Some("openshift-nmstate".to_string()),
+                finalizers: Some(vec!["kubernetes".to_string()]),
+                ..Default::default()
+            },
+            ..Default::default()
+        };
+        debug!("Creating NMState namespace: {nmstate_namespace:#?}");
         k8s_client
-            .wait_until_deployment_ready("nmstate-operator", Some("nmstate"), None)
-            .await?;
+            .apply(&nmstate_namespace, None)
+            .await
+            .map_err(|e| e.to_string())?;

+        let nmstate_operator_group = OperatorGroup {
+            metadata: ObjectMeta {
+                name: Some("openshift-nmstate".to_string()),
+                namespace: Some("openshift-nmstate".to_string()),
+                ..Default::default()
+            },
+            spec: OperatorGroupSpec {
+                target_namespaces: vec!["openshift-nmstate".to_string()],
+            },
+        };
+        debug!("Creating NMState operator group: {nmstate_operator_group:#?}");
+        k8s_client
+            .apply(&nmstate_operator_group, None)
+            .await
+            .map_err(|e| e.to_string())?;
+
+        let nmstate_subscription = Subscription {
+            metadata: ObjectMeta {
+                name: Some("kubernetes-nmstate-operator".to_string()),
+                namespace: Some("openshift-nmstate".to_string()),
+                ..Default::default()
+            },
+            spec: SubscriptionSpec {
+                channel: Some("stable".to_string()),
+                install_plan_approval: Some(InstallPlanApproval::Automatic),
+                name: "kubernetes-nmstate-operator".to_string(),
+                source: "redhat-operators".to_string(),
+                source_namespace: "openshift-marketplace".to_string(),
+            },
+        };
+        debug!("Subscribing to NMState Operator: {nmstate_subscription:#?}");
+        k8s_client
+            .apply(&nmstate_subscription, None)
+            .await
+            .map_err(|e| e.to_string())?;

         let nmstate = NMState {
             metadata: ObjectMeta {
@@ -156,7 +187,11 @@ impl HAClusterTopology {
         42 // FIXME: Find a better way to declare the bond id
     }
 
-    async fn configure_bond(&self, config: &HostNetworkConfig) -> Result<(), SwitchError> {
+    async fn configure_bond(
+        &self,
+        host: &PhysicalHost,
+        config: &HostNetworkConfig,
+    ) -> Result<(), SwitchError> {
         self.ensure_nmstate_operator_installed()
             .await
             .map_err(|e| {
@@ -165,33 +200,29 @@ impl HAClusterTopology {
                 ))
             })?;
 
-        let bond_config = self.create_bond_configuration(config);
-        debug!(
-            "Applying NMState bond config for host {}: {bond_config:#?}",
-            config.host_id
-        );
+        let bond_config = self.create_bond_configuration(host, config);
+        debug!("Configuring bond for host {host:?}: {bond_config:#?}");
         self.k8s_client()
             .await
             .unwrap()
             .apply(&bond_config, None)
             .await
-            .map_err(|e| SwitchError::new(format!("Failed to configure bond: {e}")))?;
+            .unwrap();
 
-        Ok(())
+        todo!()
     }
 
     fn create_bond_configuration(
         &self,
+        host: &PhysicalHost,
         config: &HostNetworkConfig,
     ) -> NodeNetworkConfigurationPolicy {
-        let host_name = &config.host_id;
+        let host_name = host.id.clone();
 
         let bond_id = self.get_next_bond_id();
         let bond_name = format!("bond{bond_id}");
 
-        info!("Configuring bond '{bond_name}' for host '{host_name}'...");
-
         let mut bond_mtu: Option<u32> = None;
-        let mut copy_mac_from: Option<String> = None;
+        let mut bond_mac_address: Option<String> = None;
         let mut bond_ports = Vec::new();
         let mut interfaces: Vec<nmstate::InterfaceSpec> = Vec::new();
 
@@ -217,14 +248,14 @@ impl HAClusterTopology {
                 ..Default::default()
             });
 
-            bond_ports.push(interface_name.clone());
+            bond_ports.push(interface_name);
 
             // Use the first port's details for the bond mtu and mac address
             if bond_mtu.is_none() {
                 bond_mtu = Some(switch_port.interface.mtu);
             }
-            if copy_mac_from.is_none() {
-                copy_mac_from = Some(interface_name);
+            if bond_mac_address.is_none() {
+                bond_mac_address = Some(switch_port.interface.mac_address.to_string());
             }
         }
 
@@ -233,7 +264,8 @@ impl HAClusterTopology {
             description: Some(format!("Network bond for host {host_name}")),
             r#type: "bond".to_string(),
             state: "up".to_string(),
-            copy_mac_from,
+            mtu: bond_mtu,
+            mac_address: bond_mac_address,
             ipv4: Some(nmstate::IpStackSpec {
                 dhcp: Some(true),
                 enabled: Some(true),
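
Note: for a concrete picture of what this hunk now emits, with the first member port supplying the MTU and MAC, the bond entry of the generated policy ends up shaped roughly like the sketch below. Only the fields visible in this diff are used; the `name` field, the literal values, and the availability of `Default` for these spec types are assumptions.

    // Illustrative values only; not part of the change.
    let bond = nmstate::InterfaceSpec {
        name: "bond42".to_string(), // from format!("bond{bond_id}")
        description: Some("Network bond for host wk0".to_string()),
        r#type: "bond".to_string(),
        state: "up".to_string(),
        mtu: Some(9000),                                     // first port's MTU
        mac_address: Some("aa:bb:cc:dd:ee:01".to_string()),  // first port's MAC
        ipv4: Some(nmstate::IpStackSpec {
            dhcp: Some(true),
            enabled: Some(true),
            ..Default::default()
        }),
        ..Default::default()
    };
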
@@ -268,12 +300,37 @@ impl HAClusterTopology {
         }
     }
 
-    async fn configure_port_channel(&self, config: &HostNetworkConfig) -> Result<(), SwitchError> {
+    async fn get_switch_client(&self) -> Result<Box<dyn SwitchClient>, SwitchError> {
+        let auth = SecretManager::get_or_prompt::<BrocadeSwitchAuth>()
+            .await
+            .map_err(|e| SwitchError::new(format!("Failed to get credentials: {e}")))?;
+
+        // FIXME: We assume Brocade switches
+        let switches: Vec<IpAddr> = self.switch.iter().map(|s| s.ip).collect();
+        let brocade_options = Some(BrocadeOptions {
+            dry_run: *crate::config::DRY_RUN,
+            ..Default::default()
+        });
+        let client =
+            BrocadeSwitchClient::init(&switches, &auth.username, &auth.password, brocade_options)
+                .await
+                .map_err(|e| SwitchError::new(format!("Failed to connect to switch: {e}")))?;
+
+        Ok(Box::new(client))
+    }
+
+    async fn configure_port_channel(
+        &self,
+        host: &PhysicalHost,
+        config: &HostNetworkConfig,
+    ) -> Result<(), SwitchError> {
         debug!("Configuring port channel: {config:#?}");
+        let client = self.get_switch_client().await?;
 
         let switch_ports = config.switch_ports.iter().map(|s| s.port.clone()).collect();
 
-        self.switch_client
-            .configure_port_channel(&format!("Harmony_{}", config.host_id), switch_ports)
+        client
+            .configure_port_channel(&format!("Harmony_{}", host.id), switch_ports)
             .await
             .map_err(|e| SwitchError::new(format!("Failed to configure switch: {e}")))?;
 
@@ -288,7 +345,6 @@ impl HAClusterTopology {
         };
 
         Self {
-            kubeconfig: None,
             domain_name: "DummyTopology".to_string(),
             router: dummy_infra.clone(),
             load_balancer: dummy_infra.clone(),
@@ -297,10 +353,10 @@ impl HAClusterTopology {
             tftp_server: dummy_infra.clone(),
             http_server: dummy_infra.clone(),
             dns_server: dummy_infra.clone(),
-            switch_client: dummy_infra.clone(),
             bootstrap_host: dummy_host,
             control_plane: vec![],
             workers: vec![],
+            switch: vec![],
         }
     }
 }
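
Note: one consequence of the refactoring above is that `get_switch_client` resolves credentials and opens a fresh connection on every call, so each `Switch` method pays that cost. If that ever matters, the boxed client could be memoized; a hypothetical sketch (not part of this diff) using `tokio::sync::OnceCell`:

    use tokio::sync::OnceCell;

    // Hypothetical wrapper: initialize the boxed switch client once, then
    // reuse it for subsequent setup_switch/find_port/configure calls.
    struct CachedSwitchClient {
        cell: OnceCell<Box<dyn SwitchClient>>,
    }

    impl CachedSwitchClient {
        async fn get(
            &self,
            topology: &HAClusterTopology,
        ) -> Result<&Box<dyn SwitchClient>, SwitchError> {
            self.cell
                .get_or_try_init(|| topology.get_switch_client())
                .await
        }
    }
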
@@ -458,7 +514,8 @@ impl HttpServer for HAClusterTopology {
 #[async_trait]
 impl Switch for HAClusterTopology {
     async fn setup_switch(&self) -> Result<(), SwitchError> {
-        self.switch_client.setup().await?;
+        let client = self.get_switch_client().await?;
+        client.setup().await?;
         Ok(())
     }
 
@@ -466,13 +523,18 @@ impl Switch for HAClusterTopology {
         &self,
         mac_address: &MacAddress,
     ) -> Result<Option<PortLocation>, SwitchError> {
-        let port = self.switch_client.find_port(mac_address).await?;
+        let client = self.get_switch_client().await?;
+        let port = client.find_port(mac_address).await?;
         Ok(port)
     }
 
-    async fn configure_host_network(&self, config: &HostNetworkConfig) -> Result<(), SwitchError> {
-        self.configure_bond(config).await?;
-        self.configure_port_channel(config).await
+    async fn configure_host_network(
+        &self,
+        host: &PhysicalHost,
+        config: HostNetworkConfig,
+    ) -> Result<(), SwitchError> {
+        // self.configure_bond(host, &config).await?;
+        self.configure_port_channel(host, &config).await
     }
 }
 
@@ -662,25 +724,3 @@ impl DnsServer for DummyInfra {
         unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA)
     }
 }
-
-#[async_trait]
-impl SwitchClient for DummyInfra {
-    async fn setup(&self) -> Result<(), SwitchError> {
-        unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA)
-    }
-
-    async fn find_port(
-        &self,
-        _mac_address: &MacAddress,
-    ) -> Result<Option<PortLocation>, SwitchError> {
-        unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA)
-    }
-
-    async fn configure_port_channel(
-        &self,
-        _channel_name: &str,
-        _switch_ports: Vec<PortLocation>,
-    ) -> Result<u8, SwitchError> {
-        unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA)
-    }
-}
@@ -5,16 +5,14 @@ use k8s_openapi::{
     ClusterResourceScope, NamespaceResourceScope,
     api::{
         apps::v1::Deployment,
-        core::v1::{Pod, ServiceAccount},
+        core::v1::{Pod, PodStatus},
     },
-    apimachinery::pkg::version::Info,
 };
 use kube::{
-    Client, Config, Discovery, Error, Resource,
+    Client, Config, Error, Resource,
     api::{Api, AttachParams, DeleteParams, ListParams, Patch, PatchParams, ResourceExt},
     config::{KubeConfigOptions, Kubeconfig},
-    core::{DynamicResourceScope, ErrorResponse},
-    discovery::{ApiCapabilities, Scope},
+    core::ErrorResponse,
     error::DiscoveryError,
     runtime::reflector::Lookup,
 };
@@ -23,12 +21,11 @@ use kube::{
     api::{ApiResource, GroupVersionKind},
     runtime::wait::await_condition,
 };
-use log::{debug, error, info, trace, warn};
+use log::{debug, error, trace};
 use serde::{Serialize, de::DeserializeOwned};
-use serde_json::json;
+use serde_json::{Value, json};
 use similar::TextDiff;
 use tokio::{io::AsyncReadExt, time::sleep};
-use url::Url;
 
 #[derive(new, Clone)]
 pub struct K8sClient {
@@ -62,22 +59,6 @@ impl K8sClient {
         })
     }
 
-    pub async fn service_account_api(&self, namespace: &str) -> Api<ServiceAccount> {
-        let api: Api<ServiceAccount> = Api::namespaced(self.client.clone(), namespace);
-        api
-    }
-
-    pub async fn get_apiserver_version(&self) -> Result<Info, Error> {
-        let client: Client = self.client.clone();
-        let version_info: Info = client.apiserver_version().await?;
-        Ok(version_info)
-    }
-
-    pub async fn discovery(&self) -> Result<Discovery, Error> {
-        let discovery: Discovery = Discovery::new(self.client.clone()).run().await?;
-        Ok(discovery)
-    }
-
     pub async fn get_resource_json_value(
         &self,
         name: &str,
@@ -90,25 +71,7 @@ impl K8sClient {
         } else {
             Api::default_namespaced_with(self.client.clone(), &gvk)
         };
-
-        resource.get(name).await
-    }
-
-    pub async fn get_secret_json_value(
-        &self,
-        name: &str,
-        namespace: Option<&str>,
-    ) -> Result<DynamicObject, Error> {
-        self.get_resource_json_value(
-            name,
-            namespace,
-            &GroupVersionKind {
-                group: "".to_string(),
-                version: "v1".to_string(),
-                kind: "Secret".to_string(),
-            },
-        )
-        .await
+        Ok(resource.get(name).await?)
     }
 
     pub async fn get_deployment(
@@ -117,15 +80,11 @@ impl K8sClient {
         namespace: Option<&str>,
     ) -> Result<Option<Deployment>, Error> {
         let deps: Api<Deployment> = if let Some(ns) = namespace {
-            debug!("getting namespaced deployment");
             Api::namespaced(self.client.clone(), ns)
         } else {
-            debug!("getting default namespace deployment");
             Api::default_namespaced(self.client.clone())
         };
-
-        debug!("getting deployment {} in ns {}", name, namespace.unwrap());
-        deps.get_opt(name).await
+        Ok(deps.get_opt(name).await?)
     }
 
     pub async fn get_pod(&self, name: &str, namespace: Option<&str>) -> Result<Option<Pod>, Error> {
@@ -134,8 +93,7 @@ impl K8sClient {
         } else {
             Api::default_namespaced(self.client.clone())
         };
-
-        pods.get_opt(name).await
+        Ok(pods.get_opt(name).await?)
     }
 
     pub async fn scale_deployment(
@@ -156,7 +114,7 @@ impl K8sClient {
             }
         });
         let pp = PatchParams::default();
-        let scale = Patch::Merge(&patch);
+        let scale = Patch::Apply(&patch);
         deployments.patch_scale(name, &pp, &scale).await?;
         Ok(())
     }
@@ -178,9 +136,9 @@ impl K8sClient {
 
     pub async fn wait_until_deployment_ready(
         &self,
-        name: &str,
+        name: String,
         namespace: Option<&str>,
-        timeout: Option<Duration>,
+        timeout: Option<u64>,
     ) -> Result<(), String> {
         let api: Api<Deployment>;
 
@@ -190,9 +148,9 @@ impl K8sClient {
             api = Api::default_namespaced(self.client.clone());
         }
 
-        let establish = await_condition(api, name, conditions::is_deployment_completed());
-        let timeout = timeout.unwrap_or(Duration::from_secs(120));
-        let res = tokio::time::timeout(timeout, establish).await;
+        let establish = await_condition(api, name.as_str(), conditions::is_deployment_completed());
+        let t = timeout.unwrap_or(300);
+        let res = tokio::time::timeout(std::time::Duration::from_secs(t), establish).await;
 
         if res.is_ok() {
             Ok(())
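
Note: call sites must adapt to the new `wait_until_deployment_ready` signature, which takes the name by value and the timeout as a plain number of seconds, defaulting to 300 when `None`. A sketch of an adapted call (names illustrative):

    // Sketch: waiting up to 120 seconds for a deployment to complete.
    k8s_client
        .wait_until_deployment_ready(
            "grafana-grafana-deployment".to_string(), // name: String
            Some("grafana"),                          // namespace
            Some(120),                                // seconds; None => 300
        )
        .await?;
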
@@ -282,7 +240,7 @@ impl K8sClient {
 
         if let Some(s) = status.status {
             let mut stdout_buf = String::new();
-            if let Some(mut stdout) = process.stdout() {
+            if let Some(mut stdout) = process.stdout().take() {
                 stdout
                     .read_to_string(&mut stdout_buf)
                     .await
@@ -354,169 +312,6 @@ impl K8sClient {
         }
     }
 
-    fn get_api_for_dynamic_object(
-        &self,
-        object: &DynamicObject,
-        ns: Option<&str>,
-    ) -> Result<Api<DynamicObject>, Error> {
-        let api_resource = object
-            .types
-            .as_ref()
-            .and_then(|t| {
-                let parts: Vec<&str> = t.api_version.split('/').collect();
-                match parts.as_slice() {
-                    [version] => Some(ApiResource::from_gvk(&GroupVersionKind::gvk(
-                        "", version, &t.kind,
-                    ))),
-                    [group, version] => Some(ApiResource::from_gvk(&GroupVersionKind::gvk(
-                        group, version, &t.kind,
-                    ))),
-                    _ => None,
-                }
-            })
-            .ok_or_else(|| {
-                Error::BuildRequest(kube::core::request::Error::Validation(
-                    "Invalid apiVersion in DynamicObject {object:#?}".to_string(),
-                ))
-            })?;
-
-        match ns {
-            Some(ns) => Ok(Api::namespaced_with(self.client.clone(), ns, &api_resource)),
-            None => Ok(Api::default_namespaced_with(
-                self.client.clone(),
-                &api_resource,
-            )),
-        }
-    }
-
-    pub async fn apply_dynamic_many(
-        &self,
-        resource: &[DynamicObject],
-        namespace: Option<&str>,
-        force_conflicts: bool,
-    ) -> Result<Vec<DynamicObject>, Error> {
-        let mut result = Vec::new();
-        for r in resource.iter() {
-            result.push(self.apply_dynamic(r, namespace, force_conflicts).await?);
-        }
-
-        Ok(result)
-    }
-
-    /// Apply DynamicObject resource to the cluster
-    pub async fn apply_dynamic(
-        &self,
-        resource: &DynamicObject,
-        namespace: Option<&str>,
-        force_conflicts: bool,
-    ) -> Result<DynamicObject, Error> {
-        // Build API for this dynamic object
-        let api = self.get_api_for_dynamic_object(resource, namespace)?;
-        let name = resource
-            .metadata
-            .name
-            .as_ref()
-            .ok_or_else(|| {
-                Error::BuildRequest(kube::core::request::Error::Validation(
-                    "DynamicObject must have metadata.name".to_string(),
-                ))
-            })?
-            .as_str();
-
-        debug!(
-            "Applying dynamic resource kind={:?} apiVersion={:?} name='{}' ns={:?}",
-            resource.types.as_ref().map(|t| &t.kind),
-            resource.types.as_ref().map(|t| &t.api_version),
-            name,
-            namespace
-        );
-        trace!(
-            "Dynamic resource payload:\n{:#}",
-            serde_json::to_value(resource).unwrap_or(serde_json::Value::Null)
-        );
-
-        // Using same field manager as in apply()
-        let mut patch_params = PatchParams::apply("harmony");
-        patch_params.force = force_conflicts;
-
-        if *crate::config::DRY_RUN {
-            // Dry-run path: fetch current, show diff, and return appropriate object
-            match api.get(name).await {
-                Ok(current) => {
-                    trace!("Received current dynamic value {current:#?}");
-
-                    println!("\nPerforming dry-run for resource: '{}'", name);
-
-                    // Serialize current and new, and strip status from current if present
-                    let mut current_yaml =
-                        serde_yaml::to_value(&current).unwrap_or_else(|_| serde_yaml::Value::Null);
-                    if let Some(map) = current_yaml.as_mapping_mut() {
-                        if map.contains_key(&serde_yaml::Value::String("status".to_string())) {
-                            let removed =
-                                map.remove(&serde_yaml::Value::String("status".to_string()));
-                            trace!("Removed status from current dynamic object: {:?}", removed);
-                        } else {
-                            trace!(
-                                "Did not find status entry for current dynamic object {}/{}",
-                                current.metadata.namespace.as_deref().unwrap_or(""),
-                                current.metadata.name.as_deref().unwrap_or("")
-                            );
-                        }
-                    }
-
-                    let current_yaml = serde_yaml::to_string(&current_yaml)
-                        .unwrap_or_else(|_| "Failed to serialize current resource".to_string());
-                    let new_yaml = serde_yaml::to_string(resource)
-                        .unwrap_or_else(|_| "Failed to serialize new resource".to_string());
-
-                    if current_yaml == new_yaml {
-                        println!("No changes detected.");
-                        return Ok(current);
-                    }
-
-                    println!("Changes detected:");
-                    let diff = TextDiff::from_lines(&current_yaml, &new_yaml);
-                    for change in diff.iter_all_changes() {
-                        let sign = match change.tag() {
-                            similar::ChangeTag::Delete => "-",
-                            similar::ChangeTag::Insert => "+",
-                            similar::ChangeTag::Equal => " ",
-                        };
-                        print!("{}{}", sign, change);
-                    }
-
-                    // Return the incoming resource as the would-be applied state
-                    Ok(resource.clone())
-                }
-                Err(Error::Api(ErrorResponse { code: 404, .. })) => {
-                    println!("\nPerforming dry-run for new resource: '{}'", name);
-                    println!(
-                        "Resource does not exist. It would be created with the following content:"
-                    );
-                    let new_yaml = serde_yaml::to_string(resource)
-                        .unwrap_or_else(|_| "Failed to serialize new resource".to_string());
-                    for line in new_yaml.lines() {
-                        println!("+{}", line);
-                    }
-                    Ok(resource.clone())
-                }
-                Err(e) => {
-                    error!("Failed to get dynamic resource '{}': {}", name, e);
-                    Err(e)
-                }
-            }
-        } else {
-            // Real apply via server-side apply
-            debug!("Patching (server-side apply) dynamic resource '{}'", name);
-            api.patch(name, &patch_params, &Patch::Apply(resource))
-                .await
-                .map_err(|e| {
-                    error!("Failed to apply dynamic resource '{}': {}", name, e);
-                    e
-                })
-        }
-    }
-
     /// Apply a resource in namespace
     ///
     /// See `kubectl apply` for more information on the expected behavior of this function
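
Note: with `get_secret_json_value` gone, the surviving `get_resource_json_value` covers the same ground once the caller supplies the core/v1 Secret GVK, exactly as the removed helper did internally. A sketch of the equivalent call (the secret and namespace names are placeholders):

    use kube::api::GroupVersionKind;

    // Equivalent of the removed helper, spelled out at the call site.
    let secret_gvk = GroupVersionKind {
        group: "".to_string(), // core API group
        version: "v1".to_string(),
        kind: "Secret".to_string(),
    };
    let secret = k8s_client
        .get_resource_json_value("my-secret", Some("my-namespace"), &secret_gvk)
        .await?;
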
@@ -551,14 +346,14 @@ impl K8sClient {
             Ok(current) => {
                 trace!("Received current value {current:#?}");
                 // The resource exists, so we calculate and display a diff.
-                println!("\nPerforming dry-run for resource: '{name}'");
+                println!("\nPerforming dry-run for resource: '{}'", name);
                 let mut current_yaml = serde_yaml::to_value(&current).unwrap_or_else(|_| {
                     panic!("Could not serialize current value : {current:#?}")
                 });
                 if current_yaml.is_mapping() && current_yaml.get("status").is_some() {
                     let map = current_yaml.as_mapping_mut().unwrap();
                     let removed = map.remove_entry("status");
-                    trace!("Removed status {removed:?}");
+                    trace!("Removed status {:?}", removed);
                 } else {
                     trace!(
                         "Did not find status entry for current object {}/{}",
@@ -587,14 +382,14 @@ impl K8sClient {
                         similar::ChangeTag::Insert => "+",
                         similar::ChangeTag::Equal => " ",
                     };
-                    print!("{sign}{change}");
+                    print!("{}{}", sign, change);
                 }
                 // In a dry run, we return the new resource state that would have been applied.
                 Ok(resource.clone())
             }
             Err(Error::Api(ErrorResponse { code: 404, .. })) => {
                 // The resource does not exist, so the "diff" is the entire new resource.
-                println!("\nPerforming dry-run for new resource: '{name}'");
+                println!("\nPerforming dry-run for new resource: '{}'", name);
                 println!(
                     "Resource does not exist. It would be created with the following content:"
                 );
@@ -603,14 +398,14 @@ impl K8sClient {
 
                 // Print each line of the new resource with a '+' prefix.
                 for line in new_yaml.lines() {
-                    println!("+{line}");
+                    println!("+{}", line);
                 }
                 // In a dry run, we return the new resource state that would have been created.
                 Ok(resource.clone())
             }
             Err(e) => {
                 // Another API error occurred.
-                error!("Failed to get resource '{name}': {e}");
+                error!("Failed to get resource '{}': {}", name, e);
                 Err(e)
             }
         }
@@ -625,7 +420,7 @@ impl K8sClient {
     where
         K: Resource + Clone + std::fmt::Debug + DeserializeOwned + serde::Serialize,
         <K as Resource>::Scope: ApplyStrategy<K>,
-        <K as Resource>::DynamicType: Default,
+        <K as kube::Resource>::DynamicType: Default,
     {
         let mut result = Vec::new();
         for r in resource.iter() {
@@ -690,7 +485,10 @@ impl K8sClient {
 
         // 6. Apply the object to the cluster using Server-Side Apply.
        // This will create the resource if it doesn't exist, or update it if it does.
-        println!("Applying '{name}' in namespace '{namespace}'...",);
+        println!(
+            "Applying Argo Application '{}' in namespace '{}'...",
+            name, namespace
+        );
         let patch_params = PatchParams::apply("harmony"); // Use a unique field manager name
         let result = api.patch(name, &patch_params, &Patch::Apply(&obj)).await?;
 
@@ -699,51 +497,6 @@ impl K8sClient {
         Ok(())
     }
 
-    /// Apply a resource from a URL
-    ///
-    /// It is the equivalent of `kubectl apply -f <url>`
-    pub async fn apply_url(&self, url: Url, ns: Option<&str>) -> Result<(), Error> {
-        let patch_params = PatchParams::apply("harmony");
-        let discovery = kube::Discovery::new(self.client.clone()).run().await?;
-
-        let yaml = reqwest::get(url)
-            .await
-            .expect("Could not get URL")
-            .text()
-            .await
-            .expect("Could not get content from URL");
-
-        for doc in multidoc_deserialize(&yaml).expect("failed to parse YAML from file") {
-            let obj: DynamicObject =
-                serde_yaml::from_value(doc).expect("cannot apply without valid YAML");
-            let namespace = obj.metadata.namespace.as_deref().or(ns);
-            let type_meta = obj
-                .types
-                .as_ref()
-                .expect("cannot apply object without valid TypeMeta");
-            let gvk = GroupVersionKind::try_from(type_meta)
-                .expect("cannot apply object without valid GroupVersionKind");
-            let name = obj.name_any();
-
-            if let Some((ar, caps)) = discovery.resolve_gvk(&gvk) {
-                let api = get_dynamic_api(ar, caps, self.client.clone(), namespace, false);
-                trace!(
-                    "Applying {}: \n{}",
-                    gvk.kind,
-                    serde_yaml::to_string(&obj).expect("Failed to serialize YAML")
-                );
-                let data: serde_json::Value =
-                    serde_json::to_value(&obj).expect("Failed to serialize JSON");
-                let _r = api.patch(&name, &patch_params, &Patch::Apply(data)).await?;
-                debug!("applied {} {}", gvk.kind, name);
-            } else {
-                warn!("Cannot apply document for unknown {gvk:?}");
-            }
-        }
-
-        Ok(())
-    }
-
     pub(crate) async fn from_kubeconfig(path: &str) -> Option<K8sClient> {
         let k = match Kubeconfig::read_from(path) {
             Ok(k) => k,
@@ -763,31 +516,6 @@ impl K8sClient {
     }
 }
 
-fn get_dynamic_api(
-    resource: ApiResource,
-    capabilities: ApiCapabilities,
-    client: Client,
-    ns: Option<&str>,
-    all: bool,
-) -> Api<DynamicObject> {
-    if capabilities.scope == Scope::Cluster || all {
-        Api::all_with(client, &resource)
-    } else if let Some(namespace) = ns {
-        Api::namespaced_with(client, namespace, &resource)
-    } else {
-        Api::default_namespaced_with(client, &resource)
-    }
-}
-
-fn multidoc_deserialize(data: &str) -> Result<Vec<serde_yaml::Value>, serde_yaml::Error> {
-    use serde::Deserialize;
-    let mut docs = vec![];
-    for de in serde_yaml::Deserializer::from_str(data) {
-        docs.push(serde_yaml::Value::deserialize(de)?);
-    }
-    Ok(docs)
-}
-
 pub trait ApplyStrategy<K: Resource> {
     fn get_api(client: &Client, ns: Option<&str>) -> Api<K>;
 }
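
Note: the `ApplyStrategy` trait kept at the end of this file is the scope-dispatch point for `apply`: each `Resource::Scope` decides how to build the `Api<K>` handle. The crate presumably already ships impls along these lines; this sketch is only to illustrate the contract:

    // Sketch: cluster-scoped resources ignore the namespace argument.
    impl<K> ApplyStrategy<K> for ClusterResourceScope
    where
        K: Resource<Scope = ClusterResourceScope>,
        <K as Resource>::DynamicType: Default,
    {
        fn get_api(client: &Client, _ns: Option<&str>) -> Api<K> {
            Api::all(client.clone())
        }
    }
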
@@ -1,12 +1,7 @@
-use std::{collections::BTreeMap, process::Command, sync::Arc, time::Duration};
+use std::{process::Command, sync::Arc};
 
 use async_trait::async_trait;
-use base64::{Engine, engine::general_purpose};
-use k8s_openapi::api::{
-    core::v1::Secret,
-    rbac::v1::{ClusterRoleBinding, RoleRef, Subject},
-};
-use kube::api::{DynamicObject, GroupVersionKind, ObjectMeta};
+use kube::api::GroupVersionKind;
 use log::{debug, info, warn};
 use serde::Serialize;
 use tokio::sync::OnceCell;
@@ -17,26 +12,14 @@ use crate::{
     inventory::Inventory,
     modules::{
         k3d::K3DInstallationScore,
-        k8s::ingress::{K8sIngressScore, PathType},
-        monitoring::{
-            grafana::{grafana::Grafana, helm::helm_grafana::grafana_helm_chart_score},
-            kube_prometheus::crd::{
-                crd_alertmanager_config::CRDPrometheus,
-                crd_grafana::{
-                    Grafana as GrafanaCRD, GrafanaCom, GrafanaDashboard,
-                    GrafanaDashboardDatasource, GrafanaDashboardSpec, GrafanaDatasource,
-                    GrafanaDatasourceConfig, GrafanaDatasourceJsonData,
-                    GrafanaDatasourceSecureJsonData, GrafanaDatasourceSpec, GrafanaSpec,
-                },
-                crd_prometheuses::LabelSelector,
-                prometheus_operator::prometheus_operator_helm_chart_score,
-                rhob_alertmanager_config::RHOBObservability,
-                service_monitor::ServiceMonitor,
-            },
+        monitoring::kube_prometheus::crd::{
+            crd_alertmanager_config::CRDPrometheus,
+            prometheus_operator::prometheus_operator_helm_chart_score,
+            rhob_alertmanager_config::RHOBObservability,
         },
         prometheus::{
             k8s_prometheus_alerting_score::K8sPrometheusCRDAlertingScore,
-            prometheus::PrometheusMonitoring, rhob_alerting_score::RHOBAlertingScore,
+            prometheus::PrometheusApplicationMonitoring, rhob_alerting_score::RHOBAlertingScore,
         },
     },
     score::Score,
@@ -64,13 +47,6 @@ struct K8sState {
     message: String,
 }
 
-#[derive(Debug, Clone)]
-pub enum KubernetesDistribution {
-    OpenshiftFamily,
-    K3sFamily,
-    Default,
-}
-
 #[derive(Debug, Clone)]
 enum K8sSource {
     LocalK3d,
@@ -81,7 +57,6 @@ enum K8sSource {
 pub struct K8sAnywhereTopology {
     k8s_state: Arc<OnceCell<Option<K8sState>>>,
     tenant_manager: Arc<OnceCell<K8sTenantManager>>,
-    k8s_distribution: Arc<OnceCell<KubernetesDistribution>>,
     config: Arc<K8sAnywhereConfig>,
 }
 
@@ -103,172 +78,41 @@ impl K8sclient for K8sAnywhereTopology {
 }
 
 #[async_trait]
-impl Grafana for K8sAnywhereTopology {
-    async fn ensure_grafana_operator(
-        &self,
-        inventory: &Inventory,
-    ) -> Result<PreparationOutcome, PreparationError> {
-        debug!("ensure grafana operator");
-        let client = self.k8s_client().await.unwrap();
-        let grafana_gvk = GroupVersionKind {
-            group: "grafana.integreatly.org".to_string(),
-            version: "v1beta1".to_string(),
-            kind: "Grafana".to_string(),
-        };
-        let name = "grafanas.grafana.integreatly.org";
-        let ns = "grafana";
-
-        let grafana_crd = client
-            .get_resource_json_value(name, Some(ns), &grafana_gvk)
-            .await;
-        match grafana_crd {
-            Ok(_) => {
-                return Ok(PreparationOutcome::Success {
-                    details: "Found grafana CRDs in cluster".to_string(),
-                });
-            }
-            Err(_) => {
-                return self
-                    .install_grafana_operator(inventory, Some("grafana"))
-                    .await;
-            }
-        };
-    }
-    async fn install_grafana(&self) -> Result<PreparationOutcome, PreparationError> {
-        let ns = "grafana";
-
-        let mut label = BTreeMap::new();
-
-        label.insert("dashboards".to_string(), "grafana".to_string());
-
-        let label_selector = LabelSelector {
-            match_labels: label.clone(),
-            match_expressions: vec![],
-        };
-
-        let client = self.k8s_client().await?;
-
-        let grafana = self.build_grafana(ns, &label);
-
-        client.apply(&grafana, Some(ns)).await?;
-        //TODO change this to a ensure ready or something better than just a timeout
-        client
-            .wait_until_deployment_ready(
-                "grafana-grafana-deployment",
-                Some("grafana"),
-                Some(Duration::from_secs(30)),
-            )
-            .await?;
-
-        let sa_name = "grafana-grafana-sa";
-        let token_secret_name = "grafana-sa-token-secret";
-
-        let sa_token_secret = self.build_sa_token_secret(token_secret_name, sa_name, ns);
-
-        client.apply(&sa_token_secret, Some(ns)).await?;
-        let secret_gvk = GroupVersionKind {
-            group: "".to_string(),
-            version: "v1".to_string(),
-            kind: "Secret".to_string(),
-        };
-
-        let secret = client
-            .get_resource_json_value(token_secret_name, Some(ns), &secret_gvk)
-            .await?;
-
-        let token = format!(
-            "Bearer {}",
-            self.extract_and_normalize_token(&secret).unwrap()
-        );
-
-        debug!("creating grafana clusterrole binding");
-
-        let clusterrolebinding =
-            self.build_cluster_rolebinding(sa_name, "cluster-monitoring-view", ns);
-
-        client.apply(&clusterrolebinding, Some(ns)).await?;
-
-        debug!("creating grafana datasource crd");
-
-        let thanos_url = format!(
-            "https://{}",
-            self.get_domain("thanos-querier-openshift-monitoring")
-                .await
-                .unwrap()
-        );
-
-        let thanos_openshift_datasource = self.build_grafana_datasource(
-            "thanos-openshift-monitoring",
-            ns,
-            &label_selector,
-            &thanos_url,
-            &token,
-        );
-
-        client.apply(&thanos_openshift_datasource, Some(ns)).await?;
-
-        debug!("creating grafana dashboard crd");
-        let dashboard = self.build_grafana_dashboard(ns, &label_selector);
-
-        client.apply(&dashboard, Some(ns)).await?;
-        debug!("creating grafana ingress");
-        let grafana_ingress = self.build_grafana_ingress(ns).await;
-
-        grafana_ingress
-            .interpret(&Inventory::empty(), self)
-            .await
-            .map_err(|e| PreparationError::new(e.to_string()))?;
-
-        Ok(PreparationOutcome::Success {
-            details: "Installed grafana composants".to_string(),
-        })
-    }
-}
-
-#[async_trait]
-impl PrometheusMonitoring<CRDPrometheus> for K8sAnywhereTopology {
+impl PrometheusApplicationMonitoring<CRDPrometheus> for K8sAnywhereTopology {
     async fn install_prometheus(
         &self,
         sender: &CRDPrometheus,
-        _inventory: &Inventory,
-        _receivers: Option<Vec<Box<dyn AlertReceiver<CRDPrometheus>>>>,
-    ) -> Result<PreparationOutcome, PreparationError> {
-        let client = self.k8s_client().await?;
-
-        for monitor in sender.service_monitor.iter() {
-            client
-                .apply(monitor, Some(&sender.namespace))
-                .await
-                .map_err(|e| PreparationError::new(e.to_string()))?;
-        }
-        Ok(PreparationOutcome::Success {
-            details: "successfuly installed prometheus components".to_string(),
-        })
-    }
-
-    async fn ensure_prometheus_operator(
-        &self,
-        sender: &CRDPrometheus,
-        _inventory: &Inventory,
+        inventory: &Inventory,
+        receivers: Option<Vec<Box<dyn AlertReceiver<CRDPrometheus>>>>,
     ) -> Result<PreparationOutcome, PreparationError> {
         let po_result = self.ensure_prometheus_operator(sender).await?;
 
-        match po_result {
-            PreparationOutcome::Success { details: _ } => {
-                debug!("Detected prometheus crds operator present in cluster.");
-                return Ok(po_result);
-            }
-            PreparationOutcome::Noop => {
-                debug!("Skipping Prometheus CR installation due to missing operator.");
-                return Ok(po_result);
-            }
+        if po_result == PreparationOutcome::Noop {
+            debug!("Skipping Prometheus CR installation due to missing operator.");
+            return Ok(po_result);
+        }
+
+        let result = self
+            .get_k8s_prometheus_application_score(sender.clone(), receivers)
+            .await
+            .interpret(inventory, self)
+            .await;
+
+        match result {
+            Ok(outcome) => match outcome.status {
+                InterpretStatus::SUCCESS => Ok(PreparationOutcome::Success {
+                    details: outcome.message,
+                }),
+                InterpretStatus::NOOP => Ok(PreparationOutcome::Noop),
+                _ => Err(PreparationError::new(outcome.message)),
+            },
+            Err(err) => Err(PreparationError::new(err.to_string())),
         }
     }
 }
 
 #[async_trait]
-impl PrometheusMonitoring<RHOBObservability> for K8sAnywhereTopology {
+impl PrometheusApplicationMonitoring<RHOBObservability> for K8sAnywhereTopology {
     async fn install_prometheus(
         &self,
         sender: &RHOBObservability,
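
Note: with this rewrite, `install_prometheus` both checks the operator and interprets the alerting score, folding the interpreter's status back into a `PreparationOutcome`. A caller sketch (values illustrative):

    // Sketch: driving the new install flow from a topology consumer.
    let outcome = topology
        .install_prometheus(&sender, &inventory, None)
        .await?;

    match outcome {
        PreparationOutcome::Success { details } => info!("prometheus ready: {details}"),
        PreparationOutcome::Noop => info!("operator missing; nothing installed"),
    }
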
@@ -302,14 +146,6 @@ impl PrometheusMonitoring<RHOBObservability> for K8sAnywhereTopology {
             Err(err) => Err(PreparationError::new(err.to_string())),
         }
     }
-
-    async fn ensure_prometheus_operator(
-        &self,
-        sender: &RHOBObservability,
-        inventory: &Inventory,
-    ) -> Result<PreparationOutcome, PreparationError> {
-        todo!()
-    }
 }
 
 impl Serialize for K8sAnywhereTopology {
@@ -326,7 +162,6 @@ impl K8sAnywhereTopology {
         Self {
             k8s_state: Arc::new(OnceCell::new()),
             tenant_manager: Arc::new(OnceCell::new()),
-            k8s_distribution: Arc::new(OnceCell::new()),
            config: Arc::new(K8sAnywhereConfig::from_env()),
         }
     }
@@ -335,216 +170,10 @@ impl K8sAnywhereTopology {
         Self {
             k8s_state: Arc::new(OnceCell::new()),
             tenant_manager: Arc::new(OnceCell::new()),
-            k8s_distribution: Arc::new(OnceCell::new()),
             config: Arc::new(config),
         }
     }
 
-    pub async fn get_k8s_distribution(&self) -> Result<&KubernetesDistribution, PreparationError> {
-        self.k8s_distribution
-            .get_or_try_init(async || {
-                let client = self.k8s_client().await.unwrap();
-
-                let discovery = client.discovery().await.map_err(|e| {
-                    PreparationError::new(format!("Could not discover API groups: {}", e))
-                })?;
-
-                let version = client.get_apiserver_version().await.map_err(|e| {
-                    PreparationError::new(format!("Could not get server version: {}", e))
-                })?;
-
-                // OpenShift / OKD
-                if discovery
-                    .groups()
-                    .any(|g| g.name() == "project.openshift.io")
-                {
-                    return Ok(KubernetesDistribution::OpenshiftFamily);
-                }
-
-                // K3d / K3s
-                if version.git_version.contains("k3s") {
-                    return Ok(KubernetesDistribution::K3sFamily);
-                }
-
-                return Ok(KubernetesDistribution::Default);
-            })
-            .await
-    }
-
-    fn extract_and_normalize_token(&self, secret: &DynamicObject) -> Option<String> {
-        let token_b64 = secret
-            .data
-            .get("token")
-            .or_else(|| secret.data.get("data").and_then(|d| d.get("token")))
-            .and_then(|v| v.as_str())?;
-
-        let bytes = general_purpose::STANDARD.decode(token_b64).ok()?;
-
-        let s = String::from_utf8(bytes).ok()?;
-
-        let cleaned = s
-            .trim_matches(|c: char| c.is_whitespace() || c == '\0')
-            .to_string();
-        Some(cleaned)
-    }
-
-    pub fn build_cluster_rolebinding(
-        &self,
-        service_account_name: &str,
-        clusterrole_name: &str,
-        ns: &str,
-    ) -> ClusterRoleBinding {
-        ClusterRoleBinding {
-            metadata: ObjectMeta {
-                name: Some(format!("{}-view-binding", service_account_name)),
-                ..Default::default()
-            },
-            role_ref: RoleRef {
-                api_group: "rbac.authorization.k8s.io".into(),
-                kind: "ClusterRole".into(),
-                name: clusterrole_name.into(),
-            },
-            subjects: Some(vec![Subject {
-                kind: "ServiceAccount".into(),
-                name: service_account_name.into(),
-                namespace: Some(ns.into()),
-                ..Default::default()
-            }]),
-        }
-    }
-
-    pub fn build_sa_token_secret(
-        &self,
-        secret_name: &str,
-        service_account_name: &str,
-        ns: &str,
-    ) -> Secret {
-        let mut annotations = BTreeMap::new();
-        annotations.insert(
-            "kubernetes.io/service-account.name".to_string(),
-            service_account_name.to_string(),
-        );
-
-        Secret {
-            metadata: ObjectMeta {
-                name: Some(secret_name.into()),
-                namespace: Some(ns.into()),
-                annotations: Some(annotations),
-                ..Default::default()
-            },
-            type_: Some("kubernetes.io/service-account-token".to_string()),
-            ..Default::default()
-        }
-    }
-
-    fn build_grafana_datasource(
-        &self,
-        name: &str,
-        ns: &str,
-        label_selector: &LabelSelector,
-        url: &str,
-        token: &str,
-    ) -> GrafanaDatasource {
-        let mut json_data = BTreeMap::new();
-        json_data.insert("timeInterval".to_string(), "5s".to_string());
-
-        GrafanaDatasource {
-            metadata: ObjectMeta {
-                name: Some(name.to_string()),
-                namespace: Some(ns.to_string()),
-                ..Default::default()
-            },
-            spec: GrafanaDatasourceSpec {
-                instance_selector: label_selector.clone(),
-                allow_cross_namespace_import: Some(true),
-                values_from: None,
-                datasource: GrafanaDatasourceConfig {
-                    access: "proxy".to_string(),
-                    name: name.to_string(),
-                    r#type: "prometheus".to_string(),
-                    url: url.to_string(),
-                    database: None,
-                    json_data: Some(GrafanaDatasourceJsonData {
-                        time_interval: Some("60s".to_string()),
-                        http_header_name1: Some("Authorization".to_string()),
-                        tls_skip_verify: Some(true),
-                        oauth_pass_thru: Some(true),
-                    }),
-                    secure_json_data: Some(GrafanaDatasourceSecureJsonData {
-                        http_header_value1: Some(format!("Bearer {token}")),
-                    }),
-                    is_default: Some(false),
-                    editable: Some(true),
-                },
-            },
-        }
-    }
-
-    fn build_grafana_dashboard(
-        &self,
-        ns: &str,
-        label_selector: &LabelSelector,
-    ) -> GrafanaDashboard {
-        let graf_dashboard = GrafanaDashboard {
-            metadata: ObjectMeta {
-                name: Some(format!("grafana-dashboard-{}", ns)),
-                namespace: Some(ns.to_string()),
-                ..Default::default()
-            },
-            spec: GrafanaDashboardSpec {
-                resync_period: Some("30s".to_string()),
-                instance_selector: label_selector.clone(),
-                datasources: Some(vec![GrafanaDashboardDatasource {
-                    input_name: "DS_PROMETHEUS".to_string(),
-                    datasource_name: "thanos-openshift-monitoring".to_string(),
-                }]),
-                json: None,
-                grafana_com: Some(GrafanaCom {
-                    id: 17406,
-                    revision: None,
-                }),
-            },
-        };
-        graf_dashboard
-    }
-
-    fn build_grafana(&self, ns: &str, labels: &BTreeMap<String, String>) -> GrafanaCRD {
-        let grafana = GrafanaCRD {
-            metadata: ObjectMeta {
-                name: Some(format!("grafana-{}", ns)),
-                namespace: Some(ns.to_string()),
-                labels: Some(labels.clone()),
-                ..Default::default()
-            },
-            spec: GrafanaSpec {
-                config: None,
-                admin_user: None,
-                admin_password: None,
-                ingress: None,
-                persistence: None,
-                resources: None,
-            },
-        };
-        grafana
-    }
-
-    async fn build_grafana_ingress(&self, ns: &str) -> K8sIngressScore {
-        let domain = self.get_domain(&format!("grafana-{}", ns)).await.unwrap();
-        let name = format!("{}-grafana", ns);
-        let backend_service = format!("grafana-{}-service", ns);
-
-        K8sIngressScore {
-            name: fqdn::fqdn!(&name),
-            host: fqdn::fqdn!(&domain),
-            backend_service: fqdn::fqdn!(&backend_service),
-            port: 3000,
-            path: Some("/".to_string()),
-            path_type: Some(PathType::Prefix),
-            namespace: Some(fqdn::fqdn!(&ns)),
-            ingress_class_name: Some("openshift-default".to_string()),
-        }
-    }
-
     async fn get_cluster_observability_operator_prometheus_application_score(
         &self,
         sender: RHOBObservability,
@@ -562,14 +191,13 @@ impl K8sAnywhereTopology {
         &self,
         sender: CRDPrometheus,
         receivers: Option<Vec<Box<dyn AlertReceiver<CRDPrometheus>>>>,
-        service_monitors: Option<Vec<ServiceMonitor>>,
     ) -> K8sPrometheusCRDAlertingScore {
-        return K8sPrometheusCRDAlertingScore {
+        K8sPrometheusCRDAlertingScore {
            sender,
            receivers: receivers.unwrap_or_default(),
-            service_monitors: service_monitors.unwrap_or_default(),
+            service_monitors: vec![],
             prometheus_rules: vec![],
-        };
+        }
     }
 
     async fn openshift_ingress_operator_available(&self) -> Result<(), PreparationError> {
@@ -837,30 +465,6 @@ impl K8sAnywhereTopology {
             details: "prometheus operator present in cluster".into(),
         })
     }
-
-    async fn install_grafana_operator(
-        &self,
-        inventory: &Inventory,
-        ns: Option<&str>,
-    ) -> Result<PreparationOutcome, PreparationError> {
-        let namespace = ns.unwrap_or("grafana");
-        info!("installing grafana operator in ns {namespace}");
-        let tenant = self.get_k8s_tenant_manager()?.get_tenant_config().await;
-        let mut namespace_scope = false;
-        if tenant.is_some() {
-            namespace_scope = true;
-        }
-        let _grafana_operator_score = grafana_helm_chart_score(namespace, namespace_scope)
-            .interpret(inventory, self)
-            .await
-            .map_err(|e| PreparationError::new(e.to_string()));
-        Ok(PreparationOutcome::Success {
-            details: format!(
-                "Successfully installed grafana operator in ns {}",
-                ns.unwrap()
-            ),
-        })
-    }
 }
 
 #[derive(Clone, Debug)]
@@ -28,7 +28,13 @@ pub trait LoadBalancer: Send + Sync {
         &self,
         service: &LoadBalancerService,
     ) -> Result<(), ExecutorError> {
-        self.add_service(service).await?;
+        debug!(
+            "Listing LoadBalancer services {:?}",
+            self.list_services().await
+        );
+        if !self.list_services().await.contains(service) {
+            self.add_service(service).await?;
+        }
         Ok(())
     }
 }
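
Note: the new guard makes the default `ensure_service` idempotent: repeated calls add the service only once. It relies on `LoadBalancerService` equality, since `Vec::contains` needs `PartialEq`, which the type presumably derives. Usage sketch:

    // Sketch: safe to call repeatedly; the second call is a no-op because the
    // service already shows up in list_services().
    lb.ensure_service(&service).await?;
    lb.ensure_service(&service).await?;
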
@@ -1,15 +1,8 @@
-use std::{
-    error::Error,
-    fmt::{self, Debug},
-    net::Ipv4Addr,
-    str::FromStr,
-    sync::Arc,
-};
+use std::{error::Error, net::Ipv4Addr, str::FromStr, sync::Arc};
 
 use async_trait::async_trait;
 use derive_new::new;
 use harmony_types::{
-    id::Id,
     net::{IpAddress, MacAddress},
     switch::PortLocation,
 };
@@ -26,8 +19,8 @@ pub struct DHCPStaticEntry {
     pub ip: Ipv4Addr,
 }
 
-impl fmt::Display for DHCPStaticEntry {
-    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+impl std::fmt::Display for DHCPStaticEntry {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
         let mac = self
             .mac
             .iter()
@@ -49,8 +42,8 @@ pub trait Firewall: Send + Sync {
     fn get_host(&self) -> LogicalHost;
 }
 
-impl Debug for dyn Firewall {
-    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+impl std::fmt::Debug for dyn Firewall {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
         f.write_fmt(format_args!("Firewall {}", self.get_ip()))
     }
 }
@@ -72,7 +65,7 @@ pub struct PxeOptions {
 }
 
 #[async_trait]
-pub trait DhcpServer: Send + Sync + Debug {
+pub trait DhcpServer: Send + Sync + std::fmt::Debug {
     async fn add_static_mapping(&self, entry: &DHCPStaticEntry) -> Result<(), ExecutorError>;
     async fn remove_static_mapping(&self, mac: &MacAddress) -> Result<(), ExecutorError>;
     async fn list_static_mappings(&self) -> Vec<(MacAddress, IpAddress)>;
@@ -111,8 +104,8 @@ pub trait DnsServer: Send + Sync {
     }
 }
 
-impl Debug for dyn DnsServer {
-    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+impl std::fmt::Debug for dyn DnsServer {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
         f.write_fmt(format_args!("DnsServer {}", self.get_ip()))
     }
 }
@@ -148,8 +141,8 @@ pub enum DnsRecordType {
     TXT,
 }
 
-impl fmt::Display for DnsRecordType {
-    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+impl std::fmt::Display for DnsRecordType {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
         match self {
             DnsRecordType::A => write!(f, "A"),
             DnsRecordType::AAAA => write!(f, "AAAA"),
@@ -192,12 +185,15 @@ pub trait Switch: Send + Sync {
         mac_address: &MacAddress,
     ) -> Result<Option<PortLocation>, SwitchError>;
 
-    async fn configure_host_network(&self, config: &HostNetworkConfig) -> Result<(), SwitchError>;
+    async fn configure_host_network(
+        &self,
+        host: &PhysicalHost,
+        config: HostNetworkConfig,
+    ) -> Result<(), SwitchError>;
 }
 
 #[derive(Clone, Debug, PartialEq)]
 pub struct HostNetworkConfig {
-    pub host_id: Id,
     pub switch_ports: Vec<SwitchPort>,
 }
 
@@ -220,8 +216,8 @@ pub struct SwitchError {
     msg: String,
 }
 
-impl fmt::Display for SwitchError {
-    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+impl std::fmt::Display for SwitchError {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
         f.write_str(&self.msg)
     }
 }
@@ -229,7 +225,7 @@ impl fmt::Display for SwitchError {
 impl Error for SwitchError {}
 
 #[async_trait]
-pub trait SwitchClient: Debug + Send + Sync {
+pub trait SwitchClient: Send + Sync {
     /// Executes essential, idempotent, one-time initial configuration steps.
     ///
     /// This is an opiniated procedure that setups a switch to provide high availability
@@ -1,7 +1,6 @@
-use std::{any::Any, collections::HashMap};
+use std::any::Any;

 use async_trait::async_trait;
-use kube::api::DynamicObject;
 use log::debug;

 use crate::{
@@ -22,7 +21,6 @@ pub struct AlertingInterpret<S: AlertSender> {
     pub sender: S,
     pub receivers: Vec<Box<dyn AlertReceiver<S>>>,
     pub rules: Vec<Box<dyn AlertRule<S>>>,
-    pub scrape_targets: Option<Vec<Box<dyn ScrapeTarget<S>>>>,
 }

 #[async_trait]
@@ -32,7 +30,6 @@ impl<S: AlertSender + Installable<T>, T: Topology> Interpret<T> for AlertingInte
         inventory: &Inventory,
         topology: &T,
     ) -> Result<Outcome, InterpretError> {
-        debug!("hit sender configure for AlertingInterpret");
         self.sender.configure(inventory, topology).await?;
         for receiver in self.receivers.iter() {
             receiver.install(&self.sender).await?;
@@ -41,12 +38,6 @@ impl<S: AlertSender + Installable<T>, T: Topology> Interpret<T> for AlertingInte
             debug!("installing rule: {:#?}", rule);
             rule.install(&self.sender).await?;
         }
-        if let Some(targets) = &self.scrape_targets {
-            for target in targets.iter() {
-                debug!("installing scrape_target: {:#?}", target);
-                target.install(&self.sender).await?;
-            }
-        }
         self.sender.ensure_installed(inventory, topology).await?;
         Ok(Outcome::success(format!(
             "successfully installed alert sender {}",
@@ -77,15 +68,6 @@ pub trait AlertReceiver<S: AlertSender>: std::fmt::Debug + Send + Sync {
     fn name(&self) -> String;
     fn clone_box(&self) -> Box<dyn AlertReceiver<S>>;
     fn as_any(&self) -> &dyn Any;
-    fn as_alertmanager_receiver(&self) -> Result<AlertManagerReceiver, String>;
-}
-
-#[derive(Debug)]
-pub struct AlertManagerReceiver {
-    pub receiver_config: serde_json::Value,
-    // FIXME we should not leak k8s here. DynamicObject is k8s specific
-    pub additional_ressources: Vec<DynamicObject>,
-    pub route_config: serde_json::Value,
 }

 #[async_trait]
@@ -95,7 +77,6 @@ pub trait AlertRule<S: AlertSender>: std::fmt::Debug + Send + Sync {
 }

 #[async_trait]
-pub trait ScrapeTarget<S: AlertSender>: std::fmt::Debug + Send + Sync {
-    async fn install(&self, sender: &S) -> Result<Outcome, InterpretError>;
-    fn clone_box(&self) -> Box<dyn ScrapeTarget<S>>;
+pub trait ScrapeTarget<S: AlertSender> {
+    async fn install(&self, sender: &S) -> Result<(), InterpretError>;
 }
@@ -14,7 +14,7 @@ use k8s_openapi::{
     },
     apimachinery::pkg::util::intstr::IntOrString,
 };
-use kube::{Resource, api::DynamicObject};
+use kube::Resource;
 use log::debug;
 use serde::de::DeserializeOwned;
 use serde_json::json;
@@ -1,14 +1,15 @@
 use async_trait::async_trait;
 use brocade::{BrocadeClient, BrocadeOptions, InterSwitchLink, InterfaceStatus, PortOperatingMode};
+use harmony_secret::Secret;
 use harmony_types::{
     net::{IpAddress, MacAddress},
     switch::{PortDeclaration, PortLocation},
 };
 use option_ext::OptionExt;
+use serde::{Deserialize, Serialize};

 use crate::topology::{SwitchClient, SwitchError};

-#[derive(Debug)]
 pub struct BrocadeSwitchClient {
     brocade: Box<dyn BrocadeClient + Send + Sync>,
 }
@@ -113,6 +114,12 @@ impl SwitchClient for BrocadeSwitchClient {
     }
 }

+#[derive(Secret, Serialize, Deserialize, Debug)]
+pub struct BrocadeSwitchAuth {
+    pub username: String,
+    pub password: String,
+}
+
 #[cfg(test)]
 mod tests {
     use std::sync::{Arc, Mutex};
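A minimal sketch of what the new `BrocadeSwitchAuth` value looks like in practice, assuming `serde_json` is available in the consuming crate and using placeholder credentials (how the `Secret` derive registers the type with the secret store is defined elsewhere and not shown here):

    // Build the credential struct added above.
    let auth = BrocadeSwitchAuth {
        username: "admin".to_string(),
        password: "change-me".to_string(),
    };
    // The Serialize derive lets it round-trip through JSON,
    // which is how a secret backend would typically persist it.
    let json = serde_json::to_string(&auth).expect("BrocadeSwitchAuth serializes");
    assert!(json.contains("admin"));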
@@ -228,7 +235,7 @@ mod tests {
         assert_that!(*configured_interfaces).is_empty();
     }

-    #[derive(Debug, Clone)]
+    #[derive(Clone)]
     struct FakeBrocadeClient {
         stack_topology: Vec<InterSwitchLink>,
         interfaces: Vec<InterfaceInfo>,
@@ -11,7 +11,7 @@ pub struct InventoryRepositoryFactory;
 impl InventoryRepositoryFactory {
     pub async fn build() -> Result<Box<dyn InventoryRepository>, RepoError> {
         Ok(Box::new(
-            SqliteInventoryRepository::new(&DATABASE_URL).await?,
+            SqliteInventoryRepository::new(&(*DATABASE_URL)).await?,
         ))
     }
 }
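The `&(*DATABASE_URL)` form above explicitly dereferences the lazily initialised static before re-borrowing it, so the constructor receives a plain string reference rather than a reference to the wrapper type. A small self-contained sketch of the same pattern, assuming the static is a `std::sync::LazyLock<String>` (the real type and value of `DATABASE_URL` live elsewhere in the crate and are assumptions here):

    use std::sync::LazyLock;

    static DATABASE_URL: LazyLock<String> = LazyLock::new(|| "sqlite://harmony.db".to_string());

    fn takes_str(url: &str) {
        println!("connecting to {url}");
    }

    fn main() {
        // `*DATABASE_URL` forces the Deref to String, then `&` re-borrows it,
        // which coerces cleanly to &str at the call site.
        takes_str(&(*DATABASE_URL));
    }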
@@ -1,182 +0,0 @@
-use k8s_openapi::Resource as K8sResource;
-use kube::api::{ApiResource, DynamicObject, GroupVersionKind};
-use kube::core::TypeMeta;
-use serde::Serialize;
-use serde::de::DeserializeOwned;
-use serde_json::Value;
-
-/// Convert a typed Kubernetes resource `K` into a `DynamicObject`.
-///
-/// Requirements:
-/// - `K` must be a k8s_openapi resource (provides static GVK via `Resource`).
-/// - `K` must have standard Kubernetes shape (metadata + payload fields).
-///
-/// Notes:
-/// - We set `types` (apiVersion/kind) and copy `metadata`.
-/// - We place the remaining top-level fields into `obj.data` as JSON.
-/// - Scope is not encoded on the object itself; you still need the corresponding
-///   `DynamicResource` (derived from K::group/version/kind) when constructing an Api.
-///
-/// Example usage:
-/// let dyn_obj = kube_resource_to_dynamic(secret)?;
-/// let api: Api<DynamicObject> = Api::namespaced_with(client, "ns", &dr);
-/// api.patch(&dyn_obj.name_any(), &PatchParams::apply("mgr"), &Patch::Apply(dyn_obj)).await?;
-pub fn kube_resource_to_dynamic<K>(res: &K) -> Result<DynamicObject, String>
-where
-    K: K8sResource + Serialize + DeserializeOwned,
-{
-    // Serialize the typed resource to JSON so we can split metadata and payload
-    let mut v = serde_json::to_value(res).map_err(|e| format!("Failed to serialize : {e}"))?;
-    let obj = v
-        .as_object_mut()
-        .ok_or_else(|| "expected object JSON".to_string())?;
-
-    // Extract and parse metadata into kube::core::ObjectMeta
-    let metadata_value = obj
-        .remove("metadata")
-        .ok_or_else(|| "missing metadata".to_string())?;
-    let metadata: kube::core::ObjectMeta = serde_json::from_value(metadata_value)
-        .map_err(|e| format!("Failed to deserialize : {e}"))?;
-
-    // Name is required for DynamicObject::new; prefer metadata.name
-    let name = metadata
-        .name
-        .clone()
-        .ok_or_else(|| "metadata.name is required".to_string())?;
-
-    // Remaining fields (spec/status/data/etc.) become the dynamic payload
-    let payload = Value::Object(obj.clone());
-
-    // Construct the DynamicObject
-    let mut dyn_obj = DynamicObject::new(
-        &name,
-        &ApiResource::from_gvk(&GroupVersionKind::gvk(K::GROUP, K::VERSION, K::KIND)),
-    );
-    dyn_obj.types = Some(TypeMeta {
-        api_version: api_version_for::<K>(),
-        kind: K::KIND.into(),
-    });
-
-    // Preserve namespace/labels/annotations/etc.
-    dyn_obj.metadata = metadata;
-
-    // Attach payload
-    dyn_obj.data = payload;
-
-    Ok(dyn_obj)
-}
-
-/// Helper: compute apiVersion string ("group/version" or "v1" for core).
-fn api_version_for<K>() -> String
-where
-    K: K8sResource,
-{
-    let group = K::GROUP;
-    let version = K::VERSION;
-    if group.is_empty() {
-        version.to_string() // core/v1 => "v1"
-    } else {
-        format!("{}/{}", group, version)
-    }
-}
-#[cfg(test)]
-mod test {
-    use super::*;
-    use k8s_openapi::api::{
-        apps::v1::{Deployment, DeploymentSpec},
-        core::v1::{PodTemplateSpec, Secret},
-    };
-    use kube::api::ObjectMeta;
-    use pretty_assertions::assert_eq;
-
-    #[test]
-    fn secret_to_dynamic_roundtrip() {
-        // Create a sample Secret resource
-        let mut secret = Secret {
-            metadata: ObjectMeta {
-                name: Some("my-secret".to_string()),
-                ..Default::default()
-            },
-            type_: Some("kubernetes.io/service-account-token".to_string()),
-            ..Default::default()
-        };
-
-        // Convert to DynamicResource
-        let dynamic: DynamicObject =
-            kube_resource_to_dynamic(&secret).expect("Failed to convert Secret to DynamicResource");
-
-        // Serialize both the original and dynamic resources to Value
-        let original_value = serde_json::to_value(&secret).expect("Failed to serialize Secret");
-        let dynamic_value =
-            serde_json::to_value(&dynamic).expect("Failed to serialize DynamicResource");
-
-        // Assert that they are identical
-        assert_eq!(original_value, dynamic_value);
-
-        secret.metadata.namespace = Some("false".to_string());
-        let modified_value = serde_json::to_value(&secret).expect("Failed to serialize Secret");
-        assert_ne!(modified_value, dynamic_value);
-    }
-
-    #[test]
-    fn deployment_to_dynamic_roundtrip() {
-        // Create a sample Deployment with nested structures
-        let mut deployment = Deployment {
-            metadata: ObjectMeta {
-                name: Some("my-deployment".to_string()),
-                labels: Some({
-                    let mut map = std::collections::BTreeMap::new();
-                    map.insert("app".to_string(), "nginx".to_string());
-                    map
-                }),
-                ..Default::default()
-            },
-            spec: Some(DeploymentSpec {
-                replicas: Some(3),
-                selector: Default::default(),
-                template: PodTemplateSpec {
-                    metadata: Some(ObjectMeta {
-                        labels: Some({
-                            let mut map = std::collections::BTreeMap::new();
-                            map.insert("app".to_string(), "nginx".to_string());
-                            map
-                        }),
-                        ..Default::default()
-                    }),
-                    spec: Some(Default::default()), // PodSpec with empty containers for simplicity
-                },
-                ..Default::default()
-            }),
-            ..Default::default()
-        };
-
-        let dynamic = kube_resource_to_dynamic(&deployment).expect("Failed to convert Deployment");
-
-        let original_value = serde_json::to_value(&deployment).unwrap();
-        let dynamic_value = serde_json::to_value(&dynamic).unwrap();
-
-        assert_eq!(original_value, dynamic_value);
-
-        assert_eq!(
-            dynamic.data.get("spec").unwrap().get("replicas").unwrap(),
-            3
-        );
-        assert_eq!(
-            dynamic
-                .data
-                .get("spec")
-                .unwrap()
-                .get("template")
-                .unwrap()
-                .get("metadata")
-                .unwrap()
-                .get("labels")
-                .unwrap()
-                .get("app")
-                .unwrap()
-                .as_str()
-                .unwrap(),
-            "nginx".to_string()
-        );
-    }
-}
@@ -3,6 +3,5 @@ pub mod executors;
 pub mod hp_ilo;
 pub mod intel_amt;
 pub mod inventory;
-pub mod kube;
 pub mod opnsense;
 mod sqlx;
@@ -26,13 +26,19 @@ impl LoadBalancer for OPNSenseFirewall {
     }

     async fn add_service(&self, service: &LoadBalancerService) -> Result<(), ExecutorError> {
+        warn!(
+            "TODO : the current implementation does not check / cleanup / merge with existing haproxy services properly. Make sure to manually verify that the configuration is correct after executing any operation here"
+        );
         let mut config = self.opnsense_config.write().await;
-        let mut load_balancer = config.load_balancer();

         let (frontend, backend, servers, healthcheck) =
             harmony_load_balancer_service_to_haproxy_xml(service);
-        load_balancer.configure_service(frontend, backend, servers, healthcheck);
+        let mut load_balancer = config.load_balancer();
+        load_balancer.add_backend(backend);
+        load_balancer.add_frontend(frontend);
+        load_balancer.add_servers(servers);
+        if let Some(healthcheck) = healthcheck {
+            load_balancer.add_healthcheck(healthcheck);
+        }

         Ok(())
     }
@@ -100,7 +106,7 @@ pub(crate) fn haproxy_xml_config_to_harmony_loadbalancer(
         .backends
         .backends
         .iter()
-        .find(|b| Some(b.uuid.clone()) == frontend.default_backend);
+        .find(|b| b.uuid == frontend.default_backend);

     let mut health_check = None;
     match matching_backend {
@@ -110,7 +116,8 @@
         }
         None => {
             warn!(
-                "HAProxy config could not find a matching backend for frontend {frontend:?}"
+                "HAProxy config could not find a matching backend for frontend {:?}",
+                frontend
             );
         }
     }
@@ -145,11 +152,11 @@ pub(crate) fn get_servers_for_backend(
         .servers
         .iter()
         .filter_map(|server| {
-            let address = server.address.clone()?;
-            let port = server.port?;
-
             if backend_servers.contains(&server.uuid.as_str()) {
-                return Some(BackendServer { address, port });
+                return Some(BackendServer {
+                    address: server.address.clone(),
+                    port: server.port,
+                });
             }
             None
         })
@@ -340,7 +347,7 @@ pub(crate) fn harmony_load_balancer_service_to_haproxy_xml(
         name: format!("frontend_{}", service.listening_port),
         bind: service.listening_port.to_string(),
         mode: "tcp".to_string(), // TODO do not depend on health check here
-        default_backend: Some(backend.uuid.clone()),
+        default_backend: backend.uuid.clone(),
         ..Default::default()
     };
     info!("HAPRoxy frontend and backend mode currently hardcoded to tcp");
@@ -354,8 +361,8 @@ fn server_to_haproxy_server(server: &BackendServer) -> HAProxyServer {
         uuid: Uuid::new_v4().to_string(),
         name: format!("{}_{}", &server.address, &server.port),
         enabled: 1,
-        address: Some(server.address.clone()),
-        port: Some(server.port),
+        address: server.address.clone(),
+        port: server.port,
         mode: "active".to_string(),
         server_type: "static".to_string(),
         ..Default::default()
@@ -378,8 +385,8 @@ mod tests {
         let mut haproxy = HAProxy::default();
         let server = HAProxyServer {
             uuid: "server1".to_string(),
-            address: Some("192.168.1.1".to_string()),
-            port: Some(80),
+            address: "192.168.1.1".to_string(),
+            port: 80,
             ..Default::default()
         };
         haproxy.servers.servers.push(server);
@@ -404,8 +411,8 @@ mod tests {
         let mut haproxy = HAProxy::default();
         let server = HAProxyServer {
             uuid: "server1".to_string(),
-            address: Some("192.168.1.1".to_string()),
-            port: Some(80),
+            address: "192.168.1.1".to_string(),
+            port: 80,
             ..Default::default()
         };
         haproxy.servers.servers.push(server);
@@ -424,8 +431,8 @@ mod tests {
         let mut haproxy = HAProxy::default();
         let server = HAProxyServer {
             uuid: "server1".to_string(),
-            address: Some("192.168.1.1".to_string()),
-            port: Some(80),
+            address: "192.168.1.1".to_string(),
+            port: 80,
             ..Default::default()
         };
         haproxy.servers.servers.push(server);
@@ -446,16 +453,16 @@ mod tests {
         let mut haproxy = HAProxy::default();
         let server = HAProxyServer {
             uuid: "server1".to_string(),
-            address: Some("some-hostname.test.mcd".to_string()),
-            port: Some(80),
+            address: "some-hostname.test.mcd".to_string(),
+            port: 80,
             ..Default::default()
         };
         haproxy.servers.servers.push(server);

         let server = HAProxyServer {
             uuid: "server2".to_string(),
-            address: Some("192.168.1.2".to_string()),
-            port: Some(8080),
+            address: "192.168.1.2".to_string(),
+            port: 8080,
             ..Default::default()
         };
         haproxy.servers.servers.push(server);
@@ -2,11 +2,7 @@ use crate::modules::application::{
     Application, ApplicationFeature, InstallationError, InstallationOutcome,
 };
 use crate::modules::monitoring::application_monitoring::application_monitoring_score::ApplicationMonitoringScore;
-use crate::modules::monitoring::grafana::grafana::Grafana;
 use crate::modules::monitoring::kube_prometheus::crd::crd_alertmanager_config::CRDPrometheus;
-use crate::modules::monitoring::kube_prometheus::crd::service_monitor::{
-    ServiceMonitor, ServiceMonitorSpec,
-};
 use crate::topology::MultiTargetTopology;
 use crate::topology::ingress::Ingress;
 use crate::{
@@ -18,7 +14,7 @@ use crate::{
     topology::{HelmCommand, K8sclient, Topology, tenant::TenantManager},
 };
 use crate::{
-    modules::prometheus::prometheus::PrometheusMonitoring,
+    modules::prometheus::prometheus::PrometheusApplicationMonitoring,
     topology::oberservability::monitoring::AlertReceiver,
 };
 use async_trait::async_trait;
@@ -26,7 +22,6 @@ use base64::{Engine as _, engine::general_purpose};
 use harmony_secret::SecretManager;
 use harmony_secret_derive::Secret;
 use harmony_types::net::Url;
-use kube::api::ObjectMeta;
 use log::{debug, info};
 use serde::{Deserialize, Serialize};
 use std::sync::Arc;
@@ -45,8 +40,7 @@ impl<
         + TenantManager
         + K8sclient
         + MultiTargetTopology
-        + PrometheusMonitoring<CRDPrometheus>
-        + Grafana
+        + PrometheusApplicationMonitoring<CRDPrometheus>
         + Ingress
         + std::fmt::Debug,
     > ApplicationFeature<T> for Monitoring
@@ -63,20 +57,10 @@ impl<
             .unwrap_or_else(|| self.application.name());
         let domain = topology.get_domain("ntfy").await.unwrap();

-        let app_service_monitor = ServiceMonitor {
-            metadata: ObjectMeta {
-                name: Some(self.application.name()),
-                namespace: Some(namespace.clone()),
-                ..Default::default()
-            },
-            spec: ServiceMonitorSpec::default(),
-        };
-
         let mut alerting_score = ApplicationMonitoringScore {
             sender: CRDPrometheus {
                 namespace: namespace.clone(),
                 client: topology.k8s_client().await.unwrap(),
-                service_monitor: vec![app_service_monitor],
             },
             application: self.application.clone(),
             receivers: self.alert_receiver.clone(),
@@ -18,7 +18,7 @@ use crate::{
     topology::{HelmCommand, K8sclient, Topology, tenant::TenantManager},
 };
 use crate::{
-    modules::prometheus::prometheus::PrometheusMonitoring,
+    modules::prometheus::prometheus::PrometheusApplicationMonitoring,
     topology::oberservability::monitoring::AlertReceiver,
 };
 use async_trait::async_trait;
@@ -42,7 +42,7 @@ impl<
         + MultiTargetTopology
         + Ingress
         + std::fmt::Debug
-        + PrometheusMonitoring<RHOBObservability>,
+        + PrometheusApplicationMonitoring<RHOBObservability>,
     > ApplicationFeature<T> for Monitoring
 {
     async fn ensure_installed(
@@ -1,209 +0,0 @@
-use std::sync::Arc;
-
-use async_trait::async_trait;
-use harmony_types::id::Id;
-use kube::{CustomResource, api::ObjectMeta};
-use schemars::JsonSchema;
-use serde::{Deserialize, Serialize};
-
-use crate::{
-    data::Version,
-    interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
-    inventory::Inventory,
-    score::Score,
-    topology::{K8sclient, Topology, k8s::K8sClient},
-};
-
-#[derive(Clone, Debug, Serialize)]
-pub struct ClusterIssuerScore {
-    email: String,
-    server: String,
-    issuer_name: String,
-    namespace: String,
-}
-
-impl<T: Topology + K8sclient> Score<T> for ClusterIssuerScore {
-    fn name(&self) -> String {
-        "ClusterIssuerScore".to_string()
-    }
-
-    #[doc(hidden)]
-    fn create_interpret(&self) -> Box<dyn Interpret<T>> {
-        Box::new(ClusterIssuerInterpret {
-            score: self.clone(),
-        })
-    }
-}
-
-#[derive(Debug, Clone)]
-pub struct ClusterIssuerInterpret {
-    score: ClusterIssuerScore,
-}
-
-#[async_trait]
-impl<T: Topology + K8sclient> Interpret<T> for ClusterIssuerInterpret {
-    async fn execute(
-        &self,
-        _inventory: &Inventory,
-        topology: &T,
-    ) -> Result<Outcome, InterpretError> {
-        self.apply_cluster_issuer(topology.k8s_client().await.unwrap())
-            .await
-    }
-
-    fn get_name(&self) -> InterpretName {
-        InterpretName::Custom("ClusterIssuer")
-    }
-
-    fn get_version(&self) -> Version {
-        todo!()
-    }
-
-    fn get_status(&self) -> InterpretStatus {
-        todo!()
-    }
-
-    fn get_children(&self) -> Vec<Id> {
-        todo!()
-    }
-}
-
-impl ClusterIssuerInterpret {
-    async fn validate_cert_manager(
-        &self,
-        client: &Arc<K8sClient>,
-    ) -> Result<Outcome, InterpretError> {
-        let cert_manager = "cert-manager".to_string();
-        let operator_namespace = "openshift-operators".to_string();
-        match client
-            .get_deployment(&cert_manager, Some(&operator_namespace))
-            .await
-        {
-            Ok(Some(deployment)) => {
-                if let Some(status) = deployment.status {
-                    let ready_count = status.ready_replicas.unwrap_or(0);
-                    if ready_count >= 1 {
-                        return Ok(Outcome::success(format!(
-                            "'{}' is ready with {} replica(s).",
-                            &cert_manager, ready_count
-                        )));
-                    } else {
-                        return Err(InterpretError::new(
-                            "cert-manager operator not ready in cluster".to_string(),
-                        ));
-                    }
-                } else {
-                    Err(InterpretError::new(format!(
-                        "failed to get deployment status {} in ns {}",
-                        &cert_manager, &operator_namespace
-                    )))
-                }
-            }
-            Ok(None) => Err(InterpretError::new(format!(
-                "Deployment '{}' not found in namespace '{}'.",
-                &cert_manager, &operator_namespace
-            ))),
-            Err(e) => Err(InterpretError::new(format!(
-                "Failed to query for deployment '{}': {}",
-                &cert_manager, e
-            ))),
-        }
-    }
-
-    fn build_cluster_issuer(&self) -> Result<ClusterIssuer, InterpretError> {
-        let issuer_name = &self.score.issuer_name;
-        let email = &self.score.email;
-        let server = &self.score.server;
-        let namespace = &self.score.namespace;
-        let cluster_issuer = ClusterIssuer {
-            metadata: ObjectMeta {
-                name: Some(issuer_name.to_string()),
-                namespace: Some(namespace.to_string()),
-                ..Default::default()
-            },
-            spec: ClusterIssuerSpec {
-                acme: AcmeSpec {
-                    email: email.to_string(),
-                    private_key_secret_ref: PrivateKeySecretRef {
-                        name: issuer_name.to_string(),
-                    },
-                    server: server.to_string(),
-                    solvers: vec![SolverSpec {
-                        http01: Some(Http01Solver {
-                            ingress: Http01Ingress {
-                                class: "nginx".to_string(),
-                            },
-                        }),
-                    }],
-                },
-            },
-        };
-
-        Ok(cluster_issuer)
-    }
-
-    pub async fn apply_cluster_issuer(
-        &self,
-        client: Arc<K8sClient>,
-    ) -> Result<Outcome, InterpretError> {
-        let namespace = self.score.namespace.clone();
-        self.validate_cert_manager(&client).await?;
-        let cluster_issuer = self.build_cluster_issuer().unwrap();
-        client
-            .apply_yaml(
-                &serde_yaml::to_value(cluster_issuer).unwrap(),
-                Some(&namespace),
-            )
-            .await?;
-        Ok(Outcome::success(format!(
-            "successfully deployed cluster operator: {} in namespace: {}",
-            self.score.issuer_name, self.score.namespace
-        )))
-    }
-}
-
-#[derive(CustomResource, Deserialize, Serialize, Clone, Debug, JsonSchema)]
-#[kube(
-    group = "cert-manager.io",
-    version = "v1",
-    kind = "ClusterIssuer",
-    plural = "clusterissuers"
-)]
-#[serde(rename_all = "camelCase")]
-pub struct ClusterIssuerSpec {
-    pub acme: AcmeSpec,
-}
-
-#[derive(Deserialize, Serialize, Clone, Debug, JsonSchema)]
-#[serde(rename_all = "camelCase")]
-pub struct AcmeSpec {
-    pub email: String,
-    pub private_key_secret_ref: PrivateKeySecretRef,
-    pub server: String,
-    pub solvers: Vec<SolverSpec>,
-}
-
-#[derive(Deserialize, Serialize, Clone, Debug, JsonSchema)]
-#[serde(rename_all = "camelCase")]
-pub struct PrivateKeySecretRef {
-    pub name: String,
-}
-
-#[derive(Deserialize, Serialize, Clone, Debug, JsonSchema)]
-#[serde(rename_all = "camelCase")]
-pub struct SolverSpec {
-    pub http01: Option<Http01Solver>,
-    // Other solver types (e.g., dns01) would go here as Options
-}
-
-#[derive(Deserialize, Serialize, Clone, Debug, JsonSchema)]
-#[serde(rename_all = "camelCase")]
-pub struct Http01Solver {
-    pub ingress: Http01Ingress,
-}
-
-#[derive(Deserialize, Serialize, Clone, Debug, JsonSchema)]
-#[serde(rename_all = "camelCase")]
-pub struct Http01Ingress {
-    pub class: String,
-}
@@ -1,3 +1,2 @@
-pub mod cluster_issuer;
 mod helm;
 pub use helm::*;
@@ -38,15 +38,13 @@ impl<
         + 'static
         + Send
         + Clone,
-    T: Topology + K8sclient,
+    T: Topology,
 > Score<T> for K8sResourceScore<K>
 where
     <K as kube::Resource>::DynamicType: Default,
 {
     fn create_interpret(&self) -> Box<dyn Interpret<T>> {
-        Box::new(K8sResourceInterpret {
-            score: self.clone(),
-        })
+        todo!()
     }

     fn name(&self) -> String {
@@ -1,23 +1,18 @@
 use std::any::Any;
-use std::collections::{BTreeMap, HashMap};
+use std::collections::BTreeMap;

 use async_trait::async_trait;
-use harmony_types::k8s_name::K8sName;
 use k8s_openapi::api::core::v1::Secret;
-use kube::Resource;
-use kube::api::{DynamicObject, ObjectMeta};
-use log::{debug, trace};
+use kube::api::ObjectMeta;
+use log::debug;
 use serde::Serialize;
 use serde_json::json;
 use serde_yaml::{Mapping, Value};

-use crate::infra::kube::kube_resource_to_dynamic;
 use crate::modules::monitoring::kube_prometheus::crd::crd_alertmanager_config::{
     AlertmanagerConfig, AlertmanagerConfigSpec, CRDPrometheus,
 };
 use crate::modules::monitoring::kube_prometheus::crd::rhob_alertmanager_config::RHOBObservability;
-use crate::modules::monitoring::okd::OpenshiftClusterAlertSender;
-use crate::topology::oberservability::monitoring::AlertManagerReceiver;
 use crate::{
     interpret::{InterpretError, Outcome},
     modules::monitoring::{
@@ -33,13 +28,14 @@ use harmony_types::net::Url;

 #[derive(Debug, Clone, Serialize)]
 pub struct DiscordWebhook {
-    pub name: K8sName,
+    pub name: String,
     pub url: Url,
-    pub selectors: Vec<HashMap<String, String>>,
 }

-impl DiscordWebhook {
-    fn get_receiver_config(&self) -> Result<AlertManagerReceiver, String> {
+#[async_trait]
+impl AlertReceiver<RHOBObservability> for DiscordWebhook {
+    async fn install(&self, sender: &RHOBObservability) -> Result<Outcome, InterpretError> {
+        let ns = sender.namespace.clone();
         let secret_name = format!("{}-secret", self.name.clone());
         let webhook_key = format!("{}", self.url.clone());

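With `name` now a plain `String` and the `selectors` field gone, constructing the receiver follows the shape used in the updated test near the end of this file; a minimal sketch with placeholder values (the webhook URL shown here is invented for illustration):

    let discord = DiscordWebhook {
        name: "team-alerts".to_string(),
        url: Url::Url(url::Url::parse("https://discord.example.invalid/webhook").unwrap()),
    };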
@@ -56,91 +52,33 @@ impl DiscordWebhook {
             ..Default::default()
         };

-        let mut matchers: Vec<String> = Vec::new();
-        for selector in &self.selectors {
-            trace!("selector: {:#?}", selector);
-            for (k, v) in selector {
-                matchers.push(format!("{} = {}", k, v));
-            }
-        }
-
-        Ok(AlertManagerReceiver {
-            additional_ressources: vec![kube_resource_to_dynamic(&secret)?],
-
-            receiver_config: json!({
-                "name": self.name,
-                "discord_configs": [
-                    {
-                        "webhook_url": self.url.clone(),
-                        "title": "{{ template \"discord.default.title\" . }}",
-                        "message": "{{ template \"discord.default.message\" . }}"
-                    }
-                ]
-            }),
-            route_config: json!({
-                "receiver": self.name,
-                "matchers": matchers,
-
-            }),
-        })
-    }
-}
-
-#[async_trait]
-impl AlertReceiver<OpenshiftClusterAlertSender> for DiscordWebhook {
-    async fn install(
-        &self,
-        sender: &OpenshiftClusterAlertSender,
-    ) -> Result<Outcome, InterpretError> {
-        todo!()
-    }
-
-    fn name(&self) -> String {
-        self.name.clone().to_string()
-    }
-
-    fn clone_box(&self) -> Box<dyn AlertReceiver<OpenshiftClusterAlertSender>> {
-        Box::new(self.clone())
-    }
-
-    fn as_any(&self) -> &dyn Any {
-        todo!()
-    }
-
-    fn as_alertmanager_receiver(&self) -> Result<AlertManagerReceiver, String> {
-        self.get_receiver_config()
-    }
-}
-
-#[async_trait]
-impl AlertReceiver<RHOBObservability> for DiscordWebhook {
-    fn as_alertmanager_receiver(&self) -> Result<AlertManagerReceiver, String> {
-        todo!()
-    }
-
-    async fn install(&self, sender: &RHOBObservability) -> Result<Outcome, InterpretError> {
-        let ns = sender.namespace.clone();
-
-        let config = self.get_receiver_config()?;
-        for resource in config.additional_ressources.iter() {
-            todo!("can I apply a dynamicresource");
-            // sender.client.apply(resource, Some(&ns)).await;
-        }
-
+        let _ = sender.client.apply(&secret, Some(&ns)).await;
         let spec = crate::modules::monitoring::kube_prometheus::crd::rhob_alertmanager_config::AlertmanagerConfigSpec {
             data: json!({
                 "route": {
                     "receiver": self.name,
                 },
                 "receivers": [
-                    config.receiver_config
+                    {
+                        "name": self.name,
+                        "discordConfigs": [
+                            {
+                                "apiURL": {
+                                    "name": secret_name,
+                                    "key": "webhook-url",
+                                },
+                                "title": "{{ template \"discord.default.title\" . }}",
+                                "message": "{{ template \"discord.default.message\" . }}"
+                            }
+                        ]
+                    }
                 ]
             }),
         };

         let alertmanager_configs = crate::modules::monitoring::kube_prometheus::crd::rhob_alertmanager_config::AlertmanagerConfig {
             metadata: ObjectMeta {
-                name: Some(self.name.clone().to_string()),
+                name: Some(self.name.clone()),
                 labels: Some(std::collections::BTreeMap::from([(
                     "alertmanagerConfig".to_string(),
                     "enabled".to_string(),
@@ -184,9 +122,6 @@ impl AlertReceiver<RHOBObservability> for DiscordWebhook {

 #[async_trait]
 impl AlertReceiver<CRDPrometheus> for DiscordWebhook {
-    fn as_alertmanager_receiver(&self) -> Result<AlertManagerReceiver, String> {
-        todo!()
-    }
     async fn install(&self, sender: &CRDPrometheus) -> Result<Outcome, InterpretError> {
         let ns = sender.namespace.clone();
         let secret_name = format!("{}-secret", self.name.clone());
@@ -232,7 +167,7 @@
         let alertmanager_configs = AlertmanagerConfig {
             metadata: ObjectMeta {
-                name: Some(self.name.clone().to_string()),
+                name: Some(self.name.clone()),
                 labels: Some(std::collections::BTreeMap::from([(
                     "alertmanagerConfig".to_string(),
                     "enabled".to_string(),
@@ -265,9 +200,6 @@

 #[async_trait]
 impl AlertReceiver<Prometheus> for DiscordWebhook {
-    fn as_alertmanager_receiver(&self) -> Result<AlertManagerReceiver, String> {
-        todo!()
-    }
     async fn install(&self, sender: &Prometheus) -> Result<Outcome, InterpretError> {
         sender.install_receiver(self).await
     }
@@ -285,7 +217,7 @@ impl AlertReceiver<Prometheus> for DiscordWebhook {
 #[async_trait]
 impl PrometheusReceiver for DiscordWebhook {
     fn name(&self) -> String {
-        self.name.clone().to_string()
+        self.name.clone()
     }
     async fn configure_receiver(&self) -> AlertManagerChannelConfig {
         self.get_config().await
@@ -294,9 +226,6 @@ impl PrometheusReceiver for DiscordWebhook {

 #[async_trait]
 impl AlertReceiver<KubePrometheus> for DiscordWebhook {
-    fn as_alertmanager_receiver(&self) -> Result<AlertManagerReceiver, String> {
-        todo!()
-    }
     async fn install(&self, sender: &KubePrometheus) -> Result<Outcome, InterpretError> {
         sender.install_receiver(self).await
     }
@@ -314,7 +243,7 @@ impl AlertReceiver<KubePrometheus> for DiscordWebhook {
 #[async_trait]
 impl KubePrometheusReceiver for DiscordWebhook {
     fn name(&self) -> String {
-        self.name.clone().to_string()
+        self.name.clone()
     }
     async fn configure_receiver(&self) -> AlertManagerChannelConfig {
         self.get_config().await
@@ -341,7 +270,7 @@ impl DiscordWebhook {
         let mut route = Mapping::new();
         route.insert(
             Value::String("receiver".to_string()),
-            Value::String(self.name.clone().to_string()),
+            Value::String(self.name.clone()),
         );
         route.insert(
             Value::String("matchers".to_string()),
@@ -355,7 +284,7 @@
         let mut receiver = Mapping::new();
         receiver.insert(
             Value::String("name".to_string()),
-            Value::String(self.name.clone().to_string()),
+            Value::String(self.name.clone()),
         );

         let mut discord_config = Mapping::new();
@@ -380,9 +309,8 @@ mod tests {
     #[tokio::test]
     async fn discord_serialize_should_match() {
         let discord_receiver = DiscordWebhook {
-            name: K8sName("test-discord".to_string()),
+            name: "test-discord".to_string(),
             url: Url::Url(url::Url::parse("https://discord.i.dont.exist.com").unwrap()),
-            selectors: vec![],
         };

         let discord_receiver_receiver =
@@ -19,7 +19,7 @@ use crate::{
         },
         prometheus::prometheus::{Prometheus, PrometheusReceiver},
     },
-    topology::oberservability::monitoring::{AlertManagerReceiver, AlertReceiver},
+    topology::oberservability::monitoring::AlertReceiver,
 };
 use harmony_types::net::Url;

@@ -31,9 +31,6 @@ pub struct WebhookReceiver {

 #[async_trait]
 impl AlertReceiver<RHOBObservability> for WebhookReceiver {
-    fn as_alertmanager_receiver(&self) -> Result<AlertManagerReceiver, String> {
-        todo!()
-    }
     async fn install(&self, sender: &RHOBObservability) -> Result<Outcome, InterpretError> {
         let spec = crate::modules::monitoring::kube_prometheus::crd::rhob_alertmanager_config::AlertmanagerConfigSpec {
             data: json!({
@@ -100,9 +97,6 @@ impl AlertReceiver<RHOBObservability> for WebhookReceiver {

 #[async_trait]
 impl AlertReceiver<CRDPrometheus> for WebhookReceiver {
-    fn as_alertmanager_receiver(&self) -> Result<AlertManagerReceiver, String> {
-        todo!()
-    }
     async fn install(&self, sender: &CRDPrometheus) -> Result<Outcome, InterpretError> {
         let spec = crate::modules::monitoring::kube_prometheus::crd::crd_alertmanager_config::AlertmanagerConfigSpec {
             data: json!({
@@ -164,9 +158,6 @@ impl AlertReceiver<CRDPrometheus> for WebhookReceiver {

 #[async_trait]
 impl AlertReceiver<Prometheus> for WebhookReceiver {
-    fn as_alertmanager_receiver(&self) -> Result<AlertManagerReceiver, String> {
-        todo!()
-    }
     async fn install(&self, sender: &Prometheus) -> Result<Outcome, InterpretError> {
         sender.install_receiver(self).await
     }
@@ -193,9 +184,6 @@ impl PrometheusReceiver for WebhookReceiver {

 #[async_trait]
 impl AlertReceiver<KubePrometheus> for WebhookReceiver {
-    fn as_alertmanager_receiver(&self) -> Result<AlertManagerReceiver, String> {
-        todo!()
-    }
     async fn install(&self, sender: &KubePrometheus) -> Result<Outcome, InterpretError> {
         sender.install_receiver(self).await
     }
@@ -1,23 +1,21 @@
 use std::sync::Arc;

-use log::debug;
+use async_trait::async_trait;
 use serde::Serialize;

 use crate::{
-    interpret::Interpret,
+    data::Version,
+    interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
+    inventory::Inventory,
     modules::{
         application::Application,
-        monitoring::{
-            grafana::grafana::Grafana, kube_prometheus::crd::crd_alertmanager_config::CRDPrometheus,
-        },
-        prometheus::prometheus::PrometheusMonitoring,
+        monitoring::kube_prometheus::crd::crd_alertmanager_config::CRDPrometheus,
+        prometheus::prometheus::PrometheusApplicationMonitoring,
     },
     score::Score,
-    topology::{
-        K8sclient, Topology,
-        oberservability::monitoring::{AlertReceiver, AlertingInterpret, ScrapeTarget},
-    },
+    topology::{PreparationOutcome, Topology, oberservability::monitoring::AlertReceiver},
 };
+use harmony_types::id::Id;

 #[derive(Debug, Clone, Serialize)]
 pub struct ApplicationMonitoringScore {
@@ -26,16 +24,12 @@ pub struct ApplicationMonitoringScore {
     pub receivers: Vec<Box<dyn AlertReceiver<CRDPrometheus>>>,
 }

-impl<T: Topology + PrometheusMonitoring<CRDPrometheus> + K8sclient + Grafana> Score<T>
+impl<T: Topology + PrometheusApplicationMonitoring<CRDPrometheus>> Score<T>
     for ApplicationMonitoringScore
 {
     fn create_interpret(&self) -> Box<dyn Interpret<T>> {
-        debug!("creating alerting interpret");
-        Box::new(AlertingInterpret {
-            sender: self.sender.clone(),
-            receivers: self.receivers.clone(),
-            rules: vec![],
-            scrape_targets: None,
+        Box::new(ApplicationMonitoringInterpret {
+            score: self.clone(),
         })
     }

@@ -46,3 +40,55 @@ impl<T: Topology + PrometheusMonitoring<CRDPrometheus> + K8sclient + Grafana> Sc
         )
     }
 }
+
+#[derive(Debug)]
+pub struct ApplicationMonitoringInterpret {
+    score: ApplicationMonitoringScore,
+}
+
+#[async_trait]
+impl<T: Topology + PrometheusApplicationMonitoring<CRDPrometheus>> Interpret<T>
+    for ApplicationMonitoringInterpret
+{
+    async fn execute(
+        &self,
+        inventory: &Inventory,
+        topology: &T,
+    ) -> Result<Outcome, InterpretError> {
+        let result = topology
+            .install_prometheus(
+                &self.score.sender,
+                inventory,
+                Some(self.score.receivers.clone()),
+            )
+            .await;
+
+        match result {
+            Ok(outcome) => match outcome {
+                PreparationOutcome::Success { details: _ } => {
+                    Ok(Outcome::success("Prometheus installed".into()))
+                }
+                PreparationOutcome::Noop => {
+                    Ok(Outcome::noop("Prometheus installation skipped".into()))
+                }
+            },
+            Err(err) => Err(InterpretError::from(err)),
+        }
+    }
+
+    fn get_name(&self) -> InterpretName {
+        InterpretName::ApplicationMonitoring
+    }
+
+    fn get_version(&self) -> Version {
+        todo!()
+    }
+
+    fn get_status(&self) -> InterpretStatus {
+        todo!()
+    }
+
+    fn get_children(&self) -> Vec<Id> {
+        todo!()
+    }
+}
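A brief sketch of how this new interpret is exercised end to end, assuming a `topology` that implements `PrometheusApplicationMonitoring<CRDPrometheus>` plus an `app` value and receiver list built elsewhere (those names are placeholders):

    let score = ApplicationMonitoringScore {
        sender: CRDPrometheus {
            namespace: "my-app".to_string(),
            client: topology.k8s_client().await.unwrap(),
        },
        application: app.clone(),
        receivers: vec![],
    };
    // create_interpret() now yields ApplicationMonitoringInterpret, whose execute()
    // delegates to topology.install_prometheus(...) and maps PreparationOutcome to Outcome.
    let interpret = score.create_interpret();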
@@ -12,7 +12,7 @@ use crate::{
         monitoring::kube_prometheus::crd::{
             crd_alertmanager_config::CRDPrometheus, rhob_alertmanager_config::RHOBObservability,
         },
-        prometheus::prometheus::PrometheusMonitoring,
+        prometheus::prometheus::PrometheusApplicationMonitoring,
     },
     score::Score,
     topology::{PreparationOutcome, Topology, oberservability::monitoring::AlertReceiver},
@@ -26,7 +26,7 @@ pub struct ApplicationRHOBMonitoringScore {
     pub receivers: Vec<Box<dyn AlertReceiver<RHOBObservability>>>,
 }

-impl<T: Topology + PrometheusMonitoring<RHOBObservability>> Score<T>
+impl<T: Topology + PrometheusApplicationMonitoring<RHOBObservability>> Score<T>
     for ApplicationRHOBMonitoringScore
 {
     fn create_interpret(&self) -> Box<dyn Interpret<T>> {
@@ -49,7 +49,7 @@ pub struct ApplicationRHOBMonitoringInterpret {
 }

 #[async_trait]
-impl<T: Topology + PrometheusMonitoring<RHOBObservability>> Interpret<T>
+impl<T: Topology + PrometheusApplicationMonitoring<RHOBObservability>> Interpret<T>
     for ApplicationRHOBMonitoringInterpret
 {
     async fn execute(
@@ -1,17 +0,0 @@
-use async_trait::async_trait;
-use k8s_openapi::Resource;
-
-use crate::{
-    inventory::Inventory,
-    topology::{PreparationError, PreparationOutcome},
-};
-
-#[async_trait]
-pub trait Grafana {
-    async fn ensure_grafana_operator(
-        &self,
-        inventory: &Inventory,
-    ) -> Result<PreparationOutcome, PreparationError>;
-
-    async fn install_grafana(&self) -> Result<PreparationOutcome, PreparationError>;
-}
@@ -1,28 +1,27 @@
-use harmony_macros::hurl;
 use non_blank_string_rs::NonBlankString;
-use std::{collections::HashMap, str::FromStr};
+use std::str::FromStr;

-use crate::modules::helm::chart::{HelmChartScore, HelmRepository};
+use crate::modules::helm::chart::HelmChartScore;

-pub fn grafana_helm_chart_score(ns: &str, namespace_scope: bool) -> HelmChartScore {
-    let mut values_overrides = HashMap::new();
-    values_overrides.insert(
-        NonBlankString::from_str("namespaceScope").unwrap(),
-        namespace_scope.to_string(),
-    );
+pub fn grafana_helm_chart_score(ns: &str) -> HelmChartScore {
+    let values = r#"
+rbac:
+  namespaced: true
+sidecar:
+  dashboards:
+    enabled: true
+"#
+    .to_string();
+
     HelmChartScore {
         namespace: Some(NonBlankString::from_str(ns).unwrap()),
-        release_name: NonBlankString::from_str("grafana-operator").unwrap(),
-        chart_name: NonBlankString::from_str("grafana/grafana-operator").unwrap(),
+        release_name: NonBlankString::from_str("grafana").unwrap(),
+        chart_name: NonBlankString::from_str("oci://ghcr.io/grafana/helm-charts/grafana").unwrap(),
         chart_version: None,
-        values_overrides: Some(values_overrides),
-        values_yaml: None,
+        values_overrides: None,
+        values_yaml: Some(values.to_string()),
         create_namespace: true,
         install_only: true,
-        repository: Some(HelmRepository::new(
-            "grafana".to_string(),
-            hurl!("https://grafana.github.io/helm-charts"),
-            true,
-        )),
+        repository: None,
     }
 }
@@ -1,2 +1 @@
-pub mod grafana;
 pub mod helm;
@@ -1,25 +1,12 @@
 use std::sync::Arc;
 
-use async_trait::async_trait;
 use kube::CustomResource;
 use schemars::JsonSchema;
 use serde::{Deserialize, Serialize};
 
-use crate::{
-    interpret::{InterpretError, Outcome},
-    inventory::Inventory,
-    modules::{
-        monitoring::{
-            grafana::grafana::Grafana, kube_prometheus::crd::service_monitor::ServiceMonitor,
-        },
-        prometheus::prometheus::PrometheusMonitoring,
-    },
-    topology::{
-        K8sclient, Topology,
-        installable::Installable,
-        k8s::K8sClient,
-        oberservability::monitoring::{AlertReceiver, AlertSender, ScrapeTarget},
-    },
-};
+use crate::topology::{
+    k8s::K8sClient,
+    oberservability::monitoring::{AlertReceiver, AlertSender},
+};
 
 #[derive(CustomResource, Serialize, Deserialize, Debug, Clone, JsonSchema)]
@@ -39,7 +26,6 @@ pub struct AlertmanagerConfigSpec {
 pub struct CRDPrometheus {
     pub namespace: String,
     pub client: Arc<K8sClient>,
-    pub service_monitor: Vec<ServiceMonitor>,
 }
 
 impl AlertSender for CRDPrometheus {
@@ -54,12 +40,6 @@ impl Clone for Box<dyn AlertReceiver<CRDPrometheus>> {
     }
 }
 
-impl Clone for Box<dyn ScrapeTarget<CRDPrometheus>> {
-    fn clone(&self) -> Self {
-        self.clone_box()
-    }
-}
-
 impl Serialize for Box<dyn AlertReceiver<CRDPrometheus>> {
     fn serialize<S>(&self, _serializer: S) -> Result<S::Ok, S::Error>
     where
@@ -68,24 +48,3 @@ impl Serialize for Box<dyn AlertReceiver<CRDPrometheus>> {
         todo!()
     }
 }
-
-#[async_trait]
-impl<T: Topology + K8sclient + PrometheusMonitoring<CRDPrometheus> + Grafana> Installable<T>
-    for CRDPrometheus
-{
-    async fn configure(&self, inventory: &Inventory, topology: &T) -> Result<(), InterpretError> {
-        topology.ensure_grafana_operator(inventory).await?;
-        topology.ensure_prometheus_operator(self, inventory).await?;
-        Ok(())
-    }
-
-    async fn ensure_installed(
-        &self,
-        inventory: &Inventory,
-        topology: &T,
-    ) -> Result<(), InterpretError> {
-        topology.install_grafana().await?;
-        topology.install_prometheus(&self, inventory, None).await?;
-        Ok(())
-    }
-}
@@ -103,34 +103,9 @@ pub struct GrafanaDashboardSpec {
     #[serde(default, skip_serializing_if = "Option::is_none")]
     pub resync_period: Option<String>,
 
-    #[serde(default, skip_serializing_if = "Option::is_none")]
-    pub datasources: Option<Vec<GrafanaDashboardDatasource>>,
-
     pub instance_selector: LabelSelector,
 
-    #[serde(default, skip_serializing_if = "Option::is_none")]
-    pub json: Option<String>,
-
-    #[serde(default, skip_serializing_if = "Option::is_none")]
-    pub grafana_com: Option<GrafanaCom>,
-}
-
-#[derive(Serialize, Deserialize, Debug, Clone, JsonSchema)]
-#[serde(rename_all = "camelCase")]
-pub struct GrafanaDashboardDatasource {
-    pub input_name: String,
-    pub datasource_name: String,
-}
-
-// ------------------------------------------------------------------------------------------------
-
-#[derive(Serialize, Deserialize, Debug, Clone, JsonSchema)]
-#[serde(rename_all = "camelCase")]
-pub struct GrafanaCom {
-    pub id: u32,
-
-    #[serde(default, skip_serializing_if = "Option::is_none")]
-    pub revision: Option<u32>,
+    pub json: String,
 }
 
 // ------------------------------------------------------------------------------------------------
@@ -151,79 +126,20 @@ pub struct GrafanaDatasourceSpec {
     pub allow_cross_namespace_import: Option<bool>,
 
     pub datasource: GrafanaDatasourceConfig,
-
-    #[serde(default, skip_serializing_if = "Option::is_none")]
-    pub values_from: Option<Vec<GrafanaValueFrom>>,
-}
-
-#[derive(Serialize, Deserialize, Debug, Clone, JsonSchema)]
-#[serde(rename_all = "camelCase")]
-pub struct GrafanaValueFrom {
-    pub target_path: String,
-    pub value_from: GrafanaValueSource,
-}
-
-#[derive(Serialize, Deserialize, Debug, Clone, JsonSchema)]
-#[serde(rename_all = "camelCase")]
-pub struct GrafanaValueSource {
-    pub secret_key_ref: GrafanaSecretKeyRef,
-}
-
-#[derive(Serialize, Deserialize, Debug, Clone, JsonSchema)]
-#[serde(rename_all = "camelCase")]
-pub struct GrafanaSecretKeyRef {
-    pub name: String,
-    pub key: String,
 }
 
 #[derive(Serialize, Deserialize, Debug, Clone, JsonSchema)]
 #[serde(rename_all = "camelCase")]
 pub struct GrafanaDatasourceConfig {
     pub access: String,
-    #[serde(default, skip_serializing_if = "Option::is_none")]
     pub database: Option<String>,
+    #[serde(default, skip_serializing_if = "Option::is_none")]
+    pub json_data: Option<BTreeMap<String, String>>,
     pub name: String,
     pub r#type: String,
     pub url: String,
-    /// Represents jsonData in the GrafanaDatasource spec
-    #[serde(default, skip_serializing_if = "Option::is_none")]
-    pub json_data: Option<GrafanaDatasourceJsonData>,
-
-    /// Represents secureJsonData (secrets)
-    #[serde(default, skip_serializing_if = "Option::is_none")]
-    pub secure_json_data: Option<GrafanaDatasourceSecureJsonData>,
-
-    #[serde(default, skip_serializing_if = "Option::is_none")]
-    pub is_default: Option<bool>,
-
-    #[serde(default, skip_serializing_if = "Option::is_none")]
-    pub editable: Option<bool>,
 }
 
-#[derive(Serialize, Deserialize, Debug, Clone, JsonSchema)]
-#[serde(rename_all = "camelCase")]
-pub struct GrafanaDatasourceJsonData {
-    #[serde(default, skip_serializing_if = "Option::is_none")]
-    pub time_interval: Option<String>,
-
-    #[serde(default, skip_serializing_if = "Option::is_none")]
-    pub http_header_name1: Option<String>,
-
-    /// Disable TLS skip verification (false = verify)
-    #[serde(default, skip_serializing_if = "Option::is_none")]
-    pub tls_skip_verify: Option<bool>,
-
-    /// Auth type - set to "forward" for OpenShift OAuth identity
-    #[serde(default, skip_serializing_if = "Option::is_none")]
-    pub oauth_pass_thru: Option<bool>,
-}
-
-#[derive(Serialize, Deserialize, Debug, Clone, JsonSchema)]
-#[serde(rename_all = "camelCase")]
-pub struct GrafanaDatasourceSecureJsonData {
-    #[serde(default, skip_serializing_if = "Option::is_none")]
-    pub http_header_value1: Option<String>,
-}
 // ------------------------------------------------------------------------------------------------
 
 #[derive(Serialize, Deserialize, Debug, Clone, JsonSchema, Default)]
@@ -1,187 +0,0 @@
-use std::net::IpAddr;
-
-use async_trait::async_trait;
-use kube::CustomResource;
-use schemars::JsonSchema;
-use serde::{Deserialize, Serialize};
-
-use crate::{
-    modules::monitoring::kube_prometheus::crd::{
-        crd_alertmanager_config::CRDPrometheus, crd_prometheuses::LabelSelector,
-    },
-    topology::oberservability::monitoring::ScrapeTarget,
-};
-
-#[derive(CustomResource, Serialize, Deserialize, Debug, Clone, JsonSchema)]
-#[kube(
-    group = "monitoring.coreos.com",
-    version = "v1alpha1",
-    kind = "ScrapeConfig",
-    plural = "scrapeconfigs",
-    namespaced
-)]
-#[serde(rename_all = "camelCase")]
-pub struct ScrapeConfigSpec {
-    /// List of static configurations.
-    pub static_configs: Option<Vec<StaticConfig>>,
-
-    /// Kubernetes service discovery.
-    pub kubernetes_sd_configs: Option<Vec<KubernetesSDConfig>>,
-
-    /// HTTP-based service discovery.
-    pub http_sd_configs: Option<Vec<HttpSDConfig>>,
-
-    /// File-based service discovery.
-    pub file_sd_configs: Option<Vec<FileSDConfig>>,
-
-    /// DNS-based service discovery.
-    pub dns_sd_configs: Option<Vec<DnsSDConfig>>,
-
-    /// Consul service discovery.
-    pub consul_sd_configs: Option<Vec<ConsulSDConfig>>,
-
-    /// Relabeling configuration applied to discovered targets.
-    pub relabel_configs: Option<Vec<RelabelConfig>>,
-
-    /// Metric relabeling configuration applied to scraped samples.
-    pub metric_relabel_configs: Option<Vec<RelabelConfig>>,
-
-    /// Path to scrape metrics from (defaults to `/metrics`).
-    pub metrics_path: Option<String>,
-
-    /// Interval at which Prometheus scrapes targets (e.g., "30s").
-    pub scrape_interval: Option<String>,
-
-    /// Timeout for scraping (e.g., "10s").
-    pub scrape_timeout: Option<String>,
-
-    /// Optional job name override.
-    pub job_name: Option<String>,
-
-    /// Optional scheme (http or https).
-    pub scheme: Option<String>,
-
-    /// Authorization paramaters for snmp walk
-    pub params: Option<Params>,
-}
-
-/// Static configuration section of a ScrapeConfig.
-#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)]
-#[serde(rename_all = "camelCase")]
-pub struct StaticConfig {
-    pub targets: Vec<String>,
-
-    pub labels: Option<LabelSelector>,
-}
-
-/// Relabeling configuration for target or metric relabeling.
-#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)]
-#[serde(rename_all = "camelCase")]
-pub struct RelabelConfig {
-    pub source_labels: Option<Vec<String>>,
-    pub separator: Option<String>,
-    pub target_label: Option<String>,
-    pub regex: Option<String>,
-    pub modulus: Option<u64>,
-    pub replacement: Option<String>,
-    pub action: Option<String>,
-}
-
-/// Kubernetes service discovery configuration.
-#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)]
-#[serde(rename_all = "camelCase")]
-pub struct KubernetesSDConfig {
-    ///"pod", "service", "endpoints"pub role: String,
-    pub namespaces: Option<NamespaceSelector>,
-    pub selectors: Option<Vec<LabelSelector>>,
-    pub api_server: Option<String>,
-    pub bearer_token_file: Option<String>,
-    pub tls_config: Option<TLSConfig>,
-}
-
-/// Namespace selector for Kubernetes service discovery.
-#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)]
-#[serde(rename_all = "camelCase")]
-pub struct NamespaceSelector {
-    pub any: Option<bool>,
-    pub match_names: Option<Vec<String>>,
-}
-
-/// HTTP-based service discovery configuration.
-#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)]
-#[serde(rename_all = "camelCase")]
-pub struct HttpSDConfig {
-    pub url: String,
-    pub refresh_interval: Option<String>,
-    pub basic_auth: Option<BasicAuth>,
-    pub authorization: Option<Authorization>,
-    pub tls_config: Option<TLSConfig>,
-}
-
-/// File-based service discovery configuration.
-#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)]
-#[serde(rename_all = "camelCase")]
-pub struct FileSDConfig {
-    pub files: Vec<String>,
-    pub refresh_interval: Option<String>,
-}
-
-/// DNS-based service discovery configuration.
-#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)]
-#[serde(rename_all = "camelCase")]
-pub struct DnsSDConfig {
-    pub names: Vec<String>,
-    pub refresh_interval: Option<String>,
-    pub type_: Option<String>, // SRV, A, AAAA
-    pub port: Option<u16>,
-}
-
-/// Consul service discovery configuration.
-#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)]
-#[serde(rename_all = "camelCase")]
-pub struct ConsulSDConfig {
-    pub server: String,
-    pub services: Option<Vec<String>>,
-    pub scheme: Option<String>,
-    pub datacenter: Option<String>,
-    pub tag_separator: Option<String>,
-    pub refresh_interval: Option<String>,
-    pub tls_config: Option<TLSConfig>,
-}
-
-/// Basic authentication credentials.
-#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)]
-#[serde(rename_all = "camelCase")]
-pub struct BasicAuth {
-    pub username: String,
-    pub password: Option<String>,
-    pub password_file: Option<String>,
-}
-
-/// Bearer token or other auth mechanisms.
-#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)]
-#[serde(rename_all = "camelCase")]
-pub struct Authorization {
-    pub credentials: Option<String>,
-    pub credentials_file: Option<String>,
-    pub type_: Option<String>,
-}
-
-/// TLS configuration for secure scraping.
-#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)]
-#[serde(rename_all = "camelCase")]
-pub struct TLSConfig {
-    pub ca_file: Option<String>,
-    pub cert_file: Option<String>,
-    pub key_file: Option<String>,
-    pub server_name: Option<String>,
-    pub insecure_skip_verify: Option<bool>,
-}
-
-/// Authorization parameters for SNMP walk.
-#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)]
-#[serde(rename_all = "camelCase")]
-pub struct Params {
-    pub auth: Option<Vec<String>>,
-    pub module: Option<Vec<String>>,
-}
@@ -4,7 +4,6 @@ pub mod crd_default_rules;
 pub mod crd_grafana;
 pub mod crd_prometheus_rules;
 pub mod crd_prometheuses;
-pub mod crd_scrape_config;
 pub mod grafana_default_dashboard;
 pub mod grafana_operator;
 pub mod prometheus_operator;
@@ -31,7 +31,6 @@ impl<T: Topology + HelmCommand + TenantManager> Score<T> for HelmPrometheusAlert
             sender: KubePrometheus { config },
             receivers: self.receivers.clone(),
             rules: self.rules.clone(),
-            scrape_targets: None,
         })
     }
     fn name(&self) -> String {
@@ -6,4 +6,3 @@ pub mod kube_prometheus;
 pub mod ntfy;
 pub mod okd;
 pub mod prometheus;
-pub mod scrape_target;
@@ -100,7 +100,11 @@ impl<T: Topology + HelmCommand + K8sclient + MultiTargetTopology> Interpret<T> f
 
         info!("deploying ntfy...");
         client
-            .wait_until_deployment_ready("ntfy", Some(self.score.namespace.as_str()), None)
+            .wait_until_deployment_ready(
+                "ntfy".to_string(),
+                Some(self.score.namespace.as_str()),
+                None,
+            )
             .await?;
         info!("ntfy deployed");
 
@@ -1,270 +0,0 @@
-use base64::prelude::*;
-
-use async_trait::async_trait;
-use harmony_types::id::Id;
-use kube::api::DynamicObject;
-use log::{debug, info, trace};
-use serde::Serialize;
-
-use crate::{
-    data::Version,
-    interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
-    inventory::Inventory,
-    modules::monitoring::okd::OpenshiftClusterAlertSender,
-    score::Score,
-    topology::{K8sclient, Topology, oberservability::monitoring::AlertReceiver},
-};
-
-impl Clone for Box<dyn AlertReceiver<OpenshiftClusterAlertSender>> {
-    fn clone(&self) -> Self {
-        self.clone_box()
-    }
-}
-
-impl Serialize for Box<dyn AlertReceiver<OpenshiftClusterAlertSender>> {
-    fn serialize<S>(&self, _serializer: S) -> Result<S::Ok, S::Error>
-    where
-        S: serde::Serializer,
-    {
-        todo!()
-    }
-}
-
-#[derive(Debug, Clone, Serialize)]
-pub struct OpenshiftClusterAlertScore {
-    pub receivers: Vec<Box<dyn AlertReceiver<OpenshiftClusterAlertSender>>>,
-}
-
-impl<T: Topology + K8sclient> Score<T> for OpenshiftClusterAlertScore {
-    fn name(&self) -> String {
-        "ClusterAlertScore".to_string()
-    }
-
-    #[doc(hidden)]
-    fn create_interpret(&self) -> Box<dyn Interpret<T>> {
-        Box::new(OpenshiftClusterAlertInterpret {
-            receivers: self.receivers.clone(),
-        })
-    }
-}
-
-#[derive(Debug)]
-pub struct OpenshiftClusterAlertInterpret {
-    receivers: Vec<Box<dyn AlertReceiver<OpenshiftClusterAlertSender>>>,
-}
-
-#[async_trait]
-impl<T: Topology + K8sclient> Interpret<T> for OpenshiftClusterAlertInterpret {
-    async fn execute(
-        &self,
-        _inventory: &Inventory,
-        topology: &T,
-    ) -> Result<Outcome, InterpretError> {
-        let client = topology.k8s_client().await?;
-        let openshift_monitoring_namespace = "openshift-monitoring";
-
-        let mut alertmanager_main_secret: DynamicObject = client
-            .get_secret_json_value("alertmanager-main", Some(openshift_monitoring_namespace))
-            .await?;
-        trace!("Got secret {alertmanager_main_secret:#?}");
-
-        let data: &mut serde_json::Value = &mut alertmanager_main_secret.data;
-        trace!("Alertmanager-main secret data {data:#?}");
-        let data_obj = data
-            .get_mut("data")
-            .ok_or(InterpretError::new(
-                "Missing 'data' field in alertmanager-main secret.".to_string(),
-            ))?
-            .as_object_mut()
-            .ok_or(InterpretError::new(
-                "'data' field in alertmanager-main secret is expected to be an object ."
-                    .to_string(),
-            ))?;
-
-        let config_b64 = data_obj
-            .get("alertmanager.yaml")
-            .ok_or(InterpretError::new(
-                "Missing 'alertmanager.yaml' in alertmanager-main secret data".to_string(),
-            ))?
-            .as_str()
-            .unwrap_or("");
-        trace!("Config base64 {config_b64}");
-
-        let config_bytes = BASE64_STANDARD.decode(config_b64).unwrap_or_default();
-
-        let mut am_config: serde_yaml::Value =
-            serde_yaml::from_str(&String::from_utf8(config_bytes).unwrap_or_default())
-                .unwrap_or_default();
-
-        debug!("Current alertmanager config {am_config:#?}");
-
-        let existing_receivers_sequence = if let Some(receivers) = am_config.get_mut("receivers") {
-            match receivers.as_sequence_mut() {
-                Some(seq) => seq,
-                None => {
-                    return Err(InterpretError::new(format!(
-                        "Expected alertmanager config receivers to be a sequence, got {:?}",
-                        receivers
-                    )));
-                }
-            }
-        } else {
-            &mut serde_yaml::Sequence::default()
-        };
-
-        let mut additional_resources = vec![];
-
-        for custom_receiver in &self.receivers {
-            let name = custom_receiver.name();
-            let alertmanager_receiver = custom_receiver.as_alertmanager_receiver()?;
-
-            let receiver_json_value = alertmanager_receiver.receiver_config;
-
-            let receiver_yaml_string =
-                serde_json::to_string(&receiver_json_value).map_err(|e| {
-                    InterpretError::new(format!("Failed to serialize receiver config: {}", e))
-                })?;
-
-            let receiver_yaml_value: serde_yaml::Value =
-                serde_yaml::from_str(&receiver_yaml_string).map_err(|e| {
-                    InterpretError::new(format!("Failed to parse receiver config as YAML: {}", e))
-                })?;
-
-            if let Some(idx) = existing_receivers_sequence.iter().position(|r| {
-                r.get("name")
-                    .and_then(|n| n.as_str())
-                    .map_or(false, |n| n == name)
-            }) {
-                info!("Replacing existing AlertManager receiver: {}", name);
-                existing_receivers_sequence[idx] = receiver_yaml_value;
-            } else {
-                debug!("Adding new AlertManager receiver: {}", name);
-                existing_receivers_sequence.push(receiver_yaml_value);
-            }
-
-            additional_resources.push(alertmanager_receiver.additional_ressources);
-        }
-
-        let existing_route_mapping = if let Some(route) = am_config.get_mut("route") {
-            match route.as_mapping_mut() {
-                Some(map) => map,
-                None => {
-                    return Err(InterpretError::new(format!(
-                        "Expected alertmanager config route to be a mapping, got {:?}",
-                        route
-                    )));
-                }
-            }
-        } else {
-            &mut serde_yaml::Mapping::default()
-        };
-
-        let existing_route_sequence = if let Some(routes) = existing_route_mapping.get_mut("routes")
-        {
-            match routes.as_sequence_mut() {
-                Some(seq) => seq,
-                None => {
-                    return Err(InterpretError::new(format!(
-                        "Expected alertmanager config routes to be a sequence, got {:?}",
-                        routes
-                    )));
-                }
-            }
-        } else {
-            &mut serde_yaml::Sequence::default()
-        };
-
-        for custom_receiver in &self.receivers {
-            let name = custom_receiver.name();
-            let alertmanager_receiver = custom_receiver.as_alertmanager_receiver()?;
-
-            let route_json_value = alertmanager_receiver.route_config;
-            let route_yaml_string = serde_json::to_string(&route_json_value).map_err(|e| {
-                InterpretError::new(format!("Failed to serialize route config: {}", e))
-            })?;
-
-            let route_yaml_value: serde_yaml::Value = serde_yaml::from_str(&route_yaml_string)
-                .map_err(|e| {
-                    InterpretError::new(format!("Failed to parse route config as YAML: {}", e))
-                })?;
-
-            if let Some(idy) = existing_route_sequence.iter().position(|r| {
-                r.get("receiver")
-                    .and_then(|n| n.as_str())
-                    .map_or(false, |n| n == name)
-            }) {
-                info!("Replacing existing AlertManager receiver: {}", name);
-                existing_route_sequence[idy] = route_yaml_value;
-            } else {
-                debug!("Adding new AlertManager receiver: {}", name);
-                existing_route_sequence.push(route_yaml_value);
-            }
-        }
-        debug!("Current alertmanager config {am_config:#?}");
-        // TODO
-        // - save new version of alertmanager config
-        // - write additional ressources to the cluster
-        let am_config = serde_yaml::to_string(&am_config).map_err(|e| {
-            InterpretError::new(format!(
-                "Failed to serialize new alertmanager config to string : {e}"
-            ))
-        })?;
-
-        let mut am_config_b64 = String::new();
-        BASE64_STANDARD.encode_string(am_config, &mut am_config_b64);
-
-        // TODO put update configmap value and save new value
-        data_obj.insert(
-            "alertmanager.yaml".to_string(),
-            serde_json::Value::String(am_config_b64),
-        );
-
-        // https://kubernetes.io/docs/reference/using-api/server-side-apply/#field-management
-        alertmanager_main_secret.metadata.managed_fields = None;
-
-        trace!("Applying new alertmanager_main_secret {alertmanager_main_secret:#?}");
-        client
-            .apply_dynamic(
-                &alertmanager_main_secret,
-                Some(openshift_monitoring_namespace),
-                true,
-            )
-            .await?;
-
-        let additional_resources = additional_resources.concat();
-        trace!("Applying additional ressources for alert receivers {additional_resources:#?}");
-        client
-            .apply_dynamic_many(
-                &additional_resources,
-                Some(openshift_monitoring_namespace),
-                true,
-            )
-            .await?;
-
-        Ok(Outcome::success(format!(
-            "Successfully configured {} cluster alert receivers: {}",
-            self.receivers.len(),
-            self.receivers
-                .iter()
-                .map(|r| r.name())
-                .collect::<Vec<_>>()
-                .join(", ")
-        )))
-    }
-
-    fn get_name(&self) -> InterpretName {
-        InterpretName::Custom("OpenshiftClusterAlertInterpret")
-    }
-
-    fn get_version(&self) -> Version {
-        todo!()
-    }
-
-    fn get_status(&self) -> InterpretStatus {
-        todo!()
-    }
-
-    fn get_children(&self) -> Vec<Id> {
-        todo!()
-    }
-}
@@ -1,90 +0,0 @@
-use std::{collections::BTreeMap, sync::Arc};
-
-use crate::{
-    interpret::{InterpretError, Outcome},
-    topology::k8s::K8sClient,
-};
-use k8s_openapi::api::core::v1::ConfigMap;
-use kube::api::ObjectMeta;
-
-pub(crate) struct Config;
-
-impl Config {
-    pub async fn create_cluster_monitoring_config_cm(
-        client: &Arc<K8sClient>,
-    ) -> Result<Outcome, InterpretError> {
-        let mut data = BTreeMap::new();
-        data.insert(
-            "config.yaml".to_string(),
-            r#"
-enableUserWorkload: true
-alertmanagerMain:
-  enableUserAlertmanagerConfig: true
-"#
-            .to_string(),
-        );
-
-        let cm = ConfigMap {
-            metadata: ObjectMeta {
-                name: Some("cluster-monitoring-config".to_string()),
-                namespace: Some("openshift-monitoring".to_string()),
-                ..Default::default()
-            },
-            data: Some(data),
-            ..Default::default()
-        };
-        client.apply(&cm, Some("openshift-monitoring")).await?;
-
-        Ok(Outcome::success(
-            "updated cluster-monitoring-config-map".to_string(),
-        ))
-    }
-
-    pub async fn create_user_workload_monitoring_config_cm(
-        client: &Arc<K8sClient>,
-    ) -> Result<Outcome, InterpretError> {
-        let mut data = BTreeMap::new();
-        data.insert(
-            "config.yaml".to_string(),
-            r#"
-alertmanager:
-  enabled: true
-  enableAlertmanagerConfig: true
-"#
-            .to_string(),
-        );
-        let cm = ConfigMap {
-            metadata: ObjectMeta {
-                name: Some("user-workload-monitoring-config".to_string()),
-                namespace: Some("openshift-user-workload-monitoring".to_string()),
-                ..Default::default()
-            },
-            data: Some(data),
-            ..Default::default()
-        };
-        client
-            .apply(&cm, Some("openshift-user-workload-monitoring"))
-            .await?;
-
-        Ok(Outcome::success(
-            "updated openshift-user-monitoring-config-map".to_string(),
-        ))
-    }
-
-    pub async fn verify_user_workload(client: &Arc<K8sClient>) -> Result<Outcome, InterpretError> {
-        let namespace = "openshift-user-workload-monitoring";
-        let alertmanager_name = "alertmanager-user-workload-0";
-        let prometheus_name = "prometheus-user-workload-0";
-        client
-            .wait_for_pod_ready(alertmanager_name, Some(namespace))
-            .await?;
-        client
-            .wait_for_pod_ready(prometheus_name, Some(namespace))
-            .await?;
-
-        Ok(Outcome::success(format!(
-            "pods: {}, {} ready in ns: {}",
-            alertmanager_name, prometheus_name, namespace
-        )))
-    }
-}
@@ -1,13 +1,16 @@
+use std::{collections::BTreeMap, sync::Arc};
+
 use crate::{
     data::Version,
     interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
     inventory::Inventory,
-    modules::monitoring::okd::config::Config,
     score::Score,
-    topology::{K8sclient, Topology},
+    topology::{K8sclient, Topology, k8s::K8sClient},
 };
 use async_trait::async_trait;
 use harmony_types::id::Id;
+use k8s_openapi::api::core::v1::ConfigMap;
+use kube::api::ObjectMeta;
 use serde::Serialize;
 
 #[derive(Clone, Debug, Serialize)]
@@ -34,9 +37,10 @@ impl<T: Topology + K8sclient> Interpret<T> for OpenshiftUserWorkloadMonitoringIn
         topology: &T,
     ) -> Result<Outcome, InterpretError> {
         let client = topology.k8s_client().await.unwrap();
-        Config::create_cluster_monitoring_config_cm(&client).await?;
-        Config::create_user_workload_monitoring_config_cm(&client).await?;
-        Config::verify_user_workload(&client).await?;
+        self.update_cluster_monitoring_config_cm(&client).await?;
+        self.update_user_workload_monitoring_config_cm(&client)
+            .await?;
+        self.verify_user_workload(&client).await?;
         Ok(Outcome::success(
             "successfully enabled user-workload-monitoring".to_string(),
         ))
@@ -58,3 +62,88 @@ impl<T: Topology + K8sclient> Interpret<T> for OpenshiftUserWorkloadMonitoringIn
         todo!()
     }
 }
+
+impl OpenshiftUserWorkloadMonitoringInterpret {
+    pub async fn update_cluster_monitoring_config_cm(
+        &self,
+        client: &Arc<K8sClient>,
+    ) -> Result<Outcome, InterpretError> {
+        let mut data = BTreeMap::new();
+        data.insert(
+            "config.yaml".to_string(),
+            r#"
+enableUserWorkload: true
+alertmanagerMain:
+  enableUserAlertmanagerConfig: true
+"#
+            .to_string(),
+        );
+
+        let cm = ConfigMap {
+            metadata: ObjectMeta {
+                name: Some("cluster-monitoring-config".to_string()),
+                namespace: Some("openshift-monitoring".to_string()),
+                ..Default::default()
+            },
+            data: Some(data),
+            ..Default::default()
+        };
+        client.apply(&cm, Some("openshift-monitoring")).await?;
+
+        Ok(Outcome::success(
+            "updated cluster-monitoring-config-map".to_string(),
+        ))
+    }
+
+    pub async fn update_user_workload_monitoring_config_cm(
+        &self,
+        client: &Arc<K8sClient>,
+    ) -> Result<Outcome, InterpretError> {
+        let mut data = BTreeMap::new();
+        data.insert(
+            "config.yaml".to_string(),
+            r#"
+alertmanager:
+  enabled: true
+  enableAlertmanagerConfig: true
+"#
+            .to_string(),
+        );
+        let cm = ConfigMap {
+            metadata: ObjectMeta {
+                name: Some("user-workload-monitoring-config".to_string()),
+                namespace: Some("openshift-user-workload-monitoring".to_string()),
+                ..Default::default()
+            },
+            data: Some(data),
+            ..Default::default()
+        };
+        client
+            .apply(&cm, Some("openshift-user-workload-monitoring"))
+            .await?;
+
+        Ok(Outcome::success(
+            "updated openshift-user-monitoring-config-map".to_string(),
+        ))
+    }
+
+    pub async fn verify_user_workload(
+        &self,
+        client: &Arc<K8sClient>,
+    ) -> Result<Outcome, InterpretError> {
+        let namespace = "openshift-user-workload-monitoring";
+        let alertmanager_name = "alertmanager-user-workload-0";
+        let prometheus_name = "prometheus-user-workload-0";
+        client
+            .wait_for_pod_ready(alertmanager_name, Some(namespace))
+            .await?;
+        client
+            .wait_for_pod_ready(prometheus_name, Some(namespace))
+            .await?;
+
+        Ok(Outcome::success(format!(
+            "pods: {}, {} ready in ns: {}",
+            alertmanager_name, prometheus_name, namespace
+        )))
+    }
+}
@@ -1,14 +1 @@
-use crate::topology::oberservability::monitoring::AlertSender;
-
-pub mod cluster_monitoring;
-pub(crate) mod config;
 pub mod enable_user_workload;
-
-#[derive(Debug)]
-pub struct OpenshiftClusterAlertSender;
-
-impl AlertSender for OpenshiftClusterAlertSender {
-    fn name(&self) -> String {
-        "OpenshiftClusterAlertSender".to_string()
-    }
-}
@@ -114,7 +114,7 @@ impl Prometheus {
         };
 
         if let Some(ns) = namespace.as_deref() {
-            grafana_helm_chart_score(ns, false)
+            grafana_helm_chart_score(ns)
                 .interpret(inventory, topology)
                 .await
         } else {
@@ -1 +0,0 @@
-pub mod server;
@@ -1,80 +0,0 @@
-use std::net::IpAddr;
-
-use async_trait::async_trait;
-use kube::api::ObjectMeta;
-use serde::Serialize;
-
-use crate::{
-    interpret::{InterpretError, Outcome},
-    modules::monitoring::kube_prometheus::crd::{
-        crd_alertmanager_config::CRDPrometheus,
-        crd_scrape_config::{Params, RelabelConfig, ScrapeConfig, ScrapeConfigSpec, StaticConfig},
-    },
-    topology::oberservability::monitoring::ScrapeTarget,
-};
-
-#[derive(Debug, Clone, Serialize)]
-pub struct Server {
-    pub name: String,
-    pub ip: IpAddr,
-    pub auth: String,
-    pub module: String,
-    pub domain: String,
-}
-
-#[async_trait]
-impl ScrapeTarget<CRDPrometheus> for Server {
-    async fn install(&self, sender: &CRDPrometheus) -> Result<Outcome, InterpretError> {
-        let scrape_config_spec = ScrapeConfigSpec {
-            static_configs: Some(vec![StaticConfig {
-                targets: vec![self.ip.to_string()],
-                labels: None,
-            }]),
-            scrape_interval: Some("2m".to_string()),
-            kubernetes_sd_configs: None,
-            http_sd_configs: None,
-            file_sd_configs: None,
-            dns_sd_configs: None,
-            params: Some(Params {
-                auth: Some(vec![self.auth.clone()]),
-                module: Some(vec![self.module.clone()]),
-            }),
-            consul_sd_configs: None,
-            relabel_configs: Some(vec![RelabelConfig {
-                action: None,
-                source_labels: Some(vec!["__address__".to_string()]),
-                separator: None,
-                target_label: Some("__param_target".to_string()),
-                regex: None,
-                replacement: Some(format!("snmp.{}:31080", self.domain.clone())),
-                modulus: None,
-            }]),
-            metric_relabel_configs: None,
-            metrics_path: Some("/snmp".to_string()),
-            scrape_timeout: Some("2m".to_string()),
-            job_name: Some(format!("snmp_exporter/cloud/{}", self.name.clone())),
-            scheme: None,
-        };
-
-        let scrape_config = ScrapeConfig {
-            metadata: ObjectMeta {
-                name: Some(self.name.clone()),
-                namespace: Some(sender.namespace.clone()),
-                ..Default::default()
-            },
-            spec: scrape_config_spec,
-        };
-        sender
-            .client
-            .apply(&scrape_config, Some(&sender.namespace.clone()))
-            .await?;
-        Ok(Outcome::success(format!(
-            "installed scrape target {}",
-            self.name.clone()
-        )))
-    }
-
-    fn clone_box(&self) -> Box<dyn ScrapeTarget<CRDPrometheus>> {
-        Box::new(self.clone())
-    }
-}
@@ -5,8 +5,10 @@ use crate::{
     interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
     inventory::{HostRole, Inventory},
     modules::{
-        dhcp::DhcpHostBindingScore, http::IPxeMacBootFileScore,
-        inventory::DiscoverHostForRoleScore, okd::templates::BootstrapIpxeTpl,
+        dhcp::DhcpHostBindingScore,
+        http::IPxeMacBootFileScore,
+        inventory::DiscoverHostForRoleScore,
+        okd::{host_network::HostNetworkConfigurationScore, templates::BootstrapIpxeTpl},
     },
     score::Score,
     topology::{HAClusterTopology, HostBinding},
@@ -203,6 +205,28 @@ impl OKDSetup03ControlPlaneInterpret {
 
         Ok(())
     }
+
+    /// Placeholder for automating network bonding configuration.
+    async fn persist_network_bond(
+        &self,
+        inventory: &Inventory,
+        topology: &HAClusterTopology,
+        hosts: &Vec<PhysicalHost>,
+    ) -> Result<(), InterpretError> {
+        info!("[ControlPlane] Ensuring persistent bonding");
+        let score = HostNetworkConfigurationScore {
+            hosts: hosts.clone(), // FIXME: Avoid clone if possible
+        };
+        score.interpret(inventory, topology).await?;
+
+        inquire::Confirm::new(
+            "Network configuration for control plane nodes is not automated yet. Configure it manually if needed.",
+        )
+        .prompt()
+        .map_err(|e| InterpretError::new(format!("User prompt failed: {e}")))?;
+
+        Ok(())
+    }
 }
 
 #[async_trait]
@@ -241,6 +265,10 @@ impl Interpret<HAClusterTopology> for OKDSetup03ControlPlaneInterpret {
         // 4. Reboot the nodes to start the OS installation.
         self.reboot_targets(&nodes).await?;
 
+        // 5. Placeholder for post-boot network configuration (e.g., bonding).
+        self.persist_network_bond(inventory, topology, &nodes)
+            .await?;
+
         // TODO: Implement a step to wait for the control plane nodes to join the cluster
         // and for the cluster operators to become available. This would be similar to
         // the `wait-for bootstrap-complete` command.
@@ -77,8 +77,6 @@ impl OKDBootstrapLoadBalancerScore {
             address: topology.bootstrap_host.ip.to_string(),
             port,
         });
-
-        backend.dedup();
         backend
     }
 }
@@ -1,130 +0,0 @@
-use crate::{
-    data::Version,
-    hardware::PhysicalHost,
-    infra::inventory::InventoryRepositoryFactory,
-    interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
-    inventory::{HostRole, Inventory},
-    modules::okd::host_network::HostNetworkConfigurationScore,
-    score::Score,
-    topology::HAClusterTopology,
-};
-use async_trait::async_trait;
-use derive_new::new;
-use harmony_types::id::Id;
-use log::info;
-use serde::Serialize;
-
-// -------------------------------------------------------------------------------------------------
-// Persist Network Bond
-// - Persist bonding via NMState
-// - Persist port channels on the Switch
-// -------------------------------------------------------------------------------------------------
-
-#[derive(Debug, Clone, Serialize, new)]
-pub struct OKDSetupPersistNetworkBondScore {}
-
-impl Score<HAClusterTopology> for OKDSetupPersistNetworkBondScore {
-    fn create_interpret(&self) -> Box<dyn Interpret<HAClusterTopology>> {
-        Box::new(OKDSetupPersistNetworkBondInterpet::new())
-    }
-
-    fn name(&self) -> String {
-        "OKDSetupPersistNetworkBondScore".to_string()
-    }
-}
-
-#[derive(Debug, Clone)]
-pub struct OKDSetupPersistNetworkBondInterpet {
-    version: Version,
-    status: InterpretStatus,
-}
-
-impl OKDSetupPersistNetworkBondInterpet {
-    pub fn new() -> Self {
-        let version = Version::from("1.0.0").unwrap();
-        Self {
-            version,
-            status: InterpretStatus::QUEUED,
-        }
-    }
-
-    /// Ensures that three physical hosts are discovered and available for the ControlPlane role.
-    /// It will trigger discovery if not enough hosts are found.
-    async fn get_nodes(
-        &self,
-        _inventory: &Inventory,
-        _topology: &HAClusterTopology,
-    ) -> Result<Vec<PhysicalHost>, InterpretError> {
-        const REQUIRED_HOSTS: usize = 3;
-        let repo = InventoryRepositoryFactory::build().await?;
-        let control_plane_hosts = repo.get_host_for_role(&HostRole::ControlPlane).await?;
-
-        if control_plane_hosts.len() < REQUIRED_HOSTS {
-            Err(InterpretError::new(format!(
-                "OKD Requires at least {} control plane hosts, but only found {}. Cannot proceed.",
-                REQUIRED_HOSTS,
-                control_plane_hosts.len()
-            )))
-        } else {
-            // Take exactly the number of required hosts to ensure consistency.
-            Ok(control_plane_hosts
-                .into_iter()
-                .take(REQUIRED_HOSTS)
-                .collect())
-        }
-    }
-
-    async fn persist_network_bond(
-        &self,
-        inventory: &Inventory,
-        topology: &HAClusterTopology,
-        hosts: &Vec<PhysicalHost>,
-    ) -> Result<(), InterpretError> {
-        info!("Ensuring persistent bonding");
-
-        let score = HostNetworkConfigurationScore {
-            hosts: hosts.clone(),
-        };
-        score.interpret(inventory, topology).await?;
-
-        Ok(())
-    }
-}
-
-#[async_trait]
-impl Interpret<HAClusterTopology> for OKDSetupPersistNetworkBondInterpet {
-    fn get_name(&self) -> InterpretName {
-        InterpretName::Custom("OKDSetupPersistNetworkBondInterpet")
-    }
-
-    fn get_version(&self) -> Version {
-        self.version.clone()
-    }
-
-    fn get_status(&self) -> InterpretStatus {
-        self.status.clone()
-    }
-
-    fn get_children(&self) -> Vec<Id> {
-        vec![]
-    }
-
-    async fn execute(
-        &self,
-        inventory: &Inventory,
-        topology: &HAClusterTopology,
-    ) -> Result<Outcome, InterpretError> {
-        let nodes = self.get_nodes(inventory, topology).await?;
-
-        let res = self.persist_network_bond(inventory, topology, &nodes).await;
-
-        match res {
-            Ok(_) => Ok(Outcome::success(
-                "Network bond successfully persisted".into(),
-            )),
-            Err(_) => Err(InterpretError::new(
-                "Failed to persist network bond".to_string(),
-            )),
-        }
-    }
-}
@@ -1 +1,41 @@
+use kube::CustomResource;
+use schemars::JsonSchema;
+use serde::{Deserialize, Serialize};
+
 pub mod nmstate;
+
+#[derive(CustomResource, Deserialize, Serialize, Clone, Debug, JsonSchema)]
+#[kube(
+    group = "operators.coreos.com",
+    version = "v1",
+    kind = "OperatorGroup",
+    namespaced
+)]
+#[serde(rename_all = "camelCase")]
+pub struct OperatorGroupSpec {
+    pub target_namespaces: Vec<String>,
+}
+
+#[derive(CustomResource, Deserialize, Serialize, Clone, Debug, JsonSchema)]
+#[kube(
+    group = "operators.coreos.com",
+    version = "v1alpha1",
+    kind = "Subscription",
+    namespaced
+)]
+#[serde(rename_all = "camelCase")]
+pub struct SubscriptionSpec {
+    pub name: String,
+    pub source: String,
+    pub source_namespace: String,
+    pub channel: Option<String>,
+    pub install_plan_approval: Option<InstallPlanApproval>,
+}
+
+#[derive(Deserialize, Serialize, Clone, Debug, JsonSchema)]
+pub enum InstallPlanApproval {
+    #[serde(rename = "Automatic")]
+    Automatic,
+    #[serde(rename = "Manual")]
+    Manual,
+}
@@ -6,16 +6,9 @@ use serde::{Deserialize, Serialize};
|
|||||||
use serde_json::Value;
|
use serde_json::Value;
|
||||||
|
|
||||||
#[derive(CustomResource, Deserialize, Serialize, Clone, Debug, JsonSchema)]
|
#[derive(CustomResource, Deserialize, Serialize, Clone, Debug, JsonSchema)]
|
||||||
#[kube(
|
#[kube(group = "nmstate.io", version = "v1", kind = "NMState", namespaced)]
|
||||||
group = "nmstate.io",
|
|
||||||
version = "v1",
|
|
||||||
kind = "NMState",
|
|
||||||
plural = "nmstates",
|
|
||||||
namespaced = false
|
|
||||||
)]
|
|
||||||
#[serde(rename_all = "camelCase")]
|
#[serde(rename_all = "camelCase")]
|
||||||
pub struct NMStateSpec {
|
pub struct NMStateSpec {
|
||||||
#[serde(skip_serializing_if = "Option::is_none")]
|
|
||||||
pub probe_configuration: Option<ProbeConfig>,
|
pub probe_configuration: Option<ProbeConfig>,
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -51,7 +44,6 @@ pub struct ProbeDns {
|
|||||||
)]
|
)]
|
||||||
#[serde(rename_all = "camelCase")]
|
#[serde(rename_all = "camelCase")]
|
||||||
pub struct NodeNetworkConfigurationPolicySpec {
|
pub struct NodeNetworkConfigurationPolicySpec {
|
||||||
#[serde(skip_serializing_if = "Option::is_none")]
|
|
||||||
pub node_selector: Option<BTreeMap<String, String>>,
|
pub node_selector: Option<BTreeMap<String, String>>,
|
||||||
pub desired_state: DesiredStateSpec,
|
pub desired_state: DesiredStateSpec,
|
||||||
}
|
}
|
||||||
@@ -66,64 +58,37 @@ pub struct DesiredStateSpec {
|
|||||||
#[serde(rename_all = "kebab-case")]
|
#[serde(rename_all = "kebab-case")]
|
||||||
pub struct InterfaceSpec {
|
pub struct InterfaceSpec {
|
||||||
pub name: String,
|
pub name: String,
|
||||||
#[serde(skip_serializing_if = "Option::is_none")]
|
|
||||||
pub description: Option<String>,
|
pub description: Option<String>,
|
||||||
pub r#type: String,
|
pub r#type: String,
|
     pub state: String,
-    #[serde(skip_serializing_if = "Option::is_none")]
     pub mac_address: Option<String>,
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub copy_mac_from: Option<String>,
-    #[serde(skip_serializing_if = "Option::is_none")]
     pub mtu: Option<u32>,
-    #[serde(skip_serializing_if = "Option::is_none")]
     pub controller: Option<String>,
-    #[serde(skip_serializing_if = "Option::is_none")]
     pub ipv4: Option<IpStackSpec>,
-    #[serde(skip_serializing_if = "Option::is_none")]
     pub ipv6: Option<IpStackSpec>,
-    #[serde(skip_serializing_if = "Option::is_none")]
     pub ethernet: Option<EthernetSpec>,
-    #[serde(skip_serializing_if = "Option::is_none")]
     pub link_aggregation: Option<BondSpec>,
-    #[serde(skip_serializing_if = "Option::is_none")]
     pub vlan: Option<VlanSpec>,
-    #[serde(skip_serializing_if = "Option::is_none")]
     pub vxlan: Option<VxlanSpec>,
-    #[serde(skip_serializing_if = "Option::is_none")]
     pub mac_vtap: Option<MacVtapSpec>,
-    #[serde(skip_serializing_if = "Option::is_none")]
     pub mac_vlan: Option<MacVlanSpec>,
-    #[serde(skip_serializing_if = "Option::is_none")]
     pub infiniband: Option<InfinibandSpec>,
-    #[serde(skip_serializing_if = "Option::is_none")]
     pub linux_bridge: Option<LinuxBridgeSpec>,
-    #[serde(skip_serializing_if = "Option::is_none")]
     pub ovs_bridge: Option<OvsBridgeSpec>,
-    #[serde(skip_serializing_if = "Option::is_none")]
     pub ethtool: Option<EthtoolSpec>,
 }
 
 #[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
 #[serde(rename_all = "kebab-case")]
 pub struct IpStackSpec {
-    #[serde(skip_serializing_if = "Option::is_none")]
     pub enabled: Option<bool>,
-    #[serde(skip_serializing_if = "Option::is_none")]
     pub dhcp: Option<bool>,
-    #[serde(skip_serializing_if = "Option::is_none")]
     pub autoconf: Option<bool>,
-    #[serde(skip_serializing_if = "Option::is_none")]
     pub address: Option<Vec<IpAddressSpec>>,
-    #[serde(skip_serializing_if = "Option::is_none")]
     pub auto_dns: Option<bool>,
-    #[serde(skip_serializing_if = "Option::is_none")]
     pub auto_gateway: Option<bool>,
-    #[serde(skip_serializing_if = "Option::is_none")]
     pub auto_routes: Option<bool>,
-    #[serde(skip_serializing_if = "Option::is_none")]
     pub dhcp_client_id: Option<String>,
-    #[serde(skip_serializing_if = "Option::is_none")]
     pub dhcp_duid: Option<String>,
 }
 
@@ -137,11 +102,8 @@ pub struct IpAddressSpec {
 #[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
 #[serde(rename_all = "kebab-case")]
 pub struct EthernetSpec {
-    #[serde(skip_serializing_if = "Option::is_none")]
     pub speed: Option<u32>,
-    #[serde(skip_serializing_if = "Option::is_none")]
     pub duplex: Option<String>,
-    #[serde(skip_serializing_if = "Option::is_none")]
     pub auto_negotiation: Option<bool>,
 }
 
@@ -150,7 +112,6 @@ pub struct EthernetSpec {
 pub struct BondSpec {
     pub mode: String,
     pub ports: Vec<String>,
-    #[serde(skip_serializing_if = "Option::is_none")]
     pub options: Option<BTreeMap<String, Value>>,
 }
 
@@ -159,7 +120,6 @@ pub struct BondSpec {
 pub struct VlanSpec {
     pub base_iface: String,
     pub id: u16,
-    #[serde(skip_serializing_if = "Option::is_none")]
     pub protocol: Option<String>,
 }
 
@@ -169,11 +129,8 @@ pub struct VxlanSpec {
     pub base_iface: String,
     pub id: u32,
     pub remote: String,
-    #[serde(skip_serializing_if = "Option::is_none")]
     pub local: Option<String>,
-    #[serde(skip_serializing_if = "Option::is_none")]
     pub learning: Option<bool>,
-    #[serde(skip_serializing_if = "Option::is_none")]
     pub destination_port: Option<u16>,
 }
 
@@ -182,7 +139,6 @@ pub struct VxlanSpec {
 pub struct MacVtapSpec {
     pub base_iface: String,
     pub mode: String,
-    #[serde(skip_serializing_if = "Option::is_none")]
     pub promiscuous: Option<bool>,
 }
 
@@ -191,7 +147,6 @@ pub struct MacVtapSpec {
 pub struct MacVlanSpec {
     pub base_iface: String,
     pub mode: String,
-    #[serde(skip_serializing_if = "Option::is_none")]
     pub promiscuous: Option<bool>,
 }
 
@@ -206,35 +161,25 @@ pub struct InfinibandSpec {
 #[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
 #[serde(rename_all = "kebab-case")]
 pub struct LinuxBridgeSpec {
-    #[serde(skip_serializing_if = "Option::is_none")]
     pub options: Option<LinuxBridgeOptions>,
-    #[serde(skip_serializing_if = "Option::is_none")]
     pub ports: Option<Vec<LinuxBridgePort>>,
 }
 
 #[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
 #[serde(rename_all = "kebab-case")]
 pub struct LinuxBridgeOptions {
-    #[serde(skip_serializing_if = "Option::is_none")]
     pub mac_ageing_time: Option<u32>,
-    #[serde(skip_serializing_if = "Option::is_none")]
     pub multicast_snooping: Option<bool>,
-    #[serde(skip_serializing_if = "Option::is_none")]
     pub stp: Option<StpOptions>,
 }
 
 #[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
 #[serde(rename_all = "kebab-case")]
 pub struct StpOptions {
-    #[serde(skip_serializing_if = "Option::is_none")]
     pub enabled: Option<bool>,
-    #[serde(skip_serializing_if = "Option::is_none")]
     pub forward_delay: Option<u16>,
-    #[serde(skip_serializing_if = "Option::is_none")]
     pub hello_time: Option<u16>,
-    #[serde(skip_serializing_if = "Option::is_none")]
     pub max_age: Option<u16>,
-    #[serde(skip_serializing_if = "Option::is_none")]
     pub priority: Option<u16>,
 }
 
@@ -242,20 +187,15 @@ pub struct StpOptions {
 #[serde(rename_all = "kebab-case")]
 pub struct LinuxBridgePort {
     pub name: String,
-    #[serde(skip_serializing_if = "Option::is_none")]
     pub vlan: Option<LinuxBridgePortVlan>,
 }
 
 #[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
 #[serde(rename_all = "kebab-case")]
 pub struct LinuxBridgePortVlan {
-    #[serde(skip_serializing_if = "Option::is_none")]
     pub mode: Option<String>,
-    #[serde(skip_serializing_if = "Option::is_none")]
     pub trunk_tags: Option<Vec<VlanTag>>,
-    #[serde(skip_serializing_if = "Option::is_none")]
     pub tag: Option<u16>,
-    #[serde(skip_serializing_if = "Option::is_none")]
     pub enable_native: Option<bool>,
 }
 
@@ -263,7 +203,6 @@ pub struct LinuxBridgePortVlan {
 #[serde(rename_all = "kebab-case")]
 pub struct VlanTag {
     pub id: u16,
-    #[serde(skip_serializing_if = "Option::is_none")]
     pub id_range: Option<VlanIdRange>,
 }
 
@@ -277,20 +216,15 @@ pub struct VlanIdRange {
 #[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
 #[serde(rename_all = "kebab-case")]
 pub struct OvsBridgeSpec {
-    #[serde(skip_serializing_if = "Option::is_none")]
     pub options: Option<OvsBridgeOptions>,
-    #[serde(skip_serializing_if = "Option::is_none")]
     pub ports: Option<Vec<OvsPortSpec>>,
 }
 
 #[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
 #[serde(rename_all = "kebab-case")]
 pub struct OvsBridgeOptions {
-    #[serde(skip_serializing_if = "Option::is_none")]
     pub stp: Option<bool>,
-    #[serde(skip_serializing_if = "Option::is_none")]
     pub rstp: Option<bool>,
-    #[serde(skip_serializing_if = "Option::is_none")]
     pub mcast_snooping_enable: Option<bool>,
 }
 
@@ -298,25 +232,20 @@ pub struct OvsBridgeOptions {
 #[serde(rename_all = "kebab-case")]
 pub struct OvsPortSpec {
     pub name: String,
-    #[serde(skip_serializing_if = "Option::is_none")]
     pub link_aggregation: Option<BondSpec>,
-    #[serde(skip_serializing_if = "Option::is_none")]
     pub vlan: Option<LinuxBridgePortVlan>,
-    #[serde(skip_serializing_if = "Option::is_none")]
     pub r#type: Option<String>,
 }
 
 #[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
 #[serde(rename_all = "kebab-case")]
 pub struct EthtoolSpec {
-    // TODO: Properly describe this spec (https://nmstate.io/devel/yaml_api.html#ethtool)
+    // FIXME: Properly describe this spec (https://nmstate.io/devel/yaml_api.html#ethtool)
 }
 
 #[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
 #[serde(rename_all = "kebab-case")]
 pub struct EthtoolFecSpec {
-    #[serde(skip_serializing_if = "Option::is_none")]
     pub auto: Option<bool>,
-    #[serde(skip_serializing_if = "Option::is_none")]
     pub mode: Option<String>,
 }
 
@@ -39,70 +39,29 @@ impl HostNetworkConfigurationInterpret {
         &self,
         topology: &T,
         host: &PhysicalHost,
-        current_host: &usize,
-        total_hosts: &usize,
-    ) -> Result<HostNetworkConfig, InterpretError> {
-        if host.network.is_empty() {
-            info!("[Host {current_host}/{total_hosts}] No interfaces to configure, skipping");
-            return Ok(HostNetworkConfig {
-                host_id: host.id.clone(),
-                switch_ports: vec![],
-            });
-        }
-
-        let switch_ports = self
-            .collect_switch_ports_for_host(topology, host, current_host, total_hosts)
-            .await?;
-
-        let config = HostNetworkConfig {
-            host_id: host.id.clone(),
-            switch_ports,
-        };
-
-        if !config.switch_ports.is_empty() {
-            info!(
-                "[Host {current_host}/{total_hosts}] Found {} ports for {} interfaces",
-                config.switch_ports.len(),
-                host.network.len()
-            );
-
-            info!("[Host {current_host}/{total_hosts}] Configuring host network...");
+    ) -> Result<(), InterpretError> {
+        let switch_ports = self.collect_switch_ports_for_host(topology, host).await?;
+        if !switch_ports.is_empty() {
             topology
-                .configure_host_network(&config)
+                .configure_host_network(host, HostNetworkConfig { switch_ports })
                 .await
                 .map_err(|e| InterpretError::new(format!("Failed to configure host: {e}")))?;
-        } else {
-            info!(
-                "[Host {current_host}/{total_hosts}] No ports found for {} interfaces, skipping",
-                host.network.len()
-            );
         }
 
-        Ok(config)
+        Ok(())
     }
 
     async fn collect_switch_ports_for_host<T: Topology + Switch>(
         &self,
         topology: &T,
         host: &PhysicalHost,
-        current_host: &usize,
-        total_hosts: &usize,
     ) -> Result<Vec<SwitchPort>, InterpretError> {
         let mut switch_ports = vec![];
 
-        if host.network.is_empty() {
-            return Ok(switch_ports);
-        }
-
-        info!("[Host {current_host}/{total_hosts}] Collecting ports on switch...");
         for network_interface in &host.network {
             let mac_address = network_interface.mac_address;
 
             match topology.get_port_for_mac_address(&mac_address).await {
                 Ok(Some(port)) => {
-                    info!(
-                        "[Host {current_host}/{total_hosts}] Found port '{port}' for '{mac_address}'"
-                    );
                     switch_ports.push(SwitchPort {
                         interface: NetworkInterface {
                             name: network_interface.name.clone(),
@@ -113,7 +72,7 @@ impl HostNetworkConfigurationInterpret {
                         port,
                     });
                 }
-                Ok(None) => debug!("No port found for '{mac_address}', skipping"),
+                Ok(None) => debug!("No port found for host '{}', skipping", host.id),
                 Err(e) => {
                     return Err(InterpretError::new(format!(
                         "Failed to get port for host '{}': {}",
@@ -125,47 +84,6 @@ impl HostNetworkConfigurationInterpret {
 
         Ok(switch_ports)
     }
-
-    fn format_host_configuration(&self, configs: Vec<HostNetworkConfig>) -> Vec<String> {
-        let mut report = vec![
-            "Network Configuration Report".to_string(),
-            "------------------------------------------------------------------".to_string(),
-        ];
-
-        for config in configs {
-            let host = self
-                .score
-                .hosts
-                .iter()
-                .find(|h| h.id == config.host_id)
-                .unwrap();
-
-            println!("[Host] {host}");
-
-            if config.switch_ports.is_empty() {
-                report.push(format!(
-                    "⏭️ Host {}: SKIPPED (No matching switch ports found)",
-                    config.host_id
-                ));
-            } else {
-                let mappings: Vec<String> = config
-                    .switch_ports
-                    .iter()
-                    .map(|p| format!("[{} -> {}]", p.interface.name, p.port))
-                    .collect();
-
-                report.push(format!(
-                    "✅ Host {}: Bonded {} port(s) {}",
-                    config.host_id,
-                    config.switch_ports.len(),
-                    mappings.join(", ")
-                ));
-            }
-        }
-        report
-            .push("------------------------------------------------------------------".to_string());
-        report
-    }
 }
 
 #[async_trait]
@@ -195,38 +113,28 @@ impl<T: Topology + Switch> Interpret<T> for HostNetworkConfigurationInterpret {
             return Ok(Outcome::noop("No hosts to configure".into()));
         }
 
-        let host_count = self.score.hosts.len();
-        info!("Started network configuration for {host_count} host(s)...",);
+        info!(
+            "Started network configuration for {} host(s)...",
+            self.score.hosts.len()
+        );
 
-        info!("Setting up switch with sane defaults...");
         topology
             .setup_switch()
             .await
             .map_err(|e| InterpretError::new(format!("Switch setup failed: {e}")))?;
-        info!("Switch ready");
 
-        let mut current_host = 1;
-        let mut host_configurations = vec![];
-
+        let mut configured_host_count = 0;
         for host in &self.score.hosts {
-            let host_configuration = self
-                .configure_network_for_host(topology, host, &current_host, &host_count)
-                .await?;
+            // FIXME: Clear the previous config for host
+            self.configure_network_for_host(topology, host).await?;
+            configured_host_count += 1;
 
-            host_configurations.push(host_configuration);
-            current_host += 1;
         }
-        if current_host > 1 {
-            let details = self.format_host_configuration(host_configurations);
 
-            Ok(Outcome::success_with_details(
-                format!(
-                    "Configured {}/{} host(s)",
-                    current_host - 1,
-                    self.score.hosts.len()
-                ),
-                details,
-            ))
+        if configured_host_count > 0 {
+            Ok(Outcome::success(format!(
+                "Configured {configured_host_count}/{} host(s)",
+                self.score.hosts.len()
+            )))
         } else {
             Ok(Outcome::noop("No hosts configured".into()))
         }
@@ -301,7 +209,6 @@ mod tests {
         assert_that!(*configured_host_networks).contains_exactly(vec![(
            HOST_ID.clone(),
            HostNetworkConfig {
-                host_id: HOST_ID.clone(),
                switch_ports: vec![SwitchPort {
                    interface: EXISTING_INTERFACE.clone(),
                    port: PORT.clone(),
@@ -327,7 +234,6 @@ mod tests {
         assert_that!(*configured_host_networks).contains_exactly(vec![(
            HOST_ID.clone(),
            HostNetworkConfig {
-                host_id: HOST_ID.clone(),
                switch_ports: vec![
                    SwitchPort {
                        interface: EXISTING_INTERFACE.clone(),
@@ -357,7 +263,6 @@ mod tests {
             (
                 HOST_ID.clone(),
                 HostNetworkConfig {
-                    host_id: HOST_ID.clone(),
                     switch_ports: vec![SwitchPort {
                         interface: EXISTING_INTERFACE.clone(),
                         port: PORT.clone(),
@@ -367,7 +272,6 @@ mod tests {
             (
                 ANOTHER_HOST_ID.clone(),
                 HostNetworkConfig {
-                    host_id: ANOTHER_HOST_ID.clone(),
                     switch_ports: vec![SwitchPort {
                         interface: ANOTHER_EXISTING_INTERFACE.clone(),
                         port: ANOTHER_PORT.clone(),
@@ -379,6 +283,7 @@ mod tests {
 
     #[tokio::test]
     async fn port_not_found_for_mac_address_should_not_configure_interface() {
+        // FIXME: Should it still configure an empty bond/port channel?
         let score = given_score(vec![given_host(&HOST_ID, vec![UNKNOWN_INTERFACE.clone()])]);
         let topology = TopologyWithSwitch::new_port_not_found();
 
@@ -478,10 +383,11 @@ mod tests {
 
     async fn configure_host_network(
         &self,
-        config: &HostNetworkConfig,
+        host: &PhysicalHost,
+        config: HostNetworkConfig,
     ) -> Result<(), SwitchError> {
         let mut configured_host_networks = self.configured_host_networks.lock().unwrap();
-        configured_host_networks.push((config.host_id.clone(), config.clone()));
+        configured_host_networks.push((host.id.clone(), config.clone()));
 
         Ok(())
     }
 
@@ -50,7 +50,7 @@
 use crate::{
     modules::okd::{
         OKDSetup01InventoryScore, OKDSetup02BootstrapScore, OKDSetup03ControlPlaneScore,
-        OKDSetup04WorkersScore, OKDSetup05SanityCheckScore, OKDSetupPersistNetworkBondScore,
+        OKDSetup04WorkersScore, OKDSetup05SanityCheckScore,
         bootstrap_06_installation_report::OKDSetup06InstallationReportScore,
     },
     score::Score,
@@ -65,7 +65,6 @@ impl OKDInstallationPipeline {
             Box::new(OKDSetup01InventoryScore::new()),
             Box::new(OKDSetup02BootstrapScore::new()),
             Box::new(OKDSetup03ControlPlaneScore::new()),
-            Box::new(OKDSetupPersistNetworkBondScore::new()),
             Box::new(OKDSetup04WorkersScore::new()),
             Box::new(OKDSetup05SanityCheckScore::new()),
             Box::new(OKDSetup06InstallationReportScore::new()),

@@ -6,7 +6,6 @@ mod bootstrap_05_sanity_check;
 mod bootstrap_06_installation_report;
 pub mod bootstrap_dhcp;
 pub mod bootstrap_load_balancer;
-mod bootstrap_persist_network_bond;
 pub mod dhcp;
 pub mod dns;
 pub mod installation;
@@ -20,6 +19,5 @@ pub use bootstrap_03_control_plane::*;
 pub use bootstrap_04_workers::*;
 pub use bootstrap_05_sanity_check::*;
 pub use bootstrap_06_installation_report::*;
-pub use bootstrap_persist_network_bond::*;
 pub mod crd;
 pub mod host_network;

@@ -12,8 +12,7 @@ use crate::modules::monitoring::kube_prometheus::crd::crd_alertmanager_config::C
 use crate::modules::monitoring::kube_prometheus::crd::crd_default_rules::build_default_application_rules;
 use crate::modules::monitoring::kube_prometheus::crd::crd_grafana::{
     Grafana, GrafanaDashboard, GrafanaDashboardSpec, GrafanaDatasource, GrafanaDatasourceConfig,
-    GrafanaDatasourceJsonData, GrafanaDatasourceSpec, GrafanaSecretKeyRef, GrafanaSpec,
-    GrafanaValueFrom, GrafanaValueSource,
+    GrafanaDatasourceSpec, GrafanaSpec,
 };
 use crate::modules::monitoring::kube_prometheus::crd::crd_prometheus_rules::{
     PrometheusRule, PrometheusRuleSpec, RuleGroup,
@@ -40,7 +39,7 @@ use crate::{
 };
 use harmony_types::id::Id;
 
-use super::prometheus::PrometheusMonitoring;
+use super::prometheus::PrometheusApplicationMonitoring;
 
 #[derive(Clone, Debug, Serialize)]
 pub struct K8sPrometheusCRDAlertingScore {
@@ -50,7 +49,7 @@ pub struct K8sPrometheusCRDAlertingScore {
     pub prometheus_rules: Vec<RuleGroup>,
 }
 
-impl<T: Topology + K8sclient + PrometheusMonitoring<CRDPrometheus>> Score<T>
+impl<T: Topology + K8sclient + PrometheusApplicationMonitoring<CRDPrometheus>> Score<T>
     for K8sPrometheusCRDAlertingScore
 {
     fn create_interpret(&self) -> Box<dyn crate::interpret::Interpret<T>> {
@@ -76,7 +75,7 @@ pub struct K8sPrometheusCRDAlertingInterpret {
 }
 
 #[async_trait]
-impl<T: Topology + K8sclient + PrometheusMonitoring<CRDPrometheus>> Interpret<T>
+impl<T: Topology + K8sclient + PrometheusApplicationMonitoring<CRDPrometheus>> Interpret<T>
     for K8sPrometheusCRDAlertingInterpret
 {
     async fn execute(
@@ -467,13 +466,10 @@ impl K8sPrometheusCRDAlertingInterpret {
             match_labels: label.clone(),
             match_expressions: vec![],
         };
+        let mut json_data = BTreeMap::new();
+        json_data.insert("timeInterval".to_string(), "5s".to_string());
         let namespace = self.sender.namespace.clone();
-        let json_data = GrafanaDatasourceJsonData {
-            time_interval: Some("5s".to_string()),
-            http_header_name1: None,
-            tls_skip_verify: Some(true),
-            oauth_pass_thru: Some(true),
-        };
         let json = build_default_dashboard(&namespace);
 
         let graf_data_source = GrafanaDatasource {
@@ -499,11 +495,7 @@ impl K8sPrometheusCRDAlertingInterpret {
                     "http://prometheus-operated.{}.svc.cluster.local:9090",
                     self.sender.namespace.clone()
                 ),
-                secure_json_data: None,
-                is_default: None,
-                editable: None,
             },
-            values_from: None,
         },
     };
 
@@ -524,9 +516,7 @@ impl K8sPrometheusCRDAlertingInterpret {
             spec: GrafanaDashboardSpec {
                 resync_period: Some("30s".to_string()),
                 instance_selector: labels.clone(),
-                json: Some(json),
-                grafana_com: None,
-                datasources: None,
+                json,
             },
         };
 

@@ -9,17 +9,11 @@ use crate::{
 };
 
 #[async_trait]
-pub trait PrometheusMonitoring<S: AlertSender> {
+pub trait PrometheusApplicationMonitoring<S: AlertSender> {
     async fn install_prometheus(
         &self,
         sender: &S,
         inventory: &Inventory,
         receivers: Option<Vec<Box<dyn AlertReceiver<S>>>>,
     ) -> Result<PreparationOutcome, PreparationError>;
-
-    async fn ensure_prometheus_operator(
-        &self,
-        sender: &S,
-        inventory: &Inventory,
-    ) -> Result<PreparationOutcome, PreparationError>;
 }

@@ -38,7 +38,7 @@ use crate::{
 };
 use harmony_types::id::Id;
 
-use super::prometheus::PrometheusMonitoring;
+use super::prometheus::PrometheusApplicationMonitoring;
 
 #[derive(Clone, Debug, Serialize)]
 pub struct RHOBAlertingScore {
@@ -48,8 +48,8 @@ pub struct RHOBAlertingScore {
     pub prometheus_rules: Vec<RuleGroup>,
 }
 
-impl<T: Topology + K8sclient + Ingress + PrometheusMonitoring<RHOBObservability>> Score<T>
-    for RHOBAlertingScore
+impl<T: Topology + K8sclient + Ingress + PrometheusApplicationMonitoring<RHOBObservability>>
+    Score<T> for RHOBAlertingScore
 {
     fn create_interpret(&self) -> Box<dyn crate::interpret::Interpret<T>> {
         Box::new(RHOBAlertingInterpret {
@@ -74,8 +74,8 @@ pub struct RHOBAlertingInterpret {
 }
 
 #[async_trait]
-impl<T: Topology + K8sclient + Ingress + PrometheusMonitoring<RHOBObservability>> Interpret<T>
-    for RHOBAlertingInterpret
+impl<T: Topology + K8sclient + Ingress + PrometheusApplicationMonitoring<RHOBObservability>>
+    Interpret<T> for RHOBAlertingInterpret
 {
     async fn execute(
         &self,

@@ -4,7 +4,7 @@ use std::{
 };
 
 use async_trait::async_trait;
-use log::{debug, warn};
+use log::{info, warn};
 use serde::{Deserialize, Serialize};
 use tokio::time::sleep;
 
@@ -19,8 +19,8 @@ use harmony_types::id::Id;
 
 #[derive(Debug, Clone, Serialize)]
 pub struct CephRemoveOsd {
-    pub osd_deployment_name: String,
-    pub rook_ceph_namespace: String,
+    osd_deployment_name: String,
+    rook_ceph_namespace: String,
 }
 
 impl<T: Topology + K8sclient> Score<T> for CephRemoveOsd {
@@ -54,17 +54,18 @@ impl<T: Topology + K8sclient> Interpret<T> for CephRemoveOsdInterpret {
         self.verify_deployment_scaled(client.clone()).await?;
         self.delete_deployment(client.clone()).await?;
         self.verify_deployment_deleted(client.clone()).await?;
-        self.purge_ceph_osd(client.clone()).await?;
-        self.verify_ceph_osd_removal(client.clone()).await?;
 
         let osd_id_full = self.get_ceph_osd_id().unwrap();
+        self.purge_ceph_osd(client.clone(), &osd_id_full).await?;
+        self.verify_ceph_osd_removal(client.clone(), &osd_id_full)
+            .await?;
 
         Ok(Outcome::success(format!(
             "Successfully removed OSD {} from rook-ceph cluster by deleting deployment {}",
             osd_id_full, self.score.osd_deployment_name
         )))
     }
     fn get_name(&self) -> InterpretName {
-        InterpretName::CephRemoveOsd
+        todo!()
     }
 
     fn get_version(&self) -> Version {
@@ -81,7 +82,7 @@ impl<T: Topology + K8sclient> Interpret<T> for CephRemoveOsdInterpret {
 }
 
 impl CephRemoveOsdInterpret {
-    pub fn get_ceph_osd_id_numeric(&self) -> Result<String, InterpretError> {
+    pub fn get_ceph_osd_id(&self) -> Result<String, InterpretError> {
         let osd_id_numeric = self
             .score
             .osd_deployment_name
@@ -93,14 +94,9 @@ impl CephRemoveOsdInterpret {
                 self.score.osd_deployment_name
             ))
         })?;
-        Ok(osd_id_numeric.to_string())
-    }
-
-    pub fn get_ceph_osd_id(&self) -> Result<String, InterpretError> {
-        let osd_id_numeric = self.get_ceph_osd_id_numeric().unwrap();
         let osd_id_full = format!("osd.{}", osd_id_numeric);
 
-        debug!(
+        info!(
             "Targeting Ceph OSD: {} (parsed from deployment {})",
             osd_id_full, self.score.osd_deployment_name
         );
@@ -112,7 +108,6 @@ impl CephRemoveOsdInterpret {
         &self,
         client: Arc<K8sClient>,
     ) -> Result<Outcome, InterpretError> {
-        debug!("verifying toolbox exists");
         let toolbox_dep = "rook-ceph-tools".to_string();
 
         match client
@@ -154,7 +149,7 @@ impl CephRemoveOsdInterpret {
         &self,
         client: Arc<K8sClient>,
     ) -> Result<Outcome, InterpretError> {
-        debug!(
+        info!(
             "Scaling down OSD deployment: {}",
             self.score.osd_deployment_name
         );
@@ -177,7 +172,7 @@ impl CephRemoveOsdInterpret {
     ) -> Result<Outcome, InterpretError> {
         let (timeout, interval, start) = self.build_timer();
 
-        debug!("Waiting for OSD deployment to scale down to 0 replicas");
+        info!("Waiting for OSD deployment to scale down to 0 replicas");
         loop {
             let dep = client
                 .get_deployment(
@@ -185,9 +180,11 @@ impl CephRemoveOsdInterpret {
                     Some(&self.score.rook_ceph_namespace),
                 )
                 .await?;
 
             if let Some(deployment) = dep {
                 if let Some(status) = deployment.status {
-                    if status.replicas == None && status.ready_replicas == None {
+                    if status.replicas.unwrap_or(1) == 0 && status.ready_replicas.unwrap_or(1) == 0
+                    {
                         return Ok(Outcome::success(
                             "Deployment successfully scaled down.".to_string(),
                         ));
@@ -215,7 +212,7 @@ impl CephRemoveOsdInterpret {
         &self,
         client: Arc<K8sClient>,
     ) -> Result<Outcome, InterpretError> {
-        debug!(
+        info!(
             "Deleting OSD deployment: {}",
             self.score.osd_deployment_name
         );
@@ -237,7 +234,7 @@ impl CephRemoveOsdInterpret {
     ) -> Result<Outcome, InterpretError> {
         let (timeout, interval, start) = self.build_timer();
 
-        debug!("Verifying OSD deployment deleted");
+        info!("Waiting for OSD deployment to scale down to 0 replicas");
         loop {
             let dep = client
                 .get_deployment(
@@ -247,7 +244,7 @@ impl CephRemoveOsdInterpret {
                 .await?;
 
             if dep.is_none() {
-                debug!(
+                info!(
                     "Deployment {} successfully deleted.",
                     self.score.osd_deployment_name
                 );
@@ -279,10 +276,12 @@ impl CephRemoveOsdInterpret {
         Ok(tree)
     }
 
-    pub async fn purge_ceph_osd(&self, client: Arc<K8sClient>) -> Result<Outcome, InterpretError> {
-        let osd_id_numeric = self.get_ceph_osd_id_numeric().unwrap();
-        let osd_id_full = self.get_ceph_osd_id().unwrap();
-        debug!(
+    pub async fn purge_ceph_osd(
+        &self,
+        client: Arc<K8sClient>,
+        osd_id_full: &str,
+    ) -> Result<Outcome, InterpretError> {
+        info!(
             "Purging OSD {} from Ceph cluster and removing its auth key",
             osd_id_full
         );
@@ -292,9 +291,8 @@ impl CephRemoveOsdInterpret {
             "app".to_string(),
             Some(&self.score.rook_ceph_namespace),
             vec![
-                "sh",
-                "-c",
-                format!("ceph osd purge {osd_id_numeric} --yes-i-really-mean-it && ceph auth del {osd_id_full}").as_str(),
+                format!("ceph osd purge {osd_id_full} --yes-i-really-mean-it").as_str(),
+                format!("ceph auth del osd.{osd_id_full}").as_str(),
             ],
         )
         .await?;
@@ -307,10 +305,10 @@ impl CephRemoveOsdInterpret {
     pub async fn verify_ceph_osd_removal(
         &self,
         client: Arc<K8sClient>,
+        osd_id_full: &str,
     ) -> Result<Outcome, InterpretError> {
         let (timeout, interval, start) = self.build_timer();
-        let osd_id_full = self.get_ceph_osd_id().unwrap();
-        debug!(
+        info!(
             "Verifying OSD {} has been removed from the Ceph tree...",
             osd_id_full
         );
@@ -320,7 +318,7 @@ impl CephRemoveOsdInterpret {
             "rook-ceph-tools".to_string(),
             "app".to_string(),
             Some(&self.score.rook_ceph_namespace),
-            vec!["sh", "-c", "ceph osd tree -f json"],
+            vec!["ceph osd tree -f json"],
         )
         .await?;
         let tree =

@@ -1,2 +1,2 @@
-pub mod ceph_remove_osd_score;
+pub mod ceph_osd_replacement_score;
 pub mod ceph_validate_health_score;

@@ -40,7 +40,7 @@ pub fn init() {
         HarmonyEvent::HarmonyFinished => {
             if !details.is_empty() {
                 println!(
-                    "\n{} All done! Here's a few info for you:",
+                    "\n{} All done! Here's what's next for you:",
                     theme::EMOJI_SUMMARY
                 );
                 for detail in details.iter() {

@@ -1,96 +0,0 @@
-use std::str::FromStr;
-
-use serde::Serialize;
-
-#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize)]
-pub struct K8sName(pub String);
-
-impl K8sName {
-    #[cfg(test)]
-    pub fn dummy() -> Self {
-        K8sName("example".to_string())
-    }
-
-    fn is_valid(name: &str) -> bool {
-        if name.is_empty() || name.len() > 63 {
-            return false;
-        }
-
-        let b = name.as_bytes();
-
-        if !b[0].is_ascii_alphanumeric() || !b[b.len() - 1].is_ascii_alphanumeric() {
-            return false;
-        }
-
-        b.iter()
-            .all(|c| c.is_ascii_lowercase() || c.is_ascii_digit() || *c == b'-')
-    }
-}
-
-impl FromStr for K8sName {
-    type Err = K8sNameError;
-
-    fn from_str(s: &str) -> Result<Self, Self::Err> {
-        if !Self::is_valid(s) {
-            return Err(K8sNameError::InvalidFormat(format!(
-                "Invalid Kubernetes resource name '{s}': \
-                must match DNS-1123 (lowercase alphanumeric, hyphens, <=63 chars)"
-            )));
-        };
-
-        Ok(K8sName(s.to_string()))
-    }
-}
-
-#[derive(Debug)]
-pub enum K8sNameError {
-    InvalidFormat(String),
-}
-
-impl From<&K8sName> for String {
-    fn from(value: &K8sName) -> Self {
-        value.0.clone()
-    }
-}
-
-impl std::fmt::Display for K8sName {
-    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
-        f.write_str(&self.0)
-    }
-}
-
-#[cfg(test)]
-mod tests {
-    use super::*;
-
-    #[test]
-    fn test_valid_name() {
-        assert!(K8sName::from_str("k8s-name-test").is_ok());
-        assert!(K8sName::from_str("n").is_ok());
-        assert!(K8sName::from_str("node1").is_ok());
-        assert!(K8sName::from_str("my-app-v2").is_ok());
-        assert!(K8sName::from_str("service123").is_ok());
-        assert!(K8sName::from_str("abcdefghijklmnopqrstuvwxyz-1234567890").is_ok());
-    }
-
-    #[test]
-    fn test_invalid_name() {
-        assert!(K8sName::from_str("").is_err());
-        assert!(K8sName::from_str(".config").is_err());
-        assert!(K8sName::from_str("_hidden").is_err());
-        assert!(K8sName::from_str("UPPER-CASE").is_err());
-        assert!(K8sName::from_str("123-$$$").is_err());
-        assert!(K8sName::from_str("app!name").is_err());
-        assert!(K8sName::from_str("my..app").is_err());
-        assert!(K8sName::from_str("backend-").is_err());
-        assert!(K8sName::from_str("-frontend").is_err());
-        assert!(K8sName::from_str("InvalidName").is_err());
-        assert!(
-            K8sName::from_str("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa")
-                .is_err()
-        );
-        assert!(K8sName::from_str("k8s name").is_err());
-        assert!(K8sName::from_str("k8s_name").is_err());
-        assert!(K8sName::from_str("k8s@name").is_err());
-    }
-}

@@ -1,4 +1,3 @@
 pub mod id;
-pub mod k8s_name;
 pub mod net;
 pub mod switch;

@@ -77,7 +77,7 @@ impl YaSerializeTrait for HAProxyId {
     }
 }
 
-#[derive(PartialEq, Debug, Clone)]
+#[derive(PartialEq, Debug)]
 pub struct HAProxyId(String);
 
 impl Default for HAProxyId {
@@ -297,7 +297,7 @@ pub struct HAProxyFrontends {
     pub frontend: Vec<Frontend>,
 }
 
-#[derive(Clone, Default, PartialEq, Debug, YaSerialize, YaDeserialize)]
+#[derive(Default, PartialEq, Debug, YaSerialize, YaDeserialize)]
 pub struct Frontend {
     #[yaserde(attribute = true)]
     pub uuid: String,
@@ -310,7 +310,7 @@ pub struct Frontend {
     pub bind_options: MaybeString,
     pub mode: String,
     #[yaserde(rename = "defaultBackend")]
-    pub default_backend: Option<String>,
+    pub default_backend: String,
     pub ssl_enabled: i32,
     pub ssl_certificates: MaybeString,
     pub ssl_default_certificate: MaybeString,
@@ -416,7 +416,7 @@ pub struct HAProxyBackends {
     pub backends: Vec<HAProxyBackend>,
 }
 
-#[derive(Clone, Default, PartialEq, Debug, YaSerialize, YaDeserialize)]
+#[derive(Default, PartialEq, Debug, YaSerialize, YaDeserialize)]
 pub struct HAProxyBackend {
     #[yaserde(attribute = true, rename = "uuid")]
     pub uuid: String,
@@ -535,7 +535,7 @@ pub struct HAProxyServers {
     pub servers: Vec<HAProxyServer>,
 }
 
-#[derive(Clone, Default, PartialEq, Debug, YaSerialize, YaDeserialize)]
+#[derive(Default, PartialEq, Debug, YaSerialize, YaDeserialize)]
 pub struct HAProxyServer {
     #[yaserde(attribute = true, rename = "uuid")]
     pub uuid: String,
@@ -543,8 +543,8 @@ pub struct HAProxyServer {
     pub enabled: u8,
     pub name: String,
     pub description: MaybeString,
-    pub address: Option<String>,
-    pub port: Option<u16>,
+    pub address: String,
+    pub port: u16,
     pub checkport: MaybeString,
     pub mode: String,
     pub multiplexer_protocol: MaybeString,
@@ -589,7 +589,7 @@ pub struct HAProxyHealthChecks {
     pub healthchecks: Vec<HAProxyHealthCheck>,
 }
 
-#[derive(Clone, Default, PartialEq, Debug, YaSerialize, YaDeserialize)]
+#[derive(Default, PartialEq, Debug, YaSerialize, YaDeserialize)]
 pub struct HAProxyHealthCheck {
     #[yaserde(attribute = true)]
     pub uuid: String,

@@ -9,7 +9,7 @@ pub struct Interface {
     pub physical_interface_name: String,
     pub descr: Option<MaybeString>,
     pub mtu: Option<MaybeString>,
-    pub enable: Option<MaybeString>,
+    pub enable: MaybeString,
     pub lock: Option<MaybeString>,
     #[yaserde(rename = "spoofmac")]
     pub spoof_mac: Option<MaybeString>,
@@ -134,15 +134,19 @@ mod test {
         <interfaces>
             <paul>
                 <if></if>
+                <enable/>
             </paul>
             <anotherpaul>
                 <if></if>
+                <enable/>
             </anotherpaul>
             <thirdone>
                 <if></if>
+                <enable/>
             </thirdone>
             <andgofor4>
                 <if></if>
+                <enable/>
             </andgofor4>
         </interfaces>
         <bar>foo</bar>

@@ -25,7 +25,6 @@ sha2 = "0.10.9"
 
 [dev-dependencies]
 pretty_assertions.workspace = true
-assertor.workspace = true
 
 [lints.rust]
 unexpected_cfgs = { level = "warn", check-cfg = ['cfg(e2e_test)'] }

@@ -30,7 +30,8 @@ impl SshConfigManager {
 
         self.opnsense_shell
             .exec(&format!(
-                "cp /conf/config.xml /conf/backup/{backup_filename}"
+                "cp /conf/config.xml /conf/backup/{}",
+                backup_filename
             ))
             .await
     }

@@ -1,8 +1,10 @@
 mod ssh;
-use crate::Error;
-use async_trait::async_trait;
 pub use ssh::*;
 
+use async_trait::async_trait;
+
+use crate::Error;
+
 #[async_trait]
 pub trait OPNsenseShell: std::fmt::Debug + Send + Sync {
     async fn exec(&self, command: &str) -> Result<String, Error>;

@@ -1,8 +1,11 @@
-use crate::{config::OPNsenseShell, Error};
+use std::sync::Arc;
 
+use log::warn;
 use opnsense_config_xml::{
     Frontend, HAProxy, HAProxyBackend, HAProxyHealthCheck, HAProxyServer, OPNsense,
 };
-use std::{collections::HashSet, sync::Arc};
+
+use crate::{config::OPNsenseShell, Error};
 
 pub struct LoadBalancerConfig<'a> {
     opnsense: &'a mut OPNsense,
@@ -28,7 +31,7 @@ impl<'a> LoadBalancerConfig<'a> {
         match &mut self.opnsense.opnsense.haproxy.as_mut() {
             Some(haproxy) => f(haproxy),
             None => unimplemented!(
-                "Cannot configure load balancer when haproxy config does not exist yet"
+                "Adding a backend is not supported when haproxy config does not exist yet"
             ),
         }
     }
@@ -37,67 +40,21 @@ impl<'a> LoadBalancerConfig<'a> {
         self.with_haproxy(|haproxy| haproxy.general.enabled = enabled as i32);
     }
 
-    /// Configures a service by removing any existing service on the same port
-    /// and then adding the new definition. This ensures idempotency.
-    pub fn configure_service(
-        &mut self,
-        frontend: Frontend,
-        backend: HAProxyBackend,
-        servers: Vec<HAProxyServer>,
-        healthcheck: Option<HAProxyHealthCheck>,
-    ) {
-        self.remove_service_by_bind_address(&frontend.bind);
-        self.remove_servers(&servers);
-
-        self.add_new_service(frontend, backend, servers, healthcheck);
+    pub fn add_backend(&mut self, backend: HAProxyBackend) {
+        warn!("TODO make sure this new backend does not refer non-existing entities like servers or health checks");
+        self.with_haproxy(|haproxy| haproxy.backends.backends.push(backend));
     }
 
-    // Remove the corresponding real servers based on their name if they already exist.
-    fn remove_servers(&mut self, servers: &[HAProxyServer]) {
-        let server_names: HashSet<_> = servers.iter().map(|s| s.name.clone()).collect();
-        self.with_haproxy(|haproxy| {
-            haproxy
-                .servers
-                .servers
-                .retain(|s| !server_names.contains(&s.name));
-        });
+    pub fn add_frontend(&mut self, frontend: Frontend) {
+        self.with_haproxy(|haproxy| haproxy.frontends.frontend.push(frontend));
     }
 
-    /// Removes a service and its dependent components based on the frontend's bind address.
-    /// This performs a cascading delete of the frontend, backend, servers, and health check.
-    fn remove_service_by_bind_address(&mut self, bind_address: &str) {
-        self.with_haproxy(|haproxy| {
-            let Some(old_frontend) = remove_frontend_by_bind_address(haproxy, bind_address) else {
-                return;
-            };
-
-            let Some(old_backend) = remove_backend(haproxy, old_frontend) else {
-                return;
-            };
-
-            remove_healthcheck(haproxy, &old_backend);
-            remove_linked_servers(haproxy, &old_backend);
-        });
+    pub fn add_healthcheck(&mut self, healthcheck: HAProxyHealthCheck) {
+        self.with_haproxy(|haproxy| haproxy.healthchecks.healthchecks.push(healthcheck));
     }
 
-    /// Adds the components of a new service to the HAProxy configuration.
-    /// This function de-duplicates servers by name to prevent configuration errors.
-    fn add_new_service(
-        &mut self,
-        frontend: Frontend,
-        backend: HAProxyBackend,
-        servers: Vec<HAProxyServer>,
-        healthcheck: Option<HAProxyHealthCheck>,
-    ) {
-        self.with_haproxy(|haproxy| {
-            if let Some(check) = healthcheck {
-                haproxy.healthchecks.healthchecks.push(check);
-            }
-
-            haproxy.servers.servers.extend(servers);
-            haproxy.backends.backends.push(backend);
-            haproxy.frontends.frontend.push(frontend);
-        });
+    pub fn add_servers(&mut self, mut servers: Vec<HAProxyServer>) {
+        self.with_haproxy(|haproxy| haproxy.servers.servers.append(&mut servers));
     }
 
     pub async fn reload_restart(&self) -> Result<(), Error> {
@@ -125,262 +82,3 @@ impl<'a> LoadBalancerConfig<'a> {
|
|||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
fn remove_frontend_by_bind_address(haproxy: &mut HAProxy, bind_address: &str) -> Option<Frontend> {
|
|
||||||
let pos = haproxy
|
|
||||||
.frontends
|
|
||||||
.frontend
|
|
||||||
.iter()
|
|
||||||
.position(|f| f.bind == bind_address);
|
|
||||||
|
|
||||||
match pos {
|
|
||||||
Some(pos) => Some(haproxy.frontends.frontend.remove(pos)),
|
|
||||||
None => None,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fn remove_backend(haproxy: &mut HAProxy, old_frontend: Frontend) -> Option<HAProxyBackend> {
|
|
||||||
let default_backend = old_frontend.default_backend?;
|
|
||||||
let pos = haproxy
|
|
||||||
.backends
|
|
||||||
.backends
|
|
||||||
.iter()
|
|
||||||
.position(|b| b.uuid == default_backend);
|
|
||||||
|
|
||||||
match pos {
|
|
||||||
Some(pos) => Some(haproxy.backends.backends.remove(pos)),
|
|
||||||
None => None, // orphaned frontend, shouldn't happen
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fn remove_healthcheck(haproxy: &mut HAProxy, backend: &HAProxyBackend) {
|
|
||||||
if let Some(uuid) = &backend.health_check.content {
|
|
||||||
haproxy
|
|
||||||
.healthchecks
|
|
||||||
.healthchecks
|
|
||||||
.retain(|h| h.uuid != *uuid);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Remove the backend's servers. This assumes servers are not shared between services.
fn remove_linked_servers(haproxy: &mut HAProxy, backend: &HAProxyBackend) {
    if let Some(server_uuids_str) = &backend.linked_servers.content {
        let server_uuids_to_remove: HashSet<_> = server_uuids_str.split(',').collect();
        haproxy
            .servers
            .servers
            .retain(|s| !server_uuids_to_remove.contains(s.uuid.as_str()));
    }
}

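Because the doc comment above relies on servers not being shared between services, here is a small hypothetical illustration of the comma-separated linked_servers contract (not part of the diff; the literal UUIDs and setup are made up):

// Two servers exist; the backend links only "uuid-a", so only that one is removed.
let mut haproxy = HAProxy {
    servers: HAProxyServers {
        servers: vec![
            HAProxyServer { uuid: "uuid-a".into(), ..Default::default() },
            HAProxyServer { uuid: "uuid-b".into(), ..Default::default() },
        ],
    },
    ..Default::default()
};
let backend = HAProxyBackend {
    linked_servers: MaybeString::from("uuid-a"),
    ..Default::default()
};
remove_linked_servers(&mut haproxy, &backend);
assert_eq!(haproxy.servers.servers.len(), 1); // "uuid-b" survives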
#[cfg(test)]
mod tests {
    use crate::config::DummyOPNSenseShell;
    use assertor::*;
    use opnsense_config_xml::{
        Frontend, HAProxy, HAProxyBackend, HAProxyBackends, HAProxyFrontends, HAProxyHealthCheck,
        HAProxyHealthChecks, HAProxyId, HAProxyServer, HAProxyServers, MaybeString, OPNsense,
    };
    use std::sync::Arc;

    use super::LoadBalancerConfig;

    static SERVICE_BIND_ADDRESS: &str = "192.168.1.1:80";
    static OTHER_SERVICE_BIND_ADDRESS: &str = "192.168.1.1:443";

    static SERVER_ADDRESS: &str = "1.1.1.1:80";
    static OTHER_SERVER_ADDRESS: &str = "1.1.1.1:443";

    #[test]
    fn configure_service_should_add_all_service_components_to_haproxy() {
        let mut opnsense = given_opnsense();
        let mut load_balancer = given_load_balancer(&mut opnsense);
        let (healthcheck, servers, backend, frontend) =
            given_service(SERVICE_BIND_ADDRESS, SERVER_ADDRESS);

        load_balancer.configure_service(
            frontend.clone(),
            backend.clone(),
            servers.clone(),
            Some(healthcheck.clone()),
        );

        assert_haproxy_configured_with(
            opnsense,
            vec![frontend],
            vec![backend],
            servers,
            vec![healthcheck],
        );
    }

    #[test]
    fn configure_service_should_replace_service_on_same_bind_address() {
        let (healthcheck, servers, backend, frontend) =
            given_service(SERVICE_BIND_ADDRESS, SERVER_ADDRESS);
        let mut opnsense = given_opnsense_with(given_haproxy(
            vec![frontend.clone()],
            vec![backend.clone()],
            servers.clone(),
            vec![healthcheck.clone()],
        ));
        let mut load_balancer = given_load_balancer(&mut opnsense);

        let (updated_healthcheck, updated_servers, updated_backend, updated_frontend) =
            given_service(SERVICE_BIND_ADDRESS, OTHER_SERVER_ADDRESS);

        load_balancer.configure_service(
            updated_frontend.clone(),
            updated_backend.clone(),
            updated_servers.clone(),
            Some(updated_healthcheck.clone()),
        );

        assert_haproxy_configured_with(
            opnsense,
            vec![updated_frontend],
            vec![updated_backend],
            updated_servers,
            vec![updated_healthcheck],
        );
    }

    #[test]
    fn configure_service_should_keep_existing_service_on_different_bind_addresses() {
        let (healthcheck, servers, backend, frontend) =
            given_service(SERVICE_BIND_ADDRESS, SERVER_ADDRESS);
        let (other_healthcheck, other_servers, other_backend, other_frontend) =
            given_service(OTHER_SERVICE_BIND_ADDRESS, OTHER_SERVER_ADDRESS);
        let mut opnsense = given_opnsense_with(given_haproxy(
            vec![frontend.clone()],
            vec![backend.clone()],
            servers.clone(),
            vec![healthcheck.clone()],
        ));
        let mut load_balancer = given_load_balancer(&mut opnsense);

        load_balancer.configure_service(
            other_frontend.clone(),
            other_backend.clone(),
            other_servers.clone(),
            Some(other_healthcheck.clone()),
        );

        assert_haproxy_configured_with(
            opnsense,
            vec![frontend, other_frontend],
            vec![backend, other_backend],
            [servers, other_servers].concat(),
            vec![healthcheck, other_healthcheck],
        );
    }

    fn assert_haproxy_configured_with(
        opnsense: OPNsense,
        frontends: Vec<Frontend>,
        backends: Vec<HAProxyBackend>,
        servers: Vec<HAProxyServer>,
        healthchecks: Vec<HAProxyHealthCheck>,
    ) {
        let haproxy = opnsense.opnsense.haproxy.as_ref().unwrap();
        assert_that!(haproxy.frontends.frontend).contains_exactly(frontends);
        assert_that!(haproxy.backends.backends).contains_exactly(backends);
        assert_that!(haproxy.servers.servers).is_equal_to(servers);
        assert_that!(haproxy.healthchecks.healthchecks).contains_exactly(healthchecks);
    }

    fn given_opnsense() -> OPNsense {
        OPNsense::default()
    }

    fn given_opnsense_with(haproxy: HAProxy) -> OPNsense {
        let mut opnsense = OPNsense::default();
        opnsense.opnsense.haproxy = Some(haproxy);

        opnsense
    }

    fn given_load_balancer<'a>(opnsense: &'a mut OPNsense) -> LoadBalancerConfig<'a> {
        let opnsense_shell = Arc::new(DummyOPNSenseShell {});
        if opnsense.opnsense.haproxy.is_none() {
            opnsense.opnsense.haproxy = Some(HAProxy::default());
        }
        LoadBalancerConfig::new(opnsense, opnsense_shell)
    }

    fn given_service(
        bind_address: &str,
        server_address: &str,
    ) -> (
        HAProxyHealthCheck,
        Vec<HAProxyServer>,
        HAProxyBackend,
        Frontend,
    ) {
        let healthcheck = given_healthcheck();
        let servers = vec![given_server(server_address)];
        let backend = given_backend();
        let frontend = given_frontend(bind_address);
        (healthcheck, servers, backend, frontend)
    }

    fn given_haproxy(
        frontends: Vec<Frontend>,
        backends: Vec<HAProxyBackend>,
        servers: Vec<HAProxyServer>,
        healthchecks: Vec<HAProxyHealthCheck>,
    ) -> HAProxy {
        HAProxy {
            frontends: HAProxyFrontends {
                frontend: frontends,
            },
            backends: HAProxyBackends { backends },
            servers: HAProxyServers { servers },
            healthchecks: HAProxyHealthChecks { healthchecks },
            ..Default::default()
        }
    }

    fn given_frontend(bind_address: &str) -> Frontend {
        Frontend {
            uuid: "uuid".into(),
            id: HAProxyId::default(),
            enabled: 1,
            name: format!("frontend_{bind_address}"),
            bind: bind_address.into(),
            default_backend: Some("backend-uuid".into()),
            ..Default::default()
        }
    }

    fn given_backend() -> HAProxyBackend {
        HAProxyBackend {
            uuid: "backend-uuid".into(),
            id: HAProxyId::default(),
            enabled: 1,
            name: "backend_192.168.1.1:80".into(),
            linked_servers: MaybeString::from("server-uuid"),
            health_check_enabled: 1,
            health_check: MaybeString::from("healthcheck-uuid"),
            ..Default::default()
        }
    }

    fn given_server(address: &str) -> HAProxyServer {
        HAProxyServer {
            uuid: "server-uuid".into(),
            id: HAProxyId::default(),
            name: address.into(),
            address: Some(address.into()),
            ..Default::default()
        }
    }

    fn given_healthcheck() -> HAProxyHealthCheck {
        HAProxyHealthCheck {
            uuid: "healthcheck-uuid".into(),
            name: "healthcheck".into(),
            ..Default::default()
        }
    }
}