Compare commits
18 Commits
feat/webap ... configure-

| Author | SHA1 | Date |
| --- | --- | --- |
|  | ea7322f38c |  |
|  | 2edd24753a |  |
|  | da5be17cb6 |  |
|  | 1265cebfa7 |  |
|  | 073cccde2f |  |
|  | 77e09436a9 |  |
|  | 45e0de2097 |  |
|  | 731dc5f404 |  |
|  | 1199564122 |  |
|  | f2f55d98d4 |  |
|  | 7b6ac6641a |  |
|  | 58c1fd4a96 |  |
|  | 2388f585f5 |  |
|  | ffe3c09907 |  |
|  | 0de52aedbf |  |
|  | 427009bbfe |  |
|  | fe0501b784 |  |
|  | 61b02e7a28 |  |
53  Cargo.lock  generated
@@ -1780,7 +1780,6 @@ dependencies = [
name = "example-nanodc"
version = "0.1.0"
dependencies = [
"brocade",
"cidr",
"env_logger",
"harmony",

@@ -1789,7 +1788,6 @@ dependencies = [
"harmony_tui",
"harmony_types",
"log",
"serde",
"tokio",
"url",
]

@@ -1808,7 +1806,6 @@ dependencies = [
name = "example-okd-install"
version = "0.1.0"
dependencies = [
"brocade",
"cidr",
"env_logger",
"harmony",

@@ -1839,16 +1836,25 @@ dependencies = [
name = "example-opnsense"
version = "0.1.0"
dependencies = [
"brocade",
"cidr",
"env_logger",
"harmony",
"harmony_macros",
"harmony_secret",
"harmony_tui",
"harmony_types",
"log",
"serde",
"tokio",
"url",
]

[[package]]
name = "example-penpot"
version = "0.1.0"
dependencies = [
"harmony",
"harmony_cli",
"harmony_macros",
"harmony_types",
"tokio",
"url",
]

@@ -1857,7 +1863,6 @@ dependencies = [
name = "example-pxe"
version = "0.1.0"
dependencies = [
"brocade",
"cidr",
"env_logger",
"harmony",

@@ -1872,15 +1877,6 @@ dependencies = [
"url",
]

[[package]]
name = "example-remove-rook-osd"
version = "0.1.0"
dependencies = [
"harmony",
"harmony_cli",
"tokio",
]

[[package]]
name = "example-rust"
version = "0.1.0"

@@ -2460,6 +2456,17 @@ dependencies = [
"tokio",
]

[[package]]
name = "harmony_derive"
version = "0.1.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2d138bbb32bb346299c5f95fbb53532313f39927cb47c411c99c634ef8665ef7"
dependencies = [
"proc-macro2",
"quote",
"syn 1.0.109",
]

[[package]]
name = "harmony_inventory_agent"
version = "0.1.0"

@@ -3906,6 +3913,19 @@ dependencies = [
"web-time",
]

[[package]]
name = "okd_host_network"
version = "0.1.0"
dependencies = [
"harmony",
"harmony_cli",
"harmony_derive",
"harmony_inventory_agent",
"harmony_macros",
"harmony_types",
"tokio",
]

[[package]]
name = "once_cell"
version = "1.21.3"

@@ -3934,7 +3954,6 @@ checksum = "d05e27ee213611ffe7d6348b942e8f942b37114c00cc03cec254295a4a17852e"
name = "opnsense-config"
version = "0.1.0"
dependencies = [
"assertor",
"async-trait",
"chrono",
"env_logger",
@@ -15,8 +15,7 @@ members = [
"harmony_inventory_agent",
"harmony_secret_derive",
"harmony_secret",
"adr/agent_discovery/mdns",
"brocade",
"adr/agent_discovery/mdns", "brocade",
]

[workspace.package]
@@ -10,7 +10,6 @@ use log::{debug, info};
use regex::Regex;
use std::{collections::HashSet, str::FromStr};

#[derive(Debug)]
pub struct FastIronClient {
shell: BrocadeShell,
version: BrocadeInfo,

@@ -71,7 +70,7 @@ impl FastIronClient {

Some(Ok(InterSwitchLink {
local_port,
remote_port: None,
remote_port: None, // FIXME: Map the remote port as well
}))
}
@@ -162,7 +162,7 @@ pub async fn init(
}

#[async_trait]
pub trait BrocadeClient: std::fmt::Debug {
pub trait BrocadeClient {
/// Retrieves the operating system and version details from the connected Brocade switch.
///
/// This is typically the first call made after establishing a connection to determine
@@ -10,7 +10,6 @@ use crate::{
parse_brocade_mac_address, shell::BrocadeShell,
};

#[derive(Debug)]
pub struct NetworkOperatingSystemClient {
shell: BrocadeShell,
version: BrocadeInfo,

@@ -271,7 +270,7 @@ impl BrocadeClient for NetworkOperatingSystemClient {
commands.push("no ip address".into());
commands.push("no fabric isl enable".into());
commands.push("no fabric trunk enable".into());
commands.push(format!("channel-group {channel_id} mode active"));
commands.push(format!("channel-group {} mode active", channel_id));
commands.push("no shutdown".into());
commands.push("exit".into());
}
@@ -13,7 +13,6 @@ use log::info;
use russh::ChannelMsg;
use tokio::time::timeout;

#[derive(Debug)]
pub struct BrocadeShell {
ip: IpAddr,
port: u16,
@@ -27,7 +27,6 @@ async fn main() {
};
let application = Arc::new(RustWebapp {
name: "example-monitoring".to_string(),
dns: "example-monitoring.harmony.mcd".to_string(),
project_root: PathBuf::from("./examples/rust/webapp"),
framework: Some(RustWebFramework::Leptos),
service_port: 3000,
@@ -17,5 +17,3 @@ harmony_secret = { path = "../../harmony_secret" }
log = { workspace = true }
env_logger = { workspace = true }
url = { workspace = true }
serde = { workspace = true }
brocade = { path = "../../brocade" }
@@ -3,13 +3,12 @@ use std::{
sync::Arc,
};

use brocade::BrocadeOptions;
use cidr::Ipv4Cidr;
use harmony::{
config::secret::SshKeyPair,
data::{FileContent, FilePath},
hardware::{HostCategory, Location, PhysicalHost, SwitchGroup},
infra::{brocade::BrocadeSwitchClient, opnsense::OPNSenseManagementInterface},
infra::opnsense::OPNSenseManagementInterface,
inventory::Inventory,
modules::{
http::StaticFilesHttpScore,

@@ -23,9 +22,8 @@ use harmony::{
topology::{LogicalHost, UnmanagedRouter},
};
use harmony_macros::{ip, mac_address};
use harmony_secret::{Secret, SecretManager};
use harmony_secret::SecretManager;
use harmony_types::net::Url;
use serde::{Deserialize, Serialize};

#[tokio::main]
async fn main() {

@@ -34,26 +32,6 @@ async fn main() {
name: String::from("fw0"),
};

let switch_auth = SecretManager::get_or_prompt::<BrocadeSwitchAuth>()
.await
.expect("Failed to get credentials");

let switches: Vec<IpAddr> = vec![ip!("192.168.33.101")];
let brocade_options = Some(BrocadeOptions {
dry_run: *harmony::config::DRY_RUN,
..Default::default()
});
let switch_client = BrocadeSwitchClient::init(
&switches,
&switch_auth.username,
&switch_auth.password,
brocade_options,
)
.await
.expect("Failed to connect to switch");

let switch_client = Arc::new(switch_client);

let opnsense = Arc::new(
harmony::infra::opnsense::OPNSenseFirewall::new(firewall, None, "root", "opnsense").await,
);

@@ -105,7 +83,7 @@ async fn main() {
name: "wk2".to_string(),
},
],
switch_client: switch_client.clone(),
switch: vec![],
};

let inventory = Inventory {

@@ -188,9 +166,3 @@ async fn main() {
.await
.unwrap();
}

#[derive(Secret, Serialize, Deserialize, Debug)]
pub struct BrocadeSwitchAuth {
pub username: String,
pub password: String,
}
@@ -19,4 +19,3 @@ log = { workspace = true }
env_logger = { workspace = true }
url = { workspace = true }
serde.workspace = true
brocade = { path = "../../brocade" }
@@ -1,8 +1,7 @@
use brocade::BrocadeOptions;
use cidr::Ipv4Cidr;
use harmony::{
hardware::{Location, SwitchGroup},
infra::{brocade::BrocadeSwitchClient, opnsense::OPNSenseManagementInterface},
infra::opnsense::OPNSenseManagementInterface,
inventory::Inventory,
topology::{HAClusterTopology, LogicalHost, UnmanagedRouter},
};

@@ -23,26 +22,6 @@ pub async fn get_topology() -> HAClusterTopology {
name: String::from("opnsense-1"),
};

let switch_auth = SecretManager::get_or_prompt::<BrocadeSwitchAuth>()
.await
.expect("Failed to get credentials");

let switches: Vec<IpAddr> = vec![ip!("192.168.1.101")]; // TODO: Adjust me
let brocade_options = Some(BrocadeOptions {
dry_run: *harmony::config::DRY_RUN,
..Default::default()
});
let switch_client = BrocadeSwitchClient::init(
&switches,
&switch_auth.username,
&switch_auth.password,
brocade_options,
)
.await
.expect("Failed to connect to switch");

let switch_client = Arc::new(switch_client);

let config = SecretManager::get_or_prompt::<OPNSenseFirewallConfig>().await;
let config = config.unwrap();

@@ -79,7 +58,7 @@ pub async fn get_topology() -> HAClusterTopology {
name: "bootstrap".to_string(),
},
workers: vec![],
switch_client: switch_client.clone(),
switch: vec![],
}
}

@@ -96,9 +75,3 @@ pub fn get_inventory() -> Inventory {
control_plane_host: vec![],
}
}

#[derive(Secret, Serialize, Deserialize, Debug)]
pub struct BrocadeSwitchAuth {
pub username: String,
pub password: String,
}
@@ -19,4 +19,3 @@ log = { workspace = true }
env_logger = { workspace = true }
url = { workspace = true }
serde.workspace = true
brocade = { path = "../../brocade" }
@@ -1,15 +1,13 @@
use brocade::BrocadeOptions;
use cidr::Ipv4Cidr;
use harmony::{
config::secret::OPNSenseFirewallCredentials,
hardware::{Location, SwitchGroup},
infra::{brocade::BrocadeSwitchClient, opnsense::OPNSenseManagementInterface},
infra::opnsense::OPNSenseManagementInterface,
inventory::Inventory,
topology::{HAClusterTopology, LogicalHost, UnmanagedRouter},
};
use harmony_macros::{ip, ipv4};
use harmony_secret::{Secret, SecretManager};
use serde::{Deserialize, Serialize};
use harmony_secret::SecretManager;
use std::{net::IpAddr, sync::Arc};

pub async fn get_topology() -> HAClusterTopology {

@@ -18,26 +16,6 @@ pub async fn get_topology() -> HAClusterTopology {
name: String::from("opnsense-1"),
};

let switch_auth = SecretManager::get_or_prompt::<BrocadeSwitchAuth>()
.await
.expect("Failed to get credentials");

let switches: Vec<IpAddr> = vec![ip!("192.168.1.101")]; // TODO: Adjust me
let brocade_options = Some(BrocadeOptions {
dry_run: *harmony::config::DRY_RUN,
..Default::default()
});
let switch_client = BrocadeSwitchClient::init(
&switches,
&switch_auth.username,
&switch_auth.password,
brocade_options,
)
.await
.expect("Failed to connect to switch");

let switch_client = Arc::new(switch_client);

let config = SecretManager::get_or_prompt::<OPNSenseFirewallCredentials>().await;
let config = config.unwrap();

@@ -74,7 +52,7 @@ pub async fn get_topology() -> HAClusterTopology {
name: "cp0".to_string(),
},
workers: vec![],
switch_client: switch_client.clone(),
switch: vec![],
}
}

@@ -91,9 +69,3 @@ pub fn get_inventory() -> Inventory {
control_plane_host: vec![],
}
}

#[derive(Secret, Serialize, Deserialize, Debug)]
pub struct BrocadeSwitchAuth {
pub username: String,
pub password: String,
}
@@ -16,6 +16,3 @@ harmony_macros = { path = "../../harmony_macros" }
log = { workspace = true }
env_logger = { workspace = true }
url = { workspace = true }
harmony_secret = { path = "../../harmony_secret" }
brocade = { path = "../../brocade" }
serde = { workspace = true }
@@ -3,11 +3,10 @@ use std::{
sync::Arc,
};

use brocade::BrocadeOptions;
use cidr::Ipv4Cidr;
use harmony::{
hardware::{HostCategory, Location, PhysicalHost, SwitchGroup},
infra::{brocade::BrocadeSwitchClient, opnsense::OPNSenseManagementInterface},
infra::opnsense::OPNSenseManagementInterface,
inventory::Inventory,
modules::{
dummy::{ErrorScore, PanicScore, SuccessScore},

@@ -19,9 +18,7 @@ use harmony::{
topology::{LogicalHost, UnmanagedRouter},
};
use harmony_macros::{ip, mac_address};
use harmony_secret::{Secret, SecretManager};
use harmony_types::net::Url;
use serde::{Deserialize, Serialize};

#[tokio::main]
async fn main() {

@@ -30,26 +27,6 @@ async fn main() {
name: String::from("opnsense-1"),
};

let switch_auth = SecretManager::get_or_prompt::<BrocadeSwitchAuth>()
.await
.expect("Failed to get credentials");

let switches: Vec<IpAddr> = vec![ip!("192.168.5.101")]; // TODO: Adjust me
let brocade_options = Some(BrocadeOptions {
dry_run: *harmony::config::DRY_RUN,
..Default::default()
});
let switch_client = BrocadeSwitchClient::init(
&switches,
&switch_auth.username,
&switch_auth.password,
brocade_options,
)
.await
.expect("Failed to connect to switch");

let switch_client = Arc::new(switch_client);

let opnsense = Arc::new(
harmony::infra::opnsense::OPNSenseFirewall::new(firewall, None, "root", "opnsense").await,
);

@@ -77,7 +54,7 @@ async fn main() {
name: "cp0".to_string(),
},
workers: vec![],
switch_client: switch_client.clone(),
switch: vec![],
};

let inventory = Inventory {

@@ -132,9 +109,3 @@ async fn main() {
.await
.unwrap();
}

#[derive(Secret, Serialize, Deserialize, Debug)]
pub struct BrocadeSwitchAuth {
pub username: String,
pub password: String,
}
@@ -1,11 +0,0 @@
[package]
name = "example-remove-rook-osd"
edition = "2024"
version.workspace = true
readme.workspace = true
license.workspace = true

[dependencies]
harmony = { version = "0.1.0", path = "../../harmony" }
harmony_cli = { version = "0.1.0", path = "../../harmony_cli" }
tokio.workspace = true
@@ -1,18 +0,0 @@
use harmony::{
inventory::Inventory, modules::storage::ceph::ceph_remove_osd_score::CephRemoveOsd,
topology::K8sAnywhereTopology,
};

#[tokio::main]
async fn main() {
let ceph_score = CephRemoveOsd {
osd_deployment_name: "rook-ceph-osd-2".to_string(),
rook_ceph_namespace: "rook-ceph".to_string(),
};

let topology = K8sAnywhereTopology::from_env();
let inventory = Inventory::autoload();
harmony_cli::run(inventory, topology, vec![Box::new(ceph_score)], None)
.await
.unwrap();
}
@@ -16,7 +16,6 @@ use harmony_types::net::Url;
async fn main() {
let application = Arc::new(RustWebapp {
name: "test-rhob-monitoring".to_string(),
dns: "test-rhob-monitoring.harmony.mcd".to_string(),
project_root: PathBuf::from("./webapp"), // Relative from 'harmony-path' param
framework: Some(RustWebFramework::Leptos),
service_port: 3000,
@@ -19,7 +19,6 @@ use harmony_macros::hurl;
async fn main() {
let application = Arc::new(RustWebapp {
name: "harmony-example-rust-webapp".to_string(),
dns: "harmony-example-rust-webapp.harmony.mcd".to_string(),
project_root: PathBuf::from("./webapp"),
framework: Some(RustWebFramework::Leptos),
service_port: 3000,
@@ -2,11 +2,12 @@ use harmony::{
inventory::Inventory,
modules::{
application::{
features::{rhob_monitoring::Monitoring, PackagingDeployment}, ApplicationScore, RustWebFramework, RustWebapp
ApplicationScore, RustWebFramework, RustWebapp,
features::{PackagingDeployment, rhob_monitoring::Monitoring},
},
monitoring::alert_channel::discord_alert_channel::DiscordWebhook,
},
topology::{K8sAnywhereTopology, LocalhostTopology},
topology::K8sAnywhereTopology,
};
use harmony_macros::hurl;
use std::{path::PathBuf, sync::Arc};

@@ -21,8 +22,8 @@ async fn main() {
});

let discord_webhook = DiscordWebhook {
name: "harmony-demo".to_string(),
url: hurl!("https://discord.com/api/webhooks/1415391405681021050/V6KzV41vQ7yvbn7BchejRu9C8OANxy0i2ESZOz2nvCxG8xAY3-2i3s5MS38k568JKTzH"),
name: "harmony_demo".to_string(),
url: hurl!("http://not_a_url.com"),
};

let app = ApplicationScore {
@@ -16,7 +16,6 @@ use std::{path::PathBuf, sync::Arc};
async fn main() {
let application = Arc::new(RustWebapp {
name: "harmony-example-tryrust".to_string(),
dns: "tryrust.example.harmony.mcd".to_string(),
project_root: PathBuf::from("./tryrust.org"), // <== Project root, in this case it is a
// submodule
framework: Some(RustWebFramework::Leptos),
@@ -12,11 +12,11 @@ pub type FirewallGroup = Vec<PhysicalHost>;
pub struct PhysicalHost {
pub id: Id,
pub category: HostCategory,
pub network: Vec<NetworkInterface>,
pub storage: Vec<StorageDrive>,
pub network: Vec<NetworkInterface>, // FIXME: Don't use harmony_inventory_agent::NetworkInterface
pub storage: Vec<StorageDrive>, // FIXME: Don't use harmony_inventory_agent::StorageDrive
pub labels: Vec<Label>,
pub memory_modules: Vec<MemoryModule>,
pub cpus: Vec<CPU>,
pub memory_modules: Vec<MemoryModule>, // FIXME: Don't use harmony_inventory_agent::MemoryModule
pub cpus: Vec<CPU>, // FIXME: Don't use harmony_inventory_agent::CPU
}

impl PhysicalHost {
@@ -30,7 +30,6 @@ pub enum InterpretName {
Lamp,
ApplicationMonitoring,
K8sPrometheusCrdAlerting,
CephRemoveOsd,
DiscoverInventoryAgent,
CephClusterHealth,
Custom(&'static str),

@@ -62,7 +61,6 @@ impl std::fmt::Display for InterpretName {
InterpretName::Lamp => f.write_str("LAMP"),
InterpretName::ApplicationMonitoring => f.write_str("ApplicationMonitoring"),
InterpretName::K8sPrometheusCrdAlerting => f.write_str("K8sPrometheusCrdAlerting"),
InterpretName::CephRemoveOsd => f.write_str("CephRemoveOsd"),
InterpretName::DiscoverInventoryAgent => f.write_str("DiscoverInventoryAgent"),
InterpretName::CephClusterHealth => f.write_str("CephClusterHealth"),
InterpretName::Custom(name) => f.write_str(name),
@@ -1,9 +1,10 @@
use async_trait::async_trait;
use brocade::BrocadeOptions;
use harmony_macros::ip;
use harmony_types::{
net::{MacAddress, Url},
switch::PortLocation,
};
use harmony_secret::SecretManager;
use harmony_types::net::MacAddress;
use harmony_types::net::Url;
use harmony_types::switch::PortLocation;
use k8s_openapi::api::core::v1::Namespace;
use kube::api::ObjectMeta;
use log::debug;

@@ -12,20 +13,44 @@ use log::info;
use crate::data::FileContent;
use crate::executors::ExecutorError;
use crate::hardware::PhysicalHost;
use crate::modules::okd::crd::{
InstallPlanApproval, OperatorGroup, OperatorGroupSpec, Subscription, SubscriptionSpec,
nmstate::{self, NMState, NodeNetworkConfigurationPolicy, NodeNetworkConfigurationPolicySpec},
};
use crate::infra::brocade::BrocadeSwitchAuth;
use crate::infra::brocade::BrocadeSwitchClient;
use crate::modules::okd::crd::InstallPlanApproval;
use crate::modules::okd::crd::OperatorGroup;
use crate::modules::okd::crd::OperatorGroupSpec;
use crate::modules::okd::crd::Subscription;
use crate::modules::okd::crd::SubscriptionSpec;
use crate::modules::okd::crd::nmstate;
use crate::modules::okd::crd::nmstate::NMState;
use crate::modules::okd::crd::nmstate::NodeNetworkConfigurationPolicy;
use crate::modules::okd::crd::nmstate::NodeNetworkConfigurationPolicySpec;
use crate::topology::PxeOptions;

use super::{
DHCPStaticEntry, DhcpServer, DnsRecord, DnsRecordType, DnsServer, Firewall, HostNetworkConfig,
HttpServer, IpAddress, K8sclient, LoadBalancer, LoadBalancerService, LogicalHost,
PreparationError, PreparationOutcome, Router, Switch, SwitchClient, SwitchError, TftpServer,
Topology, k8s::K8sClient,
};
use super::DHCPStaticEntry;
use super::DhcpServer;
use super::DnsRecord;
use super::DnsRecordType;
use super::DnsServer;
use super::Firewall;
use super::HostNetworkConfig;
use super::HttpServer;
use super::IpAddress;
use super::K8sclient;
use super::LoadBalancer;
use super::LoadBalancerService;
use super::LogicalHost;
use super::PreparationError;
use super::PreparationOutcome;
use super::Router;
use super::Switch;
use super::SwitchClient;
use super::SwitchError;
use super::TftpServer;

use super::Topology;
use super::k8s::K8sClient;
use std::collections::BTreeMap;
use std::net::IpAddr;
use std::sync::Arc;

#[derive(Debug, Clone)]

@@ -38,10 +63,10 @@ pub struct HAClusterTopology {
pub tftp_server: Arc<dyn TftpServer>,
pub http_server: Arc<dyn HttpServer>,
pub dns_server: Arc<dyn DnsServer>,
pub switch_client: Arc<dyn SwitchClient>,
pub bootstrap_host: LogicalHost,
pub control_plane: Vec<LogicalHost>,
pub workers: Vec<LogicalHost>,
pub switch: Vec<LogicalHost>,
}

#[async_trait]

@@ -275,15 +300,36 @@ impl HAClusterTopology {
}
}

async fn get_switch_client(&self) -> Result<Box<dyn SwitchClient>, SwitchError> {
let auth = SecretManager::get_or_prompt::<BrocadeSwitchAuth>()
.await
.map_err(|e| SwitchError::new(format!("Failed to get credentials: {e}")))?;

// FIXME: We assume Brocade switches
let switches: Vec<IpAddr> = self.switch.iter().map(|s| s.ip).collect();
let brocade_options = Some(BrocadeOptions {
dry_run: *crate::config::DRY_RUN,
..Default::default()
});
let client =
BrocadeSwitchClient::init(&switches, &auth.username, &auth.password, brocade_options)
.await
.map_err(|e| SwitchError::new(format!("Failed to connect to switch: {e}")))?;

Ok(Box::new(client))
}

async fn configure_port_channel(
&self,
host: &PhysicalHost,
config: &HostNetworkConfig,
) -> Result<(), SwitchError> {
debug!("Configuring port channel: {config:#?}");
let client = self.get_switch_client().await?;

let switch_ports = config.switch_ports.iter().map(|s| s.port.clone()).collect();

self.switch_client
client
.configure_port_channel(&format!("Harmony_{}", host.id), switch_ports)
.await
.map_err(|e| SwitchError::new(format!("Failed to configure switch: {e}")))?;

@@ -307,10 +353,10 @@ impl HAClusterTopology {
tftp_server: dummy_infra.clone(),
http_server: dummy_infra.clone(),
dns_server: dummy_infra.clone(),
switch_client: dummy_infra.clone(),
bootstrap_host: dummy_host,
control_plane: vec![],
workers: vec![],
switch: vec![],
}
}
}
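The examples earlier in this compare all open their switch session with the same boilerplate; `get_switch_client` above centralizes it. A minimal sketch of that call sequence, assuming `BrocadeSwitchAuth` and `BrocadeSwitchClient` are exported from `harmony::infra::brocade` as the imports in this diff suggest (the switch IP is a placeholder):

```rust
use std::net::IpAddr;
use std::sync::Arc;

use brocade::BrocadeOptions;
use harmony::infra::brocade::{BrocadeSwitchAuth, BrocadeSwitchClient};
use harmony_macros::ip;
use harmony_secret::SecretManager;

async fn connect_switch() -> Arc<BrocadeSwitchClient> {
    // Load stored credentials, or prompt for them, as in the examples above.
    let auth = SecretManager::get_or_prompt::<BrocadeSwitchAuth>()
        .await
        .expect("Failed to get credentials");

    // Switch management address; adjust for your environment (hypothetical value).
    let switches: Vec<IpAddr> = vec![ip!("192.168.1.101")];

    // DRY_RUN is forwarded so the client can avoid sending real commands.
    let brocade_options = Some(BrocadeOptions {
        dry_run: *harmony::config::DRY_RUN,
        ..Default::default()
    });

    let client = BrocadeSwitchClient::init(&switches, &auth.username, &auth.password, brocade_options)
        .await
        .expect("Failed to connect to switch");
    Arc::new(client)
}
```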

@@ -468,7 +514,8 @@ impl HttpServer for HAClusterTopology {
#[async_trait]
impl Switch for HAClusterTopology {
async fn setup_switch(&self) -> Result<(), SwitchError> {
self.switch_client.setup().await?;
let client = self.get_switch_client().await?;
client.setup().await?;
Ok(())
}

@@ -476,7 +523,8 @@ impl Switch for HAClusterTopology {
&self,
mac_address: &MacAddress,
) -> Result<Option<PortLocation>, SwitchError> {
let port = self.switch_client.find_port(mac_address).await?;
let client = self.get_switch_client().await?;
let port = client.find_port(mac_address).await?;
Ok(port)
}

@@ -485,7 +533,7 @@ impl Switch for HAClusterTopology {
host: &PhysicalHost,
config: HostNetworkConfig,
) -> Result<(), SwitchError> {
self.configure_bond(host, &config).await?;
// self.configure_bond(host, &config).await?;
self.configure_port_channel(host, &config).await
}
}

@@ -676,25 +724,3 @@ impl DnsServer for DummyInfra {
unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA)
}
}

#[async_trait]
impl SwitchClient for DummyInfra {
async fn setup(&self) -> Result<(), SwitchError> {
unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA)
}

async fn find_port(
&self,
_mac_address: &MacAddress,
) -> Result<Option<PortLocation>, SwitchError> {
unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA)
}

async fn configure_port_channel(
&self,
_channel_name: &str,
_switch_ports: Vec<PortLocation>,
) -> Result<u8, SwitchError> {
unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA)
}
}
@@ -1,14 +1,15 @@
use std::{collections::HashMap, time::Duration};
use std::time::Duration;

use derive_new::new;
use k8s_openapi::{
ClusterResourceScope, NamespaceResourceScope,
api::{apps::v1::Deployment, core::v1::Pod},
apiextensions_apiserver::pkg::apis::apiextensions::v1::CustomResourceDefinition,
apimachinery::pkg::version::Info,
api::{
apps::v1::Deployment,
core::v1::{Pod, PodStatus},
},
};
use kube::{
Client, Config, Discovery, Error, Resource,
Client, Config, Error, Resource,
api::{Api, AttachParams, DeleteParams, ListParams, Patch, PatchParams, ResourceExt},
config::{KubeConfigOptions, Kubeconfig},
core::ErrorResponse,

@@ -20,9 +21,9 @@ use kube::{
api::{ApiResource, GroupVersionKind},
runtime::wait::await_condition,
};
use log::{debug, error, info, trace};
use log::{debug, error, trace};
use serde::{Serialize, de::DeserializeOwned};
use serde_json::{json, Value};
use serde_json::{Value, json};
use similar::TextDiff;
use tokio::{io::AsyncReadExt, time::sleep};

@@ -58,159 +59,6 @@ impl K8sClient {
})
}

// Returns true if any deployment in the given namespace matching the label selector
// has status.availableReplicas > 0 (or condition Available=True).
pub async fn has_healthy_deployment_with_label(
&self,
namespace: &str,
label_selector: &str,
) -> Result<bool, Error> {
let api: Api<Deployment> = Api::namespaced(self.client.clone(), namespace);
let lp = ListParams::default().labels(label_selector);
let list = api.list(&lp).await?;
for d in list.items {
// Check AvailableReplicas > 0 or Available condition
let available = d
.status
.as_ref()
.and_then(|s| s.available_replicas)
.unwrap_or(0);
if available > 0 {
return Ok(true);
}
// Fallback: scan conditions
if let Some(conds) = d.status.as_ref().and_then(|s| s.conditions.as_ref()) {
if conds.iter().any(|c| {
c.type_ == "Available"
&& c.status == "True"
}) {
return Ok(true);
}
}
}
Ok(false)
}

// Cluster-wide: returns namespaces that have at least one healthy deployment
// matching the label selector (equivalent to kubectl -A -l ...).
pub async fn list_namespaces_with_healthy_deployments(
&self,
label_selector: &str,
) -> Result<Vec<String>, Error> {
let api: Api<Deployment> = Api::all(self.client.clone());
let lp = ListParams::default().labels(label_selector);
let list = api.list(&lp).await?;

let mut healthy_ns: HashMap<String, bool> = HashMap::new();
for d in list.items {
let ns = match d.metadata.namespace.clone() {
Some(n) => n,
None => continue,
};
let available = d
.status
.as_ref()
.and_then(|s| s.available_replicas)
.unwrap_or(0);
let is_healthy = if available > 0 {
true
} else {
d.status
.as_ref()
.and_then(|s| s.conditions.as_ref())
.map(|conds| {
conds.iter().any(|c| {
c.type_ == "Available"
&& c.status == "True"
})
})
.unwrap_or(false)
};
if is_healthy {
healthy_ns.insert(ns, true);
}
}

Ok(healthy_ns.into_keys().collect())
}

// Get the application-controller ServiceAccount name (fallback to default)
pub async fn get_argocd_controller_sa_name(&self, ns: &str) -> Result<String, Error> {
let api: Api<Deployment> = Api::namespaced(self.client.clone(), ns);
let lp = ListParams::default().labels("app.kubernetes.io/component=controller");
let list = api.list(&lp).await?;
if let Some(dep) = list.items.get(0) {
if let Some(sa) = dep
.spec
.as_ref()
.and_then(|ds| ds.template.spec.as_ref())
.and_then(|ps| ps.service_account_name.clone())
{
return Ok(sa);
}
}
Ok("argocd-application-controller".to_string())
}

// List ClusterRoleBindings dynamically and return as JSON values
pub async fn list_clusterrolebindings_json(&self) -> Result<Vec<Value>, Error> {
let gvk = kube::api::GroupVersionKind::gvk(
"rbac.authorization.k8s.io",
"v1",
"ClusterRoleBinding",
);
let ar = kube::api::ApiResource::from_gvk(&gvk);
let api: Api<kube::api::DynamicObject> = Api::all_with(self.client.clone(), &ar);
let crbs = api.list(&ListParams::default()).await?;
let mut out = Vec::new();
for o in crbs {
let v = serde_json::to_value(&o).unwrap_or(Value::Null);
out.push(v);
}
Ok(out)
}

// Determine if Argo controller in ns has cluster-wide permissions via CRBs
// TODO This does not belong in the generic k8s client, should be refactored at some point
pub async fn is_argocd_cluster_wide(&self, ns: &str) -> Result<bool, Error> {
let sa = self.get_argocd_controller_sa_name(ns).await?;
let crbs = self.list_clusterrolebindings_json().await?;
let sa_user = format!("system:serviceaccount:{}:{}", ns, sa);
for crb in crbs {
if let Some(subjects) = crb.get("subjects").and_then(|s| s.as_array()) {
for subj in subjects {
let kind = subj.get("kind").and_then(|v| v.as_str()).unwrap_or("");
let name = subj.get("name").and_then(|v| v.as_str()).unwrap_or("");
let subj_ns = subj.get("namespace").and_then(|v| v.as_str()).unwrap_or("");
if (kind == "ServiceAccount" && name == sa && subj_ns == ns)
|| (kind == "User" && name == sa_user)
{
return Ok(true);
}
}
}
}
Ok(false)
}

pub async fn has_crd(&self, name: &str) -> Result<bool, Error> {
let api: Api<CustomResourceDefinition> = Api::all(self.client.clone());
let lp = ListParams::default().fields(&format!("metadata.name={}", name));
let crds = api.list(&lp).await?;
Ok(!crds.items.is_empty())
}

pub async fn get_apiserver_version(&self) -> Result<Info, Error> {
let client: Client = self.client.clone();
let version_info: Info = client.apiserver_version().await?;
Ok(version_info)
}

pub async fn discovery(&self) -> Result<Discovery, Error> {
let discovery: Discovery = Discovery::new(self.client.clone()).run().await?;
Ok(discovery)
}

pub async fn get_resource_json_value(
&self,
name: &str,

@@ -232,13 +80,10 @@ impl K8sClient {
namespace: Option<&str>,
) -> Result<Option<Deployment>, Error> {
let deps: Api<Deployment> = if let Some(ns) = namespace {
debug!("getting namespaced deployment");
Api::namespaced(self.client.clone(), ns)
} else {
debug!("getting default namespace deployment");
Api::default_namespaced(self.client.clone())
};
debug!("getting deployment {} in ns {}", name, namespace.unwrap());
Ok(deps.get_opt(name).await?)
}

@@ -269,7 +114,7 @@ impl K8sClient {
}
});
let pp = PatchParams::default();
let scale = Patch::Merge(&patch);
let scale = Patch::Apply(&patch);
deployments.patch_scale(name, &pp, &scale).await?;
Ok(())
}
@@ -2,7 +2,7 @@ use std::{process::Command, sync::Arc};

use async_trait::async_trait;
use kube::api::GroupVersionKind;
use log::{debug, info, trace, warn};
use log::{debug, info, warn};
use serde::Serialize;
use tokio::sync::OnceCell;

@@ -47,13 +47,6 @@ struct K8sState {
message: String,
}

#[derive(Debug, Clone)]
pub enum KubernetesDistribution {
OpenshiftFamily,
K3sFamily,
Default,
}

#[derive(Debug, Clone)]
enum K8sSource {
LocalK3d,

@@ -64,14 +57,12 @@ enum K8sSource {
pub struct K8sAnywhereTopology {
k8s_state: Arc<OnceCell<Option<K8sState>>>,
tenant_manager: Arc<OnceCell<K8sTenantManager>>,
k8s_distribution: Arc<OnceCell<KubernetesDistribution>>,
config: Arc<K8sAnywhereConfig>,
}

#[async_trait]
impl K8sclient for K8sAnywhereTopology {
async fn k8s_client(&self) -> Result<Arc<K8sClient>, String> {
trace!("getting k8s client");
let state = match self.k8s_state.get() {
Some(state) => state,
None => return Err("K8s state not initialized yet".to_string()),

@@ -171,7 +162,6 @@ impl K8sAnywhereTopology {
Self {
k8s_state: Arc::new(OnceCell::new()),
tenant_manager: Arc::new(OnceCell::new()),
k8s_distribution: Arc::new(OnceCell::new()),
config: Arc::new(K8sAnywhereConfig::from_env()),
}
}

@@ -180,42 +170,10 @@ impl K8sAnywhereTopology {
Self {
k8s_state: Arc::new(OnceCell::new()),
tenant_manager: Arc::new(OnceCell::new()),
k8s_distribution: Arc::new(OnceCell::new()),
config: Arc::new(config),
}
}

pub async fn get_k8s_distribution(&self) -> Result<&KubernetesDistribution, PreparationError> {
self.k8s_distribution
.get_or_try_init(async || {
let client = self.k8s_client().await.unwrap();

let discovery = client.discovery().await.map_err(|e| {
PreparationError::new(format!("Could not discover API groups: {}", e))
})?;

let version = client.get_apiserver_version().await.map_err(|e| {
PreparationError::new(format!("Could not get server version: {}", e))
})?;

// OpenShift / OKD
if discovery
.groups()
.any(|g| g.name() == "project.openshift.io")
{
return Ok(KubernetesDistribution::OpenshiftFamily);
}

// K3d / K3s
if version.git_version.contains("k3s") {
return Ok(KubernetesDistribution::K3sFamily);
}

return Ok(KubernetesDistribution::Default);
})
.await
}

async fn get_cluster_observability_operator_prometheus_application_score(
&self,
sender: RHOBObservability,

@@ -621,56 +579,36 @@ impl TenantManager for K8sAnywhereTopology {

#[async_trait]
impl Ingress for K8sAnywhereTopology {
//TODO this is specifically for openshift/okd which violates the k8sanywhere idea
async fn get_domain(&self, service: &str) -> Result<String, PreparationError> {
use log::{trace, debug, warn};

let client = self.k8s_client().await?;

if let Some(Some(k8s_state)) = self.k8s_state.get() {
match k8s_state.source {
K8sSource::LocalK3d => {
// Local developer UX
return Ok(format!("{service}.local.k3d"));
}
K8sSource::LocalK3d => Ok(format!("{service}.local.k3d")),
K8sSource::Kubeconfig => {
trace!("K8sSource is kubeconfig; attempting to detect domain");
self.openshift_ingress_operator_available().await?;

// 1) Try OpenShift IngressController domain (backward compatible)
if self.openshift_ingress_operator_available().await.is_ok() {
trace!("OpenShift ingress operator detected; using IngressController");
let gvk = GroupVersionKind {
group: "operator.openshift.io".into(),
version: "v1".into(),
kind: "IngressController".into(),
};
let ic = client
.get_resource_json_value("default", Some("openshift-ingress-operator"), &gvk)
.await
.map_err(|_| PreparationError::new("Failed to fetch IngressController".to_string()))?;
let gvk = GroupVersionKind {
group: "operator.openshift.io".into(),
version: "v1".into(),
kind: "IngressController".into(),
};
let ic = client
.get_resource_json_value(
"default",
Some("openshift-ingress-operator"),
&gvk,
)
.await
.map_err(|_| {
PreparationError::new("Failed to fetch IngressController".to_string())
})?;

if let Some(domain) = ic.data["status"]["domain"].as_str() {
return Ok(format!("{service}.{domain}"));
} else {
warn!("OpenShift IngressController present but no status.domain set");
}
} else {
trace!("OpenShift ingress operator not detected; trying generic Kubernetes");
match ic.data["status"]["domain"].as_str() {
Some(domain) => Ok(format!("{service}.{domain}")),
None => Err(PreparationError::new("Could not find domain".to_string())),
}

// 2) Try NGINX Ingress Controller common setups
// 2.a) Well-known namespace/name for the controller Service
//      - upstream default: namespace "ingress-nginx", service "ingress-nginx-controller"
//      - some distros: "ingress-nginx-controller" svc in "ingress-nginx" ns
// If found with LoadBalancer ingress hostname, use its base domain.
if let Some(domain) = try_nginx_lb_domain(&client).await? {
return Ok(format!("{service}.{domain}"));
}

// 3) Fallback: internal cluster DNS suffix (service.namespace.svc.cluster.local)
// We don't have tenant namespace here, so we fallback to 'default' with a warning.
warn!("Could not determine external ingress domain; falling back to internal-only DNS");
let internal = format!("{service}.default.svc.cluster.local");
Ok(internal)
}
}
} else {

@@ -680,57 +618,3 @@ impl Ingress for K8sAnywhereTopology {
}
}
}

async fn try_nginx_lb_domain(client: &K8sClient) -> Result<Option<String>, PreparationError> {
use log::{trace, debug};

// Try common service path: svc/ingress-nginx-controller in ns/ingress-nginx
let svc_gvk = GroupVersionKind {
group: "".into(), // core
version: "v1".into(),
kind: "Service".into(),
};

let candidates = [
("ingress-nginx", "ingress-nginx-controller"),
("ingress-nginx", "ingress-nginx-controller-internal"),
("ingress-nginx", "ingress-nginx"), // some charts name the svc like this
("kube-system", "ingress-nginx-controller"), // less common but seen
];

for (ns, name) in candidates {
trace!("Checking NGINX Service {ns}/{name} for LoadBalancer hostname");
if let Ok(svc) = client.get_resource_json_value(ns, Some(name), &svc_gvk).await {
let lb_hosts = svc.data["status"]["loadBalancer"]["ingress"].as_array().cloned().unwrap_or_default();
for entry in lb_hosts {
if let Some(host) = entry.get("hostname").and_then(|v| v.as_str()) {
debug!("Found NGINX LB hostname: {host}");
if let Some(domain) = extract_base_domain(host) {
return Ok(Some(domain.to_string()));
} else {
return Ok(Some(host.to_string())); // already a domain
}
}
if let Some(ip) = entry.get("ip").and_then(|v| v.as_str()) {
// If only an IP is exposed, we can't create a hostname; return None to keep searching
debug!("NGINX LB exposes IP {ip} (no hostname); skipping");
}
}
}
}

Ok(None)
}

fn extract_base_domain(host: &str) -> Option<String> {
// For a host like a1b2c3d4e5f6abcdef.elb.amazonaws.com -> base domain elb.amazonaws.com
// For a managed DNS like xyz.example.com -> base domain example.com (keep 2+ labels)
// Heuristic: keep last 2 labels by default; special-case known multi-label TLDs if needed.
let parts: Vec<&str> = host.split('.').collect();
if parts.len() >= 2 {
// Very conservative: last 2 labels
Some(parts[parts.len() - 2..].join("."))
} else {
None
}
}
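A quick sanity check of the heuristic above (a sketch, not a test from this changeset): it keeps exactly the last two labels, so the ELB example in the leading comment actually comes back as `amazonaws.com` rather than `elb.amazonaws.com`.

```rust
// extract_base_domain keeps only the last two dot-separated labels.
assert_eq!(
    extract_base_domain("xyz.example.com"),
    Some("example.com".to_string())
);
assert_eq!(
    extract_base_domain("a1b2c3d4e5f6abcdef.elb.amazonaws.com"),
    Some("amazonaws.com".to_string())
);
// A single label has no base domain to extract.
assert_eq!(extract_base_domain("localhost"), None);
```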

@@ -28,7 +28,13 @@ pub trait LoadBalancer: Send + Sync {
&self,
service: &LoadBalancerService,
) -> Result<(), ExecutorError> {
self.add_service(service).await?;
debug!(
"Listing LoadBalancer services {:?}",
self.list_services().await
);
if !self.list_services().await.contains(service) {
self.add_service(service).await?;
}
Ok(())
}
}
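Read as a pair, the two variants of this default method differ in idempotency: the guarded form calls `add_service` only when `list_services()` does not already contain the service, so repeated runs do not re-add an existing entry.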

@@ -186,7 +186,7 @@ impl TopologyState {
}
}

#[derive(Debug, PartialEq)]
#[derive(Debug)]
pub enum DeploymentTarget {
LocalDev,
Staging,
@@ -1,10 +1,4 @@
use std::{
error::Error,
fmt::{self, Debug},
net::Ipv4Addr,
str::FromStr,
sync::Arc,
};
use std::{error::Error, net::Ipv4Addr, str::FromStr, sync::Arc};

use async_trait::async_trait;
use derive_new::new;

@@ -25,8 +19,8 @@ pub struct DHCPStaticEntry {
pub ip: Ipv4Addr,
}

impl fmt::Display for DHCPStaticEntry {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
impl std::fmt::Display for DHCPStaticEntry {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mac = self
.mac
.iter()

@@ -48,8 +42,8 @@ pub trait Firewall: Send + Sync {
fn get_host(&self) -> LogicalHost;
}

impl Debug for dyn Firewall {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
impl std::fmt::Debug for dyn Firewall {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.write_fmt(format_args!("Firewall {}", self.get_ip()))
}
}

@@ -71,7 +65,7 @@ pub struct PxeOptions {
}

#[async_trait]
pub trait DhcpServer: Send + Sync + Debug {
pub trait DhcpServer: Send + Sync + std::fmt::Debug {
async fn add_static_mapping(&self, entry: &DHCPStaticEntry) -> Result<(), ExecutorError>;
async fn remove_static_mapping(&self, mac: &MacAddress) -> Result<(), ExecutorError>;
async fn list_static_mappings(&self) -> Vec<(MacAddress, IpAddress)>;

@@ -110,8 +104,8 @@ pub trait DnsServer: Send + Sync {
}
}

impl Debug for dyn DnsServer {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
impl std::fmt::Debug for dyn DnsServer {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.write_fmt(format_args!("DnsServer {}", self.get_ip()))
}
}

@@ -147,8 +141,8 @@ pub enum DnsRecordType {
TXT,
}

impl fmt::Display for DnsRecordType {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
impl std::fmt::Display for DnsRecordType {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
DnsRecordType::A => write!(f, "A"),
DnsRecordType::AAAA => write!(f, "AAAA"),

@@ -222,8 +216,8 @@ pub struct SwitchError {
msg: String,
}

impl fmt::Display for SwitchError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
impl std::fmt::Display for SwitchError {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.write_str(&self.msg)
}
}

@@ -231,7 +225,7 @@ impl fmt::Display for SwitchError {
impl Error for SwitchError {}

#[async_trait]
pub trait SwitchClient: Debug + Send + Sync {
pub trait SwitchClient: Send + Sync {
/// Executes essential, idempotent, one-time initial configuration steps.
///
/// This is an opiniated procedure that setups a switch to provide high availability
@@ -21,7 +21,6 @@ pub struct AlertingInterpret<S: AlertSender> {
pub sender: S,
pub receivers: Vec<Box<dyn AlertReceiver<S>>>,
pub rules: Vec<Box<dyn AlertRule<S>>>,
pub scrape_targets: Option<Vec<Box<dyn ScrapeTarget<S>>>>,
}

#[async_trait]

@@ -39,12 +38,6 @@ impl<S: AlertSender + Installable<T>, T: Topology> Interpret<T> for AlertingInte
debug!("installing rule: {:#?}", rule);
rule.install(&self.sender).await?;
}
if let Some(targets) = &self.scrape_targets {
for target in targets.iter() {
debug!("installing scrape_target: {:#?}", target);
target.install(&self.sender).await?;
}
}
self.sender.ensure_installed(inventory, topology).await?;
Ok(Outcome::success(format!(
"successfully installed alert sender {}",

@@ -84,6 +77,6 @@ pub trait AlertRule<S: AlertSender>: std::fmt::Debug + Send + Sync {
}

#[async_trait]
pub trait ScrapeTarget<S: AlertSender>: std::fmt::Debug + Send + Sync {
async fn install(&self, sender: &S) -> Result<Outcome, InterpretError>;
pub trait ScrapeTarget<S: AlertSender> {
async fn install(&self, sender: &S) -> Result<(), InterpretError>;
}
@@ -1,14 +1,15 @@
use async_trait::async_trait;
use brocade::{BrocadeClient, BrocadeOptions, InterSwitchLink, InterfaceStatus, PortOperatingMode};
use harmony_secret::Secret;
use harmony_types::{
net::{IpAddress, MacAddress},
switch::{PortDeclaration, PortLocation},
};
use option_ext::OptionExt;
use serde::{Deserialize, Serialize};

use crate::topology::{SwitchClient, SwitchError};

#[derive(Debug)]
pub struct BrocadeSwitchClient {
brocade: Box<dyn BrocadeClient + Send + Sync>,
}

@@ -113,6 +114,12 @@ impl SwitchClient for BrocadeSwitchClient {
}
}

#[derive(Secret, Serialize, Deserialize, Debug)]
pub struct BrocadeSwitchAuth {
pub username: String,
pub password: String,
}

#[cfg(test)]
mod tests {
use std::sync::{Arc, Mutex};

@@ -228,7 +235,7 @@ mod tests {
assert_that!(*configured_interfaces).is_empty();
}

#[derive(Debug, Clone)]
#[derive(Clone)]
struct FakeBrocadeClient {
stack_topology: Vec<InterSwitchLink>,
interfaces: Vec<InterfaceInfo>,
@@ -10,7 +10,7 @@ use super::OPNSenseFirewall;

#[async_trait]
impl DnsServer for OPNSenseFirewall {
async fn register_hosts(&self, _hosts: Vec<DnsRecord>) -> Result<(), ExecutorError> {
async fn register_hosts(&self, hosts: Vec<DnsRecord>) -> Result<(), ExecutorError> {
todo!("Refactor this to use dnsmasq")
// let mut writable_opnsense = self.opnsense_config.write().await;
// let mut dns = writable_opnsense.dns();

@@ -68,7 +68,7 @@ impl DnsServer for OPNSenseFirewall {
self.host.clone()
}

async fn register_dhcp_leases(&self, _register: bool) -> Result<(), ExecutorError> {
async fn register_dhcp_leases(&self, register: bool) -> Result<(), ExecutorError> {
todo!("Refactor this to use dnsmasq")
// let mut writable_opnsense = self.opnsense_config.write().await;
// let mut dns = writable_opnsense.dns();
@@ -26,13 +26,19 @@ impl LoadBalancer for OPNSenseFirewall {
}

async fn add_service(&self, service: &LoadBalancerService) -> Result<(), ExecutorError> {
warn!(
"TODO : the current implementation does not check / cleanup / merge with existing haproxy services properly. Make sure to manually verify that the configuration is correct after executing any operation here"
);
let mut config = self.opnsense_config.write().await;
let mut load_balancer = config.load_balancer();

let (frontend, backend, servers, healthcheck) =
harmony_load_balancer_service_to_haproxy_xml(service);

load_balancer.configure_service(frontend, backend, servers, healthcheck);
let mut load_balancer = config.load_balancer();
load_balancer.add_backend(backend);
load_balancer.add_frontend(frontend);
load_balancer.add_servers(servers);
if let Some(healthcheck) = healthcheck {
load_balancer.add_healthcheck(healthcheck);
}

Ok(())
}

@@ -100,7 +106,7 @@ pub(crate) fn haproxy_xml_config_to_harmony_loadbalancer(
.backends
.backends
.iter()
.find(|b| Some(b.uuid.clone()) == frontend.default_backend);
.find(|b| b.uuid == frontend.default_backend);

let mut health_check = None;
match matching_backend {

@@ -110,7 +116,8 @@ pub(crate) fn haproxy_xml_config_to_harmony_loadbalancer(
}
None => {
warn!(
"HAProxy config could not find a matching backend for frontend {frontend:?}"
"HAProxy config could not find a matching backend for frontend {:?}",
frontend
);
}
}

@@ -145,11 +152,11 @@ pub(crate) fn get_servers_for_backend(
.servers
.iter()
.filter_map(|server| {
let address = server.address.clone()?;
let port = server.port?;

if backend_servers.contains(&server.uuid.as_str()) {
return Some(BackendServer { address, port });
return Some(BackendServer {
address: server.address.clone(),
port: server.port,
});
}
None
})

@@ -340,7 +347,7 @@ pub(crate) fn harmony_load_balancer_service_to_haproxy_xml(
name: format!("frontend_{}", service.listening_port),
bind: service.listening_port.to_string(),
mode: "tcp".to_string(), // TODO do not depend on health check here
default_backend: Some(backend.uuid.clone()),
default_backend: backend.uuid.clone(),
..Default::default()
};
info!("HAPRoxy frontend and backend mode currently hardcoded to tcp");

@@ -354,8 +361,8 @@ fn server_to_haproxy_server(server: &BackendServer) -> HAProxyServer {
uuid: Uuid::new_v4().to_string(),
name: format!("{}_{}", &server.address, &server.port),
enabled: 1,
address: Some(server.address.clone()),
port: Some(server.port),
address: server.address.clone(),
port: server.port,
mode: "active".to_string(),
server_type: "static".to_string(),
..Default::default()

@@ -378,8 +385,8 @@ mod tests {
let mut haproxy = HAProxy::default();
let server = HAProxyServer {
uuid: "server1".to_string(),
address: Some("192.168.1.1".to_string()),
port: Some(80),
address: "192.168.1.1".to_string(),
port: 80,
..Default::default()
};
haproxy.servers.servers.push(server);

@@ -404,8 +411,8 @@ mod tests {
let mut haproxy = HAProxy::default();
let server = HAProxyServer {
uuid: "server1".to_string(),
address: Some("192.168.1.1".to_string()),
port: Some(80),
address: "192.168.1.1".to_string(),
port: 80,
..Default::default()
};
haproxy.servers.servers.push(server);

@@ -424,8 +431,8 @@ mod tests {
let mut haproxy = HAProxy::default();
let server = HAProxyServer {
uuid: "server1".to_string(),
address: Some("192.168.1.1".to_string()),
port: Some(80),
address: "192.168.1.1".to_string(),
port: 80,
..Default::default()
};
haproxy.servers.servers.push(server);

@@ -446,16 +453,16 @@ mod tests {
let mut haproxy = HAProxy::default();
let server = HAProxyServer {
uuid: "server1".to_string(),
address: Some("some-hostname.test.mcd".to_string()),
port: Some(80),
address: "some-hostname.test.mcd".to_string(),
port: 80,
..Default::default()
};
haproxy.servers.servers.push(server);

let server = HAProxyServer {
uuid: "server2".to_string(),
address: Some("192.168.1.2".to_string()),
port: Some(8080),
address: "192.168.1.2".to_string(),
port: 8080,
..Default::default()
};
haproxy.servers.servers.push(server);
@@ -21,7 +21,7 @@ pub struct Helm {
pub skip_schema_validation: Option<bool>,
pub version: Option<String>,
pub kube_version: Option<String>,
// pub api_versions: Vec<String>,
pub api_versions: Vec<String>,
pub namespace: Option<String>,
}

@@ -105,7 +105,7 @@ impl Default for ArgoApplication {
skip_schema_validation: None,
version: None,
kube_version: None,
// api_versions: vec![],
api_versions: vec![],
namespace: None,
},
path: "".to_string(),

@@ -155,7 +155,7 @@ impl From<CDApplicationConfig> for ArgoApplication {
skip_schema_validation: None,
version: None,
kube_version: None,
// api_versions: vec![],
api_versions: vec![],
namespace: None,
},
},

@@ -181,11 +181,13 @@ impl From<CDApplicationConfig> for ArgoApplication {
}

impl ArgoApplication {
pub fn to_yaml(&self, target_namespace: Option<&str>) -> serde_yaml::Value {
pub fn to_yaml(&self) -> serde_yaml::Value {
let name = &self.name;
let default_ns = "argocd".to_string();
let namespace: &str =
target_namespace.unwrap_or(self.namespace.as_ref().unwrap_or(&default_ns));
let namespace = if let Some(ns) = self.namespace.as_ref() {
ns
} else {
"argocd"
};
let project = &self.project;

let yaml_str = format!(

@@ -283,7 +285,7 @@ mod tests {
skip_schema_validation: None,
version: None,
kube_version: None,
// api_versions: vec![],
api_versions: vec![],
namespace: None,
},
path: "".to_string(),

@@ -343,7 +345,7 @@ spec:

assert_eq!(
expected_yaml_output.trim(),
serde_yaml::to_string(&app.clone().to_yaml(None))
serde_yaml::to_string(&app.clone().to_yaml())
.unwrap()
.trim()
);
@@ -1,21 +1,22 @@
|
||||
use async_trait::async_trait;
|
||||
use harmony_macros::hurl;
|
||||
use kube::{Api, api::GroupVersionKind};
|
||||
use log::{debug, info, trace, warn};
|
||||
use log::{debug, warn};
|
||||
use non_blank_string_rs::NonBlankString;
|
||||
use serde::Serialize;
|
||||
use std::{str::FromStr, sync::Arc};
|
||||
use serde::de::DeserializeOwned;
|
||||
use std::{process::Command, str::FromStr, sync::Arc};
|
||||
|
||||
use crate::{
|
||||
data::Version,
|
||||
interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
|
||||
inventory::Inventory,
|
||||
modules::{
|
||||
argocd::{ArgoDeploymentType, detect_argo_deployment_type},
|
||||
helm::chart::{HelmChartScore, HelmRepository},
|
||||
},
|
||||
modules::helm::chart::{HelmChartScore, HelmRepository},
|
||||
score::Score,
|
||||
topology::{HelmCommand, K8sclient, Topology, ingress::Ingress, k8s::K8sClient},
|
||||
topology::{
|
||||
HelmCommand, K8sclient, PreparationError, PreparationOutcome, Topology, ingress::Ingress,
|
||||
k8s::K8sClient,
|
||||
},
|
||||
};
|
||||
use harmony_types::id::Id;
|
||||
|
||||
@@ -24,7 +25,6 @@ use super::ArgoApplication;
|
||||
#[derive(Debug, Serialize, Clone)]
|
||||
pub struct ArgoHelmScore {
|
||||
pub namespace: String,
|
||||
// TODO: remove and rely on topology (it now knows the flavor)
|
||||
pub openshift: bool,
|
||||
pub argo_apps: Vec<ArgoApplication>,
|
||||
}
|
||||
@@ -55,101 +55,29 @@ impl<T: Topology + K8sclient + HelmCommand + Ingress> Interpret<T> for ArgoInter
|
||||
inventory: &Inventory,
|
||||
topology: &T,
|
||||
) -> Result<Outcome, InterpretError> {
|
||||
trace!("Starting ArgoInterpret execution {self:?}");
|
||||
let k8s_client: Arc<K8sClient> = topology.k8s_client().await?;
|
||||
trace!("Got k8s client");
|
||||
let desired_ns = self.score.namespace.clone();
|
||||
|
||||
debug!("ArgoInterpret detecting cluster configuration");
|
||||
let svc = format!("argo-{}", desired_ns);
|
||||
let k8s_client = topology.k8s_client().await?;
|
||||
let svc = format!("argo-{}", self.score.namespace.clone());
|
||||
let domain = topology.get_domain(&svc).await?;
|
||||
debug!("Resolved Argo service domain for '{}': {}", svc, domain);
|
||||
let helm_score =
|
||||
argo_helm_chart_score(&self.score.namespace, self.score.openshift, &domain);
|
||||
|
||||
// Detect current Argo deployment type
|
||||
let current = detect_argo_deployment_type(&k8s_client, &desired_ns).await?;
|
||||
info!("Detected Argo deployment type: {:?}", current);
|
||||
helm_score.interpret(inventory, topology).await?;
|
||||
|
||||
// Decide control namespace and whether we must install
|
||||
let (control_ns, must_install) = match current.clone() {
|
||||
ArgoDeploymentType::NotInstalled => {
|
||||
info!(
|
||||
"Argo CD not installed. Will install via Helm into namespace '{}'.",
|
||||
desired_ns
|
||||
);
|
||||
(desired_ns.clone(), true)
|
||||
}
|
||||
ArgoDeploymentType::AvailableInDesiredNamespace(ns) => {
|
||||
info!(
|
||||
"Argo CD already installed by Harmony in '{}'. Skipping install.",
|
||||
ns
|
||||
);
|
||||
(ns, false)
|
||||
}
|
||||
ArgoDeploymentType::InstalledClusterWide(ns) => {
|
||||
info!(
|
||||
"Argo CD installed cluster-wide in namespace '{}'.",
|
||||
ns
|
||||
);
|
||||
(ns, false)
|
||||
}
|
||||
ArgoDeploymentType::InstalledNamespaceScoped(ns) => {
|
||||
// TODO we could support this use case by installing a new argo instance. But that
// means handling a few cases that are out of scope for now:
// - Whether argo operator is installed
// - Managing CRD versions compatibility
// - Potentially handling the various k8s flavors and setups we might encounter
//
// There is a possibility that the helm chart already handles most or even all of these use cases but they are out of scope for now.
let msg = format!(
"Argo CD found in '{}' but it is namespace-scoped and not supported for attachment yet.",
ns
);
warn!("{}", msg);
return Err(InterpretError::new(msg));
}
};

info!("ArgoCD will be installed : {must_install} . Current argocd status : {current:?} ");

if must_install {
let helm_score = argo_helm_chart_score(&desired_ns, self.score.openshift, &domain);
info!(
"Installing Argo CD via Helm into namespace '{}' ...",
desired_ns
);
helm_score.interpret(inventory, topology).await?;
info!("Argo CD install complete in '{}'.", desired_ns);
}

let yamls: Vec<serde_yaml::Value> = self
.argo_apps
.iter()
.map(|a| a.to_yaml(Some(&control_ns)))
.collect();
info!(
"Applying {} Argo application object(s) into control namespace '{}'.",
yamls.len(),
control_ns
);
k8s_client
.apply_yaml_many(&yamls, Some(control_ns.as_str()))
.apply_yaml_many(&self.argo_apps.iter().map(|a| a.to_yaml()).collect(), None)
.await
.map_err(|e| InterpretError::new(format!("Failed applying Argo CRs: {e}")))?;
.unwrap();

Ok(Outcome::success_with_details(
format!(
"ArgoCD {} {}",
self.argo_apps.len(),
if self.argo_apps.len() == 1 {
"application"
} else {
"applications"
match self.argo_apps.len() {
1 => "application",
_ => "applications",
}
),
vec![
format!("control_namespace={}", control_ns),
format!("argo ui: http://{}", domain),
],
vec![format!("argo application: http://{}", domain)],
))
}

@@ -158,7 +86,7 @@ impl<T: Topology + K8sclient + HelmCommand + Ingress> Interpret<T> for ArgoInter
}

fn get_version(&self) -> Version {
Version::from("0.1.0").unwrap()
todo!()
}

fn get_status(&self) -> InterpretStatus {
@@ -166,7 +94,39 @@ impl<T: Topology + K8sclient + HelmCommand + Ingress> Interpret<T> for ArgoInter
}

fn get_children(&self) -> Vec<Id> {
vec![]
todo!()
}
}

impl ArgoInterpret {
pub async fn get_host_domain(
&self,
client: Arc<K8sClient>,
openshift: bool,
) -> Result<String, InterpretError> {
// This should be the job of the topology to determine if we are in
// openshift, potentially we need an openshift topology the same way we create a
// localhosttopology
match openshift {
true => {
let gvk = GroupVersionKind {
group: "operator.openshift.io".into(),
version: "v1".into(),
kind: "IngressController".into(),
};
let ic = client
.get_resource_json_value("default", Some("openshift-ingress-operator"), &gvk)
.await?;

match ic.data["status"]["domain"].as_str() {
Some(domain) => return Ok(domain.to_string()),
None => return Err(InterpretError::new("Could not find domain".to_string())),
}
}
false => {
todo!()
}
};
}
}


@@ -10,11 +10,12 @@ use crate::{
data::Version,
inventory::Inventory,
modules::application::{
features::{ArgoApplication, ArgoHelmScore}, webapp::Webapp, ApplicationFeature, HelmPackage, InstallationError, InstallationOutcome, OCICompliant
ApplicationFeature, HelmPackage, InstallationError, InstallationOutcome, OCICompliant,
features::{ArgoApplication, ArgoHelmScore},
},
score::Score,
topology::{
ingress::Ingress, DeploymentTarget, HelmCommand, K8sclient, MultiTargetTopology, Topology
DeploymentTarget, HelmCommand, K8sclient, MultiTargetTopology, Topology, ingress::Ingress,
},
};

@@ -46,11 +47,11 @@ use crate::{
/// - ArgoCD to install/upgrade/rollback/inspect k8s resources
/// - Kubernetes for runtime orchestration
#[derive(Debug, Default, Clone)]
pub struct PackagingDeployment<A: OCICompliant + HelmPackage + Webapp> {
pub struct PackagingDeployment<A: OCICompliant + HelmPackage> {
pub application: Arc<A>,
}

impl<A: OCICompliant + HelmPackage + Webapp> PackagingDeployment<A> {
impl<A: OCICompliant + HelmPackage> PackagingDeployment<A> {
async fn deploy_to_local_k3d(
&self,
app_name: String,
@@ -136,7 +137,7 @@ impl<A: OCICompliant + HelmPackage + Webapp> PackagingDeployment<A> {

#[async_trait]
impl<
A: OCICompliant + HelmPackage + Webapp + Clone + 'static,
A: OCICompliant + HelmPackage + Clone + 'static,
T: Topology + HelmCommand + MultiTargetTopology + K8sclient + Ingress + 'static,
> ApplicationFeature<T> for PackagingDeployment<A>
{
@@ -145,15 +146,10 @@ impl<
topology: &T,
) -> Result<InstallationOutcome, InstallationError> {
let image = self.application.image_name();

let domain = if topology.current_target() == DeploymentTarget::Production {
self.application.dns()
} else {
topology
let domain = topology
.get_domain(&self.application.name())
.await
.map_err(|e| e.to_string())?
};
.map_err(|e| e.to_string())?;

// TODO Write CI/CD workflow files
// we can autodetect the CI type using the remote url (default to github action for github
@@ -198,7 +194,7 @@ impl<
openshift: true,
argo_apps: vec![ArgoApplication::from(CDApplicationConfig {
// helm pull oci://hub.nationtech.io/harmony/harmony-example-rust-webapp-chart --version 0.1.0
version: Version::from("0.2.1").unwrap(),
version: Version::from("0.1.0").unwrap(),
helm_chart_repo_url: "hub.nationtech.io/harmony".to_string(),
helm_chart_name: format!("{}-chart", self.application.name()),
values_overrides: None,

@@ -3,6 +3,7 @@ use std::sync::Arc;
use crate::modules::application::{
Application, ApplicationFeature, InstallationError, InstallationOutcome,
};
use crate::modules::monitoring::application_monitoring::application_monitoring_score::ApplicationMonitoringScore;
use crate::modules::monitoring::application_monitoring::rhobs_application_monitoring_score::ApplicationRHOBMonitoringScore;

use crate::modules::monitoring::kube_prometheus::crd::rhob_alertmanager_config::RHOBObservability;

@@ -2,7 +2,6 @@ mod feature;
pub mod features;
pub mod oci;
mod rust;
mod webapp;
use std::sync::Arc;

pub use feature::*;

@@ -16,7 +16,6 @@ use tar::{Builder, Header};
use walkdir::WalkDir;

use crate::config::{REGISTRY_PROJECT, REGISTRY_URL};
use crate::modules::application::webapp::Webapp;
use crate::{score::Score, topology::Topology};

use super::{Application, ApplicationFeature, ApplicationInterpret, HelmPackage, OCICompliant};
@@ -61,10 +60,6 @@ pub struct RustWebapp {
pub project_root: PathBuf,
pub service_port: u32,
pub framework: Option<RustWebFramework>,
/// Host name that will be used in production environment.
///
/// This is the place to put the public host name if this is a public facing webapp.
pub dns: String,
}

impl Application for RustWebapp {
@@ -73,12 +68,6 @@ impl Application for RustWebapp {
}
}

impl Webapp for RustWebapp {
fn dns(&self) -> String {
self.dns.clone()
}
}

#[async_trait]
impl HelmPackage for RustWebapp {
async fn build_push_helm_package(
@@ -205,10 +194,10 @@ impl RustWebapp {
Some(body_full(tar_data.into())),
);

while let Some(msg) = image_build_stream.next().await {
while let Some(mut msg) = image_build_stream.next().await {
trace!("Got bollard msg {msg:?}");
match msg {
Ok(msg) => {
Ok(mut msg) => {
if let Some(progress) = msg.progress_detail {
info!(
"Build progress {}/{}",
@@ -268,6 +257,7 @@ impl RustWebapp {
".harmony_generated",
"harmony",
"node_modules",
"Dockerfile.harmony",
];
let mut entries: Vec<_> = WalkDir::new(project_root)
.into_iter()
@@ -471,53 +461,52 @@ impl RustWebapp {

let (image_repo, image_tag) = image_url.rsplit_once(':').unwrap_or((image_url, "latest"));

let app_name = &self.name;
let service_port = self.service_port;
// Create Chart.yaml
let chart_yaml = format!(
r#"
apiVersion: v2
name: {chart_name}
description: A Helm chart for the {app_name} web application.
name: {}
description: A Helm chart for the {} web application.
type: application
version: 0.2.1
appVersion: "{image_tag}"
version: 0.1.0
appVersion: "{}"
"#,
chart_name, self.name, image_tag
);
fs::write(chart_dir.join("Chart.yaml"), chart_yaml)?;

// Create values.yaml
let values_yaml = format!(
r#"
# Default values for {chart_name}.
# Default values for {}.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.

replicaCount: 1

image:
repository: {image_repo}
repository: {}
pullPolicy: IfNotPresent
# Overridden by the chart's appVersion
tag: "{image_tag}"
tag: "{}"

service:
type: ClusterIP
port: {service_port}
port: {}

ingress:
enabled: true
tls: true
# Annotations for cert-manager to handle SSL.
annotations:
# Add other annotations like nginx ingress class if needed
# kubernetes.io/ingress.class: nginx
hosts:
- host: {domain}
- host: {}
paths:
- path: /
pathType: ImplementationSpecific
"#,
chart_name, image_repo, image_tag, self.service_port, domain,
);
fs::write(chart_dir.join("values.yaml"), values_yaml)?;

@@ -594,11 +583,7 @@ spec:
);
fs::write(templates_dir.join("deployment.yaml"), deployment_yaml)?;

let service_port = self.service_port;

// Create templates/ingress.yaml
// TODO get issuer name and tls config from topology as it may be different from one
// cluster to another, also from one version to another
let ingress_yaml = format!(
r#"
{{{{- if $.Values.ingress.enabled -}}}}
@@ -611,11 +596,13 @@ metadata:
spec:
{{{{- if $.Values.ingress.tls }}}}
tls:
- secretName: {{{{ include "chart.fullname" . }}}}-tls
hosts:
{{{{- range $.Values.ingress.hosts }}}}
- {{{{ .host | quote }}}}
{{{{- range $.Values.ingress.tls }}}}
- hosts:
{{{{- range .hosts }}}}
- {{{{ . | quote }}}}
{{{{- end }}}}
secretName: {{{{ .secretName }}}}
{{{{- end }}}}
{{{{- end }}}}
rules:
{{{{- range $.Values.ingress.hosts }}}}
@@ -629,11 +616,12 @@ spec:
service:
name: {{{{ include "chart.fullname" $ }}}}
port:
number: {{{{ $.Values.service.port | default {service_port} }}}}
number: {{{{ $.Values.service.port | default {} }}}}
{{{{- end }}}}
{{{{- end }}}}
{{{{- end }}}}
"#,
self.service_port
);
fs::write(templates_dir.join("ingress.yaml"), ingress_yaml)?;


@@ -1,7 +0,0 @@
use super::Application;
use async_trait::async_trait;

#[async_trait]
pub trait Webapp: Application {
fn dns(&self) -> String;
}
@@ -1,203 +0,0 @@
use std::sync::Arc;

use log::{debug, info};

use crate::{interpret::InterpretError, topology::k8s::K8sClient};

#[derive(Clone, Debug, PartialEq, Eq)]
pub enum ArgoScope {
ClusterWide(String),
NamespaceScoped(String),
}

#[derive(Clone, Debug)]
pub struct DiscoveredArgo {
pub control_namespace: String,
pub scope: ArgoScope,
pub has_crds: bool,
pub has_applicationset: bool,
}

#[derive(Clone, Debug, PartialEq, Eq)]
pub enum ArgoDeploymentType {
NotInstalled,
AvailableInDesiredNamespace(String),
InstalledClusterWide(String),
InstalledNamespaceScoped(String),
}

pub async fn discover_argo_all(
k8s: &Arc<K8sClient>,
) -> Result<Vec<DiscoveredArgo>, InterpretError> {
use log::{debug, info, trace, warn};

trace!("Starting Argo discovery");

// CRDs
let mut has_crds = true;
let required_crds = vec!["applications.argoproj.io", "appprojects.argoproj.io"];
trace!("Checking required Argo CRDs: {:?}", required_crds);

for crd in required_crds {
trace!("Verifying CRD presence: {crd}");
let crd_exists = k8s.has_crd(crd).await.map_err(|e| {
InterpretError::new(format!("Failed to verify existence of CRD {crd}: {e}"))
})?;

debug!("CRD {crd} exists: {crd_exists}");
if !crd_exists {
info!(
"Missing Argo CRD {crd}, looks like Argo CD is not installed (or partially installed)"
);
has_crds = false;
break;
}
}

trace!(
"Listing namespaces with healthy Argo CD deployments using selector app.kubernetes.io/part-of=argocd"
);
let mut candidate_namespaces = k8s
.list_namespaces_with_healthy_deployments("app.kubernetes.io/part-of=argocd")
.await
.map_err(|e| InterpretError::new(format!("List healthy argocd deployments: {e}")))?;
trace!(
"Listing namespaces with healthy Argo CD deployments using selector app.kubernetes.io/name=argo-cd"
);
candidate_namespaces.append(
&mut k8s
.list_namespaces_with_healthy_deployments("app.kubernetes.io/name=argo-cd")
.await
.map_err(|e| InterpretError::new(format!("List healthy argocd deployments: {e}")))?,
);

debug!(
"Discovered {} candidate namespace(s) for Argo CD: {:?}",
candidate_namespaces.len(),
candidate_namespaces
);

let mut found = Vec::new();
for ns in candidate_namespaces {
trace!("Evaluating namespace '{ns}' for Argo CD instance");

// Require the application-controller to be healthy (sanity check)
trace!(
"Checking healthy deployment with label app.kubernetes.io/name=argocd-application-controller in namespace '{ns}'"
);
let controller_ok = k8s
.has_healthy_deployment_with_label(
&ns,
"app.kubernetes.io/name=argocd-application-controller",
)
.await
.unwrap_or_else(|e| {
warn!(
"Error while checking application-controller health in namespace '{ns}': {e}"
);
false
}) || k8s
.has_healthy_deployment_with_label(
&ns,
"app.kubernetes.io/component=controller",
)
.await
.unwrap_or_else(|e| {
warn!(
"Error while checking application-controller health in namespace '{ns}': {e}"
);
false
});
debug!("Namespace '{ns}': application-controller healthy = {controller_ok}");

if !controller_ok {
trace!("Skipping namespace '{ns}' because application-controller is not healthy");
continue;
}

trace!("Determining Argo CD scope for namespace '{ns}' (cluster-wide vs namespace-scoped)");
let scope = match k8s.is_argocd_cluster_wide(&ns).await {
Ok(true) => {
debug!("Namespace '{ns}' identified as cluster-wide Argo CD control plane");
ArgoScope::ClusterWide(ns.to_string())
}
Ok(false) => {
debug!("Namespace '{ns}' identified as namespace-scoped Argo CD control plane");
ArgoScope::NamespaceScoped(ns.to_string())
}
Err(e) => {
warn!(
"Failed to determine Argo CD scope for namespace '{ns}': {e}. Assuming namespace-scoped."
);
ArgoScope::NamespaceScoped(ns.to_string())
}
};

trace!("Checking optional ApplicationSet CRD (applicationsets.argoproj.io)");
let has_applicationset = match k8s.has_crd("applicationsets.argoproj.io").await {
Ok(v) => {
debug!("applicationsets.argoproj.io present: {v}");
v
}
Err(e) => {
warn!("Failed to check applicationsets.argoproj.io CRD: {e}. Assuming absent.");
false
}
};

let argo = DiscoveredArgo {
control_namespace: ns.clone(),
scope,
has_crds,
has_applicationset,
};

debug!("Discovered Argo instance in '{ns}': {argo:?}");
found.push(argo);
}

if found.is_empty() {
info!("No Argo CD installations discovered");
} else {
info!(
"Argo CD discovery complete: {} instance(s) found",
found.len()
);
}

Ok(found)
}

pub async fn detect_argo_deployment_type(
k8s: &Arc<K8sClient>,
desired_namespace: &str,
) -> Result<ArgoDeploymentType, InterpretError> {
let discovered = discover_argo_all(k8s).await?;
debug!("Discovered argo instances {discovered:?}");

if discovered.is_empty() {
return Ok(ArgoDeploymentType::NotInstalled);
}

if let Some(d) = discovered
.iter()
.find(|d| d.control_namespace == desired_namespace)
{
return Ok(ArgoDeploymentType::AvailableInDesiredNamespace(
d.control_namespace.clone(),
));
}

if let Some(d) = discovered
.iter()
.find(|d| matches!(d.scope, ArgoScope::ClusterWide(_)))
{
return Ok(ArgoDeploymentType::InstalledClusterWide(
d.control_namespace.clone(),
));
}

Ok(ArgoDeploymentType::InstalledNamespaceScoped(
discovered[0].control_namespace.clone(),
))
}
@@ -1,209 +0,0 @@
use std::sync::Arc;

use async_trait::async_trait;
use harmony_types::id::Id;
use kube::{CustomResource, api::ObjectMeta};
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};

use crate::{
data::Version,
interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
inventory::Inventory,
score::Score,
topology::{K8sclient, Topology, k8s::K8sClient},
};

#[derive(Clone, Debug, Serialize)]
pub struct ClusterIssuerScore {
email: String,
server: String,
issuer_name: String,
namespace: String,
}

impl<T: Topology + K8sclient> Score<T> for ClusterIssuerScore {
fn name(&self) -> String {
"ClusterIssuerScore".to_string()
}

#[doc(hidden)]
fn create_interpret(&self) -> Box<dyn Interpret<T>> {
Box::new(ClusterIssuerInterpret {
score: self.clone(),
})
}
}

#[derive(Debug, Clone)]
pub struct ClusterIssuerInterpret {
score: ClusterIssuerScore,
}

#[async_trait]
impl<T: Topology + K8sclient> Interpret<T> for ClusterIssuerInterpret {
async fn execute(
&self,
_inventory: &Inventory,
topology: &T,
) -> Result<Outcome, InterpretError> {
self.apply_cluster_issuer(topology.k8s_client().await.unwrap())
.await
}

fn get_name(&self) -> InterpretName {
InterpretName::Custom("ClusterIssuer")
}

fn get_version(&self) -> Version {
todo!()
}

fn get_status(&self) -> InterpretStatus {
todo!()
}

fn get_children(&self) -> Vec<Id> {
todo!()
}
}

impl ClusterIssuerInterpret {
async fn validate_cert_manager(
&self,
client: &Arc<K8sClient>,
) -> Result<Outcome, InterpretError> {
let cert_manager = "cert-manager".to_string();
let operator_namespace = "openshift-operators".to_string();
match client
.get_deployment(&cert_manager, Some(&operator_namespace))
.await
{
Ok(Some(deployment)) => {
if let Some(status) = deployment.status {
let ready_count = status.ready_replicas.unwrap_or(0);
if ready_count >= 1 {
return Ok(Outcome::success(format!(
"'{}' is ready with {} replica(s).",
&cert_manager, ready_count
)));
} else {
return Err(InterpretError::new(
"cert-manager operator not ready in cluster".to_string(),
));
}
} else {
Err(InterpretError::new(format!(
"failed to get deployment status {} in ns {}",
&cert_manager, &operator_namespace
)))
}
}
Ok(None) => Err(InterpretError::new(format!(
"Deployment '{}' not found in namespace '{}'.",
&cert_manager, &operator_namespace
))),
Err(e) => Err(InterpretError::new(format!(
"Failed to query for deployment '{}': {}",
&cert_manager, e
))),
}
}

fn build_cluster_issuer(&self) -> Result<ClusterIssuer, InterpretError> {
let issuer_name = &self.score.issuer_name;
let email = &self.score.email;
let server = &self.score.server;
let namespace = &self.score.namespace;
let cluster_issuer = ClusterIssuer {
metadata: ObjectMeta {
name: Some(issuer_name.to_string()),
namespace: Some(namespace.to_string()),
..Default::default()
},
spec: ClusterIssuerSpec {
acme: AcmeSpec {
email: email.to_string(),
private_key_secret_ref: PrivateKeySecretRef {
name: issuer_name.to_string(),
},
server: server.to_string(),
solvers: vec![SolverSpec {
http01: Some(Http01Solver {
ingress: Http01Ingress {
class: "nginx".to_string(),
},
}),
}],
},
},
};

Ok(cluster_issuer)
}

pub async fn apply_cluster_issuer(
&self,
client: Arc<K8sClient>,
) -> Result<Outcome, InterpretError> {
let namespace = self.score.namespace.clone();
self.validate_cert_manager(&client).await?;
let cluster_issuer = self.build_cluster_issuer().unwrap();
client
.apply_yaml(
&serde_yaml::to_value(cluster_issuer).unwrap(),
Some(&namespace),
)
.await?;
Ok(Outcome::success(format!(
"successfully deployed cluster operator: {} in namespace: {}",
self.score.issuer_name, self.score.namespace
)))
}
}

#[derive(CustomResource, Deserialize, Serialize, Clone, Debug, JsonSchema)]
#[kube(
group = "cert-manager.io",
version = "v1",
kind = "ClusterIssuer",
plural = "clusterissuers"
)]
#[serde(rename_all = "camelCase")]
pub struct ClusterIssuerSpec {
pub acme: AcmeSpec,
}

#[derive(Deserialize, Serialize, Clone, Debug, JsonSchema)]
#[serde(rename_all = "camelCase")]
pub struct AcmeSpec {
pub email: String,
pub private_key_secret_ref: PrivateKeySecretRef,
pub server: String,
pub solvers: Vec<SolverSpec>,
}

#[derive(Deserialize, Serialize, Clone, Debug, JsonSchema)]
#[serde(rename_all = "camelCase")]
pub struct PrivateKeySecretRef {
pub name: String,
}

#[derive(Deserialize, Serialize, Clone, Debug, JsonSchema)]
#[serde(rename_all = "camelCase")]
pub struct SolverSpec {
pub http01: Option<Http01Solver>,
// Other solver types (e.g., dns01) would go here as Options
}

#[derive(Deserialize, Serialize, Clone, Debug, JsonSchema)]
#[serde(rename_all = "camelCase")]
pub struct Http01Solver {
pub ingress: Http01Ingress,
}

#[derive(Deserialize, Serialize, Clone, Debug, JsonSchema)]
#[serde(rename_all = "camelCase")]
pub struct Http01Ingress {
pub class: String,
}
@@ -1,3 +1,2 @@
pub mod cluster_issuer;
mod helm;
pub use helm::*;

@@ -90,12 +90,12 @@ impl<T: Topology> Interpret<T> for DiscoverInventoryAgentInterpret {
// refactoring to do it now
let harmony_inventory_agent::hwinfo::PhysicalHost {
storage_drives,
storage_controller: _,
storage_controller,
memory_modules,
cpus,
chipset: _,
chipset,
network_interfaces,
management_interface: _,
management_interface,
host_uuid,
} = host;


@@ -17,4 +17,3 @@ pub mod prometheus;
pub mod storage;
pub mod tenant;
pub mod tftp;
pub mod argocd;

@@ -1,187 +0,0 @@
|
||||
use std::net::IpAddr;
|
||||
|
||||
use async_trait::async_trait;
|
||||
use kube::CustomResource;
|
||||
use schemars::JsonSchema;
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use crate::{
|
||||
modules::monitoring::kube_prometheus::crd::{
|
||||
crd_alertmanager_config::CRDPrometheus, crd_prometheuses::LabelSelector,
|
||||
},
|
||||
topology::oberservability::monitoring::ScrapeTarget,
|
||||
};
|
||||
|
||||
#[derive(CustomResource, Serialize, Deserialize, Debug, Clone, JsonSchema)]
|
||||
#[kube(
|
||||
group = "monitoring.coreos.com",
|
||||
version = "v1alpha1",
|
||||
kind = "ScrapeConfig",
|
||||
plural = "scrapeconfigs",
|
||||
namespaced
|
||||
)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct ScrapeConfigSpec {
|
||||
/// List of static configurations.
|
||||
pub static_configs: Option<Vec<StaticConfig>>,
|
||||
|
||||
/// Kubernetes service discovery.
|
||||
pub kubernetes_sd_configs: Option<Vec<KubernetesSDConfig>>,
|
||||
|
||||
/// HTTP-based service discovery.
|
||||
pub http_sd_configs: Option<Vec<HttpSDConfig>>,
|
||||
|
||||
/// File-based service discovery.
|
||||
pub file_sd_configs: Option<Vec<FileSDConfig>>,
|
||||
|
||||
/// DNS-based service discovery.
|
||||
pub dns_sd_configs: Option<Vec<DnsSDConfig>>,
|
||||
|
||||
/// Consul service discovery.
|
||||
pub consul_sd_configs: Option<Vec<ConsulSDConfig>>,
|
||||
|
||||
/// Relabeling configuration applied to discovered targets.
|
||||
pub relabel_configs: Option<Vec<RelabelConfig>>,
|
||||
|
||||
/// Metric relabeling configuration applied to scraped samples.
|
||||
pub metric_relabel_configs: Option<Vec<RelabelConfig>>,
|
||||
|
||||
/// Path to scrape metrics from (defaults to `/metrics`).
|
||||
pub metrics_path: Option<String>,
|
||||
|
||||
/// Interval at which Prometheus scrapes targets (e.g., "30s").
|
||||
pub scrape_interval: Option<String>,
|
||||
|
||||
/// Timeout for scraping (e.g., "10s").
|
||||
pub scrape_timeout: Option<String>,
|
||||
|
||||
/// Optional job name override.
|
||||
pub job_name: Option<String>,
|
||||
|
||||
/// Optional scheme (http or https).
|
||||
pub scheme: Option<String>,
|
||||
|
||||
/// Authorization paramaters for snmp walk
|
||||
pub params: Option<Params>,
|
||||
}
|
||||
|
||||
/// Static configuration section of a ScrapeConfig.
|
||||
#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct StaticConfig {
|
||||
pub targets: Vec<String>,
|
||||
|
||||
pub labels: Option<LabelSelector>,
|
||||
}
|
||||
|
||||
/// Relabeling configuration for target or metric relabeling.
|
||||
#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct RelabelConfig {
|
||||
pub source_labels: Option<Vec<String>>,
|
||||
pub separator: Option<String>,
|
||||
pub target_label: Option<String>,
|
||||
pub regex: Option<String>,
|
||||
pub modulus: Option<u64>,
|
||||
pub replacement: Option<String>,
|
||||
pub action: Option<String>,
|
||||
}
|
||||
|
||||
/// Kubernetes service discovery configuration.
|
||||
#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct KubernetesSDConfig {
|
||||
///"pod", "service", "endpoints"pub role: String,
|
||||
pub namespaces: Option<NamespaceSelector>,
|
||||
pub selectors: Option<Vec<LabelSelector>>,
|
||||
pub api_server: Option<String>,
|
||||
pub bearer_token_file: Option<String>,
|
||||
pub tls_config: Option<TLSConfig>,
|
||||
}
|
||||
|
||||
/// Namespace selector for Kubernetes service discovery.
|
||||
#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct NamespaceSelector {
|
||||
pub any: Option<bool>,
|
||||
pub match_names: Option<Vec<String>>,
|
||||
}
|
||||
|
||||
/// HTTP-based service discovery configuration.
|
||||
#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct HttpSDConfig {
|
||||
pub url: String,
|
||||
pub refresh_interval: Option<String>,
|
||||
pub basic_auth: Option<BasicAuth>,
|
||||
pub authorization: Option<Authorization>,
|
||||
pub tls_config: Option<TLSConfig>,
|
||||
}
|
||||
|
||||
/// File-based service discovery configuration.
|
||||
#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct FileSDConfig {
|
||||
pub files: Vec<String>,
|
||||
pub refresh_interval: Option<String>,
|
||||
}
|
||||
|
||||
/// DNS-based service discovery configuration.
|
||||
#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct DnsSDConfig {
|
||||
pub names: Vec<String>,
|
||||
pub refresh_interval: Option<String>,
|
||||
pub type_: Option<String>, // SRV, A, AAAA
|
||||
pub port: Option<u16>,
|
||||
}
|
||||
|
||||
/// Consul service discovery configuration.
|
||||
#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct ConsulSDConfig {
|
||||
pub server: String,
|
||||
pub services: Option<Vec<String>>,
|
||||
pub scheme: Option<String>,
|
||||
pub datacenter: Option<String>,
|
||||
pub tag_separator: Option<String>,
|
||||
pub refresh_interval: Option<String>,
|
||||
pub tls_config: Option<TLSConfig>,
|
||||
}
|
||||
|
||||
/// Basic authentication credentials.
|
||||
#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct BasicAuth {
|
||||
pub username: String,
|
||||
pub password: Option<String>,
|
||||
pub password_file: Option<String>,
|
||||
}
|
||||
|
||||
/// Bearer token or other auth mechanisms.
|
||||
#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct Authorization {
|
||||
pub credentials: Option<String>,
|
||||
pub credentials_file: Option<String>,
|
||||
pub type_: Option<String>,
|
||||
}
|
||||
|
||||
/// TLS configuration for secure scraping.
|
||||
#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct TLSConfig {
|
||||
pub ca_file: Option<String>,
|
||||
pub cert_file: Option<String>,
|
||||
pub key_file: Option<String>,
|
||||
pub server_name: Option<String>,
|
||||
pub insecure_skip_verify: Option<bool>,
|
||||
}
|
||||
|
||||
/// Authorization parameters for SNMP walk.
|
||||
#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct Params {
|
||||
pub auth: Option<Vec<String>>,
|
||||
pub module: Option<Vec<String>>,
|
||||
}
|
||||
@@ -4,7 +4,6 @@ pub mod crd_default_rules;
|
||||
pub mod crd_grafana;
|
||||
pub mod crd_prometheus_rules;
|
||||
pub mod crd_prometheuses;
|
||||
pub mod crd_scrape_config;
|
||||
pub mod grafana_default_dashboard;
|
||||
pub mod grafana_operator;
|
||||
pub mod prometheus_operator;
|
||||
|
||||
@@ -1,8 +1,12 @@
|
||||
use std::collections::BTreeMap;
|
||||
|
||||
use kube::CustomResource;
|
||||
use schemars::JsonSchema;
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use crate::modules::monitoring::kube_prometheus::crd::rhob_prometheuses::LabelSelector;
|
||||
use crate::modules::monitoring::kube_prometheus::crd::rhob_prometheuses::{
|
||||
LabelSelector, PrometheusSpec,
|
||||
};
|
||||
|
||||
/// MonitoringStack CRD for monitoring.rhobs/v1alpha1
|
||||
#[derive(CustomResource, Serialize, Deserialize, Debug, Clone, JsonSchema)]
|
||||
|
||||
@@ -31,7 +31,6 @@ impl<T: Topology + HelmCommand + TenantManager> Score<T> for HelmPrometheusAlert
|
||||
sender: KubePrometheus { config },
|
||||
receivers: self.receivers.clone(),
|
||||
rules: self.rules.clone(),
|
||||
scrape_targets: None,
|
||||
})
|
||||
}
|
||||
fn name(&self) -> String {
|
||||
|
||||
@@ -6,4 +6,3 @@ pub mod kube_prometheus;
|
||||
pub mod ntfy;
|
||||
pub mod okd;
|
||||
pub mod prometheus;
|
||||
pub mod scrape_target;
|
||||
|
||||
@@ -1 +0,0 @@
|
||||
pub mod server;
|
||||
@@ -1,76 +0,0 @@
|
||||
use std::net::IpAddr;
|
||||
|
||||
use async_trait::async_trait;
|
||||
use kube::api::ObjectMeta;
|
||||
use serde::Serialize;
|
||||
|
||||
use crate::{
|
||||
interpret::{InterpretError, Outcome},
|
||||
modules::monitoring::kube_prometheus::crd::{
|
||||
crd_alertmanager_config::CRDPrometheus,
|
||||
crd_scrape_config::{Params, RelabelConfig, ScrapeConfig, ScrapeConfigSpec, StaticConfig},
|
||||
},
|
||||
topology::oberservability::monitoring::ScrapeTarget,
|
||||
};
|
||||
|
||||
#[derive(Debug, Clone, Serialize)]
|
||||
pub struct Server {
|
||||
pub name: String,
|
||||
pub ip: IpAddr,
|
||||
pub auth: String,
|
||||
pub module: String,
|
||||
pub domain: String,
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl ScrapeTarget<CRDPrometheus> for Server {
|
||||
async fn install(&self, sender: &CRDPrometheus) -> Result<Outcome, InterpretError> {
|
||||
let scrape_config_spec = ScrapeConfigSpec {
|
||||
static_configs: Some(vec![StaticConfig {
|
||||
targets: vec![self.ip.to_string()],
|
||||
labels: None,
|
||||
}]),
|
||||
scrape_interval: Some("2m".to_string()),
|
||||
kubernetes_sd_configs: None,
|
||||
http_sd_configs: None,
|
||||
file_sd_configs: None,
|
||||
dns_sd_configs: None,
|
||||
params: Some(Params {
|
||||
auth: Some(vec![self.auth.clone()]),
|
||||
module: Some(vec![self.module.clone()]),
|
||||
}),
|
||||
consul_sd_configs: None,
|
||||
relabel_configs: Some(vec![RelabelConfig {
|
||||
action: None,
|
||||
source_labels: Some(vec!["__address__".to_string()]),
|
||||
separator: None,
|
||||
target_label: Some("__param_target".to_string()),
|
||||
regex: None,
|
||||
replacement: Some(format!("snmp.{}:31080", self.domain.clone())),
|
||||
modulus: None,
|
||||
}]),
|
||||
metric_relabel_configs: None,
|
||||
metrics_path: Some("/snmp".to_string()),
|
||||
scrape_timeout: Some("2m".to_string()),
|
||||
job_name: Some(format!("snmp_exporter/cloud/{}", self.name.clone())),
|
||||
scheme: None,
|
||||
};
|
||||
|
||||
let scrape_config = ScrapeConfig {
|
||||
metadata: ObjectMeta {
|
||||
name: Some(self.name.clone()),
|
||||
namespace: Some(sender.namespace.clone()),
|
||||
..Default::default()
|
||||
},
|
||||
spec: scrape_config_spec,
|
||||
};
|
||||
sender
|
||||
.client
|
||||
.apply(&scrape_config, Some(&sender.namespace.clone()))
|
||||
.await?;
|
||||
Ok(Outcome::success(format!(
|
||||
"installed scrape target {}",
|
||||
self.name.clone()
|
||||
)))
|
||||
}
|
||||
}
|
||||
@@ -215,7 +215,7 @@ impl OKDSetup03ControlPlaneInterpret {
|
||||
) -> Result<(), InterpretError> {
|
||||
info!("[ControlPlane] Ensuring persistent bonding");
|
||||
let score = HostNetworkConfigurationScore {
|
||||
hosts: hosts.clone(),
|
||||
hosts: hosts.clone(), // FIXME: Avoid clone if possible
|
||||
};
|
||||
score.interpret(inventory, topology).await?;
|
||||
|
||||
|
||||
@@ -77,8 +77,6 @@ impl OKDBootstrapLoadBalancerScore {
|
||||
address: topology.bootstrap_host.ip.to_string(),
|
||||
port,
|
||||
});
|
||||
|
||||
backend.dedup();
|
||||
backend
|
||||
}
|
||||
}
|
||||
|
||||
@@ -240,7 +240,7 @@ pub struct OvsPortSpec {
|
||||
#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
|
||||
#[serde(rename_all = "kebab-case")]
|
||||
pub struct EthtoolSpec {
|
||||
// TODO: Properly describe this spec (https://nmstate.io/devel/yaml_api.html#ethtool)
|
||||
// FIXME: Properly describe this spec (https://nmstate.io/devel/yaml_api.html#ethtool)
|
||||
}
|
||||
|
||||
#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
|
||||
|
||||
@@ -50,7 +50,6 @@ impl HostNetworkConfigurationInterpret {
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn collect_switch_ports_for_host<T: Topology + Switch>(
|
||||
&self,
|
||||
topology: &T,
|
||||
@@ -126,6 +125,7 @@ impl<T: Topology + Switch> Interpret<T> for HostNetworkConfigurationInterpret {
|
||||
|
||||
let mut configured_host_count = 0;
|
||||
for host in &self.score.hosts {
|
||||
// FIXME: Clear the previous config for host
|
||||
self.configure_network_for_host(topology, host).await?;
|
||||
configured_host_count += 1;
|
||||
}
|
||||
@@ -283,6 +283,7 @@ mod tests {
|
||||
|
||||
#[tokio::test]
|
||||
async fn port_not_found_for_mac_address_should_not_configure_interface() {
|
||||
// FIXME: Should it still configure an empty bond/port channel?
|
||||
let score = given_score(vec![given_host(&HOST_ID, vec![UNKNOWN_INTERFACE.clone()])]);
|
||||
let topology = TopologyWithSwitch::new_port_not_found();
|
||||
|
||||
|
||||
@@ -4,7 +4,7 @@ use std::{
|
||||
};
|
||||
|
||||
use async_trait::async_trait;
|
||||
use log::{debug, warn};
|
||||
use log::{info, warn};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use tokio::time::sleep;
|
||||
|
||||
@@ -19,8 +19,8 @@ use harmony_types::id::Id;
|
||||
|
||||
#[derive(Debug, Clone, Serialize)]
|
||||
pub struct CephRemoveOsd {
|
||||
pub osd_deployment_name: String,
|
||||
pub rook_ceph_namespace: String,
|
||||
osd_deployment_name: String,
|
||||
rook_ceph_namespace: String,
|
||||
}
|
||||
|
||||
impl<T: Topology + K8sclient> Score<T> for CephRemoveOsd {
|
||||
@@ -54,17 +54,18 @@ impl<T: Topology + K8sclient> Interpret<T> for CephRemoveOsdInterpret {
|
||||
self.verify_deployment_scaled(client.clone()).await?;
|
||||
self.delete_deployment(client.clone()).await?;
|
||||
self.verify_deployment_deleted(client.clone()).await?;
|
||||
self.purge_ceph_osd(client.clone()).await?;
|
||||
self.verify_ceph_osd_removal(client.clone()).await?;
|
||||
|
||||
let osd_id_full = self.get_ceph_osd_id().unwrap();
|
||||
self.purge_ceph_osd(client.clone(), &osd_id_full).await?;
|
||||
self.verify_ceph_osd_removal(client.clone(), &osd_id_full)
|
||||
.await?;
|
||||
|
||||
Ok(Outcome::success(format!(
|
||||
"Successfully removed OSD {} from rook-ceph cluster by deleting deployment {}",
|
||||
osd_id_full, self.score.osd_deployment_name
|
||||
)))
|
||||
}
|
||||
fn get_name(&self) -> InterpretName {
|
||||
InterpretName::CephRemoveOsd
|
||||
todo!()
|
||||
}
|
||||
|
||||
fn get_version(&self) -> Version {
|
||||
@@ -81,7 +82,7 @@ impl<T: Topology + K8sclient> Interpret<T> for CephRemoveOsdInterpret {
|
||||
}
|
||||
|
||||
impl CephRemoveOsdInterpret {
|
||||
pub fn get_ceph_osd_id_numeric(&self) -> Result<String, InterpretError> {
|
||||
pub fn get_ceph_osd_id(&self) -> Result<String, InterpretError> {
|
||||
let osd_id_numeric = self
|
||||
.score
|
||||
.osd_deployment_name
|
||||
@@ -93,14 +94,9 @@ impl CephRemoveOsdInterpret {
|
||||
self.score.osd_deployment_name
|
||||
))
|
||||
})?;
|
||||
Ok(osd_id_numeric.to_string())
|
||||
}
|
||||
|
||||
pub fn get_ceph_osd_id(&self) -> Result<String, InterpretError> {
|
||||
let osd_id_numeric = self.get_ceph_osd_id_numeric().unwrap();
|
||||
let osd_id_full = format!("osd.{}", osd_id_numeric);
|
||||
|
||||
debug!(
|
||||
info!(
|
||||
"Targeting Ceph OSD: {} (parsed from deployment {})",
|
||||
osd_id_full, self.score.osd_deployment_name
|
||||
);
|
||||
@@ -112,7 +108,6 @@ impl CephRemoveOsdInterpret {
|
||||
&self,
|
||||
client: Arc<K8sClient>,
|
||||
) -> Result<Outcome, InterpretError> {
|
||||
debug!("verifying toolbox exists");
|
||||
let toolbox_dep = "rook-ceph-tools".to_string();
|
||||
|
||||
match client
|
||||
@@ -154,7 +149,7 @@ impl CephRemoveOsdInterpret {
|
||||
&self,
|
||||
client: Arc<K8sClient>,
|
||||
) -> Result<Outcome, InterpretError> {
|
||||
debug!(
|
||||
info!(
|
||||
"Scaling down OSD deployment: {}",
|
||||
self.score.osd_deployment_name
|
||||
);
|
||||
@@ -177,7 +172,7 @@ impl CephRemoveOsdInterpret {
|
||||
) -> Result<Outcome, InterpretError> {
|
||||
let (timeout, interval, start) = self.build_timer();
|
||||
|
||||
debug!("Waiting for OSD deployment to scale down to 0 replicas");
|
||||
info!("Waiting for OSD deployment to scale down to 0 replicas");
|
||||
loop {
|
||||
let dep = client
|
||||
.get_deployment(
|
||||
@@ -185,9 +180,11 @@ impl CephRemoveOsdInterpret {
|
||||
Some(&self.score.rook_ceph_namespace),
|
||||
)
|
||||
.await?;
|
||||
|
||||
if let Some(deployment) = dep {
|
||||
if let Some(status) = deployment.status {
|
||||
if status.replicas == None && status.ready_replicas == None {
|
||||
if status.replicas.unwrap_or(1) == 0 && status.ready_replicas.unwrap_or(1) == 0
|
||||
{
|
||||
return Ok(Outcome::success(
|
||||
"Deployment successfully scaled down.".to_string(),
|
||||
));
|
||||
@@ -215,7 +212,7 @@ impl CephRemoveOsdInterpret {
|
||||
&self,
|
||||
client: Arc<K8sClient>,
|
||||
) -> Result<Outcome, InterpretError> {
|
||||
debug!(
|
||||
info!(
|
||||
"Deleting OSD deployment: {}",
|
||||
self.score.osd_deployment_name
|
||||
);
|
||||
@@ -237,7 +234,7 @@ impl CephRemoveOsdInterpret {
|
||||
) -> Result<Outcome, InterpretError> {
|
||||
let (timeout, interval, start) = self.build_timer();
|
||||
|
||||
debug!("Verifying OSD deployment deleted");
|
||||
info!("Waiting for OSD deployment to scale down to 0 replicas");
|
||||
loop {
|
||||
let dep = client
|
||||
.get_deployment(
|
||||
@@ -247,7 +244,7 @@ impl CephRemoveOsdInterpret {
|
||||
.await?;
|
||||
|
||||
if dep.is_none() {
|
||||
debug!(
|
||||
info!(
|
||||
"Deployment {} successfully deleted.",
|
||||
self.score.osd_deployment_name
|
||||
);
|
||||
@@ -279,10 +276,12 @@ impl CephRemoveOsdInterpret {
|
||||
Ok(tree)
|
||||
}
|
||||
|
||||
pub async fn purge_ceph_osd(&self, client: Arc<K8sClient>) -> Result<Outcome, InterpretError> {
|
||||
let osd_id_numeric = self.get_ceph_osd_id_numeric().unwrap();
|
||||
let osd_id_full = self.get_ceph_osd_id().unwrap();
|
||||
debug!(
|
||||
pub async fn purge_ceph_osd(
|
||||
&self,
|
||||
client: Arc<K8sClient>,
|
||||
osd_id_full: &str,
|
||||
) -> Result<Outcome, InterpretError> {
|
||||
info!(
|
||||
"Purging OSD {} from Ceph cluster and removing its auth key",
|
||||
osd_id_full
|
||||
);
|
||||
@@ -292,9 +291,8 @@ impl CephRemoveOsdInterpret {
|
||||
"app".to_string(),
|
||||
Some(&self.score.rook_ceph_namespace),
|
||||
vec![
|
||||
"sh",
|
||||
"-c",
|
||||
format!("ceph osd purge {osd_id_numeric} --yes-i-really-mean-it && ceph auth del {osd_id_full}").as_str(),
|
||||
format!("ceph osd purge {osd_id_full} --yes-i-really-mean-it").as_str(),
|
||||
format!("ceph auth del osd.{osd_id_full}").as_str(),
|
||||
],
|
||||
)
|
||||
.await?;
|
||||
@@ -307,10 +305,10 @@ impl CephRemoveOsdInterpret {
|
||||
pub async fn verify_ceph_osd_removal(
|
||||
&self,
|
||||
client: Arc<K8sClient>,
|
||||
osd_id_full: &str,
|
||||
) -> Result<Outcome, InterpretError> {
|
||||
let (timeout, interval, start) = self.build_timer();
|
||||
let osd_id_full = self.get_ceph_osd_id().unwrap();
|
||||
debug!(
|
||||
info!(
|
||||
"Verifying OSD {} has been removed from the Ceph tree...",
|
||||
osd_id_full
|
||||
);
|
||||
@@ -320,7 +318,7 @@ impl CephRemoveOsdInterpret {
|
||||
"rook-ceph-tools".to_string(),
|
||||
"app".to_string(),
|
||||
Some(&self.score.rook_ceph_namespace),
|
||||
vec!["sh", "-c", "ceph osd tree -f json"],
|
||||
vec!["ceph osd tree -f json"],
|
||||
)
|
||||
.await?;
|
||||
let tree =
|
||||
@@ -1,2 +1,2 @@
|
||||
pub mod ceph_remove_osd_score;
|
||||
pub mod ceph_osd_replacement_score;
|
||||
pub mod ceph_validate_health_score;
|
||||
|
||||
@@ -77,7 +77,7 @@ impl YaSerializeTrait for HAProxyId {
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(PartialEq, Debug, Clone)]
|
||||
#[derive(PartialEq, Debug)]
|
||||
pub struct HAProxyId(String);
|
||||
|
||||
impl Default for HAProxyId {
|
||||
@@ -297,7 +297,7 @@ pub struct HAProxyFrontends {
|
||||
pub frontend: Vec<Frontend>,
|
||||
}
|
||||
|
||||
#[derive(Clone, Default, PartialEq, Debug, YaSerialize, YaDeserialize)]
|
||||
#[derive(Default, PartialEq, Debug, YaSerialize, YaDeserialize)]
|
||||
pub struct Frontend {
|
||||
#[yaserde(attribute = true)]
|
||||
pub uuid: String,
|
||||
@@ -310,7 +310,7 @@ pub struct Frontend {
|
||||
pub bind_options: MaybeString,
|
||||
pub mode: String,
|
||||
#[yaserde(rename = "defaultBackend")]
|
||||
pub default_backend: Option<String>,
|
||||
pub default_backend: String,
|
||||
pub ssl_enabled: i32,
|
||||
pub ssl_certificates: MaybeString,
|
||||
pub ssl_default_certificate: MaybeString,
|
||||
@@ -416,7 +416,7 @@ pub struct HAProxyBackends {
|
||||
pub backends: Vec<HAProxyBackend>,
|
||||
}
|
||||
|
||||
#[derive(Clone, Default, PartialEq, Debug, YaSerialize, YaDeserialize)]
|
||||
#[derive(Default, PartialEq, Debug, YaSerialize, YaDeserialize)]
|
||||
pub struct HAProxyBackend {
|
||||
#[yaserde(attribute = true, rename = "uuid")]
|
||||
pub uuid: String,
|
||||
@@ -535,7 +535,7 @@ pub struct HAProxyServers {
|
||||
pub servers: Vec<HAProxyServer>,
|
||||
}
|
||||
|
||||
#[derive(Clone, Default, PartialEq, Debug, YaSerialize, YaDeserialize)]
|
||||
#[derive(Default, PartialEq, Debug, YaSerialize, YaDeserialize)]
|
||||
pub struct HAProxyServer {
|
||||
#[yaserde(attribute = true, rename = "uuid")]
|
||||
pub uuid: String,
|
||||
@@ -543,8 +543,8 @@ pub struct HAProxyServer {
|
||||
pub enabled: u8,
|
||||
pub name: String,
|
||||
pub description: MaybeString,
|
||||
pub address: Option<String>,
|
||||
pub port: Option<u16>,
|
||||
pub address: String,
|
||||
pub port: u16,
|
||||
pub checkport: MaybeString,
|
||||
pub mode: String,
|
||||
pub multiplexer_protocol: MaybeString,
|
||||
@@ -589,7 +589,7 @@ pub struct HAProxyHealthChecks {
|
||||
pub healthchecks: Vec<HAProxyHealthCheck>,
|
||||
}
|
||||
|
||||
#[derive(Clone, Default, PartialEq, Debug, YaSerialize, YaDeserialize)]
|
||||
#[derive(Default, PartialEq, Debug, YaSerialize, YaDeserialize)]
|
||||
pub struct HAProxyHealthCheck {
|
||||
#[yaserde(attribute = true)]
|
||||
pub uuid: String,
|
||||
|
||||
@@ -25,7 +25,6 @@ sha2 = "0.10.9"
|
||||
|
||||
[dev-dependencies]
|
||||
pretty_assertions.workspace = true
|
||||
assertor.workspace = true
|
||||
|
||||
[lints.rust]
|
||||
unexpected_cfgs = { level = "warn", check-cfg = ['cfg(e2e_test)'] }
|
||||
|
||||
@@ -30,7 +30,8 @@ impl SshConfigManager {
|
||||
|
||||
self.opnsense_shell
|
||||
.exec(&format!(
|
||||
"cp /conf/config.xml /conf/backup/{backup_filename}"
|
||||
"cp /conf/config.xml /conf/backup/{}",
|
||||
backup_filename
|
||||
))
|
||||
.await
|
||||
}
|
||||
|
||||
@@ -1,8 +1,10 @@
|
||||
mod ssh;
|
||||
use crate::Error;
|
||||
use async_trait::async_trait;
|
||||
pub use ssh::*;
|
||||
|
||||
use async_trait::async_trait;
|
||||
|
||||
use crate::Error;
|
||||
|
||||
#[async_trait]
|
||||
pub trait OPNsenseShell: std::fmt::Debug + Send + Sync {
|
||||
async fn exec(&self, command: &str) -> Result<String, Error>;
|
||||
|
||||
@@ -1,8 +1,11 @@
|
||||
use crate::{config::OPNsenseShell, Error};
|
||||
use std::sync::Arc;
|
||||
|
||||
use log::warn;
|
||||
use opnsense_config_xml::{
|
||||
Frontend, HAProxy, HAProxyBackend, HAProxyHealthCheck, HAProxyServer, OPNsense,
|
||||
};
|
||||
use std::{collections::HashSet, sync::Arc};
|
||||
|
||||
use crate::{config::OPNsenseShell, Error};
|
||||
|
||||
pub struct LoadBalancerConfig<'a> {
|
||||
opnsense: &'a mut OPNsense,
|
||||
@@ -28,7 +31,7 @@ impl<'a> LoadBalancerConfig<'a> {
|
||||
match &mut self.opnsense.opnsense.haproxy.as_mut() {
|
||||
Some(haproxy) => f(haproxy),
|
||||
None => unimplemented!(
|
||||
"Cannot configure load balancer when haproxy config does not exist yet"
|
||||
"Adding a backend is not supported when haproxy config does not exist yet"
|
||||
),
|
||||
}
|
||||
}
|
||||
@@ -37,67 +40,21 @@ impl<'a> LoadBalancerConfig<'a> {
|
||||
self.with_haproxy(|haproxy| haproxy.general.enabled = enabled as i32);
|
||||
}
|
||||
|
||||
/// Configures a service by removing any existing service on the same port
|
||||
/// and then adding the new definition. This ensures idempotency.
|
||||
pub fn configure_service(
|
||||
&mut self,
|
||||
frontend: Frontend,
|
||||
backend: HAProxyBackend,
|
||||
servers: Vec<HAProxyServer>,
|
||||
healthcheck: Option<HAProxyHealthCheck>,
|
||||
) {
|
||||
self.remove_service_by_bind_address(&frontend.bind);
|
||||
self.remove_servers(&servers);
|
||||
|
||||
self.add_new_service(frontend, backend, servers, healthcheck);
|
||||
pub fn add_backend(&mut self, backend: HAProxyBackend) {
|
||||
warn!("TODO make sure this new backend does not refer non-existing entities like servers or health checks");
|
||||
self.with_haproxy(|haproxy| haproxy.backends.backends.push(backend));
|
||||
}
|
||||
|
||||
// Remove the corresponding real servers based on their name if they already exist.
|
||||
fn remove_servers(&mut self, servers: &[HAProxyServer]) {
|
||||
let server_names: HashSet<_> = servers.iter().map(|s| s.name.clone()).collect();
|
||||
self.with_haproxy(|haproxy| {
|
||||
haproxy
|
||||
.servers
|
||||
.servers
|
||||
.retain(|s| !server_names.contains(&s.name));
|
||||
});
|
||||
pub fn add_frontend(&mut self, frontend: Frontend) {
|
||||
self.with_haproxy(|haproxy| haproxy.frontends.frontend.push(frontend));
|
||||
}
|
||||
|
||||
/// Removes a service and its dependent components based on the frontend's bind address.
|
||||
/// This performs a cascading delete of the frontend, backend, servers, and health check.
|
||||
fn remove_service_by_bind_address(&mut self, bind_address: &str) {
|
||||
self.with_haproxy(|haproxy| {
|
||||
let Some(old_frontend) = remove_frontend_by_bind_address(haproxy, bind_address) else {
|
||||
return;
|
||||
};
|
||||
|
||||
let Some(old_backend) = remove_backend(haproxy, old_frontend) else {
|
||||
return;
|
||||
};
|
||||
|
||||
remove_healthcheck(haproxy, &old_backend);
|
||||
remove_linked_servers(haproxy, &old_backend);
|
||||
});
|
||||
pub fn add_healthcheck(&mut self, healthcheck: HAProxyHealthCheck) {
|
||||
self.with_haproxy(|haproxy| haproxy.healthchecks.healthchecks.push(healthcheck));
|
||||
}
|
||||
|
||||
/// Adds the components of a new service to the HAProxy configuration.
|
||||
/// This function de-duplicates servers by name to prevent configuration errors.
|
||||
fn add_new_service(
|
||||
&mut self,
|
||||
frontend: Frontend,
|
||||
backend: HAProxyBackend,
|
||||
servers: Vec<HAProxyServer>,
|
||||
healthcheck: Option<HAProxyHealthCheck>,
|
||||
) {
|
||||
self.with_haproxy(|haproxy| {
|
||||
if let Some(check) = healthcheck {
|
||||
haproxy.healthchecks.healthchecks.push(check);
|
||||
}
|
||||
|
||||
haproxy.servers.servers.extend(servers);
|
||||
haproxy.backends.backends.push(backend);
|
||||
haproxy.frontends.frontend.push(frontend);
|
||||
});
|
||||
pub fn add_servers(&mut self, mut servers: Vec<HAProxyServer>) {
|
||||
self.with_haproxy(|haproxy| haproxy.servers.servers.append(&mut servers));
|
||||
}
|
||||
|
||||
    pub async fn reload_restart(&self) -> Result<(), Error> {
@@ -125,262 +82,3 @@ impl<'a> LoadBalancerConfig<'a> {
        Ok(())
    }
}

fn remove_frontend_by_bind_address(haproxy: &mut HAProxy, bind_address: &str) -> Option<Frontend> {
    let pos = haproxy
        .frontends
        .frontend
        .iter()
        .position(|f| f.bind == bind_address);

    match pos {
        Some(pos) => Some(haproxy.frontends.frontend.remove(pos)),
        None => None,
    }
}

fn remove_backend(haproxy: &mut HAProxy, old_frontend: Frontend) -> Option<HAProxyBackend> {
    let default_backend = old_frontend.default_backend?;
    let pos = haproxy
        .backends
        .backends
        .iter()
        .position(|b| b.uuid == default_backend);

    match pos {
        Some(pos) => Some(haproxy.backends.backends.remove(pos)),
        None => None, // orphaned frontend, shouldn't happen
    }
}

fn remove_healthcheck(haproxy: &mut HAProxy, backend: &HAProxyBackend) {
    if let Some(uuid) = &backend.health_check.content {
        haproxy
            .healthchecks
            .healthchecks
            .retain(|h| h.uuid != *uuid);
    }
}

/// Remove the backend's servers. This assumes servers are not shared between services.
fn remove_linked_servers(haproxy: &mut HAProxy, backend: &HAProxyBackend) {
    if let Some(server_uuids_str) = &backend.linked_servers.content {
        let server_uuids_to_remove: HashSet<_> = server_uuids_str.split(',').collect();
        haproxy
            .servers
            .servers
            .retain(|s| !server_uuids_to_remove.contains(s.uuid.as_str()));
    }
}

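The deleted helpers above encode how the HAProxy objects are linked: a frontend names its backend via default_backend, and a backend names its servers as a comma-separated uuid list plus one health-check uuid, both wrapped in MaybeString. A tiny illustration of the linked_servers format the cascade relied on (values are illustrative only):

    // Illustrative only: the csv uuid list stored in HAProxyBackend.linked_servers.
    let linked_servers = "server-uuid-a,server-uuid-b";
    let to_remove: Vec<&str> = linked_servers.split(',').collect();
    assert_eq!(to_remove, ["server-uuid-a", "server-uuid-b"]);
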
#[cfg(test)]
mod tests {
    use crate::config::DummyOPNSenseShell;
    use assertor::*;
    use opnsense_config_xml::{
        Frontend, HAProxy, HAProxyBackend, HAProxyBackends, HAProxyFrontends, HAProxyHealthCheck,
        HAProxyHealthChecks, HAProxyId, HAProxyServer, HAProxyServers, MaybeString, OPNsense,
    };
    use std::sync::Arc;

    use super::LoadBalancerConfig;

    static SERVICE_BIND_ADDRESS: &str = "192.168.1.1:80";
    static OTHER_SERVICE_BIND_ADDRESS: &str = "192.168.1.1:443";

    static SERVER_ADDRESS: &str = "1.1.1.1:80";
    static OTHER_SERVER_ADDRESS: &str = "1.1.1.1:443";

    #[test]
    fn configure_service_should_add_all_service_components_to_haproxy() {
        let mut opnsense = given_opnsense();
        let mut load_balancer = given_load_balancer(&mut opnsense);
        let (healthcheck, servers, backend, frontend) =
            given_service(SERVICE_BIND_ADDRESS, SERVER_ADDRESS);

        load_balancer.configure_service(
            frontend.clone(),
            backend.clone(),
            servers.clone(),
            Some(healthcheck.clone()),
        );

        assert_haproxy_configured_with(
            opnsense,
            vec![frontend],
            vec![backend],
            servers,
            vec![healthcheck],
        );
    }

    #[test]
    fn configure_service_should_replace_service_on_same_bind_address() {
        let (healthcheck, servers, backend, frontend) =
            given_service(SERVICE_BIND_ADDRESS, SERVER_ADDRESS);
        let mut opnsense = given_opnsense_with(given_haproxy(
            vec![frontend.clone()],
            vec![backend.clone()],
            servers.clone(),
            vec![healthcheck.clone()],
        ));
        let mut load_balancer = given_load_balancer(&mut opnsense);

        let (updated_healthcheck, updated_servers, updated_backend, updated_frontend) =
            given_service(SERVICE_BIND_ADDRESS, OTHER_SERVER_ADDRESS);

        load_balancer.configure_service(
            updated_frontend.clone(),
            updated_backend.clone(),
            updated_servers.clone(),
            Some(updated_healthcheck.clone()),
        );

        assert_haproxy_configured_with(
            opnsense,
            vec![updated_frontend],
            vec![updated_backend],
            updated_servers,
            vec![updated_healthcheck],
        );
    }

    #[test]
    fn configure_service_should_keep_existing_service_on_different_bind_addresses() {
        let (healthcheck, servers, backend, frontend) =
            given_service(SERVICE_BIND_ADDRESS, SERVER_ADDRESS);
        let (other_healthcheck, other_servers, other_backend, other_frontend) =
            given_service(OTHER_SERVICE_BIND_ADDRESS, OTHER_SERVER_ADDRESS);
        let mut opnsense = given_opnsense_with(given_haproxy(
            vec![frontend.clone()],
            vec![backend.clone()],
            servers.clone(),
            vec![healthcheck.clone()],
        ));
        let mut load_balancer = given_load_balancer(&mut opnsense);

        load_balancer.configure_service(
            other_frontend.clone(),
            other_backend.clone(),
            other_servers.clone(),
            Some(other_healthcheck.clone()),
        );

        assert_haproxy_configured_with(
            opnsense,
            vec![frontend, other_frontend],
            vec![backend, other_backend],
            [servers, other_servers].concat(),
            vec![healthcheck, other_healthcheck],
        );
    }

    fn assert_haproxy_configured_with(
        opnsense: OPNsense,
        frontends: Vec<Frontend>,
        backends: Vec<HAProxyBackend>,
        servers: Vec<HAProxyServer>,
        healthchecks: Vec<HAProxyHealthCheck>,
    ) {
        let haproxy = opnsense.opnsense.haproxy.as_ref().unwrap();
        assert_that!(haproxy.frontends.frontend).contains_exactly(frontends);
        assert_that!(haproxy.backends.backends).contains_exactly(backends);
        assert_that!(haproxy.servers.servers).is_equal_to(servers);
        assert_that!(haproxy.healthchecks.healthchecks).contains_exactly(healthchecks);
    }

    fn given_opnsense() -> OPNsense {
        OPNsense::default()
    }

    fn given_opnsense_with(haproxy: HAProxy) -> OPNsense {
        let mut opnsense = OPNsense::default();
        opnsense.opnsense.haproxy = Some(haproxy);

        opnsense
    }

    fn given_load_balancer<'a>(opnsense: &'a mut OPNsense) -> LoadBalancerConfig<'a> {
        let opnsense_shell = Arc::new(DummyOPNSenseShell {});
        if opnsense.opnsense.haproxy.is_none() {
            opnsense.opnsense.haproxy = Some(HAProxy::default());
        }
        LoadBalancerConfig::new(opnsense, opnsense_shell)
    }

    fn given_service(
        bind_address: &str,
        server_address: &str,
    ) -> (
        HAProxyHealthCheck,
        Vec<HAProxyServer>,
        HAProxyBackend,
        Frontend,
    ) {
        let healthcheck = given_healthcheck();
        let servers = vec![given_server(server_address)];
        let backend = given_backend();
        let frontend = given_frontend(bind_address);
        (healthcheck, servers, backend, frontend)
    }

    fn given_haproxy(
        frontends: Vec<Frontend>,
        backends: Vec<HAProxyBackend>,
        servers: Vec<HAProxyServer>,
        healthchecks: Vec<HAProxyHealthCheck>,
    ) -> HAProxy {
        HAProxy {
            frontends: HAProxyFrontends {
                frontend: frontends,
            },
            backends: HAProxyBackends { backends },
            servers: HAProxyServers { servers },
            healthchecks: HAProxyHealthChecks { healthchecks },
            ..Default::default()
        }
    }

    fn given_frontend(bind_address: &str) -> Frontend {
        Frontend {
            uuid: "uuid".into(),
            id: HAProxyId::default(),
            enabled: 1,
            name: format!("frontend_{bind_address}"),
            bind: bind_address.into(),
            default_backend: Some("backend-uuid".into()),
            ..Default::default()
        }
    }

    fn given_backend() -> HAProxyBackend {
        HAProxyBackend {
            uuid: "backend-uuid".into(),
            id: HAProxyId::default(),
            enabled: 1,
            name: "backend_192.168.1.1:80".into(),
            linked_servers: MaybeString::from("server-uuid"),
            health_check_enabled: 1,
            health_check: MaybeString::from("healthcheck-uuid"),
            ..Default::default()
        }
    }

    fn given_server(address: &str) -> HAProxyServer {
        HAProxyServer {
            uuid: "server-uuid".into(),
            id: HAProxyId::default(),
            name: address.into(),
            address: Some(address.into()),
            ..Default::default()
        }
    }

    fn given_healthcheck() -> HAProxyHealthCheck {
        HAProxyHealthCheck {
            uuid: "healthcheck-uuid".into(),
            name: "healthcheck".into(),
            ..Default::default()
        }
    }
}