Compare commits
1 Commits
ad61be277b
...
feat/multi
| Author | SHA1 | Date | |
|---|---|---|---|
| ec794f076e |
57
Cargo.lock
generated
@@ -429,15 +429,6 @@ dependencies = [
|
|||||||
"wait-timeout",
|
"wait-timeout",
|
||||||
]
|
]
|
||||||
|
|
||||||
[[package]]
|
|
||||||
name = "assertor"
|
|
||||||
version = "0.0.4"
|
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
|
||||||
checksum = "4ff24d87260733dc86d38a11c60d9400ce4a74a05d0dafa2a6f5ab249cd857cb"
|
|
||||||
dependencies = [
|
|
||||||
"num-traits",
|
|
||||||
]
|
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "async-broadcast"
|
name = "async-broadcast"
|
||||||
version = "0.7.2"
|
version = "0.7.2"
|
||||||
@@ -674,20 +665,6 @@ dependencies = [
|
|||||||
"serde_with",
|
"serde_with",
|
||||||
]
|
]
|
||||||
|
|
||||||
[[package]]
|
|
||||||
name = "brocade"
|
|
||||||
version = "0.1.0"
|
|
||||||
dependencies = [
|
|
||||||
"async-trait",
|
|
||||||
"env_logger",
|
|
||||||
"harmony_types",
|
|
||||||
"log",
|
|
||||||
"regex",
|
|
||||||
"russh",
|
|
||||||
"russh-keys",
|
|
||||||
"tokio",
|
|
||||||
]
|
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "brotli"
|
name = "brotli"
|
||||||
version = "8.0.2"
|
version = "8.0.2"
|
||||||
@@ -2328,11 +2305,9 @@ name = "harmony"
|
|||||||
version = "0.1.0"
|
version = "0.1.0"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"askama",
|
"askama",
|
||||||
"assertor",
|
|
||||||
"async-trait",
|
"async-trait",
|
||||||
"base64 0.22.1",
|
"base64 0.22.1",
|
||||||
"bollard",
|
"bollard",
|
||||||
"brocade",
|
|
||||||
"chrono",
|
"chrono",
|
||||||
"cidr",
|
"cidr",
|
||||||
"convert_case",
|
"convert_case",
|
||||||
@@ -2429,17 +2404,6 @@ dependencies = [
|
|||||||
"tokio",
|
"tokio",
|
||||||
]
|
]
|
||||||
|
|
||||||
[[package]]
|
|
||||||
name = "harmony_derive"
|
|
||||||
version = "0.1.3"
|
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
|
||||||
checksum = "2d138bbb32bb346299c5f95fbb53532313f39927cb47c411c99c634ef8665ef7"
|
|
||||||
dependencies = [
|
|
||||||
"proc-macro2",
|
|
||||||
"quote",
|
|
||||||
"syn 1.0.109",
|
|
||||||
]
|
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "harmony_inventory_agent"
|
name = "harmony_inventory_agent"
|
||||||
version = "0.1.0"
|
version = "0.1.0"
|
||||||
@@ -3886,19 +3850,6 @@ dependencies = [
|
|||||||
"web-time",
|
"web-time",
|
||||||
]
|
]
|
||||||
|
|
||||||
[[package]]
|
|
||||||
name = "okd_host_network"
|
|
||||||
version = "0.1.0"
|
|
||||||
dependencies = [
|
|
||||||
"harmony",
|
|
||||||
"harmony_cli",
|
|
||||||
"harmony_derive",
|
|
||||||
"harmony_inventory_agent",
|
|
||||||
"harmony_macros",
|
|
||||||
"harmony_types",
|
|
||||||
"tokio",
|
|
||||||
]
|
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "once_cell"
|
name = "once_cell"
|
||||||
version = "1.21.3"
|
version = "1.21.3"
|
||||||
@@ -4586,9 +4537,9 @@ dependencies = [
|
|||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "regex"
|
name = "regex"
|
||||||
version = "1.11.3"
|
version = "1.11.2"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "8b5288124840bee7b386bc413c487869b360b2b4ec421ea56425128692f2a82c"
|
checksum = "23d7fd106d8c02486a8d64e778353d1cffe08ce79ac2e82f540c86d0facf6912"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"aho-corasick 1.1.3",
|
"aho-corasick 1.1.3",
|
||||||
"memchr",
|
"memchr",
|
||||||
@@ -4598,9 +4549,9 @@ dependencies = [
|
|||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "regex-automata"
|
name = "regex-automata"
|
||||||
version = "0.4.11"
|
version = "0.4.10"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "833eb9ce86d40ef33cb1306d8accf7bc8ec2bfea4355cbdebb3df68b40925cad"
|
checksum = "6b9458fa0bfeeac22b5ca447c63aaf45f28439a709ccd244698632f9aa6394d6"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"aho-corasick 1.1.3",
|
"aho-corasick 1.1.3",
|
||||||
"memchr",
|
"memchr",
|
||||||
|
|||||||
14
Cargo.toml
@@ -14,8 +14,7 @@ members = [
|
|||||||
"harmony_composer",
|
"harmony_composer",
|
||||||
"harmony_inventory_agent",
|
"harmony_inventory_agent",
|
||||||
"harmony_secret_derive",
|
"harmony_secret_derive",
|
||||||
"harmony_secret",
|
"harmony_secret", "adr/agent_discovery/mdns",
|
||||||
"adr/agent_discovery/mdns", "brocade",
|
|
||||||
]
|
]
|
||||||
|
|
||||||
[workspace.package]
|
[workspace.package]
|
||||||
@@ -67,12 +66,5 @@ thiserror = "2.0.14"
|
|||||||
serde = { version = "1.0.209", features = ["derive", "rc"] }
|
serde = { version = "1.0.209", features = ["derive", "rc"] }
|
||||||
serde_json = "1.0.127"
|
serde_json = "1.0.127"
|
||||||
askama = "0.14"
|
askama = "0.14"
|
||||||
sqlx = { version = "0.8", features = ["runtime-tokio", "sqlite"] }
|
sqlx = { version = "0.8", features = ["runtime-tokio", "sqlite" ] }
|
||||||
reqwest = { version = "0.12", features = [
|
reqwest = { version = "0.12", features = ["blocking", "stream", "rustls-tls", "http2", "json"], default-features = false }
|
||||||
"blocking",
|
|
||||||
"stream",
|
|
||||||
"rustls-tls",
|
|
||||||
"http2",
|
|
||||||
"json",
|
|
||||||
], default-features = false }
|
|
||||||
assertor = "0.0.4"
|
|
||||||
|
|||||||
69
README.md
@@ -36,59 +36,48 @@ These principles surface as simple, ergonomic Rust APIs that let teams focus on
|
|||||||
|
|
||||||
## 2 · Quick Start
|
## 2 · Quick Start
|
||||||
|
|
||||||
The snippet below spins up a complete **production-grade Rust + Leptos Webapp** with monitoring. Swap it for your own scores to deploy anything from microservices to machine-learning pipelines.
|
The snippet below spins up a complete **production-grade LAMP stack** with monitoring. Swap it for your own scores to deploy anything from microservices to machine-learning pipelines.
|
||||||
|
|
||||||
```rust
|
```rust
|
||||||
use harmony::{
|
use harmony::{
|
||||||
|
data::Version,
|
||||||
inventory::Inventory,
|
inventory::Inventory,
|
||||||
|
maestro::Maestro,
|
||||||
modules::{
|
modules::{
|
||||||
application::{
|
lamp::{LAMPConfig, LAMPScore},
|
||||||
ApplicationScore, RustWebFramework, RustWebapp,
|
monitoring::monitoring_alerting::MonitoringAlertingStackScore,
|
||||||
features::{PackagingDeployment, rhob_monitoring::Monitoring},
|
|
||||||
},
|
|
||||||
monitoring::alert_channel::discord_alert_channel::DiscordWebhook,
|
|
||||||
},
|
},
|
||||||
topology::K8sAnywhereTopology,
|
topology::{K8sAnywhereTopology, Url},
|
||||||
};
|
};
|
||||||
use harmony_macros::hurl;
|
|
||||||
use std::{path::PathBuf, sync::Arc};
|
|
||||||
|
|
||||||
#[tokio::main]
|
#[tokio::main]
|
||||||
async fn main() {
|
async fn main() {
|
||||||
let application = Arc::new(RustWebapp {
|
// 1. Describe what you want
|
||||||
name: "harmony-example-leptos".to_string(),
|
let lamp_stack = LAMPScore {
|
||||||
project_root: PathBuf::from(".."), // <== Your project root, usually .. if you use the standard `/harmony` folder
|
name: "harmony-lamp-demo".into(),
|
||||||
framework: Some(RustWebFramework::Leptos),
|
domain: Url::Url(url::Url::parse("https://lampdemo.example.com").unwrap()),
|
||||||
service_port: 8080,
|
php_version: Version::from("8.3.0").unwrap(),
|
||||||
});
|
config: LAMPConfig {
|
||||||
|
project_root: "./php".into(),
|
||||||
// Define your Application deployment and the features you want
|
database_size: "4Gi".into(),
|
||||||
let app = ApplicationScore {
|
..Default::default()
|
||||||
features: vec![
|
},
|
||||||
Box::new(PackagingDeployment {
|
|
||||||
application: application.clone(),
|
|
||||||
}),
|
|
||||||
Box::new(Monitoring {
|
|
||||||
application: application.clone(),
|
|
||||||
alert_receiver: vec![
|
|
||||||
Box::new(DiscordWebhook {
|
|
||||||
name: "test-discord".to_string(),
|
|
||||||
url: hurl!("https://discord.doesnt.exist.com"), // <== Get your discord webhook url
|
|
||||||
}),
|
|
||||||
],
|
|
||||||
}),
|
|
||||||
],
|
|
||||||
application,
|
|
||||||
};
|
};
|
||||||
|
|
||||||
|
// 2. Enhance with extra scores (monitoring, CI/CD, …)
|
||||||
|
let mut monitoring = MonitoringAlertingStackScore::new();
|
||||||
|
monitoring.namespace = Some(lamp_stack.config.namespace.clone());
|
||||||
|
|
||||||
|
// 3. Run your scores on the desired topology & inventory
|
||||||
harmony_cli::run(
|
harmony_cli::run(
|
||||||
Inventory::autoload(),
|
Inventory::autoload(), // auto-detect hardware / kube-config
|
||||||
K8sAnywhereTopology::from_env(), // <== Deploy to local automatically provisioned local k3d by default or connect to any kubernetes cluster
|
K8sAnywhereTopology::from_env(), // local k3d, CI, staging, prod…
|
||||||
vec![Box::new(app)],
|
vec![
|
||||||
None,
|
Box::new(lamp_stack),
|
||||||
)
|
Box::new(monitoring)
|
||||||
.await
|
],
|
||||||
.unwrap();
|
None
|
||||||
|
).await.unwrap();
|
||||||
}
|
}
|
||||||
```
|
```
|
||||||
|
|
||||||
|
|||||||
@@ -1,16 +0,0 @@
|
|||||||
[package]
|
|
||||||
name = "brocade"
|
|
||||||
edition = "2024"
|
|
||||||
version.workspace = true
|
|
||||||
readme.workspace = true
|
|
||||||
license.workspace = true
|
|
||||||
|
|
||||||
[dependencies]
|
|
||||||
async-trait.workspace = true
|
|
||||||
harmony_types = { path = "../harmony_types" }
|
|
||||||
russh.workspace = true
|
|
||||||
russh-keys.workspace = true
|
|
||||||
tokio.workspace = true
|
|
||||||
log.workspace = true
|
|
||||||
env_logger.workspace = true
|
|
||||||
regex = "1.11.3"
|
|
||||||
@@ -1,54 +0,0 @@
|
|||||||
use std::net::{IpAddr, Ipv4Addr};
|
|
||||||
|
|
||||||
use harmony_types::switch::PortLocation;
|
|
||||||
|
|
||||||
#[tokio::main]
|
|
||||||
async fn main() {
|
|
||||||
env_logger::init();
|
|
||||||
|
|
||||||
let ip = IpAddr::V4(Ipv4Addr::new(10, 0, 0, 250)); // old brocade @ ianlet
|
|
||||||
// let ip = IpAddr::V4(Ipv4Addr::new(192, 168, 55, 101)); // brocade @ sto1
|
|
||||||
let switch_addresses = vec![ip];
|
|
||||||
|
|
||||||
let brocade = brocade::init(&switch_addresses, 22, "admin", "password", None)
|
|
||||||
.await
|
|
||||||
.expect("Brocade client failed to connect");
|
|
||||||
|
|
||||||
let version = brocade.version().await.unwrap();
|
|
||||||
println!("Version: {version:?}");
|
|
||||||
|
|
||||||
println!("--------------");
|
|
||||||
println!("Showing MAC Address table...");
|
|
||||||
|
|
||||||
let mac_adddresses = brocade.show_mac_address_table().await.unwrap();
|
|
||||||
println!("VLAN\tMAC\t\t\tPORT");
|
|
||||||
for mac in mac_adddresses {
|
|
||||||
println!("{}\t{}\t{}", mac.vlan, mac.mac_address, mac.port);
|
|
||||||
}
|
|
||||||
|
|
||||||
println!("--------------");
|
|
||||||
let channel_name = "HARMONY_LAG";
|
|
||||||
println!("Clearing port channel '{channel_name}'...");
|
|
||||||
|
|
||||||
brocade.clear_port_channel(channel_name).await.unwrap();
|
|
||||||
|
|
||||||
println!("Cleared");
|
|
||||||
|
|
||||||
println!("--------------");
|
|
||||||
println!("Finding next available channel...");
|
|
||||||
|
|
||||||
let channel_id = brocade.find_available_channel_id().await.unwrap();
|
|
||||||
println!("Channel id: {channel_id}");
|
|
||||||
|
|
||||||
println!("--------------");
|
|
||||||
let channel_name = "HARMONY_LAG";
|
|
||||||
let ports = [PortLocation(1, 1, 3), PortLocation(1, 1, 4)];
|
|
||||||
println!("Creating port channel '{channel_name}' with ports {ports:?}'...");
|
|
||||||
|
|
||||||
brocade
|
|
||||||
.create_port_channel(channel_id, channel_name, &ports)
|
|
||||||
.await
|
|
||||||
.unwrap();
|
|
||||||
|
|
||||||
println!("Created");
|
|
||||||
}
|
|
||||||
@@ -1,161 +0,0 @@
|
|||||||
use super::BrocadeClient;
|
|
||||||
use crate::{
|
|
||||||
BrocadeInfo, Error, ExecutionMode, MacAddressEntry, PortChannelId, parse_brocade_mac_address,
|
|
||||||
shell::BrocadeShell,
|
|
||||||
};
|
|
||||||
|
|
||||||
use async_trait::async_trait;
|
|
||||||
use harmony_types::switch::{PortDeclaration, PortLocation};
|
|
||||||
use log::{debug, info};
|
|
||||||
use regex::Regex;
|
|
||||||
use std::{collections::HashSet, str::FromStr};
|
|
||||||
|
|
||||||
pub struct FastIronClient {
|
|
||||||
pub shell: BrocadeShell,
|
|
||||||
pub version: BrocadeInfo,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl FastIronClient {
|
|
||||||
pub fn parse_mac_entry(&self, line: &str) -> Option<Result<MacAddressEntry, Error>> {
|
|
||||||
debug!("[Brocade] Parsing mac address entry: {line}");
|
|
||||||
let parts: Vec<&str> = line.split_whitespace().collect();
|
|
||||||
if parts.len() < 3 {
|
|
||||||
return None;
|
|
||||||
}
|
|
||||||
|
|
||||||
let (vlan, mac_address, port) = match parts.len() {
|
|
||||||
3 => (
|
|
||||||
u16::from_str(parts[0]).ok()?,
|
|
||||||
parse_brocade_mac_address(parts[1]).ok()?,
|
|
||||||
parts[2].to_string(),
|
|
||||||
),
|
|
||||||
_ => (
|
|
||||||
1,
|
|
||||||
parse_brocade_mac_address(parts[0]).ok()?,
|
|
||||||
parts[1].to_string(),
|
|
||||||
),
|
|
||||||
};
|
|
||||||
|
|
||||||
let port =
|
|
||||||
PortDeclaration::parse(&port).map_err(|e| Error::UnexpectedError(format!("{e}")));
|
|
||||||
|
|
||||||
match port {
|
|
||||||
Ok(p) => Some(Ok(MacAddressEntry {
|
|
||||||
vlan,
|
|
||||||
mac_address,
|
|
||||||
port: p,
|
|
||||||
})),
|
|
||||||
Err(e) => Some(Err(e)),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn build_port_channel_commands(
|
|
||||||
&self,
|
|
||||||
channel_name: &str,
|
|
||||||
channel_id: u8,
|
|
||||||
ports: &[PortLocation],
|
|
||||||
) -> Vec<String> {
|
|
||||||
let mut commands = vec![
|
|
||||||
"configure terminal".to_string(),
|
|
||||||
format!("lag {channel_name} static id {channel_id}"),
|
|
||||||
];
|
|
||||||
|
|
||||||
for port in ports {
|
|
||||||
commands.push(format!("ports ethernet {port}"));
|
|
||||||
}
|
|
||||||
|
|
||||||
commands.push(format!("primary-port {}", ports[0]));
|
|
||||||
commands.push("deploy".into());
|
|
||||||
commands.push("exit".into());
|
|
||||||
commands.push("write memory".into());
|
|
||||||
commands.push("exit".into());
|
|
||||||
|
|
||||||
commands
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[async_trait]
|
|
||||||
impl BrocadeClient for FastIronClient {
|
|
||||||
async fn version(&self) -> Result<BrocadeInfo, Error> {
|
|
||||||
Ok(self.version.clone())
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn show_mac_address_table(&self) -> Result<Vec<MacAddressEntry>, Error> {
|
|
||||||
info!("[Brocade] Showing MAC address table...");
|
|
||||||
|
|
||||||
let output = self
|
|
||||||
.shell
|
|
||||||
.run_command("show mac-address", ExecutionMode::Regular)
|
|
||||||
.await?;
|
|
||||||
|
|
||||||
output
|
|
||||||
.lines()
|
|
||||||
.skip(2)
|
|
||||||
.filter_map(|line| self.parse_mac_entry(line))
|
|
||||||
.collect()
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn find_available_channel_id(&self) -> Result<PortChannelId, Error> {
|
|
||||||
info!("[Brocade] Finding next available channel id...");
|
|
||||||
|
|
||||||
let output = self
|
|
||||||
.shell
|
|
||||||
.run_command("show lag", ExecutionMode::Regular)
|
|
||||||
.await?;
|
|
||||||
let re = Regex::new(r"=== LAG .* ID\s+(\d+)").expect("Invalid regex");
|
|
||||||
|
|
||||||
let used_ids: HashSet<u8> = output
|
|
||||||
.lines()
|
|
||||||
.filter_map(|line| {
|
|
||||||
re.captures(line)
|
|
||||||
.and_then(|c| c.get(1))
|
|
||||||
.and_then(|id_match| id_match.as_str().parse().ok())
|
|
||||||
})
|
|
||||||
.collect();
|
|
||||||
|
|
||||||
let mut next_id: u8 = 1;
|
|
||||||
loop {
|
|
||||||
if !used_ids.contains(&next_id) {
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
next_id += 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
info!("[Brocade] Found channel id: {next_id}");
|
|
||||||
Ok(next_id)
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn create_port_channel(
|
|
||||||
&self,
|
|
||||||
channel_id: PortChannelId,
|
|
||||||
channel_name: &str,
|
|
||||||
ports: &[PortLocation],
|
|
||||||
) -> Result<(), Error> {
|
|
||||||
info!(
|
|
||||||
"[Brocade] Configuring port-channel '{channel_name} {channel_id}' with ports: {ports:?}"
|
|
||||||
);
|
|
||||||
|
|
||||||
let commands = self.build_port_channel_commands(channel_name, channel_id, ports);
|
|
||||||
self.shell
|
|
||||||
.run_commands(commands, ExecutionMode::Privileged)
|
|
||||||
.await?;
|
|
||||||
|
|
||||||
info!("[Brocade] Port-channel '{channel_name}' configured.");
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn clear_port_channel(&self, channel_name: &str) -> Result<(), Error> {
|
|
||||||
debug!("[Brocade] Clearing port-channel: {channel_name}");
|
|
||||||
|
|
||||||
let commands = vec![
|
|
||||||
"configure terminal".to_string(),
|
|
||||||
format!("no lag {channel_name}"),
|
|
||||||
"write memory".to_string(),
|
|
||||||
];
|
|
||||||
self.shell
|
|
||||||
.run_commands(commands, ExecutionMode::Privileged)
|
|
||||||
.await?;
|
|
||||||
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -1,203 +0,0 @@
|
|||||||
use std::net::IpAddr;
|
|
||||||
use std::{
|
|
||||||
fmt::{self, Display},
|
|
||||||
time::Duration,
|
|
||||||
};
|
|
||||||
|
|
||||||
use crate::{
|
|
||||||
fast_iron::FastIronClient,
|
|
||||||
shell::{BrocadeSession, BrocadeShell},
|
|
||||||
};
|
|
||||||
|
|
||||||
use async_trait::async_trait;
|
|
||||||
use harmony_types::net::MacAddress;
|
|
||||||
use harmony_types::switch::{PortDeclaration, PortLocation};
|
|
||||||
use regex::Regex;
|
|
||||||
|
|
||||||
mod fast_iron;
|
|
||||||
mod shell;
|
|
||||||
mod ssh;
|
|
||||||
|
|
||||||
#[derive(Default, Clone, Debug)]
|
|
||||||
pub struct BrocadeOptions {
|
|
||||||
pub dry_run: bool,
|
|
||||||
pub ssh: ssh::SshOptions,
|
|
||||||
pub timeouts: TimeoutConfig,
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Clone, Debug)]
|
|
||||||
pub struct TimeoutConfig {
|
|
||||||
pub shell_ready: Duration,
|
|
||||||
pub command_execution: Duration,
|
|
||||||
pub cleanup: Duration,
|
|
||||||
pub message_wait: Duration,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Default for TimeoutConfig {
|
|
||||||
fn default() -> Self {
|
|
||||||
Self {
|
|
||||||
shell_ready: Duration::from_secs(3),
|
|
||||||
command_execution: Duration::from_secs(60), // Commands like `deploy` (for a LAG) can take a while
|
|
||||||
cleanup: Duration::from_secs(10),
|
|
||||||
message_wait: Duration::from_millis(500),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
enum ExecutionMode {
|
|
||||||
Regular,
|
|
||||||
Privileged,
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Clone, Debug)]
|
|
||||||
pub struct BrocadeInfo {
|
|
||||||
os: BrocadeOs,
|
|
||||||
version: String,
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Clone, Debug)]
|
|
||||||
pub enum BrocadeOs {
|
|
||||||
NetworkOperatingSystem,
|
|
||||||
FastIron,
|
|
||||||
Unknown,
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Clone)]
|
|
||||||
pub struct MacAddressEntry {
|
|
||||||
pub vlan: u16,
|
|
||||||
pub mac_address: MacAddress,
|
|
||||||
pub port: PortDeclaration,
|
|
||||||
}
|
|
||||||
|
|
||||||
pub type PortChannelId = u8;
|
|
||||||
|
|
||||||
pub async fn init(
|
|
||||||
ip_addresses: &[IpAddr],
|
|
||||||
port: u16,
|
|
||||||
username: &str,
|
|
||||||
password: &str,
|
|
||||||
options: Option<BrocadeOptions>,
|
|
||||||
) -> Result<Box<dyn BrocadeClient + Send + Sync>, Error> {
|
|
||||||
let shell = BrocadeShell::init(ip_addresses, port, username, password, options).await?;
|
|
||||||
|
|
||||||
let version_info = shell
|
|
||||||
.with_session(ExecutionMode::Regular, |session| {
|
|
||||||
Box::pin(get_brocade_info(session))
|
|
||||||
})
|
|
||||||
.await?;
|
|
||||||
|
|
||||||
Ok(match version_info.os {
|
|
||||||
BrocadeOs::FastIron => Box::new(FastIronClient {
|
|
||||||
shell,
|
|
||||||
version: version_info,
|
|
||||||
}),
|
|
||||||
BrocadeOs::NetworkOperatingSystem => todo!(),
|
|
||||||
BrocadeOs::Unknown => todo!(),
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
#[async_trait]
|
|
||||||
pub trait BrocadeClient {
|
|
||||||
async fn version(&self) -> Result<BrocadeInfo, Error>;
|
|
||||||
|
|
||||||
async fn show_mac_address_table(&self) -> Result<Vec<MacAddressEntry>, Error>;
|
|
||||||
|
|
||||||
async fn find_available_channel_id(&self) -> Result<PortChannelId, Error>;
|
|
||||||
|
|
||||||
async fn create_port_channel(
|
|
||||||
&self,
|
|
||||||
channel_id: PortChannelId,
|
|
||||||
channel_name: &str,
|
|
||||||
ports: &[PortLocation],
|
|
||||||
) -> Result<(), Error>;
|
|
||||||
|
|
||||||
async fn clear_port_channel(&self, channel_name: &str) -> Result<(), Error>;
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn get_brocade_info(session: &mut BrocadeSession) -> Result<BrocadeInfo, Error> {
|
|
||||||
let output = session.run_command("show version").await?;
|
|
||||||
|
|
||||||
if output.contains("Network Operating System") {
|
|
||||||
let re = Regex::new(r"Network Operating System Version:\s*(?P<version>[a-zA-Z0-9.\-]+)")
|
|
||||||
.expect("Invalid regex");
|
|
||||||
let version = re
|
|
||||||
.captures(&output)
|
|
||||||
.and_then(|cap| cap.name("version"))
|
|
||||||
.map(|m| m.as_str().to_string())
|
|
||||||
.unwrap_or_default();
|
|
||||||
|
|
||||||
return Ok(BrocadeInfo {
|
|
||||||
os: BrocadeOs::NetworkOperatingSystem,
|
|
||||||
version,
|
|
||||||
});
|
|
||||||
} else if output.contains("ICX") {
|
|
||||||
let re = Regex::new(r"(?m)^\s*SW: Version\s*(?P<version>[a-zA-Z0-9.\-]+)")
|
|
||||||
.expect("Invalid regex");
|
|
||||||
let version = re
|
|
||||||
.captures(&output)
|
|
||||||
.and_then(|cap| cap.name("version"))
|
|
||||||
.map(|m| m.as_str().to_string())
|
|
||||||
.unwrap_or_default();
|
|
||||||
|
|
||||||
return Ok(BrocadeInfo {
|
|
||||||
os: BrocadeOs::FastIron,
|
|
||||||
version,
|
|
||||||
});
|
|
||||||
}
|
|
||||||
|
|
||||||
Err(Error::UnexpectedError("Unknown Brocade OS version".into()))
|
|
||||||
}
|
|
||||||
|
|
||||||
fn parse_brocade_mac_address(value: &str) -> Result<MacAddress, String> {
|
|
||||||
let cleaned_mac = value.replace('.', "");
|
|
||||||
|
|
||||||
if cleaned_mac.len() != 12 {
|
|
||||||
return Err(format!("Invalid MAC address: {value}"));
|
|
||||||
}
|
|
||||||
|
|
||||||
let mut bytes = [0u8; 6];
|
|
||||||
for (i, pair) in cleaned_mac.as_bytes().chunks(2).enumerate() {
|
|
||||||
let byte_str = std::str::from_utf8(pair).map_err(|_| "Invalid UTF-8")?;
|
|
||||||
bytes[i] =
|
|
||||||
u8::from_str_radix(byte_str, 16).map_err(|_| format!("Invalid hex in MAC: {value}"))?;
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(MacAddress(bytes))
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Debug)]
|
|
||||||
pub enum Error {
|
|
||||||
NetworkError(String),
|
|
||||||
AuthenticationError(String),
|
|
||||||
ConfigurationError(String),
|
|
||||||
TimeoutError(String),
|
|
||||||
UnexpectedError(String),
|
|
||||||
CommandError(String),
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Display for Error {
|
|
||||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
|
||||||
match self {
|
|
||||||
Error::NetworkError(msg) => write!(f, "Network error: {msg}"),
|
|
||||||
Error::AuthenticationError(msg) => write!(f, "Authentication error: {msg}"),
|
|
||||||
Error::ConfigurationError(msg) => write!(f, "Configuration error: {msg}"),
|
|
||||||
Error::TimeoutError(msg) => write!(f, "Timeout error: {msg}"),
|
|
||||||
Error::UnexpectedError(msg) => write!(f, "Unexpected error: {msg}"),
|
|
||||||
Error::CommandError(msg) => write!(f, "{msg}"),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl From<Error> for String {
|
|
||||||
fn from(val: Error) -> Self {
|
|
||||||
format!("{val}")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl std::error::Error for Error {}
|
|
||||||
|
|
||||||
impl From<russh::Error> for Error {
|
|
||||||
fn from(value: russh::Error) -> Self {
|
|
||||||
Error::NetworkError(format!("Russh client error: {value}"))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -1,330 +0,0 @@
|
|||||||
use std::net::IpAddr;
|
|
||||||
use std::time::Duration;
|
|
||||||
use std::time::Instant;
|
|
||||||
|
|
||||||
use crate::BrocadeOptions;
|
|
||||||
use crate::Error;
|
|
||||||
use crate::ExecutionMode;
|
|
||||||
use crate::TimeoutConfig;
|
|
||||||
use crate::ssh;
|
|
||||||
|
|
||||||
use log::debug;
|
|
||||||
use log::info;
|
|
||||||
use russh::ChannelMsg;
|
|
||||||
use tokio::time::timeout;
|
|
||||||
|
|
||||||
pub struct BrocadeShell {
|
|
||||||
pub ip: IpAddr,
|
|
||||||
pub port: u16,
|
|
||||||
pub username: String,
|
|
||||||
pub password: String,
|
|
||||||
pub options: BrocadeOptions,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl BrocadeShell {
|
|
||||||
pub async fn init(
|
|
||||||
ip_addresses: &[IpAddr],
|
|
||||||
port: u16,
|
|
||||||
username: &str,
|
|
||||||
password: &str,
|
|
||||||
options: Option<BrocadeOptions>,
|
|
||||||
) -> Result<Self, Error> {
|
|
||||||
let ip = ip_addresses
|
|
||||||
.first()
|
|
||||||
.ok_or_else(|| Error::ConfigurationError("No IP addresses provided".to_string()))?;
|
|
||||||
|
|
||||||
let base_options = options.unwrap_or_default();
|
|
||||||
let options = ssh::try_init_client(username, password, ip, base_options).await?;
|
|
||||||
|
|
||||||
Ok(Self {
|
|
||||||
ip: *ip,
|
|
||||||
port,
|
|
||||||
username: username.to_string(),
|
|
||||||
password: password.to_string(),
|
|
||||||
options,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
pub async fn open_session(&self, mode: ExecutionMode) -> Result<BrocadeSession, Error> {
|
|
||||||
BrocadeSession::open(
|
|
||||||
self.ip,
|
|
||||||
self.port,
|
|
||||||
&self.username,
|
|
||||||
&self.password,
|
|
||||||
self.options.clone(),
|
|
||||||
mode,
|
|
||||||
)
|
|
||||||
.await
|
|
||||||
}
|
|
||||||
|
|
||||||
pub async fn with_session<F, R>(&self, mode: ExecutionMode, callback: F) -> Result<R, Error>
|
|
||||||
where
|
|
||||||
F: FnOnce(
|
|
||||||
&mut BrocadeSession,
|
|
||||||
) -> std::pin::Pin<
|
|
||||||
Box<dyn std::future::Future<Output = Result<R, Error>> + Send + '_>,
|
|
||||||
>,
|
|
||||||
{
|
|
||||||
let mut session = self.open_session(mode).await?;
|
|
||||||
let result = callback(&mut session).await;
|
|
||||||
session.close().await?;
|
|
||||||
result
|
|
||||||
}
|
|
||||||
|
|
||||||
pub async fn run_command(&self, command: &str, mode: ExecutionMode) -> Result<String, Error> {
|
|
||||||
let mut session = self.open_session(mode).await?;
|
|
||||||
let result = session.run_command(command).await;
|
|
||||||
session.close().await?;
|
|
||||||
result
|
|
||||||
}
|
|
||||||
|
|
||||||
pub async fn run_commands(
|
|
||||||
&self,
|
|
||||||
commands: Vec<String>,
|
|
||||||
mode: ExecutionMode,
|
|
||||||
) -> Result<(), Error> {
|
|
||||||
let mut session = self.open_session(mode).await?;
|
|
||||||
let result = session.run_commands(commands).await;
|
|
||||||
session.close().await?;
|
|
||||||
result
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub struct BrocadeSession {
|
|
||||||
pub channel: russh::Channel<russh::client::Msg>,
|
|
||||||
pub mode: ExecutionMode,
|
|
||||||
pub options: BrocadeOptions,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl BrocadeSession {
|
|
||||||
pub async fn open(
|
|
||||||
ip: IpAddr,
|
|
||||||
port: u16,
|
|
||||||
username: &str,
|
|
||||||
password: &str,
|
|
||||||
options: BrocadeOptions,
|
|
||||||
mode: ExecutionMode,
|
|
||||||
) -> Result<Self, Error> {
|
|
||||||
let client = ssh::create_client(ip, port, username, password, &options).await?;
|
|
||||||
let mut channel = client.channel_open_session().await?;
|
|
||||||
|
|
||||||
channel
|
|
||||||
.request_pty(false, "vt100", 80, 24, 0, 0, &[])
|
|
||||||
.await?;
|
|
||||||
channel.request_shell(false).await?;
|
|
||||||
|
|
||||||
wait_for_shell_ready(&mut channel, &options.timeouts).await?;
|
|
||||||
|
|
||||||
if let ExecutionMode::Privileged = mode {
|
|
||||||
try_elevate_session(&mut channel, username, password, &options.timeouts).await?;
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(Self {
|
|
||||||
channel,
|
|
||||||
mode,
|
|
||||||
options,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
pub async fn close(&mut self) -> Result<(), Error> {
|
|
||||||
debug!("[Brocade] Closing session...");
|
|
||||||
|
|
||||||
self.channel.data(&b"exit\n"[..]).await?;
|
|
||||||
if let ExecutionMode::Privileged = self.mode {
|
|
||||||
self.channel.data(&b"exit\n"[..]).await?;
|
|
||||||
}
|
|
||||||
|
|
||||||
let start = Instant::now();
|
|
||||||
while start.elapsed() < self.options.timeouts.cleanup {
|
|
||||||
match timeout(self.options.timeouts.message_wait, self.channel.wait()).await {
|
|
||||||
Ok(Some(ChannelMsg::Close)) => break,
|
|
||||||
Ok(Some(_)) => continue,
|
|
||||||
Ok(None) | Err(_) => break,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
debug!("[Brocade] Session closed.");
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
pub async fn run_command(&mut self, command: &str) -> Result<String, Error> {
|
|
||||||
debug!("[Brocade] Running command: '{command}'...");
|
|
||||||
|
|
||||||
self.channel
|
|
||||||
.data(format!("{}\n", command).as_bytes())
|
|
||||||
.await?;
|
|
||||||
tokio::time::sleep(Duration::from_millis(100)).await;
|
|
||||||
|
|
||||||
let output = self.collect_command_output().await?;
|
|
||||||
let output = String::from_utf8(output)
|
|
||||||
.map_err(|_| Error::UnexpectedError("Invalid UTF-8 in command output".to_string()))?;
|
|
||||||
|
|
||||||
self.check_for_command_errors(&output, command)?;
|
|
||||||
Ok(output)
|
|
||||||
}
|
|
||||||
|
|
||||||
pub async fn run_commands(&mut self, commands: Vec<String>) -> Result<(), Error> {
|
|
||||||
for command in commands {
|
|
||||||
self.run_command(&command).await?;
|
|
||||||
}
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
pub async fn collect_command_output(&mut self) -> Result<Vec<u8>, Error> {
|
|
||||||
let mut output = Vec::new();
|
|
||||||
let start = Instant::now();
|
|
||||||
let read_timeout = Duration::from_millis(500);
|
|
||||||
let log_interval = Duration::from_secs(3);
|
|
||||||
let mut last_log = Instant::now();
|
|
||||||
|
|
||||||
loop {
|
|
||||||
if start.elapsed() > self.options.timeouts.command_execution {
|
|
||||||
return Err(Error::TimeoutError(
|
|
||||||
"Timeout waiting for command completion.".into(),
|
|
||||||
));
|
|
||||||
}
|
|
||||||
|
|
||||||
if start.elapsed() > Duration::from_secs(5) && last_log.elapsed() > log_interval {
|
|
||||||
info!("[Brocade] Waiting for command output...");
|
|
||||||
last_log = Instant::now();
|
|
||||||
}
|
|
||||||
|
|
||||||
match timeout(read_timeout, self.channel.wait()).await {
|
|
||||||
Ok(Some(ChannelMsg::Data { data } | ChannelMsg::ExtendedData { data, .. })) => {
|
|
||||||
output.extend_from_slice(&data);
|
|
||||||
let current_output = String::from_utf8_lossy(&output);
|
|
||||||
if current_output.contains('>') || current_output.contains('#') {
|
|
||||||
return Ok(output);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
Ok(Some(ChannelMsg::Eof | ChannelMsg::Close)) => return Ok(output),
|
|
||||||
Ok(Some(ChannelMsg::ExitStatus { exit_status })) => {
|
|
||||||
debug!("[Brocade] Command exit status: {exit_status}");
|
|
||||||
}
|
|
||||||
Ok(Some(_)) => continue,
|
|
||||||
Ok(None) | Err(_) => {
|
|
||||||
if output.is_empty() {
|
|
||||||
if let Ok(None) = timeout(read_timeout, self.channel.wait()).await {
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
|
|
||||||
tokio::time::sleep(Duration::from_millis(100)).await;
|
|
||||||
let current_output = String::from_utf8_lossy(&output);
|
|
||||||
if current_output.contains('>') || current_output.contains('#') {
|
|
||||||
return Ok(output);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(output)
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn check_for_command_errors(&self, output: &str, command: &str) -> Result<(), Error> {
|
|
||||||
const ERROR_PATTERNS: &[&str] = &[
|
|
||||||
"invalid input",
|
|
||||||
"syntax error",
|
|
||||||
"command not found",
|
|
||||||
"unknown command",
|
|
||||||
"permission denied",
|
|
||||||
"access denied",
|
|
||||||
"authentication failed",
|
|
||||||
"configuration error",
|
|
||||||
"failed to",
|
|
||||||
"error:",
|
|
||||||
];
|
|
||||||
|
|
||||||
let output_lower = output.to_lowercase();
|
|
||||||
if ERROR_PATTERNS.iter().any(|&p| output_lower.contains(p)) {
|
|
||||||
return Err(Error::CommandError(format!(
|
|
||||||
"Command '{command}' failed: {}",
|
|
||||||
output.trim()
|
|
||||||
)));
|
|
||||||
}
|
|
||||||
|
|
||||||
if !command.starts_with("show") && output.trim().is_empty() {
|
|
||||||
return Err(Error::CommandError(format!(
|
|
||||||
"Command '{command}' produced no output"
|
|
||||||
)));
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub async fn wait_for_shell_ready(
|
|
||||||
channel: &mut russh::Channel<russh::client::Msg>,
|
|
||||||
timeouts: &TimeoutConfig,
|
|
||||||
) -> Result<(), Error> {
|
|
||||||
let mut buffer = Vec::new();
|
|
||||||
let start = Instant::now();
|
|
||||||
|
|
||||||
while start.elapsed() < timeouts.shell_ready {
|
|
||||||
match timeout(timeouts.message_wait, channel.wait()).await {
|
|
||||||
Ok(Some(ChannelMsg::Data { data })) => {
|
|
||||||
buffer.extend_from_slice(&data);
|
|
||||||
let output = String::from_utf8_lossy(&buffer);
|
|
||||||
if output.ends_with('>') || output.ends_with('#') {
|
|
||||||
debug!("[Brocade] Shell ready");
|
|
||||||
return Ok(());
|
|
||||||
}
|
|
||||||
}
|
|
||||||
Ok(Some(_)) => continue,
|
|
||||||
Ok(None) => break,
|
|
||||||
Err(_) => continue,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
pub async fn try_elevate_session(
|
|
||||||
channel: &mut russh::Channel<russh::client::Msg>,
|
|
||||||
username: &str,
|
|
||||||
password: &str,
|
|
||||||
timeouts: &TimeoutConfig,
|
|
||||||
) -> Result<(), Error> {
|
|
||||||
channel.data(&b"enable\n"[..]).await?;
|
|
||||||
let start = Instant::now();
|
|
||||||
let mut buffer = Vec::new();
|
|
||||||
|
|
||||||
while start.elapsed() < timeouts.shell_ready {
|
|
||||||
match timeout(timeouts.message_wait, channel.wait()).await {
|
|
||||||
Ok(Some(ChannelMsg::Data { data })) => {
|
|
||||||
buffer.extend_from_slice(&data);
|
|
||||||
let output = String::from_utf8_lossy(&buffer);
|
|
||||||
|
|
||||||
if output.ends_with('#') {
|
|
||||||
debug!("[Brocade] Privileged mode established");
|
|
||||||
return Ok(());
|
|
||||||
}
|
|
||||||
|
|
||||||
if output.contains("User Name:") {
|
|
||||||
channel.data(format!("{}\n", username).as_bytes()).await?;
|
|
||||||
buffer.clear();
|
|
||||||
} else if output.contains("Password:") {
|
|
||||||
channel.data(format!("{}\n", password).as_bytes()).await?;
|
|
||||||
buffer.clear();
|
|
||||||
} else if output.contains('>') {
|
|
||||||
return Err(Error::AuthenticationError(
|
|
||||||
"Enable authentication failed".into(),
|
|
||||||
));
|
|
||||||
}
|
|
||||||
}
|
|
||||||
Ok(Some(_)) => continue,
|
|
||||||
Ok(None) => break,
|
|
||||||
Err(_) => continue,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
let output = String::from_utf8_lossy(&buffer);
|
|
||||||
if output.ends_with('#') {
|
|
||||||
debug!("[Brocade] Privileged mode established");
|
|
||||||
Ok(())
|
|
||||||
} else {
|
|
||||||
Err(Error::AuthenticationError(format!(
|
|
||||||
"Enable failed. Output:\n{output}"
|
|
||||||
)))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -1,113 +0,0 @@
|
|||||||
use std::borrow::Cow;
|
|
||||||
use std::sync::Arc;
|
|
||||||
|
|
||||||
use async_trait::async_trait;
|
|
||||||
use russh::client::Handler;
|
|
||||||
use russh::kex::DH_G1_SHA1;
|
|
||||||
use russh::kex::ECDH_SHA2_NISTP256;
|
|
||||||
use russh_keys::key::SSH_RSA;
|
|
||||||
|
|
||||||
use super::BrocadeOptions;
|
|
||||||
use super::Error;
|
|
||||||
|
|
||||||
#[derive(Default, Clone, Debug)]
|
|
||||||
pub struct SshOptions {
|
|
||||||
pub preferred_algorithms: russh::Preferred,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl SshOptions {
|
|
||||||
fn ecdhsa_sha2_nistp256() -> Self {
|
|
||||||
Self {
|
|
||||||
preferred_algorithms: russh::Preferred {
|
|
||||||
kex: Cow::Borrowed(&[ECDH_SHA2_NISTP256]),
|
|
||||||
key: Cow::Borrowed(&[SSH_RSA]),
|
|
||||||
..Default::default()
|
|
||||||
},
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fn legacy() -> Self {
|
|
||||||
Self {
|
|
||||||
preferred_algorithms: russh::Preferred {
|
|
||||||
kex: Cow::Borrowed(&[DH_G1_SHA1]),
|
|
||||||
key: Cow::Borrowed(&[SSH_RSA]),
|
|
||||||
..Default::default()
|
|
||||||
},
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub struct Client;
|
|
||||||
|
|
||||||
#[async_trait]
|
|
||||||
impl Handler for Client {
|
|
||||||
type Error = Error;
|
|
||||||
|
|
||||||
async fn check_server_key(
|
|
||||||
&mut self,
|
|
||||||
_server_public_key: &russh_keys::key::PublicKey,
|
|
||||||
) -> Result<bool, Self::Error> {
|
|
||||||
Ok(true)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub async fn try_init_client(
|
|
||||||
username: &str,
|
|
||||||
password: &str,
|
|
||||||
ip: &std::net::IpAddr,
|
|
||||||
base_options: BrocadeOptions,
|
|
||||||
) -> Result<BrocadeOptions, Error> {
|
|
||||||
let ssh_options = vec![
|
|
||||||
SshOptions::default(),
|
|
||||||
SshOptions::ecdhsa_sha2_nistp256(),
|
|
||||||
SshOptions::legacy(),
|
|
||||||
];
|
|
||||||
|
|
||||||
for ssh in ssh_options {
|
|
||||||
let opts = BrocadeOptions {
|
|
||||||
ssh,
|
|
||||||
..base_options.clone()
|
|
||||||
};
|
|
||||||
let client = create_client(*ip, 22, username, password, &opts).await;
|
|
||||||
|
|
||||||
match client {
|
|
||||||
Ok(_) => {
|
|
||||||
return Ok(opts);
|
|
||||||
}
|
|
||||||
Err(e) => match e {
|
|
||||||
Error::NetworkError(e) => {
|
|
||||||
if e.contains("No common key exchange algorithm") {
|
|
||||||
continue;
|
|
||||||
} else {
|
|
||||||
return Err(Error::NetworkError(e));
|
|
||||||
}
|
|
||||||
}
|
|
||||||
_ => return Err(e),
|
|
||||||
},
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
Err(Error::NetworkError(
|
|
||||||
"Could not establish ssh connection: wrong key exchange algorithm)".to_string(),
|
|
||||||
))
|
|
||||||
}
|
|
||||||
|
|
||||||
pub async fn create_client(
|
|
||||||
ip: std::net::IpAddr,
|
|
||||||
port: u16,
|
|
||||||
username: &str,
|
|
||||||
password: &str,
|
|
||||||
options: &BrocadeOptions,
|
|
||||||
) -> Result<russh::client::Handle<Client>, Error> {
|
|
||||||
let config = russh::client::Config {
|
|
||||||
preferred: options.ssh.preferred_algorithms.clone(),
|
|
||||||
..Default::default()
|
|
||||||
};
|
|
||||||
let mut client = russh::client::connect(Arc::new(config), (ip, port), Client {}).await?;
|
|
||||||
if !client.authenticate_password(username, password).await? {
|
|
||||||
return Err(Error::AuthenticationError(
|
|
||||||
"ssh authentication failed".to_string(),
|
|
||||||
));
|
|
||||||
}
|
|
||||||
Ok(client)
|
|
||||||
}
|
|
||||||
@@ -1,3 +0,0 @@
|
|||||||
.terraform
|
|
||||||
*.tfstate
|
|
||||||
venv
|
|
||||||
|
Before Width: | Height: | Size: 72 KiB |
|
Before Width: | Height: | Size: 38 KiB |
|
Before Width: | Height: | Size: 38 KiB |
|
Before Width: | Height: | Size: 52 KiB |
|
Before Width: | Height: | Size: 62 KiB |
|
Before Width: | Height: | Size: 64 KiB |
|
Before Width: | Height: | Size: 100 KiB |
@@ -1,5 +0,0 @@
|
|||||||
To build :
|
|
||||||
|
|
||||||
```bash
|
|
||||||
npx @marp-team/marp-cli@latest -w slides.md
|
|
||||||
```
|
|
||||||
|
Before Width: | Height: | Size: 11 KiB |
@@ -1,9 +0,0 @@
|
|||||||
To run this :
|
|
||||||
|
|
||||||
```bash
|
|
||||||
virtualenv venv
|
|
||||||
source venv/bin/activate
|
|
||||||
pip install ansible ansible-dev-tools
|
|
||||||
ansible-lint download.yml
|
|
||||||
ansible-playbook -i localhost download.yml
|
|
||||||
```
|
|
||||||
@@ -1,8 +0,0 @@
|
|||||||
- name: Test Ansible URL Validation
|
|
||||||
hosts: localhost
|
|
||||||
tasks:
|
|
||||||
- name: Download a file
|
|
||||||
ansible.builtin.get_url:
|
|
||||||
url: "http:/wikipedia.org/"
|
|
||||||
dest: "/tmp/ansible-test/wikipedia.html"
|
|
||||||
mode: '0900'
|
|
||||||
|
Before Width: | Height: | Size: 22 KiB |
|
Before Width: | Height: | Size: 275 KiB |
|
Before Width: | Height: | Size: 212 KiB |
|
Before Width: | Height: | Size: 384 KiB |
|
Before Width: | Height: | Size: 8.3 KiB |
@@ -1,241 +0,0 @@
|
|||||||
---
|
|
||||||
theme: uncover
|
|
||||||
---
|
|
||||||
|
|
||||||
# Voici l'histoire de Petit Poisson
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
<img src="./Happy_swimmer.jpg" width="600"/>
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
<img src="./happy_landscape_swimmer.jpg" width="1000"/>
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
<img src="./Happy_swimmer.jpg" width="200"/>
|
|
||||||
|
|
||||||
<img src="./tryrust.org.png" width="600"/>
|
|
||||||
|
|
||||||
[https://tryrust.org](https://tryrust.org)
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
<img src="./texto_deploy_prod_1.png" width="600"/>
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
<img src="./texto_deploy_prod_2.png" width="600"/>
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
<img src="./texto_deploy_prod_3.png" width="600"/>
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
<img src="./texto_deploy_prod_4.png" width="600"/>
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## Demo time
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
<img src="./Happy_swimmer_sunglasses.jpg" width="1000"/>
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
<img src="./texto_download_wikipedia.png" width="600"/>
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
<img src="./ansible.jpg" width="200"/>
|
|
||||||
|
|
||||||
## Ansible❓
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
<img src="./Happy_swimmer.jpg" width="200"/>
|
|
||||||
|
|
||||||
```yaml
|
|
||||||
- name: Download wikipedia
|
|
||||||
hosts: localhost
|
|
||||||
tasks:
|
|
||||||
- name: Download a file
|
|
||||||
ansible.builtin.get_url:
|
|
||||||
url: "https:/wikipedia.org/"
|
|
||||||
dest: "/tmp/ansible-test/wikipedia.html"
|
|
||||||
mode: '0900'
|
|
||||||
```
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
<img src="./Happy_swimmer.jpg" width="200"/>
|
|
||||||
|
|
||||||
```
|
|
||||||
ansible-lint download.yml
|
|
||||||
|
|
||||||
Passed: 0 failure(s), 0 warning(s) on 1 files. Last profile that met the validation criteria was 'production'.
|
|
||||||
```
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
```
|
|
||||||
git push
|
|
||||||
```
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
<img src="./75_years_later.jpg" width="1100"/>
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
<img src="./texto_download_wikipedia_fail.png" width="600"/>
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
<img src="./Happy_swimmer_reversed.jpg" width="600"/>
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
<img src="./ansible_output_fail.jpg" width="1100"/>
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
<img src="./Happy_swimmer_reversed_1hit.jpg" width="600"/>
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
<img src="./ansible_crossed_out.jpg" width="400"/>
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
|
|
||||||
<img src="./terraform.jpg" width="400"/>
|
|
||||||
|
|
||||||
## Terraform❓❗
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
<img src="./Happy_swimmer_reversed_1hit.jpg" width="200"/>
|
|
||||||
<img src="./terraform.jpg" width="200"/>
|
|
||||||
|
|
||||||
```tf
|
|
||||||
provider "docker" {}
|
|
||||||
|
|
||||||
resource "docker_network" "invalid_network" {
|
|
||||||
name = "my-invalid-network"
|
|
||||||
|
|
||||||
ipam_config {
|
|
||||||
subnet = "172.17.0.0/33"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
<img src="./Happy_swimmer_reversed_1hit.jpg" width="100"/>
|
|
||||||
<img src="./terraform.jpg" width="200"/>
|
|
||||||
|
|
||||||
```
|
|
||||||
terraform plan
|
|
||||||
|
|
||||||
Terraform used the selected providers to generate the following execution plan.
|
|
||||||
Resource actions are indicated with the following symbols:
|
|
||||||
+ create
|
|
||||||
|
|
||||||
Terraform will perform the following actions:
|
|
||||||
|
|
||||||
# docker_network.invalid_network will be created
|
|
||||||
+ resource "docker_network" "invalid_network" {
|
|
||||||
+ driver = (known after apply)
|
|
||||||
+ id = (known after apply)
|
|
||||||
+ internal = (known after apply)
|
|
||||||
+ ipam_driver = "default"
|
|
||||||
+ name = "my-invalid-network"
|
|
||||||
+ options = (known after apply)
|
|
||||||
+ scope = (known after apply)
|
|
||||||
|
|
||||||
+ ipam_config {
|
|
||||||
+ subnet = "172.17.0.0/33"
|
|
||||||
# (2 unchanged attributes hidden)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
Plan: 1 to add, 0 to change, 0 to destroy.
|
|
||||||
```
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
✅
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
```
|
|
||||||
terraform apply
|
|
||||||
```
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
```
|
|
||||||
Plan: 1 to add, 0 to change, 0 to destroy.
|
|
||||||
|
|
||||||
Do you want to perform these actions?
|
|
||||||
Terraform will perform the actions described above.
|
|
||||||
Only 'yes' will be accepted to approve.
|
|
||||||
|
|
||||||
Enter a value: yes
|
|
||||||
```
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
```
|
|
||||||
docker_network.invalid_network: Creating...
|
|
||||||
╷
|
|
||||||
│ Error: Unable to create network: Error response from daemon: invalid network config:
|
|
||||||
│ invalid subnet 172.17.0.0/33: invalid CIDR block notation
|
|
||||||
│
|
|
||||||
│ with docker_network.invalid_network,
|
|
||||||
│ on main.tf line 11, in resource "docker_network" "invalid_network":
|
|
||||||
│ 11: resource "docker_network" "invalid_network" {
|
|
||||||
│
|
|
||||||
╵
|
|
||||||
```
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
|
|
||||||
<img src="./Happy_swimmer_reversed_fullhit.jpg" width="1100"/>
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
<img src="./ansible_crossed_out.jpg" width="300"/>
|
|
||||||
<img src="./terraform_crossed_out.jpg" width="400"/>
|
|
||||||
<img src="./Happy_swimmer_reversed_fullhit.jpg" width="300"/>
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## Harmony❓❗
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
Demo time
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
<img src="./Happy_swimmer.jpg" width="300"/>
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
# 🎼
|
|
||||||
|
|
||||||
Harmony : [https://git.nationtech.io/nationtech/harmony](https://git.nationtech.io/nationtech/harmony)
|
|
||||||
|
|
||||||
|
|
||||||
<img src="./qrcode_gitea_nationtech.png" width="120"/>
|
|
||||||
|
|
||||||
|
|
||||||
LinkedIn : [https://www.linkedin.com/in/jean-gabriel-gill-couture/](https://www.linkedin.com/in/jean-gabriel-gill-couture/)
|
|
||||||
|
|
||||||
Courriel : [jg@nationtech.io](mailto:jg@nationtech.io)
|
|
||||||
|
Before Width: | Height: | Size: 11 KiB |
@@ -1,40 +0,0 @@
|
|||||||
# This file is maintained automatically by "terraform init".
|
|
||||||
# Manual edits may be lost in future updates.
|
|
||||||
|
|
||||||
provider "registry.terraform.io/hashicorp/http" {
|
|
||||||
version = "3.5.0"
|
|
||||||
hashes = [
|
|
||||||
"h1:8bUoPwS4hahOvzCBj6b04ObLVFXCEmEN8T/5eOHmWOM=",
|
|
||||||
"zh:047c5b4920751b13425efe0d011b3a23a3be97d02d9c0e3c60985521c9c456b7",
|
|
||||||
"zh:157866f700470207561f6d032d344916b82268ecd0cf8174fb11c0674c8d0736",
|
|
||||||
"zh:1973eb9383b0d83dd4fd5e662f0f16de837d072b64a6b7cd703410d730499476",
|
|
||||||
"zh:212f833a4e6d020840672f6f88273d62a564f44acb0c857b5961cdb3bbc14c90",
|
|
||||||
"zh:2c8034bc039fffaa1d4965ca02a8c6d57301e5fa9fff4773e684b46e3f78e76a",
|
|
||||||
"zh:5df353fc5b2dd31577def9cc1a4ebf0c9a9c2699d223c6b02087a3089c74a1c6",
|
|
||||||
"zh:672083810d4185076c81b16ad13d1224b9e6ea7f4850951d2ab8d30fa6e41f08",
|
|
||||||
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
|
|
||||||
"zh:7b4200f18abdbe39904b03537e1a78f21ebafe60f1c861a44387d314fda69da6",
|
|
||||||
"zh:843feacacd86baed820f81a6c9f7bd32cf302db3d7a0f39e87976ebc7a7cc2ee",
|
|
||||||
"zh:a9ea5096ab91aab260b22e4251c05f08dad2ed77e43e5e4fadcdfd87f2c78926",
|
|
||||||
"zh:d02b288922811739059e90184c7f76d45d07d3a77cc48d0b15fd3db14e928623",
|
|
||||||
]
|
|
||||||
}
|
|
||||||
|
|
||||||
provider "registry.terraform.io/hashicorp/local" {
|
|
||||||
version = "2.5.3"
|
|
||||||
hashes = [
|
|
||||||
"h1:1Nkh16jQJMp0EuDmvP/96f5Unnir0z12WyDuoR6HjMo=",
|
|
||||||
"zh:284d4b5b572eacd456e605e94372f740f6de27b71b4e1fd49b63745d8ecd4927",
|
|
||||||
"zh:40d9dfc9c549e406b5aab73c023aa485633c1b6b730c933d7bcc2fa67fd1ae6e",
|
|
||||||
"zh:6243509bb208656eb9dc17d3c525c89acdd27f08def427a0dce22d5db90a4c8b",
|
|
||||||
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
|
|
||||||
"zh:885d85869f927853b6fe330e235cd03c337ac3b933b0d9ae827ec32fa1fdcdbf",
|
|
||||||
"zh:bab66af51039bdfcccf85b25fe562cbba2f54f6b3812202f4873ade834ec201d",
|
|
||||||
"zh:c505ff1bf9442a889ac7dca3ac05a8ee6f852e0118dd9a61796a2f6ff4837f09",
|
|
||||||
"zh:d36c0b5770841ddb6eaf0499ba3de48e5d4fc99f4829b6ab66b0fab59b1aaf4f",
|
|
||||||
"zh:ddb6a407c7f3ec63efb4dad5f948b54f7f4434ee1a2607a49680d494b1776fe1",
|
|
||||||
"zh:e0dafdd4500bec23d3ff221e3a9b60621c5273e5df867bc59ef6b7e41f5c91f6",
|
|
||||||
"zh:ece8742fd2882a8fc9d6efd20e2590010d43db386b920b2a9c220cfecc18de47",
|
|
||||||
"zh:f4c6b3eb8f39105004cf720e202f04f57e3578441cfb76ca27611139bc116a82",
|
|
||||||
]
|
|
||||||
}
|
|
||||||
@@ -1,10 +0,0 @@
|
|||||||
provider "http" {}
|
|
||||||
|
|
||||||
data "http" "remote_file" {
|
|
||||||
url = "http:/example.com/file.txt"
|
|
||||||
}
|
|
||||||
|
|
||||||
resource "local_file" "downloaded_file" {
|
|
||||||
content = data.http.remote_file.body
|
|
||||||
filename = "${path.module}/downloaded_file.txt"
|
|
||||||
}
|
|
||||||
@@ -1,24 +0,0 @@
|
|||||||
# This file is maintained automatically by "terraform init".
|
|
||||||
# Manual edits may be lost in future updates.
|
|
||||||
|
|
||||||
provider "registry.terraform.io/kreuzwerker/docker" {
|
|
||||||
version = "3.0.2"
|
|
||||||
constraints = "~> 3.0.1"
|
|
||||||
hashes = [
|
|
||||||
"h1:cT2ccWOtlfKYBUE60/v2/4Q6Stk1KYTNnhxSck+VPlU=",
|
|
||||||
"zh:15b0a2b2b563d8d40f62f83057d91acb02cd0096f207488d8b4298a59203d64f",
|
|
||||||
"zh:23d919de139f7cd5ebfd2ff1b94e6d9913f0977fcfc2ca02e1573be53e269f95",
|
|
||||||
"zh:38081b3fe317c7e9555b2aaad325ad3fa516a886d2dfa8605ae6a809c1072138",
|
|
||||||
"zh:4a9c5065b178082f79ad8160243369c185214d874ff5048556d48d3edd03c4da",
|
|
||||||
"zh:5438ef6afe057945f28bce43d76c4401254073de01a774760169ac1058830ac2",
|
|
||||||
"zh:60b7fadc287166e5c9873dfe53a7976d98244979e0ab66428ea0dea1ebf33e06",
|
|
||||||
"zh:61c5ec1cb94e4c4a4fb1e4a24576d5f39a955f09afb17dab982de62b70a9bdd1",
|
|
||||||
"zh:a38fe9016ace5f911ab00c88e64b156ebbbbfb72a51a44da3c13d442cd214710",
|
|
||||||
"zh:c2c4d2b1fd9ebb291c57f524b3bf9d0994ff3e815c0cd9c9bcb87166dc687005",
|
|
||||||
"zh:d567bb8ce483ab2cf0602e07eae57027a1a53994aba470fa76095912a505533d",
|
|
||||||
"zh:e83bf05ab6a19dd8c43547ce9a8a511f8c331a124d11ac64687c764ab9d5a792",
|
|
||||||
"zh:e90c934b5cd65516fbcc454c89a150bfa726e7cf1fe749790c7480bbeb19d387",
|
|
||||||
"zh:f05f167d2eaf913045d8e7b88c13757e3cf595dd5cd333057fdafc7c4b7fed62",
|
|
||||||
"zh:fcc9c1cea5ce85e8bcb593862e699a881bd36dffd29e2e367f82d15368659c3d",
|
|
||||||
]
|
|
||||||
}
|
|
||||||
@@ -1,17 +0,0 @@
|
|||||||
terraform {
|
|
||||||
required_providers {
|
|
||||||
docker = {
|
|
||||||
source = "kreuzwerker/docker"
|
|
||||||
version = "~> 3.0.1" # Adjust version as needed
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
provider "docker" {}
|
|
||||||
|
|
||||||
resource "docker_network" "invalid_network" {
|
|
||||||
name = "my-invalid-network"
|
|
||||||
|
|
||||||
ipam_config {
|
|
||||||
subnet = "172.17.0.0/33"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
Before Width: | Height: | Size: 14 KiB |
|
Before Width: | Height: | Size: 144 KiB |
|
Before Width: | Height: | Size: 58 KiB |
|
Before Width: | Height: | Size: 56 KiB |
|
Before Width: | Height: | Size: 71 KiB |
|
Before Width: | Height: | Size: 81 KiB |
|
Before Width: | Height: | Size: 87 KiB |
|
Before Width: | Height: | Size: 88 KiB |
|
Before Width: | Height: | Size: 48 KiB |
|
Before Width: | Height: | Size: 325 KiB |
@@ -27,6 +27,7 @@ async fn main() {
|
|||||||
};
|
};
|
||||||
let application = Arc::new(RustWebapp {
|
let application = Arc::new(RustWebapp {
|
||||||
name: "example-monitoring".to_string(),
|
name: "example-monitoring".to_string(),
|
||||||
|
domain: Url::Url(url::Url::parse("https://rustapp.harmony.example.com").unwrap()),
|
||||||
project_root: PathBuf::from("./examples/rust/webapp"),
|
project_root: PathBuf::from("./examples/rust/webapp"),
|
||||||
framework: Some(RustWebFramework::Leptos),
|
framework: Some(RustWebFramework::Leptos),
|
||||||
service_port: 3000,
|
service_port: 3000,
|
||||||
|
|||||||
@@ -1,6 +1,6 @@
|
|||||||
use cidr::Ipv4Cidr;
|
use cidr::Ipv4Cidr;
|
||||||
use harmony::{
|
use harmony::{
|
||||||
hardware::{Location, SwitchGroup},
|
hardware::{FirewallGroup, HostCategory, Location, PhysicalHost, SwitchGroup},
|
||||||
infra::opnsense::OPNSenseManagementInterface,
|
infra::opnsense::OPNSenseManagementInterface,
|
||||||
inventory::Inventory,
|
inventory::Inventory,
|
||||||
topology::{HAClusterTopology, LogicalHost, UnmanagedRouter},
|
topology::{HAClusterTopology, LogicalHost, UnmanagedRouter},
|
||||||
|
|||||||
@@ -4,7 +4,8 @@ use harmony::{
|
|||||||
inventory::Inventory,
|
inventory::Inventory,
|
||||||
modules::{
|
modules::{
|
||||||
application::{
|
application::{
|
||||||
ApplicationScore, RustWebFramework, RustWebapp, features::rhob_monitoring::Monitoring,
|
ApplicationScore, RustWebFramework, RustWebapp,
|
||||||
|
features::rhob_monitoring::RHOBMonitoring,
|
||||||
},
|
},
|
||||||
monitoring::alert_channel::discord_alert_channel::DiscordWebhook,
|
monitoring::alert_channel::discord_alert_channel::DiscordWebhook,
|
||||||
},
|
},
|
||||||
@@ -16,6 +17,7 @@ use harmony_types::net::Url;
|
|||||||
async fn main() {
|
async fn main() {
|
||||||
let application = Arc::new(RustWebapp {
|
let application = Arc::new(RustWebapp {
|
||||||
name: "test-rhob-monitoring".to_string(),
|
name: "test-rhob-monitoring".to_string(),
|
||||||
|
domain: Url::Url(url::Url::parse("htps://some-fake-url").unwrap()),
|
||||||
project_root: PathBuf::from("./webapp"), // Relative from 'harmony-path' param
|
project_root: PathBuf::from("./webapp"), // Relative from 'harmony-path' param
|
||||||
framework: Some(RustWebFramework::Leptos),
|
framework: Some(RustWebFramework::Leptos),
|
||||||
service_port: 3000,
|
service_port: 3000,
|
||||||
@@ -28,7 +30,7 @@ async fn main() {
|
|||||||
|
|
||||||
let app = ApplicationScore {
|
let app = ApplicationScore {
|
||||||
features: vec![
|
features: vec![
|
||||||
Box::new(Monitoring {
|
Box::new(RHOBMonitoring {
|
||||||
application: application.clone(),
|
application: application.clone(),
|
||||||
alert_receiver: vec![Box::new(discord_receiver)],
|
alert_receiver: vec![Box::new(discord_receiver)],
|
||||||
}),
|
}),
|
||||||
|
|||||||
@@ -5,7 +5,7 @@ use harmony::{
|
|||||||
modules::{
|
modules::{
|
||||||
application::{
|
application::{
|
||||||
ApplicationScore, RustWebFramework, RustWebapp,
|
ApplicationScore, RustWebFramework, RustWebapp,
|
||||||
features::{Monitoring, PackagingDeployment},
|
features::{ContinuousDelivery, Monitoring},
|
||||||
},
|
},
|
||||||
monitoring::alert_channel::{
|
monitoring::alert_channel::{
|
||||||
discord_alert_channel::DiscordWebhook, webhook_receiver::WebhookReceiver,
|
discord_alert_channel::DiscordWebhook, webhook_receiver::WebhookReceiver,
|
||||||
@@ -19,6 +19,7 @@ use harmony_macros::hurl;
|
|||||||
async fn main() {
|
async fn main() {
|
||||||
let application = Arc::new(RustWebapp {
|
let application = Arc::new(RustWebapp {
|
||||||
name: "harmony-example-rust-webapp".to_string(),
|
name: "harmony-example-rust-webapp".to_string(),
|
||||||
|
domain: hurl!("https://rustapp.harmony.example.com"),
|
||||||
project_root: PathBuf::from("./webapp"),
|
project_root: PathBuf::from("./webapp"),
|
||||||
framework: Some(RustWebFramework::Leptos),
|
framework: Some(RustWebFramework::Leptos),
|
||||||
service_port: 3000,
|
service_port: 3000,
|
||||||
@@ -36,7 +37,7 @@ async fn main() {
|
|||||||
|
|
||||||
let app = ApplicationScore {
|
let app = ApplicationScore {
|
||||||
features: vec![
|
features: vec![
|
||||||
Box::new(PackagingDeployment {
|
Box::new(ContinuousDelivery {
|
||||||
application: application.clone(),
|
application: application.clone(),
|
||||||
}),
|
}),
|
||||||
Box::new(Monitoring {
|
Box::new(Monitoring {
|
||||||
|
|||||||
@@ -1 +0,0 @@
|
|||||||
harmony
|
|
||||||
@@ -1,20 +0,0 @@
|
|||||||
[package]
|
|
||||||
name = "harmony-tryrust"
|
|
||||||
edition = "2024"
|
|
||||||
version = "0.1.0"
|
|
||||||
|
|
||||||
[dependencies]
|
|
||||||
harmony = { path = "../../../nationtech/harmony/harmony" }
|
|
||||||
harmony_cli = { path = "../../../nationtech/harmony/harmony_cli" }
|
|
||||||
harmony_types = { path = "../../../nationtech/harmony/harmony_types" }
|
|
||||||
harmony_macros = { path = "../../../nationtech/harmony/harmony_macros" }
|
|
||||||
tokio = { version = "1.40", features = [
|
|
||||||
"io-std",
|
|
||||||
"fs",
|
|
||||||
"macros",
|
|
||||||
"rt-multi-thread",
|
|
||||||
] }
|
|
||||||
log = { version = "0.4", features = ["kv"] }
|
|
||||||
env_logger = "0.11"
|
|
||||||
url = "2.5"
|
|
||||||
base64 = "0.22.1"
|
|
||||||
@@ -1,50 +0,0 @@
|
|||||||
use harmony::{
|
|
||||||
inventory::Inventory,
|
|
||||||
modules::{
|
|
||||||
application::{
|
|
||||||
ApplicationScore, RustWebFramework, RustWebapp,
|
|
||||||
features::{PackagingDeployment, rhob_monitoring::Monitoring},
|
|
||||||
},
|
|
||||||
monitoring::alert_channel::discord_alert_channel::DiscordWebhook,
|
|
||||||
},
|
|
||||||
topology::K8sAnywhereTopology,
|
|
||||||
};
|
|
||||||
use harmony_macros::hurl;
|
|
||||||
use std::{path::PathBuf, sync::Arc};
|
|
||||||
|
|
||||||
#[tokio::main]
|
|
||||||
async fn main() {
|
|
||||||
let application = Arc::new(RustWebapp {
|
|
||||||
name: "tryrust".to_string(),
|
|
||||||
project_root: PathBuf::from(".."),
|
|
||||||
framework: Some(RustWebFramework::Leptos),
|
|
||||||
service_port: 8080,
|
|
||||||
});
|
|
||||||
|
|
||||||
let discord_webhook = DiscordWebhook {
|
|
||||||
name: "harmony_demo".to_string(),
|
|
||||||
url: hurl!("http://not_a_url.com"),
|
|
||||||
};
|
|
||||||
|
|
||||||
let app = ApplicationScore {
|
|
||||||
features: vec![
|
|
||||||
Box::new(PackagingDeployment {
|
|
||||||
application: application.clone(),
|
|
||||||
}),
|
|
||||||
Box::new(Monitoring {
|
|
||||||
application: application.clone(),
|
|
||||||
alert_receiver: vec![Box::new(discord_webhook)],
|
|
||||||
}),
|
|
||||||
],
|
|
||||||
application,
|
|
||||||
};
|
|
||||||
|
|
||||||
harmony_cli::run(
|
|
||||||
Inventory::autoload(),
|
|
||||||
K8sAnywhereTopology::from_env(),
|
|
||||||
vec![Box::new(app)],
|
|
||||||
None,
|
|
||||||
)
|
|
||||||
.await
|
|
||||||
.unwrap();
|
|
||||||
}
|
|
||||||
@@ -1,39 +1,41 @@
|
|||||||
|
use std::{path::PathBuf, sync::Arc};
|
||||||
|
|
||||||
use harmony::{
|
use harmony::{
|
||||||
inventory::Inventory,
|
inventory::Inventory,
|
||||||
modules::{
|
modules::{
|
||||||
application::{
|
application::{
|
||||||
ApplicationScore, RustWebFramework, RustWebapp,
|
ApplicationScore, RustWebFramework, RustWebapp,
|
||||||
features::{PackagingDeployment, rhob_monitoring::Monitoring},
|
features::{ContinuousDelivery, Monitoring},
|
||||||
},
|
},
|
||||||
monitoring::alert_channel::discord_alert_channel::DiscordWebhook,
|
monitoring::alert_channel::discord_alert_channel::DiscordWebhook,
|
||||||
},
|
},
|
||||||
topology::K8sAnywhereTopology,
|
topology::K8sAnywhereTopology,
|
||||||
};
|
};
|
||||||
use harmony_macros::hurl;
|
use harmony_types::net::Url;
|
||||||
use std::{path::PathBuf, sync::Arc};
|
|
||||||
|
|
||||||
#[tokio::main]
|
#[tokio::main]
|
||||||
async fn main() {
|
async fn main() {
|
||||||
let application = Arc::new(RustWebapp {
|
let application = Arc::new(RustWebapp {
|
||||||
name: "harmony-example-tryrust".to_string(),
|
name: "harmony-example-tryrust".to_string(),
|
||||||
project_root: PathBuf::from("./tryrust.org"), // <== Project root, in this case it is a
|
domain: Url::Url(url::Url::parse("https://tryrust.harmony.example.com").unwrap()),
|
||||||
// submodule
|
project_root: PathBuf::from("./tryrust.org"),
|
||||||
framework: Some(RustWebFramework::Leptos),
|
framework: Some(RustWebFramework::Leptos),
|
||||||
service_port: 8080,
|
service_port: 8080,
|
||||||
});
|
});
|
||||||
|
|
||||||
// Define your Application deployment and the features you want
|
let discord_receiver = DiscordWebhook {
|
||||||
|
name: "test-discord".to_string(),
|
||||||
|
url: Url::Url(url::Url::parse("https://discord.doesnt.exist.com").unwrap()),
|
||||||
|
};
|
||||||
|
|
||||||
let app = ApplicationScore {
|
let app = ApplicationScore {
|
||||||
features: vec![
|
features: vec![
|
||||||
Box::new(PackagingDeployment {
|
Box::new(ContinuousDelivery {
|
||||||
application: application.clone(),
|
application: application.clone(),
|
||||||
}),
|
}),
|
||||||
Box::new(Monitoring {
|
Box::new(Monitoring {
|
||||||
application: application.clone(),
|
application: application.clone(),
|
||||||
alert_receiver: vec![Box::new(DiscordWebhook {
|
alert_receiver: vec![Box::new(discord_receiver)],
|
||||||
name: "test-discord".to_string(),
|
|
||||||
url: hurl!("https://discord.doesnt.exist.com"),
|
|
||||||
})],
|
|
||||||
}),
|
}),
|
||||||
],
|
],
|
||||||
application,
|
application,
|
||||||
@@ -41,7 +43,7 @@ async fn main() {
|
|||||||
|
|
||||||
harmony_cli::run(
|
harmony_cli::run(
|
||||||
Inventory::autoload(),
|
Inventory::autoload(),
|
||||||
K8sAnywhereTopology::from_env(), // <== Deploy to local automatically provisioned k3d by default or connect to any kubernetes cluster
|
K8sAnywhereTopology::from_env(),
|
||||||
vec![Box::new(app)],
|
vec![Box::new(app)],
|
||||||
None,
|
None,
|
||||||
)
|
)
|
||||||
|
|||||||
@@ -10,11 +10,7 @@ testing = []
|
|||||||
|
|
||||||
[dependencies]
|
[dependencies]
|
||||||
hex = "0.4"
|
hex = "0.4"
|
||||||
reqwest = { version = "0.11", features = [
|
reqwest = { version = "0.11", features = ["blocking", "json", "rustls-tls"], default-features = false }
|
||||||
"blocking",
|
|
||||||
"json",
|
|
||||||
"rustls-tls",
|
|
||||||
], default-features = false }
|
|
||||||
russh = "0.45.0"
|
russh = "0.45.0"
|
||||||
rust-ipmi = "0.1.1"
|
rust-ipmi = "0.1.1"
|
||||||
semver = "1.0.23"
|
semver = "1.0.23"
|
||||||
@@ -77,8 +73,6 @@ harmony_secret = { path = "../harmony_secret" }
|
|||||||
askama.workspace = true
|
askama.workspace = true
|
||||||
sqlx.workspace = true
|
sqlx.workspace = true
|
||||||
inquire.workspace = true
|
inquire.workspace = true
|
||||||
brocade = { path = "../brocade" }
|
|
||||||
|
|
||||||
[dev-dependencies]
|
[dev-dependencies]
|
||||||
pretty_assertions.workspace = true
|
pretty_assertions.workspace = true
|
||||||
assertor.workspace = true
|
|
||||||
|
|||||||
@@ -12,11 +12,11 @@ pub type FirewallGroup = Vec<PhysicalHost>;
|
|||||||
pub struct PhysicalHost {
|
pub struct PhysicalHost {
|
||||||
pub id: Id,
|
pub id: Id,
|
||||||
pub category: HostCategory,
|
pub category: HostCategory,
|
||||||
pub network: Vec<NetworkInterface>, // FIXME: Don't use harmony_inventory_agent::NetworkInterface
|
pub network: Vec<NetworkInterface>,
|
||||||
pub storage: Vec<StorageDrive>, // FIXME: Don't use harmony_inventory_agent::StorageDrive
|
pub storage: Vec<StorageDrive>,
|
||||||
pub labels: Vec<Label>,
|
pub labels: Vec<Label>,
|
||||||
pub memory_modules: Vec<MemoryModule>, // FIXME: Don't use harmony_inventory_agent::MemoryModule
|
pub memory_modules: Vec<MemoryModule>,
|
||||||
pub cpus: Vec<CPU>, // FIXME: Don't use harmony_inventory_agent::CPU
|
pub cpus: Vec<CPU>,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl PhysicalHost {
|
impl PhysicalHost {
|
||||||
|
|||||||
@@ -34,7 +34,6 @@ pub enum InterpretName {
|
|||||||
CephClusterHealth,
|
CephClusterHealth,
|
||||||
Custom(&'static str),
|
Custom(&'static str),
|
||||||
RHOBAlerting,
|
RHOBAlerting,
|
||||||
K8sIngress,
|
|
||||||
}
|
}
|
||||||
|
|
||||||
impl std::fmt::Display for InterpretName {
|
impl std::fmt::Display for InterpretName {
|
||||||
@@ -65,7 +64,6 @@ impl std::fmt::Display for InterpretName {
|
|||||||
InterpretName::CephClusterHealth => f.write_str("CephClusterHealth"),
|
InterpretName::CephClusterHealth => f.write_str("CephClusterHealth"),
|
||||||
InterpretName::Custom(name) => f.write_str(name),
|
InterpretName::Custom(name) => f.write_str(name),
|
||||||
InterpretName::RHOBAlerting => f.write_str("RHOBAlerting"),
|
InterpretName::RHOBAlerting => f.write_str("RHOBAlerting"),
|
||||||
InterpretName::K8sIngress => f.write_str("K8sIngress"),
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -84,15 +82,13 @@ pub trait Interpret<T>: std::fmt::Debug + Send {
|
|||||||
pub struct Outcome {
|
pub struct Outcome {
|
||||||
pub status: InterpretStatus,
|
pub status: InterpretStatus,
|
||||||
pub message: String,
|
pub message: String,
|
||||||
pub details: Vec<String>,
|
|
||||||
}
|
}
|
||||||
|
|
||||||
impl Outcome {
|
impl Outcome {
|
||||||
pub fn noop(message: String) -> Self {
|
pub fn noop() -> Self {
|
||||||
Self {
|
Self {
|
||||||
status: InterpretStatus::NOOP,
|
status: InterpretStatus::NOOP,
|
||||||
message,
|
message: String::new(),
|
||||||
details: vec![],
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -100,23 +96,6 @@ impl Outcome {
|
|||||||
Self {
|
Self {
|
||||||
status: InterpretStatus::SUCCESS,
|
status: InterpretStatus::SUCCESS,
|
||||||
message,
|
message,
|
||||||
details: vec![],
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn success_with_details(message: String, details: Vec<String>) -> Self {
|
|
||||||
Self {
|
|
||||||
status: InterpretStatus::SUCCESS,
|
|
||||||
message,
|
|
||||||
details,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn running(message: String) -> Self {
|
|
||||||
Self {
|
|
||||||
status: InterpretStatus::RUNNING,
|
|
||||||
message,
|
|
||||||
details: vec![],
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,29 +1,12 @@
|
|||||||
use async_trait::async_trait;
|
use async_trait::async_trait;
|
||||||
use brocade::BrocadeOptions;
|
|
||||||
use harmony_macros::ip;
|
use harmony_macros::ip;
|
||||||
use harmony_secret::SecretManager;
|
|
||||||
use harmony_types::net::MacAddress;
|
use harmony_types::net::MacAddress;
|
||||||
use harmony_types::net::Url;
|
use harmony_types::net::Url;
|
||||||
use harmony_types::switch::PortLocation;
|
|
||||||
use k8s_openapi::api::core::v1::Namespace;
|
|
||||||
use kube::api::ObjectMeta;
|
|
||||||
use log::debug;
|
use log::debug;
|
||||||
use log::info;
|
use log::info;
|
||||||
|
|
||||||
use crate::data::FileContent;
|
use crate::data::FileContent;
|
||||||
use crate::executors::ExecutorError;
|
use crate::executors::ExecutorError;
|
||||||
use crate::hardware::PhysicalHost;
|
|
||||||
use crate::infra::brocade::BrocadeSwitchAuth;
|
|
||||||
use crate::infra::brocade::BrocadeSwitchClient;
|
|
||||||
use crate::modules::okd::crd::InstallPlanApproval;
|
|
||||||
use crate::modules::okd::crd::OperatorGroup;
|
|
||||||
use crate::modules::okd::crd::OperatorGroupSpec;
|
|
||||||
use crate::modules::okd::crd::Subscription;
|
|
||||||
use crate::modules::okd::crd::SubscriptionSpec;
|
|
||||||
use crate::modules::okd::crd::nmstate;
|
|
||||||
use crate::modules::okd::crd::nmstate::NMState;
|
|
||||||
use crate::modules::okd::crd::nmstate::NodeNetworkConfigurationPolicy;
|
|
||||||
use crate::modules::okd::crd::nmstate::NodeNetworkConfigurationPolicySpec;
|
|
||||||
use crate::topology::PxeOptions;
|
use crate::topology::PxeOptions;
|
||||||
|
|
||||||
use super::DHCPStaticEntry;
|
use super::DHCPStaticEntry;
|
||||||
@@ -32,7 +15,6 @@ use super::DnsRecord;
|
|||||||
use super::DnsRecordType;
|
use super::DnsRecordType;
|
||||||
use super::DnsServer;
|
use super::DnsServer;
|
||||||
use super::Firewall;
|
use super::Firewall;
|
||||||
use super::HostNetworkConfig;
|
|
||||||
use super::HttpServer;
|
use super::HttpServer;
|
||||||
use super::IpAddress;
|
use super::IpAddress;
|
||||||
use super::K8sclient;
|
use super::K8sclient;
|
||||||
@@ -42,15 +24,10 @@ use super::LogicalHost;
|
|||||||
use super::PreparationError;
|
use super::PreparationError;
|
||||||
use super::PreparationOutcome;
|
use super::PreparationOutcome;
|
||||||
use super::Router;
|
use super::Router;
|
||||||
use super::Switch;
|
|
||||||
use super::SwitchClient;
|
|
||||||
use super::SwitchError;
|
|
||||||
use super::TftpServer;
|
use super::TftpServer;
|
||||||
|
|
||||||
use super::Topology;
|
use super::Topology;
|
||||||
use super::k8s::K8sClient;
|
use super::k8s::K8sClient;
|
||||||
use std::collections::BTreeMap;
|
|
||||||
use std::net::IpAddr;
|
|
||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
|
|
||||||
#[derive(Debug, Clone)]
|
#[derive(Debug, Clone)]
|
||||||
@@ -112,231 +89,6 @@ impl HAClusterTopology {
|
|||||||
.to_string()
|
.to_string()
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn ensure_nmstate_operator_installed(&self) -> Result<(), String> {
|
|
||||||
// FIXME: Find a way to check nmstate is already available (get pod -n openshift-nmstate)
|
|
||||||
debug!("Installing NMState operator...");
|
|
||||||
let k8s_client = self.k8s_client().await?;
|
|
||||||
|
|
||||||
let nmstate_namespace = Namespace {
|
|
||||||
metadata: ObjectMeta {
|
|
||||||
name: Some("openshift-nmstate".to_string()),
|
|
||||||
finalizers: Some(vec!["kubernetes".to_string()]),
|
|
||||||
..Default::default()
|
|
||||||
},
|
|
||||||
..Default::default()
|
|
||||||
};
|
|
||||||
debug!("Creating NMState namespace: {nmstate_namespace:#?}");
|
|
||||||
k8s_client
|
|
||||||
.apply(&nmstate_namespace, None)
|
|
||||||
.await
|
|
||||||
.map_err(|e| e.to_string())?;
|
|
||||||
|
|
||||||
let nmstate_operator_group = OperatorGroup {
|
|
||||||
metadata: ObjectMeta {
|
|
||||||
name: Some("openshift-nmstate".to_string()),
|
|
||||||
namespace: Some("openshift-nmstate".to_string()),
|
|
||||||
..Default::default()
|
|
||||||
},
|
|
||||||
spec: OperatorGroupSpec {
|
|
||||||
target_namespaces: vec!["openshift-nmstate".to_string()],
|
|
||||||
},
|
|
||||||
};
|
|
||||||
debug!("Creating NMState operator group: {nmstate_operator_group:#?}");
|
|
||||||
k8s_client
|
|
||||||
.apply(&nmstate_operator_group, None)
|
|
||||||
.await
|
|
||||||
.map_err(|e| e.to_string())?;
|
|
||||||
|
|
||||||
let nmstate_subscription = Subscription {
|
|
||||||
metadata: ObjectMeta {
|
|
||||||
name: Some("kubernetes-nmstate-operator".to_string()),
|
|
||||||
namespace: Some("openshift-nmstate".to_string()),
|
|
||||||
..Default::default()
|
|
||||||
},
|
|
||||||
spec: SubscriptionSpec {
|
|
||||||
channel: Some("stable".to_string()),
|
|
||||||
install_plan_approval: Some(InstallPlanApproval::Automatic),
|
|
||||||
name: "kubernetes-nmstate-operator".to_string(),
|
|
||||||
source: "redhat-operators".to_string(),
|
|
||||||
source_namespace: "openshift-marketplace".to_string(),
|
|
||||||
},
|
|
||||||
};
|
|
||||||
debug!("Subscribing to NMState Operator: {nmstate_subscription:#?}");
|
|
||||||
k8s_client
|
|
||||||
.apply(&nmstate_subscription, None)
|
|
||||||
.await
|
|
||||||
.map_err(|e| e.to_string())?;
|
|
||||||
|
|
||||||
let nmstate = NMState {
|
|
||||||
metadata: ObjectMeta {
|
|
||||||
name: Some("nmstate".to_string()),
|
|
||||||
..Default::default()
|
|
||||||
},
|
|
||||||
..Default::default()
|
|
||||||
};
|
|
||||||
debug!("Creating NMState: {nmstate:#?}");
|
|
||||||
k8s_client
|
|
||||||
.apply(&nmstate, None)
|
|
||||||
.await
|
|
||||||
.map_err(|e| e.to_string())?;
|
|
||||||
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
fn get_next_bond_id(&self) -> u8 {
|
|
||||||
42 // FIXME: Find a better way to declare the bond id
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn configure_bond(
|
|
||||||
&self,
|
|
||||||
host: &PhysicalHost,
|
|
||||||
config: &HostNetworkConfig,
|
|
||||||
) -> Result<(), SwitchError> {
|
|
||||||
self.ensure_nmstate_operator_installed()
|
|
||||||
.await
|
|
||||||
.map_err(|e| {
|
|
||||||
SwitchError::new(format!(
|
|
||||||
"Can't configure bond, NMState operator not available: {e}"
|
|
||||||
))
|
|
||||||
})?;
|
|
||||||
|
|
||||||
let bond_config = self.create_bond_configuration(host, config);
|
|
||||||
debug!("Configuring bond for host {host:?}: {bond_config:#?}");
|
|
||||||
self.k8s_client()
|
|
||||||
.await
|
|
||||||
.unwrap()
|
|
||||||
.apply(&bond_config, None)
|
|
||||||
.await
|
|
||||||
.unwrap();
|
|
||||||
|
|
||||||
todo!()
|
|
||||||
}
|
|
||||||
|
|
||||||
fn create_bond_configuration(
|
|
||||||
&self,
|
|
||||||
host: &PhysicalHost,
|
|
||||||
config: &HostNetworkConfig,
|
|
||||||
) -> NodeNetworkConfigurationPolicy {
|
|
||||||
let host_name = host.id.clone();
|
|
||||||
|
|
||||||
let bond_id = self.get_next_bond_id();
|
|
||||||
let bond_name = format!("bond{bond_id}");
|
|
||||||
let mut bond_mtu: Option<u32> = None;
|
|
||||||
let mut bond_mac_address: Option<String> = None;
|
|
||||||
let mut bond_ports = Vec::new();
|
|
||||||
let mut interfaces: Vec<nmstate::InterfaceSpec> = Vec::new();
|
|
||||||
|
|
||||||
for switch_port in &config.switch_ports {
|
|
||||||
let interface_name = switch_port.interface.name.clone();
|
|
||||||
|
|
||||||
interfaces.push(nmstate::InterfaceSpec {
|
|
||||||
name: interface_name.clone(),
|
|
||||||
description: Some(format!("Member of bond {bond_name}")),
|
|
||||||
r#type: "ethernet".to_string(),
|
|
||||||
state: "up".to_string(),
|
|
||||||
mtu: Some(switch_port.interface.mtu),
|
|
||||||
mac_address: Some(switch_port.interface.mac_address.to_string()),
|
|
||||||
ipv4: Some(nmstate::IpStackSpec {
|
|
||||||
enabled: Some(false),
|
|
||||||
..Default::default()
|
|
||||||
}),
|
|
||||||
ipv6: Some(nmstate::IpStackSpec {
|
|
||||||
enabled: Some(false),
|
|
||||||
..Default::default()
|
|
||||||
}),
|
|
||||||
link_aggregation: None,
|
|
||||||
..Default::default()
|
|
||||||
});
|
|
||||||
|
|
||||||
bond_ports.push(interface_name);
|
|
||||||
|
|
||||||
// Use the first port's details for the bond mtu and mac address
|
|
||||||
if bond_mtu.is_none() {
|
|
||||||
bond_mtu = Some(switch_port.interface.mtu);
|
|
||||||
}
|
|
||||||
if bond_mac_address.is_none() {
|
|
||||||
bond_mac_address = Some(switch_port.interface.mac_address.to_string());
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
interfaces.push(nmstate::InterfaceSpec {
|
|
||||||
name: bond_name.clone(),
|
|
||||||
description: Some(format!("Network bond for host {host_name}")),
|
|
||||||
r#type: "bond".to_string(),
|
|
||||||
state: "up".to_string(),
|
|
||||||
mtu: bond_mtu,
|
|
||||||
mac_address: bond_mac_address,
|
|
||||||
ipv4: Some(nmstate::IpStackSpec {
|
|
||||||
dhcp: Some(true),
|
|
||||||
enabled: Some(true),
|
|
||||||
..Default::default()
|
|
||||||
}),
|
|
||||||
ipv6: Some(nmstate::IpStackSpec {
|
|
||||||
dhcp: Some(true),
|
|
||||||
autoconf: Some(true),
|
|
||||||
enabled: Some(true),
|
|
||||||
..Default::default()
|
|
||||||
}),
|
|
||||||
link_aggregation: Some(nmstate::BondSpec {
|
|
||||||
mode: "802.3ad".to_string(),
|
|
||||||
ports: bond_ports,
|
|
||||||
..Default::default()
|
|
||||||
}),
|
|
||||||
..Default::default()
|
|
||||||
});
|
|
||||||
|
|
||||||
NodeNetworkConfigurationPolicy {
|
|
||||||
metadata: ObjectMeta {
|
|
||||||
name: Some(format!("{host_name}-bond-config")),
|
|
||||||
..Default::default()
|
|
||||||
},
|
|
||||||
spec: NodeNetworkConfigurationPolicySpec {
|
|
||||||
node_selector: Some(BTreeMap::from([(
|
|
||||||
"kubernetes.io/hostname".to_string(),
|
|
||||||
host_name.to_string(),
|
|
||||||
)])),
|
|
||||||
desired_state: nmstate::DesiredStateSpec { interfaces },
|
|
||||||
},
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn get_switch_client(&self) -> Result<Box<dyn SwitchClient>, SwitchError> {
|
|
||||||
let auth = SecretManager::get_or_prompt::<BrocadeSwitchAuth>()
|
|
||||||
.await
|
|
||||||
.map_err(|e| SwitchError::new(format!("Failed to get credentials: {e}")))?;
|
|
||||||
|
|
||||||
// FIXME: We assume Brocade switches
|
|
||||||
let switches: Vec<IpAddr> = self.switch.iter().map(|s| s.ip).collect();
|
|
||||||
let brocade_options = Some(BrocadeOptions {
|
|
||||||
dry_run: *crate::config::DRY_RUN,
|
|
||||||
..Default::default()
|
|
||||||
});
|
|
||||||
let client =
|
|
||||||
BrocadeSwitchClient::init(&switches, &auth.username, &auth.password, brocade_options)
|
|
||||||
.await
|
|
||||||
.map_err(|e| SwitchError::new(format!("Failed to connect to switch: {e}")))?;
|
|
||||||
|
|
||||||
Ok(Box::new(client))
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn configure_port_channel(
|
|
||||||
&self,
|
|
||||||
host: &PhysicalHost,
|
|
||||||
config: &HostNetworkConfig,
|
|
||||||
) -> Result<(), SwitchError> {
|
|
||||||
debug!("Configuring port channel: {config:#?}");
|
|
||||||
let client = self.get_switch_client().await?;
|
|
||||||
|
|
||||||
let switch_ports = config.switch_ports.iter().map(|s| s.port.clone()).collect();
|
|
||||||
|
|
||||||
client
|
|
||||||
.configure_port_channel(&format!("Harmony_{}", host.id), switch_ports)
|
|
||||||
.await
|
|
||||||
.map_err(|e| SwitchError::new(format!("Failed to configure switch: {e}")))?;
|
|
||||||
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn autoload() -> Self {
|
pub fn autoload() -> Self {
|
||||||
let dummy_infra = Arc::new(DummyInfra {});
|
let dummy_infra = Arc::new(DummyInfra {});
|
||||||
let dummy_host = LogicalHost {
|
let dummy_host = LogicalHost {
|
||||||
@@ -511,27 +263,6 @@ impl HttpServer for HAClusterTopology {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
#[async_trait]
|
|
||||||
impl Switch for HAClusterTopology {
|
|
||||||
async fn get_port_for_mac_address(
|
|
||||||
&self,
|
|
||||||
mac_address: &MacAddress,
|
|
||||||
) -> Result<Option<PortLocation>, SwitchError> {
|
|
||||||
let client = self.get_switch_client().await?;
|
|
||||||
let port = client.find_port(mac_address).await?;
|
|
||||||
Ok(port)
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn configure_host_network(
|
|
||||||
&self,
|
|
||||||
host: &PhysicalHost,
|
|
||||||
config: HostNetworkConfig,
|
|
||||||
) -> Result<(), SwitchError> {
|
|
||||||
// self.configure_bond(host, &config).await?;
|
|
||||||
self.configure_port_channel(host, &config).await
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Debug)]
|
#[derive(Debug)]
|
||||||
pub struct DummyInfra;
|
pub struct DummyInfra;
|
||||||
|
|
||||||
@@ -601,8 +332,8 @@ impl DhcpServer for DummyInfra {
|
|||||||
}
|
}
|
||||||
async fn set_dhcp_range(
|
async fn set_dhcp_range(
|
||||||
&self,
|
&self,
|
||||||
_start: &IpAddress,
|
start: &IpAddress,
|
||||||
_end: &IpAddress,
|
end: &IpAddress,
|
||||||
) -> Result<(), ExecutorError> {
|
) -> Result<(), ExecutorError> {
|
||||||
unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA)
|
unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA)
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,7 +0,0 @@
|
|||||||
use crate::topology::PreparationError;
|
|
||||||
use async_trait::async_trait;
|
|
||||||
|
|
||||||
#[async_trait]
|
|
||||||
pub trait Ingress {
|
|
||||||
async fn get_domain(&self, service: &str) -> Result<String, PreparationError>;
|
|
||||||
}
|
|
||||||
@@ -1,7 +1,6 @@
|
|||||||
use std::{process::Command, sync::Arc};
|
use std::{process::Command, sync::Arc};
|
||||||
|
|
||||||
use async_trait::async_trait;
|
use async_trait::async_trait;
|
||||||
use kube::api::GroupVersionKind;
|
|
||||||
use log::{debug, info, warn};
|
use log::{debug, info, warn};
|
||||||
use serde::Serialize;
|
use serde::Serialize;
|
||||||
use tokio::sync::OnceCell;
|
use tokio::sync::OnceCell;
|
||||||
@@ -23,7 +22,6 @@ use crate::{
|
|||||||
},
|
},
|
||||||
},
|
},
|
||||||
score::Score,
|
score::Score,
|
||||||
topology::ingress::Ingress,
|
|
||||||
};
|
};
|
||||||
|
|
||||||
use super::{
|
use super::{
|
||||||
@@ -200,26 +198,6 @@ impl K8sAnywhereTopology {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn openshift_ingress_operator_available(&self) -> Result<(), PreparationError> {
|
|
||||||
let client = self.k8s_client().await?;
|
|
||||||
let gvk = GroupVersionKind {
|
|
||||||
group: "operator.openshift.io".into(),
|
|
||||||
version: "v1".into(),
|
|
||||||
kind: "IngressController".into(),
|
|
||||||
};
|
|
||||||
let ic = client
|
|
||||||
.get_resource_json_value("default", Some("openshift-ingress-operator"), &gvk)
|
|
||||||
.await?;
|
|
||||||
let ready_replicas = ic.data["status"]["availableReplicas"].as_i64().unwrap_or(0);
|
|
||||||
if ready_replicas >= 1 {
|
|
||||||
return Ok(());
|
|
||||||
} else {
|
|
||||||
return Err(PreparationError::new(
|
|
||||||
"openshift-ingress-operator not available".to_string(),
|
|
||||||
));
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fn is_helm_available(&self) -> Result<(), String> {
|
fn is_helm_available(&self) -> Result<(), String> {
|
||||||
let version_result = Command::new("helm")
|
let version_result = Command::new("helm")
|
||||||
.arg("version")
|
.arg("version")
|
||||||
@@ -372,10 +350,6 @@ impl K8sAnywhereTopology {
|
|||||||
if let Some(Some(k8s_state)) = self.k8s_state.get() {
|
if let Some(Some(k8s_state)) = self.k8s_state.get() {
|
||||||
match k8s_state.source {
|
match k8s_state.source {
|
||||||
K8sSource::LocalK3d => {
|
K8sSource::LocalK3d => {
|
||||||
warn!(
|
|
||||||
"Installing observability operator is not supported on LocalK3d source"
|
|
||||||
);
|
|
||||||
return Ok(PreparationOutcome::Noop);
|
|
||||||
debug!("installing cluster observability operator");
|
debug!("installing cluster observability operator");
|
||||||
todo!();
|
todo!();
|
||||||
let op_score =
|
let op_score =
|
||||||
@@ -554,7 +528,7 @@ impl MultiTargetTopology for K8sAnywhereTopology {
|
|||||||
match self.config.harmony_profile.to_lowercase().as_str() {
|
match self.config.harmony_profile.to_lowercase().as_str() {
|
||||||
"staging" => DeploymentTarget::Staging,
|
"staging" => DeploymentTarget::Staging,
|
||||||
"production" => DeploymentTarget::Production,
|
"production" => DeploymentTarget::Production,
|
||||||
_ => todo!("HARMONY_PROFILE must be set when use_local_k3d is false"),
|
_ => todo!("HARMONY_PROFILE must be set when use_local_k3d is not set"),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -576,45 +550,3 @@ impl TenantManager for K8sAnywhereTopology {
|
|||||||
.await
|
.await
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
#[async_trait]
|
|
||||||
impl Ingress for K8sAnywhereTopology {
|
|
||||||
//TODO this is specifically for openshift/okd which violates the k8sanywhere idea
|
|
||||||
async fn get_domain(&self, service: &str) -> Result<String, PreparationError> {
|
|
||||||
let client = self.k8s_client().await?;
|
|
||||||
|
|
||||||
if let Some(Some(k8s_state)) = self.k8s_state.get() {
|
|
||||||
match k8s_state.source {
|
|
||||||
K8sSource::LocalK3d => Ok(format!("{service}.local.k3d")),
|
|
||||||
K8sSource::Kubeconfig => {
|
|
||||||
self.openshift_ingress_operator_available().await?;
|
|
||||||
|
|
||||||
let gvk = GroupVersionKind {
|
|
||||||
group: "operator.openshift.io".into(),
|
|
||||||
version: "v1".into(),
|
|
||||||
kind: "IngressController".into(),
|
|
||||||
};
|
|
||||||
let ic = client
|
|
||||||
.get_resource_json_value(
|
|
||||||
"default",
|
|
||||||
Some("openshift-ingress-operator"),
|
|
||||||
&gvk,
|
|
||||||
)
|
|
||||||
.await
|
|
||||||
.map_err(|_| {
|
|
||||||
PreparationError::new("Failed to fetch IngressController".to_string())
|
|
||||||
})?;
|
|
||||||
|
|
||||||
match ic.data["status"]["domain"].as_str() {
|
|
||||||
Some(domain) => Ok(format!("{service}.{domain}")),
|
|
||||||
None => Err(PreparationError::new("Could not find domain".to_string())),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
Err(PreparationError::new(
|
|
||||||
"Cannot get domain: unable to detect K8s state".to_string(),
|
|
||||||
))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|||||||
@@ -1,10 +1,9 @@
|
|||||||
use async_trait::async_trait;
|
use async_trait::async_trait;
|
||||||
use derive_new::new;
|
use derive_new::new;
|
||||||
use serde::{Deserialize, Serialize};
|
|
||||||
|
|
||||||
use super::{HelmCommand, PreparationError, PreparationOutcome, Topology};
|
use super::{HelmCommand, PreparationError, PreparationOutcome, Topology};
|
||||||
|
|
||||||
#[derive(new, Clone, Debug, Serialize, Deserialize)]
|
#[derive(new)]
|
||||||
pub struct LocalhostTopology;
|
pub struct LocalhostTopology;
|
||||||
|
|
||||||
#[async_trait]
|
#[async_trait]
|
||||||
|
|||||||
@@ -1,5 +1,4 @@
|
|||||||
mod ha_cluster;
|
mod ha_cluster;
|
||||||
pub mod ingress;
|
|
||||||
use harmony_types::net::IpAddress;
|
use harmony_types::net::IpAddress;
|
||||||
mod host_binding;
|
mod host_binding;
|
||||||
mod http;
|
mod http;
|
||||||
|
|||||||
@@ -1,14 +1,10 @@
|
|||||||
use std::{error::Error, net::Ipv4Addr, str::FromStr, sync::Arc};
|
use std::{net::Ipv4Addr, str::FromStr, sync::Arc};
|
||||||
|
|
||||||
use async_trait::async_trait;
|
use async_trait::async_trait;
|
||||||
use derive_new::new;
|
use harmony_types::net::{IpAddress, MacAddress};
|
||||||
use harmony_types::{
|
|
||||||
net::{IpAddress, MacAddress},
|
|
||||||
switch::PortLocation,
|
|
||||||
};
|
|
||||||
use serde::Serialize;
|
use serde::Serialize;
|
||||||
|
|
||||||
use crate::{executors::ExecutorError, hardware::PhysicalHost};
|
use crate::executors::ExecutorError;
|
||||||
|
|
||||||
use super::{LogicalHost, k8s::K8sClient};
|
use super::{LogicalHost, k8s::K8sClient};
|
||||||
|
|
||||||
@@ -176,66 +172,6 @@ impl FromStr for DnsRecordType {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
#[async_trait]
|
|
||||||
pub trait Switch: Send + Sync {
|
|
||||||
async fn get_port_for_mac_address(
|
|
||||||
&self,
|
|
||||||
mac_address: &MacAddress,
|
|
||||||
) -> Result<Option<PortLocation>, SwitchError>;
|
|
||||||
|
|
||||||
async fn configure_host_network(
|
|
||||||
&self,
|
|
||||||
host: &PhysicalHost,
|
|
||||||
config: HostNetworkConfig,
|
|
||||||
) -> Result<(), SwitchError>;
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Clone, Debug, PartialEq)]
|
|
||||||
pub struct HostNetworkConfig {
|
|
||||||
pub switch_ports: Vec<SwitchPort>,
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Clone, Debug, PartialEq)]
|
|
||||||
pub struct SwitchPort {
|
|
||||||
pub interface: NetworkInterface,
|
|
||||||
pub port: PortLocation,
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Clone, Debug, PartialEq)]
|
|
||||||
pub struct NetworkInterface {
|
|
||||||
pub name: String,
|
|
||||||
pub mac_address: MacAddress,
|
|
||||||
pub speed_mbps: Option<u32>,
|
|
||||||
pub mtu: u32,
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Debug, Clone, new)]
|
|
||||||
pub struct SwitchError {
|
|
||||||
msg: String,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl std::fmt::Display for SwitchError {
|
|
||||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
|
||||||
f.write_str(&self.msg)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Error for SwitchError {}
|
|
||||||
|
|
||||||
#[async_trait]
|
|
||||||
pub trait SwitchClient: Send + Sync {
|
|
||||||
async fn find_port(
|
|
||||||
&self,
|
|
||||||
mac_address: &MacAddress,
|
|
||||||
) -> Result<Option<PortLocation>, SwitchError>;
|
|
||||||
|
|
||||||
async fn configure_port_channel(
|
|
||||||
&self,
|
|
||||||
channel_name: &str,
|
|
||||||
switch_ports: Vec<PortLocation>,
|
|
||||||
) -> Result<u8, SwitchError>;
|
|
||||||
}
|
|
||||||
|
|
||||||
#[cfg(test)]
|
#[cfg(test)]
|
||||||
mod test {
|
mod test {
|
||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
|
|||||||
@@ -1,81 +0,0 @@
|
|||||||
use async_trait::async_trait;
|
|
||||||
use brocade::{BrocadeClient, BrocadeOptions};
|
|
||||||
use harmony_secret::Secret;
|
|
||||||
use harmony_types::{
|
|
||||||
net::{IpAddress, MacAddress},
|
|
||||||
switch::{PortDeclaration, PortLocation},
|
|
||||||
};
|
|
||||||
use serde::{Deserialize, Serialize};
|
|
||||||
|
|
||||||
use crate::topology::{SwitchClient, SwitchError};
|
|
||||||
|
|
||||||
pub struct BrocadeSwitchClient {
|
|
||||||
brocade: Box<dyn BrocadeClient + Send + Sync>,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl BrocadeSwitchClient {
|
|
||||||
pub async fn init(
|
|
||||||
ip_addresses: &[IpAddress],
|
|
||||||
username: &str,
|
|
||||||
password: &str,
|
|
||||||
options: Option<BrocadeOptions>,
|
|
||||||
) -> Result<Self, brocade::Error> {
|
|
||||||
let brocade = brocade::init(ip_addresses, 22, username, password, options).await?;
|
|
||||||
Ok(Self { brocade })
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[async_trait]
|
|
||||||
impl SwitchClient for BrocadeSwitchClient {
|
|
||||||
async fn find_port(
|
|
||||||
&self,
|
|
||||||
mac_address: &MacAddress,
|
|
||||||
) -> Result<Option<PortLocation>, SwitchError> {
|
|
||||||
let table = self
|
|
||||||
.brocade
|
|
||||||
.show_mac_address_table()
|
|
||||||
.await
|
|
||||||
.map_err(|e| SwitchError::new(format!("{e}")))?;
|
|
||||||
|
|
||||||
let port = table
|
|
||||||
.iter()
|
|
||||||
.find(|entry| entry.mac_address == *mac_address)
|
|
||||||
.map(|entry| match &entry.port {
|
|
||||||
PortDeclaration::Single(port_location) => Ok(port_location.clone()),
|
|
||||||
_ => Err(SwitchError::new(
|
|
||||||
"Multiple ports found for MAC address".into(),
|
|
||||||
)),
|
|
||||||
});
|
|
||||||
|
|
||||||
match port {
|
|
||||||
Some(Ok(p)) => Ok(Some(p)),
|
|
||||||
Some(Err(e)) => Err(e),
|
|
||||||
None => Ok(None),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn configure_port_channel(
|
|
||||||
&self,
|
|
||||||
channel_name: &str,
|
|
||||||
switch_ports: Vec<PortLocation>,
|
|
||||||
) -> Result<u8, SwitchError> {
|
|
||||||
let channel_id = self
|
|
||||||
.brocade
|
|
||||||
.find_available_channel_id()
|
|
||||||
.await
|
|
||||||
.map_err(|e| SwitchError::new(format!("{e}")))?;
|
|
||||||
|
|
||||||
self.brocade
|
|
||||||
.create_port_channel(channel_id, channel_name, &switch_ports)
|
|
||||||
.await
|
|
||||||
.map_err(|e| SwitchError::new(format!("{e}")))?;
|
|
||||||
|
|
||||||
Ok(channel_id)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Secret, Serialize, Deserialize, Debug)]
|
|
||||||
pub struct BrocadeSwitchAuth {
|
|
||||||
pub username: String,
|
|
||||||
pub password: String,
|
|
||||||
}
|
|
||||||
@@ -1,4 +1,3 @@
|
|||||||
pub mod brocade;
|
|
||||||
pub mod executors;
|
pub mod executors;
|
||||||
pub mod hp_ilo;
|
pub mod hp_ilo;
|
||||||
pub mod intel_amt;
|
pub mod intel_amt;
|
||||||
|
|||||||
@@ -1,10 +1,7 @@
|
|||||||
use std::error::Error;
|
|
||||||
|
|
||||||
use async_trait::async_trait;
|
use async_trait::async_trait;
|
||||||
use derive_new::new;
|
|
||||||
use serde::Serialize;
|
use serde::Serialize;
|
||||||
|
|
||||||
use crate::{executors::ExecutorError, topology::Topology};
|
use crate::topology::Topology;
|
||||||
|
|
||||||
/// An ApplicationFeature provided by harmony, such as Backups, Monitoring, MultisiteAvailability,
|
/// An ApplicationFeature provided by harmony, such as Backups, Monitoring, MultisiteAvailability,
|
||||||
/// ContinuousIntegration, ContinuousDelivery
|
/// ContinuousIntegration, ContinuousDelivery
|
||||||
@@ -12,10 +9,7 @@ use crate::{executors::ExecutorError, topology::Topology};
|
|||||||
pub trait ApplicationFeature<T: Topology>:
|
pub trait ApplicationFeature<T: Topology>:
|
||||||
std::fmt::Debug + Send + Sync + ApplicationFeatureClone<T>
|
std::fmt::Debug + Send + Sync + ApplicationFeatureClone<T>
|
||||||
{
|
{
|
||||||
async fn ensure_installed(
|
async fn ensure_installed(&self, topology: &T) -> Result<(), String>;
|
||||||
&self,
|
|
||||||
topology: &T,
|
|
||||||
) -> Result<InstallationOutcome, InstallationError>;
|
|
||||||
fn name(&self) -> String;
|
fn name(&self) -> String;
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -46,60 +40,3 @@ impl<T: Topology> Clone for Box<dyn ApplicationFeature<T>> {
|
|||||||
self.clone_box()
|
self.clone_box()
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Debug, Clone, PartialEq, Eq)]
|
|
||||||
pub enum InstallationOutcome {
|
|
||||||
Success { details: Vec<String> },
|
|
||||||
Noop,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl InstallationOutcome {
|
|
||||||
pub fn success() -> Self {
|
|
||||||
Self::Success { details: vec![] }
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn success_with_details(details: Vec<String>) -> Self {
|
|
||||||
Self::Success { details }
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn noop() -> Self {
|
|
||||||
Self::Noop
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Debug, Clone, new)]
|
|
||||||
pub struct InstallationError {
|
|
||||||
msg: String,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl std::fmt::Display for InstallationError {
|
|
||||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
|
||||||
f.write_str(&self.msg)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Error for InstallationError {}
|
|
||||||
|
|
||||||
impl From<ExecutorError> for InstallationError {
|
|
||||||
fn from(value: ExecutorError) -> Self {
|
|
||||||
Self {
|
|
||||||
msg: format!("InstallationError : {value}"),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl From<kube::Error> for InstallationError {
|
|
||||||
fn from(value: kube::Error) -> Self {
|
|
||||||
Self {
|
|
||||||
msg: format!("InstallationError : {value}"),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl From<String> for InstallationError {
|
|
||||||
fn from(value: String) -> Self {
|
|
||||||
Self {
|
|
||||||
msg: format!("PreparationError : {value}"),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|||||||
@@ -10,13 +10,11 @@ use crate::{
|
|||||||
data::Version,
|
data::Version,
|
||||||
inventory::Inventory,
|
inventory::Inventory,
|
||||||
modules::application::{
|
modules::application::{
|
||||||
ApplicationFeature, HelmPackage, InstallationError, InstallationOutcome, OCICompliant,
|
ApplicationFeature, HelmPackage, OCICompliant,
|
||||||
features::{ArgoApplication, ArgoHelmScore},
|
features::{ArgoApplication, ArgoHelmScore},
|
||||||
},
|
},
|
||||||
score::Score,
|
score::Score,
|
||||||
topology::{
|
topology::{DeploymentTarget, HelmCommand, K8sclient, MultiTargetTopology, Topology},
|
||||||
DeploymentTarget, HelmCommand, K8sclient, MultiTargetTopology, Topology, ingress::Ingress,
|
|
||||||
},
|
|
||||||
};
|
};
|
||||||
|
|
||||||
/// ContinuousDelivery in Harmony provides this functionality :
|
/// ContinuousDelivery in Harmony provides this functionality :
|
||||||
@@ -47,11 +45,60 @@ use crate::{
|
|||||||
/// - ArgoCD to install/upgrade/rollback/inspect k8s resources
|
/// - ArgoCD to install/upgrade/rollback/inspect k8s resources
|
||||||
/// - Kubernetes for runtime orchestration
|
/// - Kubernetes for runtime orchestration
|
||||||
#[derive(Debug, Default, Clone)]
|
#[derive(Debug, Default, Clone)]
|
||||||
pub struct PackagingDeployment<A: OCICompliant + HelmPackage> {
|
pub struct ContinuousDelivery<A: OCICompliant + HelmPackage> {
|
||||||
pub application: Arc<A>,
|
pub application: Arc<A>,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<A: OCICompliant + HelmPackage> PackagingDeployment<A> {
|
impl<A: OCICompliant + HelmPackage> ContinuousDelivery<A> {
|
||||||
|
pub async fn deploy<T>(&self, topology: &T, helm_chart: String, image: String) -> Result<(), String>
|
||||||
|
where
|
||||||
|
T: Topology + HelmCommand + MultiTargetTopology + K8sclient + 'static,
|
||||||
|
{
|
||||||
|
// TODO: this is a temporary hack for demo purposes, the deployment target should be driven
|
||||||
|
// by the topology only and we should not have to know how to perform tasks like this for
|
||||||
|
// which the topology should be responsible.
|
||||||
|
//
|
||||||
|
// That said, this will require some careful architectural decisions, since the concept of
|
||||||
|
// deployment targets / profiles is probably a layer of complexity that we won't be
|
||||||
|
// completely able to avoid
|
||||||
|
//
|
||||||
|
// I'll try something for now that must be thought through after : att a deployment_profile
|
||||||
|
// function to the topology trait that returns a profile, then anybody who needs it can
|
||||||
|
// access it. This forces every Topology to understand the concept of targets though... So
|
||||||
|
// instead I'll create a new Capability which is MultiTargetTopology and we'll see how it
|
||||||
|
// goes. It still does not feel right though.
|
||||||
|
//
|
||||||
|
// https://git.nationtech.io/NationTech/harmony/issues/106
|
||||||
|
match topology.current_target() {
|
||||||
|
DeploymentTarget::LocalDev => {
|
||||||
|
info!("Deploying {} locally...", self.application.name());
|
||||||
|
self.deploy_to_local_k3d(self.application.name(), helm_chart, image)
|
||||||
|
.await?;
|
||||||
|
}
|
||||||
|
target => {
|
||||||
|
info!("Deploying {} to target {target:?}", self.application.name());
|
||||||
|
|
||||||
|
let score = ArgoHelmScore {
|
||||||
|
namespace: format!("{}", self.application.name()),
|
||||||
|
openshift: true,
|
||||||
|
argo_apps: vec![ArgoApplication::from(CDApplicationConfig {
|
||||||
|
// helm pull oci://hub.nationtech.io/harmony/harmony-example-rust-webapp-chart --version 0.1.0
|
||||||
|
version: Version::from("0.1.0").unwrap(),
|
||||||
|
helm_chart_repo_url: "hub.nationtech.io/harmony".to_string(),
|
||||||
|
helm_chart_name: format!("{}-chart", self.application.name()),
|
||||||
|
values_overrides: None,
|
||||||
|
name: format!("{}", self.application.name()),
|
||||||
|
namespace: format!("{}", self.application.name()),
|
||||||
|
})],
|
||||||
|
};
|
||||||
|
score
|
||||||
|
.interpret(&Inventory::empty(), topology)
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
}
|
||||||
|
};
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
async fn deploy_to_local_k3d(
|
async fn deploy_to_local_k3d(
|
||||||
&self,
|
&self,
|
||||||
app_name: String,
|
app_name: String,
|
||||||
@@ -138,81 +185,24 @@ impl<A: OCICompliant + HelmPackage> PackagingDeployment<A> {
|
|||||||
#[async_trait]
|
#[async_trait]
|
||||||
impl<
|
impl<
|
||||||
A: OCICompliant + HelmPackage + Clone + 'static,
|
A: OCICompliant + HelmPackage + Clone + 'static,
|
||||||
T: Topology + HelmCommand + MultiTargetTopology + K8sclient + Ingress + 'static,
|
T: Topology + HelmCommand + MultiTargetTopology + K8sclient + 'static,
|
||||||
> ApplicationFeature<T> for PackagingDeployment<A>
|
> ApplicationFeature<T> for ContinuousDelivery<A>
|
||||||
{
|
{
|
||||||
async fn ensure_installed(
|
async fn ensure_installed(&self, topology: &T) -> Result<(), String> {
|
||||||
&self,
|
|
||||||
topology: &T,
|
|
||||||
) -> Result<InstallationOutcome, InstallationError> {
|
|
||||||
let image = self.application.image_name();
|
let image = self.application.image_name();
|
||||||
let domain = topology
|
|
||||||
.get_domain(&self.application.name())
|
|
||||||
.await
|
|
||||||
.map_err(|e| e.to_string())?;
|
|
||||||
|
|
||||||
// TODO Write CI/CD workflow files
|
// TODO Write CI/CD workflow files
|
||||||
// we can autotedect the CI type using the remote url (default to github action for github
|
// we can autotedect the CI type using the remote url (default to github action for github
|
||||||
// url, etc..)
|
// url, etc..)
|
||||||
// Or ask for it when unknown
|
// Or ask for it when unknown
|
||||||
|
|
||||||
let helm_chart = self
|
let helm_chart = self.application.build_push_helm_package(&image).await?;
|
||||||
.application
|
|
||||||
.build_push_helm_package(&image, &domain)
|
|
||||||
.await?;
|
|
||||||
|
|
||||||
// TODO: Make building image configurable/skippable if image already exists (prompt)")
|
// TODO: Make building image configurable/skippable if image already exists (prompt)")
|
||||||
// https://git.nationtech.io/NationTech/harmony/issues/104
|
// https://git.nationtech.io/NationTech/harmony/issues/104
|
||||||
let image = self.application.build_push_oci_image().await?;
|
let image = self.application.build_push_oci_image().await?;
|
||||||
|
|
||||||
// TODO: this is a temporary hack for demo purposes, the deployment target should be driven
|
self.deploy(topology, helm_chart, image).await
|
||||||
// by the topology only and we should not have to know how to perform tasks like this for
|
|
||||||
// which the topology should be responsible.
|
|
||||||
//
|
|
||||||
// That said, this will require some careful architectural decisions, since the concept of
|
|
||||||
// deployment targets / profiles is probably a layer of complexity that we won't be
|
|
||||||
// completely able to avoid
|
|
||||||
//
|
|
||||||
// I'll try something for now that must be thought through after : att a deployment_profile
|
|
||||||
// function to the topology trait that returns a profile, then anybody who needs it can
|
|
||||||
// access it. This forces every Topology to understand the concept of targets though... So
|
|
||||||
// instead I'll create a new Capability which is MultiTargetTopology and we'll see how it
|
|
||||||
// goes. It still does not feel right though.
|
|
||||||
//
|
|
||||||
// https://git.nationtech.io/NationTech/harmony/issues/106
|
|
||||||
match topology.current_target() {
|
|
||||||
DeploymentTarget::LocalDev => {
|
|
||||||
info!("Deploying {} locally...", self.application.name());
|
|
||||||
self.deploy_to_local_k3d(self.application.name(), helm_chart, image)
|
|
||||||
.await?;
|
|
||||||
}
|
|
||||||
target => {
|
|
||||||
info!("Deploying {} to target {target:?}", self.application.name());
|
|
||||||
|
|
||||||
let score = ArgoHelmScore {
|
|
||||||
namespace: format!("{}", self.application.name()),
|
|
||||||
openshift: true,
|
|
||||||
argo_apps: vec![ArgoApplication::from(CDApplicationConfig {
|
|
||||||
// helm pull oci://hub.nationtech.io/harmony/harmony-example-rust-webapp-chart --version 0.1.0
|
|
||||||
version: Version::from("0.1.0").unwrap(),
|
|
||||||
helm_chart_repo_url: "hub.nationtech.io/harmony".to_string(),
|
|
||||||
helm_chart_name: format!("{}-chart", self.application.name()),
|
|
||||||
values_overrides: None,
|
|
||||||
name: format!("{}", self.application.name()),
|
|
||||||
namespace: format!("{}", self.application.name()),
|
|
||||||
})],
|
|
||||||
};
|
|
||||||
score
|
|
||||||
.interpret(&Inventory::empty(), topology)
|
|
||||||
.await
|
|
||||||
.unwrap();
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
Ok(InstallationOutcome::success_with_details(vec![format!(
|
|
||||||
"{}: http://{domain}",
|
|
||||||
self.application.name()
|
|
||||||
)]))
|
|
||||||
}
|
}
|
||||||
fn name(&self) -> String {
|
fn name(&self) -> String {
|
||||||
"ContinuousDelivery".to_string()
|
"ContinuousDelivery".to_string()
|
||||||
@@ -2,7 +2,7 @@ use async_trait::async_trait;
|
|||||||
use log::info;
|
use log::info;
|
||||||
|
|
||||||
use crate::{
|
use crate::{
|
||||||
modules::application::{ApplicationFeature, InstallationError, InstallationOutcome},
|
modules::application::ApplicationFeature,
|
||||||
topology::{K8sclient, Topology},
|
topology::{K8sclient, Topology},
|
||||||
};
|
};
|
||||||
|
|
||||||
@@ -29,10 +29,7 @@ impl Default for PublicEndpoint {
|
|||||||
/// For now we only suport K8s ingress, but we will support more stuff at some point
|
/// For now we only suport K8s ingress, but we will support more stuff at some point
|
||||||
#[async_trait]
|
#[async_trait]
|
||||||
impl<T: Topology + K8sclient + 'static> ApplicationFeature<T> for PublicEndpoint {
|
impl<T: Topology + K8sclient + 'static> ApplicationFeature<T> for PublicEndpoint {
|
||||||
async fn ensure_installed(
|
async fn ensure_installed(&self, _topology: &T) -> Result<(), String> {
|
||||||
&self,
|
|
||||||
_topology: &T,
|
|
||||||
) -> Result<InstallationOutcome, InstallationError> {
|
|
||||||
info!(
|
info!(
|
||||||
"Making sure public endpoint is installed for port {}",
|
"Making sure public endpoint is installed for port {}",
|
||||||
self.application_port
|
self.application_port
|
||||||
|
|||||||
@@ -13,8 +13,7 @@ use crate::{
|
|||||||
modules::helm::chart::{HelmChartScore, HelmRepository},
|
modules::helm::chart::{HelmChartScore, HelmRepository},
|
||||||
score::Score,
|
score::Score,
|
||||||
topology::{
|
topology::{
|
||||||
HelmCommand, K8sclient, PreparationError, PreparationOutcome, Topology, ingress::Ingress,
|
HelmCommand, K8sclient, PreparationError, PreparationOutcome, Topology, k8s::K8sClient,
|
||||||
k8s::K8sClient,
|
|
||||||
},
|
},
|
||||||
};
|
};
|
||||||
use harmony_types::id::Id;
|
use harmony_types::id::Id;
|
||||||
@@ -28,7 +27,7 @@ pub struct ArgoHelmScore {
|
|||||||
pub argo_apps: Vec<ArgoApplication>,
|
pub argo_apps: Vec<ArgoApplication>,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<T: Topology + HelmCommand + K8sclient + Ingress> Score<T> for ArgoHelmScore {
|
impl<T: Topology + HelmCommand + K8sclient> Score<T> for ArgoHelmScore {
|
||||||
fn create_interpret(&self) -> Box<dyn crate::interpret::Interpret<T>> {
|
fn create_interpret(&self) -> Box<dyn crate::interpret::Interpret<T>> {
|
||||||
Box::new(ArgoInterpret {
|
Box::new(ArgoInterpret {
|
||||||
score: self.clone(),
|
score: self.clone(),
|
||||||
@@ -48,15 +47,17 @@ pub struct ArgoInterpret {
|
|||||||
}
|
}
|
||||||
|
|
||||||
#[async_trait]
|
#[async_trait]
|
||||||
impl<T: Topology + K8sclient + HelmCommand + Ingress> Interpret<T> for ArgoInterpret {
|
impl<T: Topology + K8sclient + HelmCommand> Interpret<T> for ArgoInterpret {
|
||||||
async fn execute(
|
async fn execute(
|
||||||
&self,
|
&self,
|
||||||
inventory: &Inventory,
|
inventory: &Inventory,
|
||||||
topology: &T,
|
topology: &T,
|
||||||
) -> Result<Outcome, InterpretError> {
|
) -> Result<Outcome, InterpretError> {
|
||||||
let k8s_client = topology.k8s_client().await?;
|
let k8s_client = topology.k8s_client().await?;
|
||||||
let svc = format!("argo-{}", self.score.namespace.clone());
|
let domain = self
|
||||||
let domain = topology.get_domain(&svc).await?;
|
.get_host_domain(k8s_client.clone(), self.score.openshift)
|
||||||
|
.await?;
|
||||||
|
let domain = format!("argo.{domain}");
|
||||||
let helm_score =
|
let helm_score =
|
||||||
argo_helm_chart_score(&self.score.namespace, self.score.openshift, &domain);
|
argo_helm_chart_score(&self.score.namespace, self.score.openshift, &domain);
|
||||||
|
|
||||||
@@ -67,17 +68,14 @@ impl<T: Topology + K8sclient + HelmCommand + Ingress> Interpret<T> for ArgoInter
|
|||||||
.await
|
.await
|
||||||
.unwrap();
|
.unwrap();
|
||||||
|
|
||||||
Ok(Outcome::success_with_details(
|
Ok(Outcome::success(format!(
|
||||||
format!(
|
"ArgoCD installed with {} {}",
|
||||||
"ArgoCD {} {}",
|
self.argo_apps.len(),
|
||||||
self.argo_apps.len(),
|
match self.argo_apps.len() {
|
||||||
match self.argo_apps.len() {
|
1 => "application",
|
||||||
1 => "application",
|
_ => "applications",
|
||||||
_ => "applications",
|
}
|
||||||
}
|
)))
|
||||||
),
|
|
||||||
vec![format!("argo application: http://{}", domain)],
|
|
||||||
))
|
|
||||||
}
|
}
|
||||||
|
|
||||||
fn get_name(&self) -> InterpretName {
|
fn get_name(&self) -> InterpretName {
|
||||||
@@ -160,9 +158,6 @@ global:
|
|||||||
## Used for ingresses, certificates, SSO, notifications, etc.
|
## Used for ingresses, certificates, SSO, notifications, etc.
|
||||||
domain: {domain}
|
domain: {domain}
|
||||||
|
|
||||||
securityContext:
|
|
||||||
runAsUser: null
|
|
||||||
|
|
||||||
# -- Runtime class name for all components
|
# -- Runtime class name for all components
|
||||||
runtimeClassName: ""
|
runtimeClassName: ""
|
||||||
|
|
||||||
@@ -474,13 +469,6 @@ redis:
|
|||||||
# -- Redis name
|
# -- Redis name
|
||||||
name: redis
|
name: redis
|
||||||
|
|
||||||
serviceAccount:
|
|
||||||
create: true
|
|
||||||
|
|
||||||
securityContext:
|
|
||||||
runAsUser: null
|
|
||||||
|
|
||||||
|
|
||||||
## Redis image
|
## Redis image
|
||||||
image:
|
image:
|
||||||
# -- Redis repository
|
# -- Redis repository
|
||||||
|
|||||||
@@ -1,12 +1,13 @@
|
|||||||
mod endpoint;
|
mod endpoint;
|
||||||
pub mod rhob_monitoring;
|
pub mod rhob_monitoring;
|
||||||
|
mod multisite;
|
||||||
pub use endpoint::*;
|
pub use endpoint::*;
|
||||||
|
|
||||||
mod monitoring;
|
mod monitoring;
|
||||||
pub use monitoring::*;
|
pub use monitoring::*;
|
||||||
|
|
||||||
mod packaging_deployment;
|
mod continuous_delivery;
|
||||||
pub use packaging_deployment::*;
|
pub use continuous_delivery::*;
|
||||||
|
|
||||||
mod helm_argocd_score;
|
mod helm_argocd_score;
|
||||||
pub use helm_argocd_score::*;
|
pub use helm_argocd_score::*;
|
||||||
|
|||||||
@@ -1,10 +1,10 @@
|
|||||||
use crate::modules::application::{
|
use std::sync::Arc;
|
||||||
Application, ApplicationFeature, InstallationError, InstallationOutcome,
|
|
||||||
};
|
use crate::modules::application::{Application, ApplicationFeature};
|
||||||
use crate::modules::monitoring::application_monitoring::application_monitoring_score::ApplicationMonitoringScore;
|
use crate::modules::monitoring::application_monitoring::application_monitoring_score::ApplicationMonitoringScore;
|
||||||
use crate::modules::monitoring::kube_prometheus::crd::crd_alertmanager_config::CRDPrometheus;
|
use crate::modules::monitoring::kube_prometheus::crd::crd_alertmanager_config::CRDPrometheus;
|
||||||
|
|
||||||
use crate::topology::MultiTargetTopology;
|
use crate::topology::MultiTargetTopology;
|
||||||
use crate::topology::ingress::Ingress;
|
|
||||||
use crate::{
|
use crate::{
|
||||||
inventory::Inventory,
|
inventory::Inventory,
|
||||||
modules::monitoring::{
|
modules::monitoring::{
|
||||||
@@ -19,12 +19,8 @@ use crate::{
|
|||||||
};
|
};
|
||||||
use async_trait::async_trait;
|
use async_trait::async_trait;
|
||||||
use base64::{Engine as _, engine::general_purpose};
|
use base64::{Engine as _, engine::general_purpose};
|
||||||
use harmony_secret::SecretManager;
|
|
||||||
use harmony_secret_derive::Secret;
|
|
||||||
use harmony_types::net::Url;
|
use harmony_types::net::Url;
|
||||||
use log::{debug, info};
|
use log::{debug, info};
|
||||||
use serde::{Deserialize, Serialize};
|
|
||||||
use std::sync::Arc;
|
|
||||||
|
|
||||||
#[derive(Debug, Clone)]
|
#[derive(Debug, Clone)]
|
||||||
pub struct Monitoring {
|
pub struct Monitoring {
|
||||||
@@ -40,22 +36,17 @@ impl<
|
|||||||
+ TenantManager
|
+ TenantManager
|
||||||
+ K8sclient
|
+ K8sclient
|
||||||
+ MultiTargetTopology
|
+ MultiTargetTopology
|
||||||
+ PrometheusApplicationMonitoring<CRDPrometheus>
|
+ std::fmt::Debug
|
||||||
+ Ingress
|
+ PrometheusApplicationMonitoring<CRDPrometheus>,
|
||||||
+ std::fmt::Debug,
|
|
||||||
> ApplicationFeature<T> for Monitoring
|
> ApplicationFeature<T> for Monitoring
|
||||||
{
|
{
|
||||||
async fn ensure_installed(
|
async fn ensure_installed(&self, topology: &T) -> Result<(), String> {
|
||||||
&self,
|
|
||||||
topology: &T,
|
|
||||||
) -> Result<InstallationOutcome, InstallationError> {
|
|
||||||
info!("Ensuring monitoring is available for application");
|
info!("Ensuring monitoring is available for application");
|
||||||
let namespace = topology
|
let namespace = topology
|
||||||
.get_tenant_config()
|
.get_tenant_config()
|
||||||
.await
|
.await
|
||||||
.map(|ns| ns.name.clone())
|
.map(|ns| ns.name.clone())
|
||||||
.unwrap_or_else(|| self.application.name());
|
.unwrap_or_else(|| self.application.name());
|
||||||
let domain = topology.get_domain("ntfy").await.unwrap();
|
|
||||||
|
|
||||||
let mut alerting_score = ApplicationMonitoringScore {
|
let mut alerting_score = ApplicationMonitoringScore {
|
||||||
sender: CRDPrometheus {
|
sender: CRDPrometheus {
|
||||||
@@ -67,17 +58,19 @@ impl<
|
|||||||
};
|
};
|
||||||
let ntfy = NtfyScore {
|
let ntfy = NtfyScore {
|
||||||
namespace: namespace.clone(),
|
namespace: namespace.clone(),
|
||||||
host: domain,
|
host: "ntfy.harmonydemo.apps.ncd0.harmony.mcd".to_string(),
|
||||||
};
|
};
|
||||||
ntfy.interpret(&Inventory::empty(), topology)
|
ntfy.interpret(&Inventory::empty(), topology)
|
||||||
.await
|
.await
|
||||||
.map_err(|e| e.to_string())?;
|
.map_err(|e| e.to_string())?;
|
||||||
|
|
||||||
let config = SecretManager::get_or_prompt::<NtfyAuth>().await.unwrap();
|
let ntfy_default_auth_username = "harmony";
|
||||||
|
let ntfy_default_auth_password = "harmony";
|
||||||
let ntfy_default_auth_header = format!(
|
let ntfy_default_auth_header = format!(
|
||||||
"Basic {}",
|
"Basic {}",
|
||||||
general_purpose::STANDARD.encode(format!("{}:{}", config.username, config.password))
|
general_purpose::STANDARD.encode(format!(
|
||||||
|
"{ntfy_default_auth_username}:{ntfy_default_auth_password}"
|
||||||
|
))
|
||||||
);
|
);
|
||||||
|
|
||||||
debug!("ntfy_default_auth_header: {ntfy_default_auth_header}");
|
debug!("ntfy_default_auth_header: {ntfy_default_auth_header}");
|
||||||
@@ -107,17 +100,9 @@ impl<
|
|||||||
.interpret(&Inventory::empty(), topology)
|
.interpret(&Inventory::empty(), topology)
|
||||||
.await
|
.await
|
||||||
.map_err(|e| e.to_string())?;
|
.map_err(|e| e.to_string())?;
|
||||||
|
Ok(())
|
||||||
Ok(InstallationOutcome::success())
|
|
||||||
}
|
}
|
||||||
|
|
||||||
fn name(&self) -> String {
|
fn name(&self) -> String {
|
||||||
"Monitoring".to_string()
|
"Monitoring".to_string()
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Secret, Serialize, Deserialize, Clone, Debug)]
|
|
||||||
struct NtfyAuth {
|
|
||||||
username: String,
|
|
||||||
password: String,
|
|
||||||
}
|
|
||||||
|
|||||||
49
harmony/src/modules/application/features/multisite.rs
Normal file
@@ -0,0 +1,49 @@
|
|||||||
|
use std::sync::Arc;
|
||||||
|
|
||||||
|
use crate::modules::application::{Application, ApplicationFeature, StatelessApplication};
|
||||||
|
use crate::modules::monitoring::application_monitoring::application_monitoring_score::ApplicationMonitoringScore;
|
||||||
|
use crate::modules::monitoring::kube_prometheus::crd::crd_alertmanager_config::CRDPrometheus;
|
||||||
|
|
||||||
|
use crate::topology::{K8sAnywhereTopology, MultiTargetTopology};
|
||||||
|
use crate::{
|
||||||
|
inventory::Inventory,
|
||||||
|
modules::monitoring::{
|
||||||
|
alert_channel::webhook_receiver::WebhookReceiver, ntfy::ntfy::NtfyScore,
|
||||||
|
},
|
||||||
|
score::Score,
|
||||||
|
topology::{HelmCommand, K8sclient, Topology, tenant::TenantManager},
|
||||||
|
};
|
||||||
|
use crate::{
|
||||||
|
modules::prometheus::prometheus::PrometheusApplicationMonitoring,
|
||||||
|
topology::oberservability::monitoring::AlertReceiver,
|
||||||
|
};
|
||||||
|
use async_trait::async_trait;
|
||||||
|
use base64::{Engine as _, engine::general_purpose};
|
||||||
|
use harmony_types::net::Url;
|
||||||
|
use log::{debug, info};
|
||||||
|
|
||||||
|
trait DebugTopology: Topology + std::fmt::Debug {}
|
||||||
|
|
||||||
|
#[derive(Debug, Clone)]
|
||||||
|
pub struct Multisite {
|
||||||
|
app: Arc<dyn StatelessApplication>,
|
||||||
|
secondary_site: Arc<K8sAnywhereTopology>,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[async_trait]
|
||||||
|
impl<T: Topology> ApplicationFeature<T> for Multisite {
|
||||||
|
async fn ensure_installed(&self, topology: &T) -> Result<(), String> {
|
||||||
|
|
||||||
|
todo!(
|
||||||
|
"
|
||||||
|
- Find a way to get pvs for this application
|
||||||
|
- find the pv csi volumes uuid
|
||||||
|
- run rbd mirror image enable --pool mirrored-pool csi-vol-<UUID_PV> snapshot
|
||||||
|
- enjoy
|
||||||
|
"
|
||||||
|
)
|
||||||
|
}
|
||||||
|
fn name(&self) -> String {
|
||||||
|
"Multisite".to_string()
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -1,14 +1,11 @@
|
|||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
|
|
||||||
use crate::modules::application::{
|
use crate::modules::application::{Application, ApplicationFeature};
|
||||||
Application, ApplicationFeature, InstallationError, InstallationOutcome,
|
|
||||||
};
|
|
||||||
use crate::modules::monitoring::application_monitoring::application_monitoring_score::ApplicationMonitoringScore;
|
use crate::modules::monitoring::application_monitoring::application_monitoring_score::ApplicationMonitoringScore;
|
||||||
use crate::modules::monitoring::application_monitoring::rhobs_application_monitoring_score::ApplicationRHOBMonitoringScore;
|
use crate::modules::monitoring::application_monitoring::rhobs_application_monitoring_score::ApplicationRHOBMonitoringScore;
|
||||||
|
|
||||||
use crate::modules::monitoring::kube_prometheus::crd::rhob_alertmanager_config::RHOBObservability;
|
use crate::modules::monitoring::kube_prometheus::crd::rhob_alertmanager_config::RHOBObservability;
|
||||||
use crate::topology::MultiTargetTopology;
|
use crate::topology::MultiTargetTopology;
|
||||||
use crate::topology::ingress::Ingress;
|
|
||||||
use crate::{
|
use crate::{
|
||||||
inventory::Inventory,
|
inventory::Inventory,
|
||||||
modules::monitoring::{
|
modules::monitoring::{
|
||||||
@@ -27,7 +24,7 @@ use harmony_types::net::Url;
|
|||||||
use log::{debug, info};
|
use log::{debug, info};
|
||||||
|
|
||||||
#[derive(Debug, Clone)]
|
#[derive(Debug, Clone)]
|
||||||
pub struct Monitoring {
|
pub struct RHOBMonitoring {
|
||||||
pub application: Arc<dyn Application>,
|
pub application: Arc<dyn Application>,
|
||||||
pub alert_receiver: Vec<Box<dyn AlertReceiver<RHOBObservability>>>,
|
pub alert_receiver: Vec<Box<dyn AlertReceiver<RHOBObservability>>>,
|
||||||
}
|
}
|
||||||
@@ -40,15 +37,11 @@ impl<
|
|||||||
+ TenantManager
|
+ TenantManager
|
||||||
+ K8sclient
|
+ K8sclient
|
||||||
+ MultiTargetTopology
|
+ MultiTargetTopology
|
||||||
+ Ingress
|
|
||||||
+ std::fmt::Debug
|
+ std::fmt::Debug
|
||||||
+ PrometheusApplicationMonitoring<RHOBObservability>,
|
+ PrometheusApplicationMonitoring<RHOBObservability>,
|
||||||
> ApplicationFeature<T> for Monitoring
|
> ApplicationFeature<T> for RHOBMonitoring
|
||||||
{
|
{
|
||||||
async fn ensure_installed(
|
async fn ensure_installed(&self, topology: &T) -> Result<(), String> {
|
||||||
&self,
|
|
||||||
topology: &T,
|
|
||||||
) -> Result<InstallationOutcome, InstallationError> {
|
|
||||||
info!("Ensuring monitoring is available for application");
|
info!("Ensuring monitoring is available for application");
|
||||||
let namespace = topology
|
let namespace = topology
|
||||||
.get_tenant_config()
|
.get_tenant_config()
|
||||||
@@ -64,13 +57,9 @@ impl<
|
|||||||
application: self.application.clone(),
|
application: self.application.clone(),
|
||||||
receivers: self.alert_receiver.clone(),
|
receivers: self.alert_receiver.clone(),
|
||||||
};
|
};
|
||||||
let domain = topology
|
|
||||||
.get_domain("ntfy")
|
|
||||||
.await
|
|
||||||
.map_err(|e| format!("could not get domain {e}"))?;
|
|
||||||
let ntfy = NtfyScore {
|
let ntfy = NtfyScore {
|
||||||
namespace: namespace.clone(),
|
namespace: namespace.clone(),
|
||||||
host: domain.clone(),
|
host: "ntfy.harmonydemo.apps.ncd0.harmony.mcd".to_string(),
|
||||||
};
|
};
|
||||||
ntfy.interpret(&Inventory::empty(), topology)
|
ntfy.interpret(&Inventory::empty(), topology)
|
||||||
.await
|
.await
|
||||||
@@ -92,33 +81,27 @@ impl<
|
|||||||
.replace("=", "");
|
.replace("=", "");
|
||||||
|
|
||||||
debug!("ntfy_default_auth_param: {ntfy_default_auth_param}");
|
debug!("ntfy_default_auth_param: {ntfy_default_auth_param}");
|
||||||
|
|
||||||
let ntfy_receiver = WebhookReceiver {
|
let ntfy_receiver = WebhookReceiver {
|
||||||
name: "ntfy-webhook".to_string(),
|
name: "ntfy-webhook".to_string(),
|
||||||
url: Url::Url(
|
url: Url::Url(
|
||||||
url::Url::parse(
|
url::Url::parse(
|
||||||
format!(
|
format!(
|
||||||
"http://{domain}/{}?auth={ntfy_default_auth_param}",
|
"http://ntfy.{}.svc.cluster.local/rust-web-app?auth={ntfy_default_auth_param}",
|
||||||
self.application.name()
|
namespace.clone()
|
||||||
)
|
)
|
||||||
.as_str(),
|
.as_str(),
|
||||||
)
|
)
|
||||||
.unwrap(),
|
.unwrap(),
|
||||||
),
|
),
|
||||||
};
|
};
|
||||||
debug!(
|
|
||||||
"ntfy webhook receiver \n{:#?}\nntfy topic: {}",
|
|
||||||
ntfy_receiver.clone(),
|
|
||||||
self.application.name()
|
|
||||||
);
|
|
||||||
alerting_score.receivers.push(Box::new(ntfy_receiver));
|
alerting_score.receivers.push(Box::new(ntfy_receiver));
|
||||||
alerting_score
|
alerting_score
|
||||||
.interpret(&Inventory::empty(), topology)
|
.interpret(&Inventory::empty(), topology)
|
||||||
.await
|
.await
|
||||||
.map_err(|e| e.to_string())?;
|
.map_err(|e| e.to_string())?;
|
||||||
Ok(InstallationOutcome::success_with_details(vec![format!(
|
Ok(())
|
||||||
"ntfy topic: {}",
|
|
||||||
self.application.name()
|
|
||||||
)]))
|
|
||||||
}
|
}
|
||||||
fn name(&self) -> String {
|
fn name(&self) -> String {
|
||||||
"Monitoring".to_string()
|
"Monitoring".to_string()
|
||||||
|
|||||||
@@ -2,6 +2,10 @@ mod feature;
|
|||||||
pub mod features;
|
pub mod features;
|
||||||
pub mod oci;
|
pub mod oci;
|
||||||
mod rust;
|
mod rust;
|
||||||
|
mod stateless;
|
||||||
|
mod stateful;
|
||||||
|
pub use stateless::*;
|
||||||
|
pub use stateful::*;
|
||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
|
|
||||||
pub use feature::*;
|
pub use feature::*;
|
||||||
@@ -24,8 +28,8 @@ use harmony_types::id::Id;
|
|||||||
#[derive(Clone, Debug)]
|
#[derive(Clone, Debug)]
|
||||||
pub enum ApplicationFeatureStatus {
|
pub enum ApplicationFeatureStatus {
|
||||||
Installing,
|
Installing,
|
||||||
Installed { details: Vec<String> },
|
Installed,
|
||||||
Failed { message: String },
|
Failed { details: String },
|
||||||
}
|
}
|
||||||
|
|
||||||
pub trait Application: std::fmt::Debug + Send + Sync {
|
pub trait Application: std::fmt::Debug + Send + Sync {
|
||||||
@@ -65,32 +69,27 @@ impl<A: Application, T: Topology + std::fmt::Debug> Interpret<T> for Application
|
|||||||
.unwrap();
|
.unwrap();
|
||||||
|
|
||||||
let _ = match feature.ensure_installed(topology).await {
|
let _ = match feature.ensure_installed(topology).await {
|
||||||
Ok(outcome) => {
|
Ok(()) => {
|
||||||
instrumentation::instrument(HarmonyEvent::ApplicationFeatureStateChanged {
|
instrumentation::instrument(HarmonyEvent::ApplicationFeatureStateChanged {
|
||||||
topology: topology.name().into(),
|
topology: topology.name().into(),
|
||||||
application: self.application.name(),
|
application: self.application.name(),
|
||||||
feature: feature.name(),
|
feature: feature.name(),
|
||||||
status: ApplicationFeatureStatus::Installed {
|
status: ApplicationFeatureStatus::Installed,
|
||||||
details: match outcome {
|
|
||||||
InstallationOutcome::Success { details } => details,
|
|
||||||
InstallationOutcome::Noop => vec![],
|
|
||||||
},
|
|
||||||
},
|
|
||||||
})
|
})
|
||||||
.unwrap();
|
.unwrap();
|
||||||
}
|
}
|
||||||
Err(error) => {
|
Err(msg) => {
|
||||||
instrumentation::instrument(HarmonyEvent::ApplicationFeatureStateChanged {
|
instrumentation::instrument(HarmonyEvent::ApplicationFeatureStateChanged {
|
||||||
topology: topology.name().into(),
|
topology: topology.name().into(),
|
||||||
application: self.application.name(),
|
application: self.application.name(),
|
||||||
feature: feature.name(),
|
feature: feature.name(),
|
||||||
status: ApplicationFeatureStatus::Failed {
|
status: ApplicationFeatureStatus::Failed {
|
||||||
message: error.to_string(),
|
details: msg.clone(),
|
||||||
},
|
},
|
||||||
})
|
})
|
||||||
.unwrap();
|
.unwrap();
|
||||||
return Err(InterpretError::new(format!(
|
return Err(InterpretError::new(format!(
|
||||||
"Application Interpret failed to install feature : {error}"
|
"Application Interpret failed to install feature : {msg}"
|
||||||
)));
|
)));
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|||||||
@@ -1,6 +1,7 @@
|
|||||||
use super::Application;
|
|
||||||
use async_trait::async_trait;
|
use async_trait::async_trait;
|
||||||
|
|
||||||
|
use super::Application;
|
||||||
|
|
||||||
#[async_trait]
|
#[async_trait]
|
||||||
pub trait OCICompliant: Application {
|
pub trait OCICompliant: Application {
|
||||||
async fn build_push_oci_image(&self) -> Result<String, String>; // TODO consider using oci-spec and friends crates here
|
async fn build_push_oci_image(&self) -> Result<String, String>; // TODO consider using oci-spec and friends crates here
|
||||||
@@ -16,10 +17,5 @@ pub trait HelmPackage: Application {
|
|||||||
///
|
///
|
||||||
/// # Arguments
|
/// # Arguments
|
||||||
/// * `image_url` - The full URL of the OCI container image to be used in the Deployment.
|
/// * `image_url` - The full URL of the OCI container image to be used in the Deployment.
|
||||||
/// * `domain` - The domain where the application is hosted.
|
async fn build_push_helm_package(&self, image_url: &str) -> Result<String, String>;
|
||||||
async fn build_push_helm_package(
|
|
||||||
&self,
|
|
||||||
image_url: &str,
|
|
||||||
domain: &str,
|
|
||||||
) -> Result<String, String>;
|
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,4 +1,5 @@
|
|||||||
use std::fs::{self};
|
use std::fs::{self, File};
|
||||||
|
use std::io::Read;
|
||||||
use std::path::{Path, PathBuf};
|
use std::path::{Path, PathBuf};
|
||||||
use std::process;
|
use std::process;
|
||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
@@ -10,13 +11,14 @@ use dockerfile_builder::Dockerfile;
|
|||||||
use dockerfile_builder::instruction::{CMD, COPY, ENV, EXPOSE, FROM, RUN, USER, WORKDIR};
|
use dockerfile_builder::instruction::{CMD, COPY, ENV, EXPOSE, FROM, RUN, USER, WORKDIR};
|
||||||
use dockerfile_builder::instruction_builder::CopyBuilder;
|
use dockerfile_builder::instruction_builder::CopyBuilder;
|
||||||
use futures_util::StreamExt;
|
use futures_util::StreamExt;
|
||||||
use log::{debug, error, info, log_enabled, trace, warn};
|
use log::{debug, info, log_enabled};
|
||||||
use serde::Serialize;
|
use serde::Serialize;
|
||||||
use tar::{Builder, Header};
|
use tar::{Archive, Builder, Header};
|
||||||
use walkdir::WalkDir;
|
use walkdir::WalkDir;
|
||||||
|
|
||||||
use crate::config::{REGISTRY_PROJECT, REGISTRY_URL};
|
use crate::config::{REGISTRY_PROJECT, REGISTRY_URL};
|
||||||
use crate::{score::Score, topology::Topology};
|
use crate::{score::Score, topology::Topology};
|
||||||
|
use harmony_types::net::Url;
|
||||||
|
|
||||||
use super::{Application, ApplicationFeature, ApplicationInterpret, HelmPackage, OCICompliant};
|
use super::{Application, ApplicationFeature, ApplicationInterpret, HelmPackage, OCICompliant};
|
||||||
|
|
||||||
@@ -56,6 +58,7 @@ pub enum RustWebFramework {
|
|||||||
#[derive(Debug, Clone, Serialize)]
|
#[derive(Debug, Clone, Serialize)]
|
||||||
pub struct RustWebapp {
|
pub struct RustWebapp {
|
||||||
pub name: String,
|
pub name: String,
|
||||||
|
pub domain: Url,
|
||||||
/// The path to the root of the Rust project to be containerized.
|
/// The path to the root of the Rust project to be containerized.
|
||||||
pub project_root: PathBuf,
|
pub project_root: PathBuf,
|
||||||
pub service_port: u32,
|
pub service_port: u32,
|
||||||
@@ -70,17 +73,12 @@ impl Application for RustWebapp {
|
|||||||
|
|
||||||
#[async_trait]
|
#[async_trait]
|
||||||
impl HelmPackage for RustWebapp {
|
impl HelmPackage for RustWebapp {
|
||||||
async fn build_push_helm_package(
|
async fn build_push_helm_package(&self, image_url: &str) -> Result<String, String> {
|
||||||
&self,
|
|
||||||
image_url: &str,
|
|
||||||
domain: &str,
|
|
||||||
) -> Result<String, String> {
|
|
||||||
info!("Starting Helm chart build and push for '{}'", self.name);
|
info!("Starting Helm chart build and push for '{}'", self.name);
|
||||||
|
|
||||||
// 1. Create the Helm chart files on disk.
|
// 1. Create the Helm chart files on disk.
|
||||||
let chart_dir = self
|
let chart_dir = self
|
||||||
.create_helm_chart_files(image_url, domain)
|
.create_helm_chart_files(image_url)
|
||||||
.await
|
|
||||||
.map_err(|e| format!("Failed to create Helm chart files: {}", e))?;
|
.map_err(|e| format!("Failed to create Helm chart files: {}", e))?;
|
||||||
info!("Successfully created Helm chart files in {:?}", chart_dir);
|
info!("Successfully created Helm chart files in {:?}", chart_dir);
|
||||||
|
|
||||||
@@ -162,7 +160,7 @@ impl RustWebapp {
|
|||||||
&self,
|
&self,
|
||||||
image_name: &str,
|
image_name: &str,
|
||||||
) -> Result<String, Box<dyn std::error::Error>> {
|
) -> Result<String, Box<dyn std::error::Error>> {
|
||||||
info!("Generating Dockerfile for '{}'", self.name);
|
debug!("Generating Dockerfile for '{}'", self.name);
|
||||||
let dockerfile = self.get_or_build_dockerfile();
|
let dockerfile = self.get_or_build_dockerfile();
|
||||||
let quiet = !log_enabled!(log::Level::Debug);
|
let quiet = !log_enabled!(log::Level::Debug);
|
||||||
match dockerfile
|
match dockerfile
|
||||||
@@ -194,41 +192,8 @@ impl RustWebapp {
|
|||||||
Some(body_full(tar_data.into())),
|
Some(body_full(tar_data.into())),
|
||||||
);
|
);
|
||||||
|
|
||||||
while let Some(mut msg) = image_build_stream.next().await {
|
while let Some(msg) = image_build_stream.next().await {
|
||||||
trace!("Got bollard msg {msg:?}");
|
debug!("Message: {msg:?}");
|
||||||
match msg {
|
|
||||||
Ok(mut msg) => {
|
|
||||||
if let Some(progress) = msg.progress_detail {
|
|
||||||
info!(
|
|
||||||
"Build progress {}/{}",
|
|
||||||
progress.current.unwrap_or(0),
|
|
||||||
progress.total.unwrap_or(0)
|
|
||||||
);
|
|
||||||
}
|
|
||||||
|
|
||||||
if let Some(mut log) = msg.stream {
|
|
||||||
if log.ends_with('\n') {
|
|
||||||
log.pop();
|
|
||||||
if log.ends_with('\r') {
|
|
||||||
log.pop();
|
|
||||||
}
|
|
||||||
}
|
|
||||||
info!("{log}");
|
|
||||||
}
|
|
||||||
|
|
||||||
if let Some(error) = msg.error {
|
|
||||||
warn!("Build error : {error:?}");
|
|
||||||
}
|
|
||||||
|
|
||||||
if let Some(error) = msg.error_detail {
|
|
||||||
warn!("Build error : {error:?}");
|
|
||||||
}
|
|
||||||
}
|
|
||||||
Err(e) => {
|
|
||||||
error!("Build failed : {e}");
|
|
||||||
return Err(format!("Build failed : {e}").into());
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
Ok(image_name.to_string())
|
Ok(image_name.to_string())
|
||||||
@@ -241,7 +206,7 @@ impl RustWebapp {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
///normalizes timestamp and ignores files that will bust the docker cach
|
///normalizes timestamp and ignores files that will bust the docker cache
|
||||||
async fn create_deterministic_tar(
|
async fn create_deterministic_tar(
|
||||||
&self,
|
&self,
|
||||||
project_root: &std::path::Path,
|
project_root: &std::path::Path,
|
||||||
@@ -255,9 +220,7 @@ impl RustWebapp {
|
|||||||
".git",
|
".git",
|
||||||
".github",
|
".github",
|
||||||
".harmony_generated",
|
".harmony_generated",
|
||||||
"harmony",
|
|
||||||
"node_modules",
|
"node_modules",
|
||||||
"Dockerfile.harmony",
|
|
||||||
];
|
];
|
||||||
let mut entries: Vec<_> = WalkDir::new(project_root)
|
let mut entries: Vec<_> = WalkDir::new(project_root)
|
||||||
.into_iter()
|
.into_iter()
|
||||||
@@ -302,6 +265,8 @@ impl RustWebapp {
|
|||||||
|
|
||||||
let docker = Docker::connect_with_socket_defaults().unwrap();
|
let docker = Docker::connect_with_socket_defaults().unwrap();
|
||||||
|
|
||||||
|
// let push_options = PushImageOptionsBuilder::new().tag(tag);
|
||||||
|
|
||||||
let mut push_image_stream = docker.push_image(
|
let mut push_image_stream = docker.push_image(
|
||||||
image_tag,
|
image_tag,
|
||||||
Some(PushImageOptionsBuilder::new().build()),
|
Some(PushImageOptionsBuilder::new().build()),
|
||||||
@@ -309,8 +274,6 @@ impl RustWebapp {
|
|||||||
);
|
);
|
||||||
|
|
||||||
while let Some(msg) = push_image_stream.next().await {
|
while let Some(msg) = push_image_stream.next().await {
|
||||||
// let msg = msg?;
|
|
||||||
// TODO this fails silently, for some reason bollard cannot push to hub.nationtech.io
|
|
||||||
debug!("Message: {msg:?}");
|
debug!("Message: {msg:?}");
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -445,10 +408,9 @@ impl RustWebapp {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/// Creates all necessary files for a basic Helm chart.
|
/// Creates all necessary files for a basic Helm chart.
|
||||||
async fn create_helm_chart_files(
|
fn create_helm_chart_files(
|
||||||
&self,
|
&self,
|
||||||
image_url: &str,
|
image_url: &str,
|
||||||
domain: &str,
|
|
||||||
) -> Result<PathBuf, Box<dyn std::error::Error>> {
|
) -> Result<PathBuf, Box<dyn std::error::Error>> {
|
||||||
let chart_name = format!("{}-chart", self.name);
|
let chart_name = format!("{}-chart", self.name);
|
||||||
let chart_dir = self
|
let chart_dir = self
|
||||||
@@ -498,15 +460,21 @@ ingress:
|
|||||||
enabled: true
|
enabled: true
|
||||||
# Annotations for cert-manager to handle SSL.
|
# Annotations for cert-manager to handle SSL.
|
||||||
annotations:
|
annotations:
|
||||||
|
cert-manager.io/cluster-issuer: "letsencrypt-prod"
|
||||||
# Add other annotations like nginx ingress class if needed
|
# Add other annotations like nginx ingress class if needed
|
||||||
# kubernetes.io/ingress.class: nginx
|
# kubernetes.io/ingress.class: nginx
|
||||||
hosts:
|
hosts:
|
||||||
- host: {}
|
- host: chart-example.local
|
||||||
paths:
|
paths:
|
||||||
- path: /
|
- path: /
|
||||||
pathType: ImplementationSpecific
|
pathType: ImplementationSpecific
|
||||||
|
tls:
|
||||||
|
- secretName: {}-tls
|
||||||
|
hosts:
|
||||||
|
- chart-example.local
|
||||||
|
|
||||||
"#,
|
"#,
|
||||||
chart_name, image_repo, image_tag, self.service_port, domain,
|
chart_name, image_repo, image_tag, self.service_port, self.name
|
||||||
);
|
);
|
||||||
fs::write(chart_dir.join("values.yaml"), values_yaml)?;
|
fs::write(chart_dir.join("values.yaml"), values_yaml)?;
|
||||||
|
|
||||||
|
|||||||
6
harmony/src/modules/application/stateful.rs
Normal file
@@ -0,0 +1,6 @@
|
|||||||
|
use crate::modules::application::Application;
|
||||||
|
|
||||||
|
/// A StatefulApplication is an application bundle that writes persistent data.
|
||||||
|
///
|
||||||
|
/// This will enable backup features, stateful multisite replication, etc.
|
||||||
|
pub trait StatefulApplication: Application {}
|
||||||
26
harmony/src/modules/application/stateless.rs
Normal file
@@ -0,0 +1,26 @@
|
|||||||
|
use crate::modules::application::{Application, features::ContinuousDeliveryApplication};
|
||||||
|
|
||||||
|
/// Marker trait for stateless application that can be deployed anywhere without worrying about
|
||||||
|
/// data.
|
||||||
|
///
|
||||||
|
/// This includes Applications fitting these categories :
|
||||||
|
///
|
||||||
|
/// - Application with all files built into the docker image and never written to, can be mounted
|
||||||
|
/// read-only
|
||||||
|
/// - Application writing to hard drive on ephemeral volume that can be lost at anytime and does
|
||||||
|
/// not require any replication/backup logic to operate
|
||||||
|
/// - Not supported : an application that writes state to a volume that must be shared or kept
|
||||||
|
/// to maintain a quorum across various instances
|
||||||
|
/// - Application connecting to a database/datastore accessible from anywhere such as
|
||||||
|
/// - Public bucket endpoint
|
||||||
|
/// - Publicly accessible
|
||||||
|
/// - Application connecting to a private database external to this application, accessible from the
|
||||||
|
/// deployment target
|
||||||
|
/// - Ensuring the private database is reachable is out of scope of this trait (for now)
|
||||||
|
///
|
||||||
|
/// The entire application definition **must not** require any persistent volume or include a
|
||||||
|
/// deployment component depending on persistent data such as a transitive PostgreSQL helm chart.
|
||||||
|
///
|
||||||
|
/// Typically, applications that can be autoscaled without additional complexity fit the
|
||||||
|
/// StatelessApplication requirements.
|
||||||
|
pub trait StatelessApplication: Application + ContinuousDeliveryApplication {}
|
||||||
@@ -69,14 +69,17 @@ impl DhcpInterpret {
|
|||||||
|
|
||||||
dhcp_server.set_pxe_options(pxe_options).await?;
|
dhcp_server.set_pxe_options(pxe_options).await?;
|
||||||
|
|
||||||
Ok(Outcome::success(format!(
|
Ok(Outcome::new(
|
||||||
"Dhcp Interpret Set next boot to [{:?}], boot_filename to [{:?}], filename to [{:?}], filename64 to [{:?}], filenameipxe to [:{:?}]",
|
InterpretStatus::SUCCESS,
|
||||||
self.score.boot_filename,
|
format!(
|
||||||
self.score.boot_filename,
|
"Dhcp Interpret Set next boot to [{:?}], boot_filename to [{:?}], filename to [{:?}], filename64 to [{:?}], filenameipxe to [:{:?}]",
|
||||||
self.score.filename,
|
self.score.boot_filename,
|
||||||
self.score.filename64,
|
self.score.boot_filename,
|
||||||
self.score.filenameipxe
|
self.score.filename,
|
||||||
)))
|
self.score.filename64,
|
||||||
|
self.score.filenameipxe
|
||||||
|
),
|
||||||
|
))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -119,7 +122,8 @@ impl<T: Topology + DhcpServer> Interpret<T> for DhcpInterpret {
|
|||||||
|
|
||||||
topology.commit_config().await?;
|
topology.commit_config().await?;
|
||||||
|
|
||||||
Ok(Outcome::success(
|
Ok(Outcome::new(
|
||||||
|
InterpretStatus::SUCCESS,
|
||||||
"Dhcp Interpret execution successful".to_string(),
|
"Dhcp Interpret execution successful".to_string(),
|
||||||
))
|
))
|
||||||
}
|
}
|
||||||
@@ -193,10 +197,10 @@ impl DhcpHostBindingInterpret {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
Ok(Outcome::success(format!(
|
Ok(Outcome::new(
|
||||||
"Dhcp Interpret registered {} entries",
|
InterpretStatus::SUCCESS,
|
||||||
number_new_entries
|
format!("Dhcp Interpret registered {} entries", number_new_entries),
|
||||||
)))
|
))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -232,9 +236,12 @@ impl<T: DhcpServer> Interpret<T> for DhcpHostBindingInterpret {
|
|||||||
|
|
||||||
topology.commit_config().await?;
|
topology.commit_config().await?;
|
||||||
|
|
||||||
Ok(Outcome::success(format!(
|
Ok(Outcome::new(
|
||||||
"Dhcp Host Binding Interpret execution successful on {} hosts",
|
InterpretStatus::SUCCESS,
|
||||||
self.score.host_binding.len()
|
format!(
|
||||||
)))
|
"Dhcp Host Binding Interpret execution successful on {} hosts",
|
||||||
|
self.score.host_binding.len()
|
||||||
|
),
|
||||||
|
))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -55,7 +55,8 @@ impl DnsInterpret {
|
|||||||
dns.register_dhcp_leases(register).await?;
|
dns.register_dhcp_leases(register).await?;
|
||||||
}
|
}
|
||||||
|
|
||||||
Ok(Outcome::success(
|
Ok(Outcome::new(
|
||||||
|
InterpretStatus::SUCCESS,
|
||||||
"DNS Interpret execution successfull".to_string(),
|
"DNS Interpret execution successfull".to_string(),
|
||||||
))
|
))
|
||||||
}
|
}
|
||||||
@@ -67,10 +68,13 @@ impl DnsInterpret {
|
|||||||
let entries = &self.score.dns_entries;
|
let entries = &self.score.dns_entries;
|
||||||
dns_server.ensure_hosts_registered(entries.clone()).await?;
|
dns_server.ensure_hosts_registered(entries.clone()).await?;
|
||||||
|
|
||||||
Ok(Outcome::success(format!(
|
Ok(Outcome::new(
|
||||||
"DnsInterpret registered {} hosts successfully",
|
InterpretStatus::SUCCESS,
|
||||||
entries.len()
|
format!(
|
||||||
)))
|
"DnsInterpret registered {} hosts successfully",
|
||||||
|
entries.len()
|
||||||
|
),
|
||||||
|
))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -107,7 +111,8 @@ impl<T: Topology + DnsServer> Interpret<T> for DnsInterpret {
|
|||||||
|
|
||||||
topology.commit_config().await?;
|
topology.commit_config().await?;
|
||||||
|
|
||||||
Ok(Outcome::success(
|
Ok(Outcome::new(
|
||||||
|
InterpretStatus::SUCCESS,
|
||||||
"Dns Interpret execution successful".to_string(),
|
"Dns Interpret execution successful".to_string(),
|
||||||
))
|
))
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -153,10 +153,6 @@ impl<T: Topology + HelmCommand> Interpret<T> for HelmChartInterpret {
|
|||||||
let yaml_path: Option<&Path> = match self.score.values_yaml.as_ref() {
|
let yaml_path: Option<&Path> = match self.score.values_yaml.as_ref() {
|
||||||
Some(yaml_str) => {
|
Some(yaml_str) => {
|
||||||
tf = temp_file::with_contents(yaml_str.as_bytes());
|
tf = temp_file::with_contents(yaml_str.as_bytes());
|
||||||
debug!(
|
|
||||||
"values yaml string for chart {} :\n {yaml_str}",
|
|
||||||
self.score.chart_name
|
|
||||||
);
|
|
||||||
Some(tf.path())
|
Some(tf.path())
|
||||||
}
|
}
|
||||||
None => None,
|
None => None,
|
||||||
@@ -197,10 +193,13 @@ impl<T: Topology + HelmCommand> Interpret<T> for HelmChartInterpret {
|
|||||||
self.score.release_name, ns
|
self.score.release_name, ns
|
||||||
);
|
);
|
||||||
|
|
||||||
return Ok(Outcome::success(format!(
|
return Ok(Outcome::new(
|
||||||
"Helm Chart '{}' already installed to namespace {ns} and install_only=true",
|
InterpretStatus::SUCCESS,
|
||||||
self.score.release_name
|
format!(
|
||||||
)));
|
"Helm Chart '{}' already installed to namespace {ns} and install_only=true",
|
||||||
|
self.score.release_name
|
||||||
|
),
|
||||||
|
));
|
||||||
} else {
|
} else {
|
||||||
info!(
|
info!(
|
||||||
"Release '{}' not found in namespace '{}'. Proceeding with installation.",
|
"Release '{}' not found in namespace '{}'. Proceeding with installation.",
|
||||||
@@ -225,18 +224,18 @@ impl<T: Topology + HelmCommand> Interpret<T> for HelmChartInterpret {
|
|||||||
};
|
};
|
||||||
|
|
||||||
match status {
|
match status {
|
||||||
helm_wrapper_rs::HelmDeployStatus::Deployed => Ok(Outcome::success(format!(
|
helm_wrapper_rs::HelmDeployStatus::Deployed => Ok(Outcome::new(
|
||||||
"Helm Chart {} deployed",
|
InterpretStatus::SUCCESS,
|
||||||
self.score.release_name
|
format!("Helm Chart {} deployed", self.score.release_name),
|
||||||
))),
|
)),
|
||||||
helm_wrapper_rs::HelmDeployStatus::PendingInstall => Ok(Outcome::running(format!(
|
helm_wrapper_rs::HelmDeployStatus::PendingInstall => Ok(Outcome::new(
|
||||||
"Helm Chart {} pending install...",
|
InterpretStatus::RUNNING,
|
||||||
self.score.release_name
|
format!("Helm Chart {} pending install...", self.score.release_name),
|
||||||
))),
|
)),
|
||||||
helm_wrapper_rs::HelmDeployStatus::PendingUpgrade => Ok(Outcome::running(format!(
|
helm_wrapper_rs::HelmDeployStatus::PendingUpgrade => Ok(Outcome::new(
|
||||||
"Helm Chart {} pending upgrade...",
|
InterpretStatus::RUNNING,
|
||||||
self.score.release_name
|
format!("Helm Chart {} pending upgrade...", self.score.release_name),
|
||||||
))),
|
)),
|
||||||
helm_wrapper_rs::HelmDeployStatus::Failed => Err(InterpretError::new(format!(
|
helm_wrapper_rs::HelmDeployStatus::Failed => Err(InterpretError::new(format!(
|
||||||
"Helm Chart {} installation failed",
|
"Helm Chart {} installation failed",
|
||||||
self.score.release_name
|
self.score.release_name
|
||||||
|
|||||||
@@ -133,9 +133,10 @@ impl<T: Topology> Interpret<T> for DiscoverInventoryAgentInterpret {
|
|||||||
},
|
},
|
||||||
)
|
)
|
||||||
.await;
|
.await;
|
||||||
Ok(Outcome::success(
|
Ok(Outcome {
|
||||||
"Discovery process completed successfully".to_string(),
|
status: InterpretStatus::SUCCESS,
|
||||||
))
|
message: "Discovery process completed successfully".to_string(),
|
||||||
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
fn get_name(&self) -> InterpretName {
|
fn get_name(&self) -> InterpretName {
|
||||||
|
|||||||
@@ -1,15 +1,11 @@
|
|||||||
use async_trait::async_trait;
|
|
||||||
use harmony_macros::ingress_path;
|
use harmony_macros::ingress_path;
|
||||||
use harmony_types::id::Id;
|
|
||||||
use k8s_openapi::api::networking::v1::Ingress;
|
use k8s_openapi::api::networking::v1::Ingress;
|
||||||
use log::{debug, trace};
|
use log::{debug, trace};
|
||||||
use serde::Serialize;
|
use serde::Serialize;
|
||||||
use serde_json::json;
|
use serde_json::json;
|
||||||
|
|
||||||
use crate::{
|
use crate::{
|
||||||
data::Version,
|
interpret::Interpret,
|
||||||
interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
|
|
||||||
inventory::Inventory,
|
|
||||||
score::Score,
|
score::Score,
|
||||||
topology::{K8sclient, Topology},
|
topology::{K8sclient, Topology},
|
||||||
};
|
};
|
||||||
@@ -44,7 +40,6 @@ pub struct K8sIngressScore {
|
|||||||
pub path: Option<IngressPath>,
|
pub path: Option<IngressPath>,
|
||||||
pub path_type: Option<PathType>,
|
pub path_type: Option<PathType>,
|
||||||
pub namespace: Option<fqdn::FQDN>,
|
pub namespace: Option<fqdn::FQDN>,
|
||||||
pub ingress_class_name: Option<String>,
|
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<T: Topology + K8sclient> Score<T> for K8sIngressScore {
|
impl<T: Topology + K8sclient> Score<T> for K8sIngressScore {
|
||||||
@@ -59,18 +54,12 @@ impl<T: Topology + K8sclient> Score<T> for K8sIngressScore {
|
|||||||
None => PathType::Prefix,
|
None => PathType::Prefix,
|
||||||
};
|
};
|
||||||
|
|
||||||
let ingress_class = match self.ingress_class_name.clone() {
|
|
||||||
Some(ingress_class_name) => ingress_class_name,
|
|
||||||
None => "\"default\"".to_string(),
|
|
||||||
};
|
|
||||||
|
|
||||||
let ingress = json!(
|
let ingress = json!(
|
||||||
{
|
{
|
||||||
"metadata": {
|
"metadata": {
|
||||||
"name": self.name.to_string(),
|
"name": self.name.to_string(),
|
||||||
},
|
},
|
||||||
"spec": {
|
"spec": {
|
||||||
"ingressClassName": ingress_class.as_str(),
|
|
||||||
"rules": [
|
"rules": [
|
||||||
{ "host": self.host.to_string(),
|
{ "host": self.host.to_string(),
|
||||||
"http": {
|
"http": {
|
||||||
@@ -101,12 +90,11 @@ impl<T: Topology + K8sclient> Score<T> for K8sIngressScore {
|
|||||||
"Successfully built Ingress for host {:?}",
|
"Successfully built Ingress for host {:?}",
|
||||||
ingress.metadata.name
|
ingress.metadata.name
|
||||||
);
|
);
|
||||||
|
Box::new(K8sResourceInterpret {
|
||||||
Box::new(K8sIngressInterpret {
|
score: K8sResourceScore::single(
|
||||||
ingress,
|
ingress.clone(),
|
||||||
service: self.name.to_string(),
|
self.namespace.clone().map(|f| f.to_string()),
|
||||||
namespace: self.namespace.clone().map(|f| f.to_string()),
|
),
|
||||||
host: self.host.clone(),
|
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -114,62 +102,3 @@ impl<T: Topology + K8sclient> Score<T> for K8sIngressScore {
|
|||||||
format!("{} K8sIngressScore", self.name)
|
format!("{} K8sIngressScore", self.name)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(std::fmt::Debug)]
|
|
||||||
struct K8sIngressInterpret {
|
|
||||||
ingress: Ingress,
|
|
||||||
service: String,
|
|
||||||
namespace: Option<String>,
|
|
||||||
host: fqdn::FQDN,
|
|
||||||
}
|
|
||||||
|
|
||||||
#[async_trait]
|
|
||||||
impl<T: Topology + K8sclient> Interpret<T> for K8sIngressInterpret {
|
|
||||||
async fn execute(
|
|
||||||
&self,
|
|
||||||
inventory: &Inventory,
|
|
||||||
topology: &T,
|
|
||||||
) -> Result<Outcome, InterpretError> {
|
|
||||||
let result = K8sResourceInterpret {
|
|
||||||
score: K8sResourceScore::single(self.ingress.clone(), self.namespace.clone()),
|
|
||||||
}
|
|
||||||
.execute(inventory, topology)
|
|
||||||
.await;
|
|
||||||
|
|
||||||
match result {
|
|
||||||
Ok(outcome) => match outcome.status {
|
|
||||||
InterpretStatus::SUCCESS => {
|
|
||||||
let details = match &self.namespace {
|
|
||||||
Some(namespace) => {
|
|
||||||
vec![format!(
|
|
||||||
"{} ({namespace}): http://{}",
|
|
||||||
self.service, self.host
|
|
||||||
)]
|
|
||||||
}
|
|
||||||
None => vec![format!("{}: {}", self.service, self.host)],
|
|
||||||
};
|
|
||||||
|
|
||||||
Ok(Outcome::success_with_details(outcome.message, details))
|
|
||||||
}
|
|
||||||
_ => Ok(outcome),
|
|
||||||
},
|
|
||||||
Err(e) => Err(e),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fn get_name(&self) -> InterpretName {
|
|
||||||
InterpretName::K8sIngress
|
|
||||||
}
|
|
||||||
|
|
||||||
fn get_version(&self) -> Version {
|
|
||||||
Version::from("0.0.1").unwrap()
|
|
||||||
}
|
|
||||||
|
|
||||||
fn get_status(&self) -> InterpretStatus {
|
|
||||||
todo!()
|
|
||||||
}
|
|
||||||
|
|
||||||
fn get_children(&self) -> Vec<Id> {
|
|
||||||
vec![]
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|||||||
@@ -147,7 +147,6 @@ impl<T: Topology + K8sclient + HelmCommand> Interpret<T> for LAMPInterpret {
|
|||||||
port: 8080,
|
port: 8080,
|
||||||
path: Some(ingress_path),
|
path: Some(ingress_path),
|
||||||
path_type: None,
|
path_type: None,
|
||||||
ingress_class_name: None,
|
|
||||||
namespace: self
|
namespace: self
|
||||||
.get_namespace()
|
.get_namespace()
|
||||||
.map(|nbs| fqdn!(nbs.to_string().as_str())),
|
.map(|nbs| fqdn!(nbs.to_string().as_str())),
|
||||||
|
|||||||
@@ -35,24 +35,6 @@ pub struct DiscordWebhook {
|
|||||||
#[async_trait]
|
#[async_trait]
|
||||||
impl AlertReceiver<RHOBObservability> for DiscordWebhook {
|
impl AlertReceiver<RHOBObservability> for DiscordWebhook {
|
||||||
async fn install(&self, sender: &RHOBObservability) -> Result<Outcome, InterpretError> {
|
async fn install(&self, sender: &RHOBObservability) -> Result<Outcome, InterpretError> {
|
||||||
let ns = sender.namespace.clone();
|
|
||||||
let secret_name = format!("{}-secret", self.name.clone());
|
|
||||||
let webhook_key = format!("{}", self.url.clone());
|
|
||||||
|
|
||||||
let mut string_data = BTreeMap::new();
|
|
||||||
string_data.insert("webhook-url".to_string(), webhook_key.clone());
|
|
||||||
|
|
||||||
let secret = Secret {
|
|
||||||
metadata: kube::core::ObjectMeta {
|
|
||||||
name: Some(secret_name.clone()),
|
|
||||||
..Default::default()
|
|
||||||
},
|
|
||||||
string_data: Some(string_data),
|
|
||||||
type_: Some("Opaque".to_string()),
|
|
||||||
..Default::default()
|
|
||||||
};
|
|
||||||
|
|
||||||
let _ = sender.client.apply(&secret, Some(&ns)).await;
|
|
||||||
let spec = crate::modules::monitoring::kube_prometheus::crd::rhob_alertmanager_config::AlertmanagerConfigSpec {
|
let spec = crate::modules::monitoring::kube_prometheus::crd::rhob_alertmanager_config::AlertmanagerConfigSpec {
|
||||||
data: json!({
|
data: json!({
|
||||||
"route": {
|
"route": {
|
||||||
@@ -61,14 +43,9 @@ impl AlertReceiver<RHOBObservability> for DiscordWebhook {
|
|||||||
"receivers": [
|
"receivers": [
|
||||||
{
|
{
|
||||||
"name": self.name,
|
"name": self.name,
|
||||||
"discordConfigs": [
|
"webhookConfigs": [
|
||||||
{
|
{
|
||||||
"apiURL": {
|
"url": self.url,
|
||||||
"name": secret_name,
|
|
||||||
"key": "webhook-url",
|
|
||||||
},
|
|
||||||
"title": "{{ template \"discord.default.title\" . }}",
|
|
||||||
"message": "{{ template \"discord.default.message\" . }}"
|
|
||||||
}
|
}
|
||||||
]
|
]
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -43,11 +43,6 @@ impl AlertReceiver<RHOBObservability> for WebhookReceiver {
|
|||||||
"webhookConfigs": [
|
"webhookConfigs": [
|
||||||
{
|
{
|
||||||
"url": self.url,
|
"url": self.url,
|
||||||
"httpConfig": {
|
|
||||||
"tlsConfig": {
|
|
||||||
"insecureSkipVerify": true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
]
|
]
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -68,9 +68,7 @@ impl<T: Topology + PrometheusApplicationMonitoring<CRDPrometheus>> Interpret<T>
|
|||||||
PreparationOutcome::Success { details: _ } => {
|
PreparationOutcome::Success { details: _ } => {
|
||||||
Ok(Outcome::success("Prometheus installed".into()))
|
Ok(Outcome::success("Prometheus installed".into()))
|
||||||
}
|
}
|
||||||
PreparationOutcome::Noop => {
|
PreparationOutcome::Noop => Ok(Outcome::noop()),
|
||||||
Ok(Outcome::noop("Prometheus installation skipped".into()))
|
|
||||||
}
|
|
||||||
},
|
},
|
||||||
Err(err) => Err(InterpretError::from(err)),
|
Err(err) => Err(InterpretError::from(err)),
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -70,9 +70,7 @@ impl<T: Topology + PrometheusApplicationMonitoring<RHOBObservability>> Interpret
|
|||||||
PreparationOutcome::Success { details: _ } => {
|
PreparationOutcome::Success { details: _ } => {
|
||||||
Ok(Outcome::success("Prometheus installed".into()))
|
Ok(Outcome::success("Prometheus installed".into()))
|
||||||
}
|
}
|
||||||
PreparationOutcome::Noop => {
|
PreparationOutcome::Noop => Ok(Outcome::noop()),
|
||||||
Ok(Outcome::noop("Prometheus installation skipped".into()))
|
|
||||||
}
|
|
||||||
},
|
},
|
||||||
Err(err) => Err(InterpretError::from(err)),
|
Err(err) => Err(InterpretError::from(err)),
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -4,9 +4,7 @@ use kube::CustomResource;
|
|||||||
use schemars::JsonSchema;
|
use schemars::JsonSchema;
|
||||||
use serde::{Deserialize, Serialize};
|
use serde::{Deserialize, Serialize};
|
||||||
|
|
||||||
use crate::modules::monitoring::kube_prometheus::crd::rhob_prometheuses::{
|
use crate::modules::monitoring::kube_prometheus::crd::rhob_prometheuses::LabelSelector;
|
||||||
LabelSelector, PrometheusSpec,
|
|
||||||
};
|
|
||||||
|
|
||||||
/// MonitoringStack CRD for monitoring.rhobs/v1alpha1
|
/// MonitoringStack CRD for monitoring.rhobs/v1alpha1
|
||||||
#[derive(CustomResource, Serialize, Deserialize, Debug, Clone, JsonSchema)]
|
#[derive(CustomResource, Serialize, Deserialize, Debug, Clone, JsonSchema)]
|
||||||
|
|||||||
@@ -45,12 +45,6 @@ service:
|
|||||||
|
|
||||||
ingress:
|
ingress:
|
||||||
enabled: {ingress_enabled}
|
enabled: {ingress_enabled}
|
||||||
hosts:
|
|
||||||
- host: {host}
|
|
||||||
paths:
|
|
||||||
- path: /
|
|
||||||
pathType: ImplementationSpecific
|
|
||||||
|
|
||||||
|
|
||||||
route:
|
route:
|
||||||
enabled: {route_enabled}
|
enabled: {route_enabled}
|
||||||
|
|||||||
@@ -113,13 +113,7 @@ impl<T: Topology + HelmCommand + K8sclient + MultiTargetTopology> Interpret<T> f
|
|||||||
.await?;
|
.await?;
|
||||||
info!("user added");
|
info!("user added");
|
||||||
|
|
||||||
Ok(Outcome::success_with_details(
|
Ok(Outcome::success("Ntfy installed".to_string()))
|
||||||
"Ntfy installed".to_string(),
|
|
||||||
vec![format!(
|
|
||||||
"Ntfy ({}): http://{}",
|
|
||||||
self.score.namespace, self.score.host
|
|
||||||
)],
|
|
||||||
))
|
|
||||||
}
|
}
|
||||||
|
|
||||||
fn get_name(&self) -> InterpretName {
|
fn get_name(&self) -> InterpretName {
|
||||||
|
|||||||
@@ -1,19 +1,19 @@
|
|||||||
|
use async_trait::async_trait;
|
||||||
|
use derive_new::new;
|
||||||
|
use harmony_types::id::Id;
|
||||||
|
use log::{error, info, warn};
|
||||||
|
use serde::Serialize;
|
||||||
|
|
||||||
use crate::{
|
use crate::{
|
||||||
data::Version,
|
data::Version,
|
||||||
hardware::PhysicalHost,
|
hardware::PhysicalHost,
|
||||||
infra::inventory::InventoryRepositoryFactory,
|
infra::inventory::InventoryRepositoryFactory,
|
||||||
interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
|
interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
|
||||||
inventory::{HostRole, Inventory},
|
inventory::{HostRole, Inventory},
|
||||||
modules::inventory::DiscoverHostForRoleScore,
|
modules::inventory::{DiscoverHostForRoleScore, LaunchDiscoverInventoryAgentScore},
|
||||||
score::Score,
|
score::Score,
|
||||||
topology::HAClusterTopology,
|
topology::HAClusterTopology,
|
||||||
};
|
};
|
||||||
use async_trait::async_trait;
|
|
||||||
use derive_new::new;
|
|
||||||
use harmony_types::id::Id;
|
|
||||||
use log::info;
|
|
||||||
use serde::Serialize;
|
|
||||||
|
|
||||||
// -------------------------------------------------------------------------------------------------
|
// -------------------------------------------------------------------------------------------------
|
||||||
// Step 01: Inventory (default PXE + Kickstart in RAM + Rust agent)
|
// Step 01: Inventory (default PXE + Kickstart in RAM + Rust agent)
|
||||||
// - This score exposes/ensures the default inventory assets and waits for discoveries.
|
// - This score exposes/ensures the default inventory assets and waits for discoveries.
|
||||||
@@ -109,9 +109,12 @@ When you can dig them, confirm to continue.
|
|||||||
.await?;
|
.await?;
|
||||||
}
|
}
|
||||||
|
|
||||||
Ok(Outcome::success(format!(
|
Ok(Outcome::new(
|
||||||
"Found and assigned bootstrap node: {}",
|
InterpretStatus::SUCCESS,
|
||||||
bootstrap_host.unwrap().summary()
|
format!(
|
||||||
)))
|
"Found and assigned bootstrap node: {}",
|
||||||
|
bootstrap_host.unwrap().summary()
|
||||||
|
),
|
||||||
|
))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,13 +1,25 @@
|
|||||||
|
use std::{fmt::Write, path::PathBuf};
|
||||||
|
|
||||||
|
use async_trait::async_trait;
|
||||||
|
use derive_new::new;
|
||||||
|
use harmony_secret::SecretManager;
|
||||||
|
use harmony_types::id::Id;
|
||||||
|
use log::{debug, error, info, warn};
|
||||||
|
use serde::{Deserialize, Serialize};
|
||||||
|
use tokio::{fs::File, io::AsyncWriteExt, process::Command};
|
||||||
|
|
||||||
use crate::{
|
use crate::{
|
||||||
config::secret::{RedhatSecret, SshKeyPair},
|
config::secret::{RedhatSecret, SshKeyPair},
|
||||||
data::{FileContent, FilePath, Version},
|
data::{FileContent, FilePath, Version},
|
||||||
hardware::PhysicalHost,
|
hardware::PhysicalHost,
|
||||||
infra::inventory::InventoryRepositoryFactory,
|
infra::inventory::InventoryRepositoryFactory,
|
||||||
|
instrumentation::{HarmonyEvent, instrument},
|
||||||
interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
|
interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
|
||||||
inventory::{HostRole, Inventory},
|
inventory::{HostRole, Inventory},
|
||||||
modules::{
|
modules::{
|
||||||
dhcp::DhcpHostBindingScore,
|
dhcp::DhcpHostBindingScore,
|
||||||
http::{IPxeMacBootFileScore, StaticFilesHttpScore},
|
http::{IPxeMacBootFileScore, StaticFilesHttpScore},
|
||||||
|
inventory::LaunchDiscoverInventoryAgentScore,
|
||||||
okd::{
|
okd::{
|
||||||
bootstrap_load_balancer::OKDBootstrapLoadBalancerScore,
|
bootstrap_load_balancer::OKDBootstrapLoadBalancerScore,
|
||||||
templates::{BootstrapIpxeTpl, InstallConfigYaml},
|
templates::{BootstrapIpxeTpl, InstallConfigYaml},
|
||||||
@@ -16,15 +28,6 @@ use crate::{
|
|||||||
score::Score,
|
score::Score,
|
||||||
topology::{HAClusterTopology, HostBinding},
|
topology::{HAClusterTopology, HostBinding},
|
||||||
};
|
};
|
||||||
use async_trait::async_trait;
|
|
||||||
use derive_new::new;
|
|
||||||
use harmony_secret::SecretManager;
|
|
||||||
use harmony_types::id::Id;
|
|
||||||
use log::{debug, info};
|
|
||||||
use serde::Serialize;
|
|
||||||
use std::path::PathBuf;
|
|
||||||
use tokio::{fs::File, io::AsyncWriteExt, process::Command};
|
|
||||||
|
|
||||||
// -------------------------------------------------------------------------------------------------
|
// -------------------------------------------------------------------------------------------------
|
||||||
// Step 02: Bootstrap
|
// Step 02: Bootstrap
|
||||||
// - Select bootstrap node (from discovered set).
|
// - Select bootstrap node (from discovered set).
|
||||||
@@ -310,7 +313,7 @@ impl OKDSetup02BootstrapInterpret {
|
|||||||
info!("[Bootstrap] Rebooting bootstrap node via SSH");
|
info!("[Bootstrap] Rebooting bootstrap node via SSH");
|
||||||
// TODO reboot programatically, there are some logical checks and refactoring to do such as
|
// TODO reboot programatically, there are some logical checks and refactoring to do such as
|
||||||
// accessing the bootstrap node config (ip address) from the inventory
|
// accessing the bootstrap node config (ip address) from the inventory
|
||||||
let _ = inquire::Confirm::new(
|
let confirmation = inquire::Confirm::new(
|
||||||
"Now reboot the bootstrap node so it picks up its pxe boot file. Press enter when ready.",
|
"Now reboot the bootstrap node so it picks up its pxe boot file. Press enter when ready.",
|
||||||
)
|
)
|
||||||
.prompt()
|
.prompt()
|
||||||
@@ -376,6 +379,9 @@ impl Interpret<HAClusterTopology> for OKDSetup02BootstrapInterpret {
|
|||||||
self.reboot_target().await?;
|
self.reboot_target().await?;
|
||||||
self.wait_for_bootstrap_complete().await?;
|
self.wait_for_bootstrap_complete().await?;
|
||||||
|
|
||||||
Ok(Outcome::success("Bootstrap phase complete".into()))
|
Ok(Outcome::new(
|
||||||
|
InterpretStatus::SUCCESS,
|
||||||
|
"Bootstrap phase complete".into(),
|
||||||
|
))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,3 +1,11 @@
|
|||||||
|
use std::{fmt::Write, path::PathBuf};
|
||||||
|
|
||||||
|
use async_trait::async_trait;
|
||||||
|
use derive_new::new;
|
||||||
|
use harmony_types::id::Id;
|
||||||
|
use log::{debug, info};
|
||||||
|
use serde::Serialize;
|
||||||
|
|
||||||
use crate::{
|
use crate::{
|
||||||
data::Version,
|
data::Version,
|
||||||
hardware::PhysicalHost,
|
hardware::PhysicalHost,
|
||||||
@@ -5,20 +13,12 @@ use crate::{
|
|||||||
interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
|
interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
|
||||||
inventory::{HostRole, Inventory},
|
inventory::{HostRole, Inventory},
|
||||||
modules::{
|
modules::{
|
||||||
dhcp::DhcpHostBindingScore,
|
dhcp::DhcpHostBindingScore, http::IPxeMacBootFileScore,
|
||||||
http::IPxeMacBootFileScore,
|
inventory::DiscoverHostForRoleScore, okd::templates::BootstrapIpxeTpl,
|
||||||
inventory::DiscoverHostForRoleScore,
|
|
||||||
okd::{host_network::HostNetworkConfigurationScore, templates::BootstrapIpxeTpl},
|
|
||||||
},
|
},
|
||||||
score::Score,
|
score::Score,
|
||||||
topology::{HAClusterTopology, HostBinding},
|
topology::{HAClusterTopology, HostBinding},
|
||||||
};
|
};
|
||||||
use async_trait::async_trait;
|
|
||||||
use derive_new::new;
|
|
||||||
use harmony_types::id::Id;
|
|
||||||
use log::{debug, info};
|
|
||||||
use serde::Serialize;
|
|
||||||
|
|
||||||
// -------------------------------------------------------------------------------------------------
|
// -------------------------------------------------------------------------------------------------
|
||||||
// Step 03: Control Plane
|
// Step 03: Control Plane
|
||||||
// - Render per-MAC PXE & ignition for cp0/cp1/cp2.
|
// - Render per-MAC PXE & ignition for cp0/cp1/cp2.
|
||||||
@@ -30,7 +30,7 @@ pub struct OKDSetup03ControlPlaneScore {}
|
|||||||
|
|
||||||
impl Score<HAClusterTopology> for OKDSetup03ControlPlaneScore {
|
impl Score<HAClusterTopology> for OKDSetup03ControlPlaneScore {
|
||||||
fn create_interpret(&self) -> Box<dyn Interpret<HAClusterTopology>> {
|
fn create_interpret(&self) -> Box<dyn Interpret<HAClusterTopology>> {
|
||||||
Box::new(OKDSetup03ControlPlaneInterpret::new())
|
Box::new(OKDSetup03ControlPlaneInterpret::new(self.clone()))
|
||||||
}
|
}
|
||||||
|
|
||||||
fn name(&self) -> String {
|
fn name(&self) -> String {
|
||||||
@@ -40,15 +40,17 @@ impl Score<HAClusterTopology> for OKDSetup03ControlPlaneScore {
|
|||||||
|
|
||||||
#[derive(Debug, Clone)]
|
#[derive(Debug, Clone)]
|
||||||
pub struct OKDSetup03ControlPlaneInterpret {
|
pub struct OKDSetup03ControlPlaneInterpret {
|
||||||
|
score: OKDSetup03ControlPlaneScore,
|
||||||
version: Version,
|
version: Version,
|
||||||
status: InterpretStatus,
|
status: InterpretStatus,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl OKDSetup03ControlPlaneInterpret {
|
impl OKDSetup03ControlPlaneInterpret {
|
||||||
pub fn new() -> Self {
|
pub fn new(score: OKDSetup03ControlPlaneScore) -> Self {
|
||||||
let version = Version::from("1.0.0").unwrap();
|
let version = Version::from("1.0.0").unwrap();
|
||||||
Self {
|
Self {
|
||||||
version,
|
version,
|
||||||
|
score,
|
||||||
status: InterpretStatus::QUEUED,
|
status: InterpretStatus::QUEUED,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -159,7 +161,7 @@ impl OKDSetup03ControlPlaneInterpret {
|
|||||||
}
|
}
|
||||||
.to_string();
|
.to_string();
|
||||||
|
|
||||||
debug!("[ControlPlane] iPXE content template:\n{content}");
|
debug!("[ControlPlane] iPXE content template:\n{}", content);
|
||||||
|
|
||||||
// Create and apply an iPXE boot file for each node.
|
// Create and apply an iPXE boot file for each node.
|
||||||
for node in nodes {
|
for node in nodes {
|
||||||
@@ -189,13 +191,16 @@ impl OKDSetup03ControlPlaneInterpret {
|
|||||||
/// Prompts the user to reboot the target control plane nodes.
|
/// Prompts the user to reboot the target control plane nodes.
|
||||||
async fn reboot_targets(&self, nodes: &Vec<PhysicalHost>) -> Result<(), InterpretError> {
|
async fn reboot_targets(&self, nodes: &Vec<PhysicalHost>) -> Result<(), InterpretError> {
|
||||||
let node_ids: Vec<String> = nodes.iter().map(|n| n.id.to_string()).collect();
|
let node_ids: Vec<String> = nodes.iter().map(|n| n.id.to_string()).collect();
|
||||||
info!("[ControlPlane] Requesting reboot for control plane nodes: {node_ids:?}",);
|
info!(
|
||||||
|
"[ControlPlane] Requesting reboot for control plane nodes: {:?}",
|
||||||
|
node_ids
|
||||||
|
);
|
||||||
|
|
||||||
let confirmation = inquire::Confirm::new(
|
let confirmation = inquire::Confirm::new(
|
||||||
&format!("Please reboot the {} control plane nodes ({}) to apply their PXE configuration. Press enter when ready.", nodes.len(), node_ids.join(", ")),
|
&format!("Please reboot the {} control plane nodes ({}) to apply their PXE configuration. Press enter when ready.", nodes.len(), node_ids.join(", ")),
|
||||||
)
|
)
|
||||||
.prompt()
|
.prompt()
|
||||||
.map_err(|e| InterpretError::new(format!("User prompt failed: {e}")))?;
|
.map_err(|e| InterpretError::new(format!("User prompt failed: {}", e)))?;
|
||||||
|
|
||||||
if !confirmation {
|
if !confirmation {
|
||||||
return Err(InterpretError::new(
|
return Err(InterpretError::new(
|
||||||
@@ -207,23 +212,14 @@ impl OKDSetup03ControlPlaneInterpret {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/// Placeholder for automating network bonding configuration.
|
/// Placeholder for automating network bonding configuration.
|
||||||
async fn persist_network_bond(
|
async fn persist_network_bond(&self) -> Result<(), InterpretError> {
|
||||||
&self,
|
// Generate MC or NNCP from inventory NIC data; apply via ignition or post-join.
|
||||||
inventory: &Inventory,
|
info!("[ControlPlane] Ensuring persistent bonding via MachineConfig/NNCP");
|
||||||
topology: &HAClusterTopology,
|
|
||||||
hosts: &Vec<PhysicalHost>,
|
|
||||||
) -> Result<(), InterpretError> {
|
|
||||||
info!("[ControlPlane] Ensuring persistent bonding");
|
|
||||||
let score = HostNetworkConfigurationScore {
|
|
||||||
hosts: hosts.clone(), // FIXME: Avoid clone if possible
|
|
||||||
};
|
|
||||||
score.interpret(inventory, topology).await?;
|
|
||||||
|
|
||||||
inquire::Confirm::new(
|
inquire::Confirm::new(
|
||||||
"Network configuration for control plane nodes is not automated yet. Configure it manually if needed.",
|
"Network configuration for control plane nodes is not automated yet. Configure it manually if needed.",
|
||||||
)
|
)
|
||||||
.prompt()
|
.prompt()
|
||||||
.map_err(|e| InterpretError::new(format!("User prompt failed: {e}")))?;
|
.map_err(|e| InterpretError::new(format!("User prompt failed: {}", e)))?;
|
||||||
|
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
@@ -266,15 +262,15 @@ impl Interpret<HAClusterTopology> for OKDSetup03ControlPlaneInterpret {
|
|||||||
self.reboot_targets(&nodes).await?;
|
self.reboot_targets(&nodes).await?;
|
||||||
|
|
||||||
// 5. Placeholder for post-boot network configuration (e.g., bonding).
|
// 5. Placeholder for post-boot network configuration (e.g., bonding).
|
||||||
self.persist_network_bond(inventory, topology, &nodes)
|
self.persist_network_bond().await?;
|
||||||
.await?;
|
|
||||||
|
|
||||||
// TODO: Implement a step to wait for the control plane nodes to join the cluster
|
// TODO: Implement a step to wait for the control plane nodes to join the cluster
|
||||||
// and for the cluster operators to become available. This would be similar to
|
// and for the cluster operators to become available. This would be similar to
|
||||||
// the `wait-for bootstrap-complete` command.
|
// the `wait-for bootstrap-complete` command.
|
||||||
info!("[ControlPlane] Provisioning initiated. Monitor the cluster convergence manually.");
|
info!("[ControlPlane] Provisioning initiated. Monitor the cluster convergence manually.");
|
||||||
|
|
||||||
Ok(Outcome::success(
|
Ok(Outcome::new(
|
||||||
|
InterpretStatus::SUCCESS,
|
||||||
"Control plane provisioning has been successfully initiated.".into(),
|
"Control plane provisioning has been successfully initiated.".into(),
|
||||||
))
|
))
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,17 +1,33 @@
|
|||||||
|
use std::{fmt::Write, path::PathBuf};
|
||||||
|
|
||||||
use async_trait::async_trait;
|
use async_trait::async_trait;
|
||||||
use derive_new::new;
|
use derive_new::new;
|
||||||
|
use harmony_secret::SecretManager;
|
||||||
use harmony_types::id::Id;
|
use harmony_types::id::Id;
|
||||||
use log::info;
|
use log::{debug, error, info, warn};
|
||||||
use serde::Serialize;
|
use serde::{Deserialize, Serialize};
|
||||||
|
use tokio::{fs::File, io::AsyncWriteExt, process::Command};
|
||||||
|
|
||||||
use crate::{
|
use crate::{
|
||||||
data::Version,
|
config::secret::{RedhatSecret, SshKeyPair},
|
||||||
|
data::{FileContent, FilePath, Version},
|
||||||
|
hardware::PhysicalHost,
|
||||||
|
infra::inventory::InventoryRepositoryFactory,
|
||||||
|
instrumentation::{HarmonyEvent, instrument},
|
||||||
interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
|
interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
|
||||||
inventory::Inventory,
|
inventory::{HostRole, Inventory},
|
||||||
|
modules::{
|
||||||
|
dhcp::DhcpHostBindingScore,
|
||||||
|
http::{IPxeMacBootFileScore, StaticFilesHttpScore},
|
||||||
|
inventory::LaunchDiscoverInventoryAgentScore,
|
||||||
|
okd::{
|
||||||
|
bootstrap_load_balancer::OKDBootstrapLoadBalancerScore,
|
||||||
|
templates::{BootstrapIpxeTpl, InstallConfigYaml},
|
||||||
|
},
|
||||||
|
},
|
||||||
score::Score,
|
score::Score,
|
||||||
topology::HAClusterTopology,
|
topology::{HAClusterTopology, HostBinding},
|
||||||
};
|
};
|
||||||
|
|
||||||
// -------------------------------------------------------------------------------------------------
|
// -------------------------------------------------------------------------------------------------
|
||||||
// Step 04: Workers
|
// Step 04: Workers
|
||||||
// - Render per-MAC PXE & ignition for workers; join nodes.
|
// - Render per-MAC PXE & ignition for workers; join nodes.
|
||||||
@@ -78,6 +94,9 @@ impl Interpret<HAClusterTopology> for OKDSetup04WorkersInterpret {
|
|||||||
_topology: &HAClusterTopology,
|
_topology: &HAClusterTopology,
|
||||||
) -> Result<Outcome, InterpretError> {
|
) -> Result<Outcome, InterpretError> {
|
||||||
self.render_and_reboot().await?;
|
self.render_and_reboot().await?;
|
||||||
Ok(Outcome::success("Workers provisioned".into()))
|
Ok(Outcome::new(
|
||||||
|
InterpretStatus::SUCCESS,
|
||||||
|
"Workers provisioned".into(),
|
||||||
|
))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,16 +1,33 @@
|
|||||||
use crate::{
|
use std::{fmt::Write, path::PathBuf};
|
||||||
data::Version,
|
|
||||||
interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
|
|
||||||
inventory::Inventory,
|
|
||||||
score::Score,
|
|
||||||
topology::HAClusterTopology,
|
|
||||||
};
|
|
||||||
use async_trait::async_trait;
|
use async_trait::async_trait;
|
||||||
use derive_new::new;
|
use derive_new::new;
|
||||||
|
use harmony_secret::SecretManager;
|
||||||
use harmony_types::id::Id;
|
use harmony_types::id::Id;
|
||||||
use log::info;
|
use log::{debug, error, info, warn};
|
||||||
use serde::Serialize;
|
use serde::{Deserialize, Serialize};
|
||||||
|
use tokio::{fs::File, io::AsyncWriteExt, process::Command};
|
||||||
|
|
||||||
|
use crate::{
|
||||||
|
config::secret::{RedhatSecret, SshKeyPair},
|
||||||
|
data::{FileContent, FilePath, Version},
|
||||||
|
hardware::PhysicalHost,
|
||||||
|
infra::inventory::InventoryRepositoryFactory,
|
||||||
|
instrumentation::{HarmonyEvent, instrument},
|
||||||
|
interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
|
||||||
|
inventory::{HostRole, Inventory},
|
||||||
|
modules::{
|
||||||
|
dhcp::DhcpHostBindingScore,
|
||||||
|
http::{IPxeMacBootFileScore, StaticFilesHttpScore},
|
||||||
|
inventory::LaunchDiscoverInventoryAgentScore,
|
||||||
|
okd::{
|
||||||
|
bootstrap_load_balancer::OKDBootstrapLoadBalancerScore,
|
||||||
|
templates::{BootstrapIpxeTpl, InstallConfigYaml},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
score::Score,
|
||||||
|
topology::{HAClusterTopology, HostBinding},
|
||||||
|
};
|
||||||
// -------------------------------------------------------------------------------------------------
|
// -------------------------------------------------------------------------------------------------
|
||||||
// Step 05: Sanity Check
|
// Step 05: Sanity Check
|
||||||
// - Validate API reachability, ClusterOperators, ingress, and SDN status.
|
// - Validate API reachability, ClusterOperators, ingress, and SDN status.
|
||||||
@@ -76,6 +93,9 @@ impl Interpret<HAClusterTopology> for OKDSetup05SanityCheckInterpret {
|
|||||||
_topology: &HAClusterTopology,
|
_topology: &HAClusterTopology,
|
||||||
) -> Result<Outcome, InterpretError> {
|
) -> Result<Outcome, InterpretError> {
|
||||||
self.run_checks().await?;
|
self.run_checks().await?;
|
||||||
Ok(Outcome::success("Sanity checks passed".into()))
|
Ok(Outcome::new(
|
||||||
|
InterpretStatus::SUCCESS,
|
||||||
|
"Sanity checks passed".into(),
|
||||||
|
))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,15 +1,32 @@
|
|||||||
|
// -------------------------------------------------------------------------------------------------
|
||||||
use async_trait::async_trait;
|
use async_trait::async_trait;
|
||||||
use derive_new::new;
|
use derive_new::new;
|
||||||
|
use harmony_secret::SecretManager;
|
||||||
use harmony_types::id::Id;
|
use harmony_types::id::Id;
|
||||||
use log::info;
|
use log::{debug, error, info, warn};
|
||||||
use serde::Serialize;
|
use serde::{Deserialize, Serialize};
|
||||||
|
use std::{fmt::Write, path::PathBuf};
|
||||||
|
use tokio::{fs::File, io::AsyncWriteExt, process::Command};
|
||||||
|
|
||||||
use crate::{
|
use crate::{
|
||||||
data::Version,
|
config::secret::{RedhatSecret, SshKeyPair},
|
||||||
|
data::{FileContent, FilePath, Version},
|
||||||
|
hardware::PhysicalHost,
|
||||||
|
infra::inventory::InventoryRepositoryFactory,
|
||||||
|
instrumentation::{HarmonyEvent, instrument},
|
||||||
interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
|
interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
|
||||||
inventory::Inventory,
|
inventory::{HostRole, Inventory},
|
||||||
|
modules::{
|
||||||
|
dhcp::DhcpHostBindingScore,
|
||||||
|
http::{IPxeMacBootFileScore, StaticFilesHttpScore},
|
||||||
|
inventory::LaunchDiscoverInventoryAgentScore,
|
||||||
|
okd::{
|
||||||
|
bootstrap_load_balancer::OKDBootstrapLoadBalancerScore,
|
||||||
|
templates::{BootstrapIpxeTpl, InstallConfigYaml},
|
||||||
|
},
|
||||||
|
},
|
||||||
score::Score,
|
score::Score,
|
||||||
topology::HAClusterTopology,
|
topology::{HAClusterTopology, HostBinding},
|
||||||
};
|
};
|
||||||
|
|
||||||
// Step 06: Installation Report
|
// Step 06: Installation Report
|
||||||
@@ -76,6 +93,9 @@ impl Interpret<HAClusterTopology> for OKDSetup06InstallationReportInterpret {
|
|||||||
_topology: &HAClusterTopology,
|
_topology: &HAClusterTopology,
|
||||||
) -> Result<Outcome, InterpretError> {
|
) -> Result<Outcome, InterpretError> {
|
||||||
self.generate().await?;
|
self.generate().await?;
|
||||||
Ok(Outcome::success("Installation report generated".into()))
|
Ok(Outcome::new(
|
||||||
|
InterpretStatus::SUCCESS,
|
||||||
|
"Installation report generated".into(),
|
||||||
|
))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,41 +0,0 @@
|
|||||||
use kube::CustomResource;
|
|
||||||
use schemars::JsonSchema;
|
|
||||||
use serde::{Deserialize, Serialize};
|
|
||||||
|
|
||||||
pub mod nmstate;
|
|
||||||
|
|
||||||
#[derive(CustomResource, Deserialize, Serialize, Clone, Debug, JsonSchema)]
|
|
||||||
#[kube(
|
|
||||||
group = "operators.coreos.com",
|
|
||||||
version = "v1",
|
|
||||||
kind = "OperatorGroup",
|
|
||||||
namespaced
|
|
||||||
)]
|
|
||||||
#[serde(rename_all = "camelCase")]
|
|
||||||
pub struct OperatorGroupSpec {
|
|
||||||
pub target_namespaces: Vec<String>,
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(CustomResource, Deserialize, Serialize, Clone, Debug, JsonSchema)]
|
|
||||||
#[kube(
|
|
||||||
group = "operators.coreos.com",
|
|
||||||
version = "v1alpha1",
|
|
||||||
kind = "Subscription",
|
|
||||||
namespaced
|
|
||||||
)]
|
|
||||||
#[serde(rename_all = "camelCase")]
|
|
||||||
pub struct SubscriptionSpec {
|
|
||||||
pub name: String,
|
|
||||||
pub source: String,
|
|
||||||
pub source_namespace: String,
|
|
||||||
pub channel: Option<String>,
|
|
||||||
pub install_plan_approval: Option<InstallPlanApproval>,
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Deserialize, Serialize, Clone, Debug, JsonSchema)]
|
|
||||||
pub enum InstallPlanApproval {
|
|
||||||
#[serde(rename = "Automatic")]
|
|
||||||
Automatic,
|
|
||||||
#[serde(rename = "Manual")]
|
|
||||||
Manual,
|
|
||||||
}
|
|
||||||
@@ -1,251 +0,0 @@
|
|||||||
use std::collections::BTreeMap;
|
|
||||||
|
|
||||||
use kube::CustomResource;
|
|
||||||
use schemars::JsonSchema;
|
|
||||||
use serde::{Deserialize, Serialize};
|
|
||||||
use serde_json::Value;
|
|
||||||
|
|
||||||
#[derive(CustomResource, Deserialize, Serialize, Clone, Debug, JsonSchema)]
|
|
||||||
#[kube(group = "nmstate.io", version = "v1", kind = "NMState", namespaced)]
|
|
||||||
#[serde(rename_all = "camelCase")]
|
|
||||||
pub struct NMStateSpec {
|
|
||||||
pub probe_configuration: Option<ProbeConfig>,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Default for NMState {
|
|
||||||
fn default() -> Self {
|
|
||||||
Self {
|
|
||||||
metadata: Default::default(),
|
|
||||||
spec: NMStateSpec {
|
|
||||||
probe_configuration: None,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Deserialize, Serialize, Clone, Debug, JsonSchema)]
|
|
||||||
#[serde(rename_all = "camelCase")]
|
|
||||||
pub struct ProbeConfig {
|
|
||||||
pub dns: ProbeDns,
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Deserialize, Serialize, Clone, Debug, JsonSchema)]
|
|
||||||
#[serde(rename_all = "camelCase")]
|
|
||||||
pub struct ProbeDns {
|
|
||||||
pub host: String,
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(CustomResource, Deserialize, Serialize, Clone, Debug, JsonSchema)]
|
|
||||||
#[kube(
|
|
||||||
group = "nmstate.io",
|
|
||||||
version = "v1",
|
|
||||||
kind = "NodeNetworkConfigurationPolicy",
|
|
||||||
namespaced
|
|
||||||
)]
|
|
||||||
#[serde(rename_all = "camelCase")]
|
|
||||||
pub struct NodeNetworkConfigurationPolicySpec {
|
|
||||||
pub node_selector: Option<BTreeMap<String, String>>,
|
|
||||||
pub desired_state: DesiredStateSpec,
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Deserialize, Serialize, Clone, Debug, JsonSchema)]
|
|
||||||
#[serde(rename_all = "kebab-case")]
|
|
||||||
pub struct DesiredStateSpec {
|
|
||||||
pub interfaces: Vec<InterfaceSpec>,
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
|
|
||||||
#[serde(rename_all = "kebab-case")]
|
|
||||||
pub struct InterfaceSpec {
|
|
||||||
pub name: String,
|
|
||||||
pub description: Option<String>,
|
|
||||||
pub r#type: String,
|
|
||||||
pub state: String,
|
|
||||||
pub mac_address: Option<String>,
|
|
||||||
pub mtu: Option<u32>,
|
|
||||||
pub controller: Option<String>,
|
|
||||||
pub ipv4: Option<IpStackSpec>,
|
|
||||||
pub ipv6: Option<IpStackSpec>,
|
|
||||||
pub ethernet: Option<EthernetSpec>,
|
|
||||||
pub link_aggregation: Option<BondSpec>,
|
|
||||||
pub vlan: Option<VlanSpec>,
|
|
||||||
pub vxlan: Option<VxlanSpec>,
|
|
||||||
pub mac_vtap: Option<MacVtapSpec>,
|
|
||||||
pub mac_vlan: Option<MacVlanSpec>,
|
|
||||||
pub infiniband: Option<InfinibandSpec>,
|
|
||||||
pub linux_bridge: Option<LinuxBridgeSpec>,
|
|
||||||
pub ovs_bridge: Option<OvsBridgeSpec>,
|
|
||||||
pub ethtool: Option<EthtoolSpec>,
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
|
|
||||||
#[serde(rename_all = "kebab-case")]
|
|
||||||
pub struct IpStackSpec {
|
|
||||||
pub enabled: Option<bool>,
|
|
||||||
pub dhcp: Option<bool>,
|
|
||||||
pub autoconf: Option<bool>,
|
|
||||||
pub address: Option<Vec<IpAddressSpec>>,
|
|
||||||
pub auto_dns: Option<bool>,
|
|
||||||
pub auto_gateway: Option<bool>,
|
|
||||||
pub auto_routes: Option<bool>,
|
|
||||||
pub dhcp_client_id: Option<String>,
|
|
||||||
pub dhcp_duid: Option<String>,
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
|
|
||||||
#[serde(rename_all = "kebab-case")]
|
|
||||||
pub struct IpAddressSpec {
|
|
||||||
pub ip: String,
|
|
||||||
pub prefix_length: u8,
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
|
|
||||||
#[serde(rename_all = "kebab-case")]
|
|
||||||
pub struct EthernetSpec {
|
|
||||||
pub speed: Option<u32>,
|
|
||||||
pub duplex: Option<String>,
|
|
||||||
pub auto_negotiation: Option<bool>,
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
|
|
||||||
#[serde(rename_all = "kebab-case")]
|
|
||||||
pub struct BondSpec {
|
|
||||||
pub mode: String,
|
|
||||||
pub ports: Vec<String>,
|
|
||||||
pub options: Option<BTreeMap<String, Value>>,
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
|
|
||||||
#[serde(rename_all = "kebab-case")]
|
|
||||||
pub struct VlanSpec {
|
|
||||||
pub base_iface: String,
|
|
||||||
pub id: u16,
|
|
||||||
pub protocol: Option<String>,
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
|
|
||||||
#[serde(rename_all = "kebab-case")]
|
|
||||||
pub struct VxlanSpec {
|
|
||||||
pub base_iface: String,
|
|
||||||
pub id: u32,
|
|
||||||
pub remote: String,
|
|
||||||
pub local: Option<String>,
|
|
||||||
pub learning: Option<bool>,
|
|
||||||
pub destination_port: Option<u16>,
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
|
|
||||||
#[serde(rename_all = "kebab-case")]
|
|
||||||
pub struct MacVtapSpec {
|
|
||||||
pub base_iface: String,
|
|
||||||
pub mode: String,
|
|
||||||
pub promiscuous: Option<bool>,
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
|
|
||||||
#[serde(rename_all = "kebab-case")]
|
|
||||||
pub struct MacVlanSpec {
|
|
||||||
pub base_iface: String,
|
|
||||||
pub mode: String,
|
|
||||||
pub promiscuous: Option<bool>,
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
|
|
||||||
#[serde(rename_all = "kebab-case")]
|
|
||||||
pub struct InfinibandSpec {
|
|
||||||
pub base_iface: String,
|
|
||||||
pub pkey: String,
|
|
||||||
pub mode: String,
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
|
|
||||||
#[serde(rename_all = "kebab-case")]
|
|
||||||
pub struct LinuxBridgeSpec {
|
|
||||||
pub options: Option<LinuxBridgeOptions>,
|
|
||||||
pub ports: Option<Vec<LinuxBridgePort>>,
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
|
|
||||||
#[serde(rename_all = "kebab-case")]
|
|
||||||
pub struct LinuxBridgeOptions {
|
|
||||||
pub mac_ageing_time: Option<u32>,
|
|
||||||
pub multicast_snooping: Option<bool>,
|
|
||||||
pub stp: Option<StpOptions>,
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
|
|
||||||
#[serde(rename_all = "kebab-case")]
|
|
||||||
pub struct StpOptions {
|
|
||||||
pub enabled: Option<bool>,
|
|
||||||
pub forward_delay: Option<u16>,
|
|
||||||
pub hello_time: Option<u16>,
|
|
||||||
pub max_age: Option<u16>,
|
|
||||||
pub priority: Option<u16>,
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
|
|
||||||
#[serde(rename_all = "kebab-case")]
|
|
||||||
pub struct LinuxBridgePort {
|
|
||||||
pub name: String,
|
|
||||||
pub vlan: Option<LinuxBridgePortVlan>,
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
|
|
||||||
#[serde(rename_all = "kebab-case")]
|
|
||||||
pub struct LinuxBridgePortVlan {
|
|
||||||
pub mode: Option<String>,
|
|
||||||
pub trunk_tags: Option<Vec<VlanTag>>,
|
|
||||||
pub tag: Option<u16>,
|
|
||||||
pub enable_native: Option<bool>,
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
|
|
||||||
#[serde(rename_all = "kebab-case")]
|
|
||||||
pub struct VlanTag {
|
|
||||||
pub id: u16,
|
|
||||||
pub id_range: Option<VlanIdRange>,
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
|
|
||||||
#[serde(rename_all = "kebab-case")]
|
|
||||||
pub struct VlanIdRange {
|
|
||||||
pub min: u16,
|
|
||||||
pub max: u16,
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
|
|
||||||
#[serde(rename_all = "kebab-case")]
|
|
||||||
pub struct OvsBridgeSpec {
|
|
||||||
pub options: Option<OvsBridgeOptions>,
|
|
||||||
pub ports: Option<Vec<OvsPortSpec>>,
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
|
|
||||||
#[serde(rename_all = "kebab-case")]
|
|
||||||
pub struct OvsBridgeOptions {
|
|
||||||
pub stp: Option<bool>,
|
|
||||||
pub rstp: Option<bool>,
|
|
||||||
pub mcast_snooping_enable: Option<bool>,
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
|
|
||||||
#[serde(rename_all = "kebab-case")]
|
|
||||||
pub struct OvsPortSpec {
|
|
||||||
pub name: String,
|
|
||||||
pub link_aggregation: Option<BondSpec>,
|
|
||||||
pub vlan: Option<LinuxBridgePortVlan>,
|
|
||||||
pub r#type: Option<String>,
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
|
|
||||||
#[serde(rename_all = "kebab-case")]
|
|
||||||
pub struct EthtoolSpec {
|
|
||||||
// FIXME: Properly describe this spec (https://nmstate.io/devel/yaml_api.html#ethtool)
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
|
|
||||||
#[serde(rename_all = "kebab-case")]
|
|
||||||
pub struct EthtoolFecSpec {
|
|
||||||
pub auto: Option<bool>,
|
|
||||||
pub mode: Option<String>,
|
|
||||||
}
|
|
||||||
@@ -1,340 +0,0 @@
|
|||||||
use async_trait::async_trait;
|
|
||||||
use harmony_types::id::Id;
|
|
||||||
use log::{debug, info, warn};
|
|
||||||
use serde::Serialize;
|
|
||||||
|
|
||||||
use crate::{
|
|
||||||
data::Version,
|
|
||||||
hardware::PhysicalHost,
|
|
||||||
interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
|
|
||||||
inventory::Inventory,
|
|
||||||
score::Score,
|
|
||||||
topology::{HostNetworkConfig, NetworkInterface, Switch, SwitchPort, Topology},
|
|
||||||
};
|
|
||||||
|
|
||||||
#[derive(Debug, Clone, Serialize)]
|
|
||||||
pub struct HostNetworkConfigurationScore {
|
|
||||||
pub hosts: Vec<PhysicalHost>,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<T: Topology + Switch> Score<T> for HostNetworkConfigurationScore {
|
|
||||||
fn name(&self) -> String {
|
|
||||||
"HostNetworkConfigurationScore".into()
|
|
||||||
}
|
|
||||||
|
|
||||||
fn create_interpret(&self) -> Box<dyn Interpret<T>> {
|
|
||||||
Box::new(HostNetworkConfigurationInterpret {
|
|
||||||
score: self.clone(),
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Debug)]
|
|
||||||
pub struct HostNetworkConfigurationInterpret {
|
|
||||||
score: HostNetworkConfigurationScore,
|
|
||||||
}
|
|
||||||
|
|
||||||
#[async_trait]
|
|
||||||
impl<T: Topology + Switch> Interpret<T> for HostNetworkConfigurationInterpret {
|
|
||||||
fn get_name(&self) -> InterpretName {
|
|
||||||
InterpretName::Custom("HostNetworkConfigurationInterpret")
|
|
||||||
}
|
|
||||||
|
|
||||||
fn get_version(&self) -> Version {
|
|
||||||
todo!()
|
|
||||||
}
|
|
||||||
|
|
||||||
fn get_status(&self) -> InterpretStatus {
|
|
||||||
todo!()
|
|
||||||
}
|
|
||||||
|
|
||||||
fn get_children(&self) -> Vec<Id> {
|
|
||||||
vec![]
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn execute(
|
|
||||||
&self,
|
|
||||||
_inventory: &Inventory,
|
|
||||||
topology: &T,
|
|
||||||
) -> Result<Outcome, InterpretError> {
|
|
||||||
info!(
|
|
||||||
"Started network configuration for {} host(s)...",
|
|
||||||
self.score.hosts.len()
|
|
||||||
);
|
|
||||||
|
|
||||||
let mut configured_host_count = 0;
|
|
||||||
|
|
||||||
for host in &self.score.hosts {
|
|
||||||
let mut switch_ports = vec![];
|
|
||||||
|
|
||||||
for network_interface in &host.network {
|
|
||||||
let mac_address = network_interface.mac_address;
|
|
||||||
|
|
||||||
match topology.get_port_for_mac_address(&mac_address).await {
|
|
||||||
Ok(Some(port)) => {
|
|
||||||
switch_ports.push(SwitchPort {
|
|
||||||
interface: NetworkInterface {
|
|
||||||
name: network_interface.name.clone(),
|
|
||||||
mac_address,
|
|
||||||
speed_mbps: network_interface.speed_mbps,
|
|
||||||
mtu: network_interface.mtu,
|
|
||||||
},
|
|
||||||
port,
|
|
||||||
});
|
|
||||||
}
|
|
||||||
Ok(None) => debug!("No port found for host '{}', skipping", host.id),
|
|
||||||
Err(e) => warn!("Failed to get port for host '{}': {}", host.id, e),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if !switch_ports.is_empty() {
|
|
||||||
configured_host_count += 1;
|
|
||||||
topology
|
|
||||||
.configure_host_network(host, HostNetworkConfig { switch_ports })
|
|
||||||
.await
|
|
||||||
.map_err(|e| InterpretError::new(format!("Failed to configure host: {e}")))?;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if configured_host_count > 0 {
|
|
||||||
Ok(Outcome::success(format!(
|
|
||||||
"Configured {configured_host_count}/{} host(s)",
|
|
||||||
self.score.hosts.len()
|
|
||||||
)))
|
|
||||||
} else {
|
|
||||||
Ok(Outcome::noop("No hosts configured".into()))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Unit tests for the host-network configuration score.
///
/// They drive `interpret` against a fake topology (`TopologyWithSwitch`)
/// that resolves MAC addresses from a fixed pool of switch ports and records
/// every requested host network configuration instead of touching hardware.
///
/// NOTE(review): this module uses `assertor` and `lazy_static`; the
/// accompanying Cargo.lock change appears to drop `assertor` from the crate's
/// dependencies — confirm it is still declared as a dev-dependency.
#[cfg(test)]
mod tests {
    use assertor::*;
    use harmony_types::{net::MacAddress, switch::PortLocation};
    use lazy_static::lazy_static;

    use crate::{
        hardware::HostCategory,
        topology::{
            HostNetworkConfig, PreparationError, PreparationOutcome, SwitchError, SwitchPort,
        },
    };
    use std::{
        str::FromStr,
        sync::{Arc, Mutex},
    };

    use super::*;

    // Shared fixtures. `lazy_static` is used because `Id`, `NetworkInterface`
    // and `MacAddress` are built through fallible / non-const constructors.
    lazy_static! {
        pub static ref HOST_ID: Id = Id::from_str("host-1").unwrap();
        pub static ref ANOTHER_HOST_ID: Id = Id::from_str("host-2").unwrap();
        // Interface whose MAC the fake switch will resolve to a port.
        pub static ref EXISTING_INTERFACE: NetworkInterface = NetworkInterface {
            mac_address: MacAddress::try_from("AA:BB:CC:DD:EE:F1".to_string()).unwrap(),
            name: "interface-1".into(),
            speed_mbps: None,
            mtu: 1,
        };
        pub static ref ANOTHER_EXISTING_INTERFACE: NetworkInterface = NetworkInterface {
            mac_address: MacAddress::try_from("AA:BB:CC:DD:EE:F2".to_string()).unwrap(),
            name: "interface-2".into(),
            speed_mbps: None,
            mtu: 1,
        };
        // Interface the switch cannot locate (see `new_port_not_found`).
        pub static ref UNKNOWN_INTERFACE: NetworkInterface = NetworkInterface {
            mac_address: MacAddress::try_from("11:22:33:44:55:61".to_string()).unwrap(),
            name: "unknown-interface".into(),
            speed_mbps: None,
            mtu: 1,
        };
        pub static ref PORT: PortLocation = PortLocation(1, 0, 42);
        pub static ref ANOTHER_PORT: PortLocation = PortLocation(2, 0, 42);
    }

    #[tokio::test]
    async fn host_with_one_mac_address_should_create_bond_with_one_interface() {
        let host = given_host(&HOST_ID, vec![EXISTING_INTERFACE.clone()]);
        let score = given_score(vec![host]);
        let topology = TopologyWithSwitch::new();

        let _ = score.interpret(&Inventory::empty(), &topology).await;

        // The single interface should be mapped to the first available port.
        let configured_host_networks = topology.configured_host_networks.lock().unwrap();
        assert_that!(*configured_host_networks).contains_exactly(vec![(
            HOST_ID.clone(),
            HostNetworkConfig {
                switch_ports: vec![SwitchPort {
                    interface: EXISTING_INTERFACE.clone(),
                    port: PORT.clone(),
                }],
            },
        )]);
    }

    #[tokio::test]
    async fn host_with_multiple_mac_addresses_should_create_one_bond_with_all_interfaces() {
        let score = given_score(vec![given_host(
            &HOST_ID,
            vec![
                EXISTING_INTERFACE.clone(),
                ANOTHER_EXISTING_INTERFACE.clone(),
            ],
        )]);
        let topology = TopologyWithSwitch::new();

        let _ = score.interpret(&Inventory::empty(), &topology).await;

        // Both interfaces end up in a single config entry for the same host.
        let configured_host_networks = topology.configured_host_networks.lock().unwrap();
        assert_that!(*configured_host_networks).contains_exactly(vec![(
            HOST_ID.clone(),
            HostNetworkConfig {
                switch_ports: vec![
                    SwitchPort {
                        interface: EXISTING_INTERFACE.clone(),
                        port: PORT.clone(),
                    },
                    SwitchPort {
                        interface: ANOTHER_EXISTING_INTERFACE.clone(),
                        port: ANOTHER_PORT.clone(),
                    },
                ],
            },
        )]);
    }

    #[tokio::test]
    async fn multiple_hosts_should_create_one_bond_per_host() {
        let score = given_score(vec![
            given_host(&HOST_ID, vec![EXISTING_INTERFACE.clone()]),
            given_host(&ANOTHER_HOST_ID, vec![ANOTHER_EXISTING_INTERFACE.clone()]),
        ]);
        let topology = TopologyWithSwitch::new();

        let _ = score.interpret(&Inventory::empty(), &topology).await;

        // One config entry per host, each with its own switch port.
        let configured_host_networks = topology.configured_host_networks.lock().unwrap();
        assert_that!(*configured_host_networks).contains_exactly(vec![
            (
                HOST_ID.clone(),
                HostNetworkConfig {
                    switch_ports: vec![SwitchPort {
                        interface: EXISTING_INTERFACE.clone(),
                        port: PORT.clone(),
                    }],
                },
            ),
            (
                ANOTHER_HOST_ID.clone(),
                HostNetworkConfig {
                    switch_ports: vec![SwitchPort {
                        interface: ANOTHER_EXISTING_INTERFACE.clone(),
                        port: ANOTHER_PORT.clone(),
                    }],
                },
            ),
        ]);
    }

    #[tokio::test]
    async fn port_not_found_for_mac_address_should_not_configure_interface() {
        // FIXME: Should it still configure an empty bond/port channel?
        let score = given_score(vec![given_host(&HOST_ID, vec![UNKNOWN_INTERFACE.clone()])]);
        let topology = TopologyWithSwitch::new_port_not_found();

        let _ = score.interpret(&Inventory::empty(), &topology).await;

        // No port could be resolved, so nothing gets configured at all.
        let configured_host_networks = topology.configured_host_networks.lock().unwrap();
        assert_that!(*configured_host_networks).is_empty();
    }

    // Builds the score under test over the given hosts.
    fn given_score(hosts: Vec<PhysicalHost>) -> HostNetworkConfigurationScore {
        HostNetworkConfigurationScore { hosts }
    }

    // Builds a minimal `PhysicalHost` with only network interfaces filled in.
    fn given_host(id: &Id, network_interfaces: Vec<NetworkInterface>) -> PhysicalHost {
        let network = network_interfaces.iter().map(given_interface).collect();

        PhysicalHost {
            id: id.clone(),
            category: HostCategory::Server,
            network,
            storage: vec![],
            labels: vec![],
            memory_modules: vec![],
            cpus: vec![],
        }
    }

    // Converts the test fixture `NetworkInterface` into the inventory-agent
    // representation stored on `PhysicalHost.network`.
    fn given_interface(
        interface: &NetworkInterface,
    ) -> harmony_inventory_agent::hwinfo::NetworkInterface {
        harmony_inventory_agent::hwinfo::NetworkInterface {
            name: interface.name.clone(),
            mac_address: interface.mac_address,
            speed_mbps: interface.speed_mbps,
            is_up: true,
            mtu: interface.mtu,
            ipv4_addresses: vec![],
            ipv6_addresses: vec![],
            driver: "driver".into(),
            firmware_version: None,
        }
    }

    // Test double: a topology whose switch hands out ports from a fixed pool
    // and records every `configure_host_network` call for later assertions.
    struct TopologyWithSwitch {
        // Ports still available for MAC lookup; consumed front-to-back.
        available_ports: Arc<Mutex<Vec<PortLocation>>>,
        // Every (host id, config) pair the score asked the switch to apply.
        configured_host_networks: Arc<Mutex<Vec<(Id, HostNetworkConfig)>>>,
    }

    impl TopologyWithSwitch {
        // Switch that knows about both fixture ports.
        fn new() -> Self {
            Self {
                available_ports: Arc::new(Mutex::new(vec![PORT.clone(), ANOTHER_PORT.clone()])),
                configured_host_networks: Arc::new(Mutex::new(vec![])),
            }
        }

        // Switch that cannot resolve any MAC address to a port.
        fn new_port_not_found() -> Self {
            Self {
                available_ports: Arc::new(Mutex::new(vec![])),
                configured_host_networks: Arc::new(Mutex::new(vec![])),
            }
        }
    }

    #[async_trait]
    impl Topology for TopologyWithSwitch {
        fn name(&self) -> &str {
            "SwitchWithPortTopology"
        }

        async fn ensure_ready(&self) -> Result<PreparationOutcome, PreparationError> {
            Ok(PreparationOutcome::Success { details: "".into() })
        }
    }

    #[async_trait]
    impl Switch for TopologyWithSwitch {
        // Pops the next available port, ignoring the actual MAC address.
        async fn get_port_for_mac_address(
            &self,
            _mac_address: &MacAddress,
        ) -> Result<Option<PortLocation>, SwitchError> {
            let mut ports = self.available_ports.lock().unwrap();
            if ports.is_empty() {
                return Ok(None);
            }
            Ok(Some(ports.remove(0)))
        }

        // Records the requested configuration instead of touching hardware.
        async fn configure_host_network(
            &self,
            host: &PhysicalHost,
            config: HostNetworkConfig,
        ) -> Result<(), SwitchError> {
            let mut configured_host_networks = self.configured_host_networks.lock().unwrap();
            configured_host_networks.push((host.id.clone(), config.clone()));

            Ok(())
        }
    }
}
|
|
||||||
@@ -19,5 +19,3 @@ pub use bootstrap_03_control_plane::*;
|
|||||||
pub use bootstrap_04_workers::*;
|
pub use bootstrap_04_workers::*;
|
||||||
pub use bootstrap_05_sanity_check::*;
|
pub use bootstrap_05_sanity_check::*;
|
||||||
pub use bootstrap_06_installation_report::*;
|
pub use bootstrap_06_installation_report::*;
|
||||||
pub mod crd;
|
|
||||||
pub mod host_network;
|
|
||||||
|
|||||||
@@ -21,8 +21,8 @@ pub fn pod_failed() -> PrometheusAlertRule {
|
|||||||
pub fn alert_container_restarting() -> PrometheusAlertRule {
|
pub fn alert_container_restarting() -> PrometheusAlertRule {
|
||||||
PrometheusAlertRule {
|
PrometheusAlertRule {
|
||||||
alert: "ContainerRestarting".into(),
|
alert: "ContainerRestarting".into(),
|
||||||
expr: "increase(kube_pod_container_status_restarts_total[30s]) > 3".into(),
|
expr: "increase(kube_pod_container_status_restarts_total[5m]) > 3".into(),
|
||||||
r#for: Some("30s".into()),
|
r#for: Some("5m".into()),
|
||||||
labels: HashMap::from([("severity".into(), "warning".into())]),
|
labels: HashMap::from([("severity".into(), "warning".into())]),
|
||||||
annotations: HashMap::from([
|
annotations: HashMap::from([
|
||||||
(
|
(
|
||||||
@@ -42,7 +42,7 @@ pub fn alert_pod_not_ready() -> PrometheusAlertRule {
|
|||||||
PrometheusAlertRule {
|
PrometheusAlertRule {
|
||||||
alert: "PodNotReady".into(),
|
alert: "PodNotReady".into(),
|
||||||
expr: "kube_pod_status_ready{condition=\"true\"} == 0".into(),
|
expr: "kube_pod_status_ready{condition=\"true\"} == 0".into(),
|
||||||
r#for: Some("30s".into()),
|
r#for: Some("2m".into()),
|
||||||
labels: HashMap::from([("severity".into(), "warning".into())]),
|
labels: HashMap::from([("severity".into(), "warning".into())]),
|
||||||
annotations: HashMap::from([
|
annotations: HashMap::from([
|
||||||
("summary".into(), "Pod is not ready".into()),
|
("summary".into(), "Pod is not ready".into()),
|
||||||
|
|||||||
@@ -1,4 +1,3 @@
|
|||||||
use fqdn::fqdn;
|
|
||||||
use std::fs;
|
use std::fs;
|
||||||
use std::{collections::BTreeMap, sync::Arc};
|
use std::{collections::BTreeMap, sync::Arc};
|
||||||
use tempfile::tempdir;
|
use tempfile::tempdir;
|
||||||
@@ -9,9 +8,11 @@ use log::{debug, info};
|
|||||||
use serde::Serialize;
|
use serde::Serialize;
|
||||||
use std::process::Command;
|
use std::process::Command;
|
||||||
|
|
||||||
use crate::modules::k8s::ingress::{K8sIngressScore, PathType};
|
|
||||||
use crate::modules::monitoring::kube_prometheus::crd::grafana_default_dashboard::build_default_dashboard;
|
use crate::modules::monitoring::kube_prometheus::crd::grafana_default_dashboard::build_default_dashboard;
|
||||||
use crate::modules::monitoring::kube_prometheus::crd::rhob_alertmanager_config::RHOBObservability;
|
use crate::modules::monitoring::kube_prometheus::crd::rhob_alertmanager_config::RHOBObservability;
|
||||||
|
use crate::modules::monitoring::kube_prometheus::crd::rhob_alertmanagers::{
|
||||||
|
Alertmanager, AlertmanagerSpec,
|
||||||
|
};
|
||||||
use crate::modules::monitoring::kube_prometheus::crd::rhob_grafana::{
|
use crate::modules::monitoring::kube_prometheus::crd::rhob_grafana::{
|
||||||
Grafana, GrafanaDashboard, GrafanaDashboardSpec, GrafanaDatasource, GrafanaDatasourceConfig,
|
Grafana, GrafanaDashboard, GrafanaDashboardSpec, GrafanaDatasource, GrafanaDatasourceConfig,
|
||||||
GrafanaDatasourceSpec, GrafanaSpec,
|
GrafanaDatasourceSpec, GrafanaSpec,
|
||||||
@@ -28,7 +29,6 @@ use crate::modules::monitoring::kube_prometheus::crd::rhob_service_monitor::{
|
|||||||
ServiceMonitor, ServiceMonitorSpec,
|
ServiceMonitor, ServiceMonitorSpec,
|
||||||
};
|
};
|
||||||
use crate::score::Score;
|
use crate::score::Score;
|
||||||
use crate::topology::ingress::Ingress;
|
|
||||||
use crate::topology::oberservability::monitoring::AlertReceiver;
|
use crate::topology::oberservability::monitoring::AlertReceiver;
|
||||||
use crate::topology::{K8sclient, Topology, k8s::K8sClient};
|
use crate::topology::{K8sclient, Topology, k8s::K8sClient};
|
||||||
use crate::{
|
use crate::{
|
||||||
@@ -48,8 +48,8 @@ pub struct RHOBAlertingScore {
|
|||||||
pub prometheus_rules: Vec<RuleGroup>,
|
pub prometheus_rules: Vec<RuleGroup>,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<T: Topology + K8sclient + Ingress + PrometheusApplicationMonitoring<RHOBObservability>>
|
impl<T: Topology + K8sclient + PrometheusApplicationMonitoring<RHOBObservability>> Score<T>
|
||||||
Score<T> for RHOBAlertingScore
|
for RHOBAlertingScore
|
||||||
{
|
{
|
||||||
fn create_interpret(&self) -> Box<dyn crate::interpret::Interpret<T>> {
|
fn create_interpret(&self) -> Box<dyn crate::interpret::Interpret<T>> {
|
||||||
Box::new(RHOBAlertingInterpret {
|
Box::new(RHOBAlertingInterpret {
|
||||||
@@ -74,20 +74,19 @@ pub struct RHOBAlertingInterpret {
|
|||||||
}
|
}
|
||||||
|
|
||||||
#[async_trait]
|
#[async_trait]
|
||||||
impl<T: Topology + K8sclient + Ingress + PrometheusApplicationMonitoring<RHOBObservability>>
|
impl<T: Topology + K8sclient + PrometheusApplicationMonitoring<RHOBObservability>> Interpret<T>
|
||||||
Interpret<T> for RHOBAlertingInterpret
|
for RHOBAlertingInterpret
|
||||||
{
|
{
|
||||||
async fn execute(
|
async fn execute(
|
||||||
&self,
|
&self,
|
||||||
inventory: &Inventory,
|
_inventory: &Inventory,
|
||||||
topology: &T,
|
topology: &T,
|
||||||
) -> Result<Outcome, InterpretError> {
|
) -> Result<Outcome, InterpretError> {
|
||||||
let client = topology.k8s_client().await.unwrap();
|
let client = topology.k8s_client().await.unwrap();
|
||||||
self.ensure_grafana_operator().await?;
|
self.ensure_grafana_operator().await?;
|
||||||
self.install_prometheus(inventory, topology, &client)
|
self.install_prometheus(&client).await?;
|
||||||
.await?;
|
|
||||||
self.install_client_kube_metrics().await?;
|
self.install_client_kube_metrics().await?;
|
||||||
self.install_grafana(inventory, topology, &client).await?;
|
self.install_grafana(&client).await?;
|
||||||
self.install_receivers(&self.sender, &self.receivers)
|
self.install_receivers(&self.sender, &self.receivers)
|
||||||
.await?;
|
.await?;
|
||||||
self.install_rules(&self.prometheus_rules, &client).await?;
|
self.install_rules(&self.prometheus_rules, &client).await?;
|
||||||
@@ -213,8 +212,7 @@ impl RHOBAlertingInterpret {
|
|||||||
|
|
||||||
let output = Command::new("helm")
|
let output = Command::new("helm")
|
||||||
.args([
|
.args([
|
||||||
"upgrade",
|
"install",
|
||||||
"--install",
|
|
||||||
"grafana-operator",
|
"grafana-operator",
|
||||||
"grafana-operator/grafana-operator",
|
"grafana-operator/grafana-operator",
|
||||||
"--namespace",
|
"--namespace",
|
||||||
@@ -228,7 +226,7 @@ impl RHOBAlertingInterpret {
|
|||||||
|
|
||||||
if !output.status.success() {
|
if !output.status.success() {
|
||||||
return Err(InterpretError::new(format!(
|
return Err(InterpretError::new(format!(
|
||||||
"helm upgrade --install failed:\nstdout: {}\nstderr: {}",
|
"helm install failed:\nstdout: {}\nstderr: {}",
|
||||||
String::from_utf8_lossy(&output.stdout),
|
String::from_utf8_lossy(&output.stdout),
|
||||||
String::from_utf8_lossy(&output.stderr)
|
String::from_utf8_lossy(&output.stderr)
|
||||||
)));
|
)));
|
||||||
@@ -240,31 +238,25 @@ impl RHOBAlertingInterpret {
|
|||||||
)))
|
)))
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn install_prometheus<T: Topology + K8sclient + Ingress>(
|
async fn install_prometheus(&self, client: &Arc<K8sClient>) -> Result<Outcome, InterpretError> {
|
||||||
&self,
|
|
||||||
inventory: &Inventory,
|
|
||||||
topology: &T,
|
|
||||||
client: &Arc<K8sClient>,
|
|
||||||
) -> Result<Outcome, InterpretError> {
|
|
||||||
debug!(
|
debug!(
|
||||||
"installing crd-prometheuses in namespace {}",
|
"installing crd-prometheuses in namespace {}",
|
||||||
self.sender.namespace.clone()
|
self.sender.namespace.clone()
|
||||||
);
|
);
|
||||||
debug!("building role/rolebinding/serviceaccount for crd-prometheus");
|
|
||||||
|
|
||||||
let stack = MonitoringStack {
|
let stack = MonitoringStack {
|
||||||
metadata: ObjectMeta {
|
metadata: ObjectMeta {
|
||||||
name: Some(format!("{}-monitoring", self.sender.namespace.clone()).into()),
|
name: Some(format!("{}-monitoring", self.sender.namespace.clone()).into()),
|
||||||
namespace: Some(self.sender.namespace.clone()),
|
namespace: Some(self.sender.namespace.clone()),
|
||||||
labels: Some([("monitoring-stack".into(), "true".into())].into()),
|
labels: Some([("coo".into(), "example".into())].into()),
|
||||||
..Default::default()
|
..Default::default()
|
||||||
},
|
},
|
||||||
spec: MonitoringStackSpec {
|
spec: MonitoringStackSpec {
|
||||||
log_level: Some("debug".into()),
|
log_level: Some("debug".into()),
|
||||||
retention: Some("1d".into()),
|
retention: Some("1d".into()),
|
||||||
resource_selector: Some(LabelSelector {
|
resource_selector: Some(LabelSelector {
|
||||||
match_labels: Default::default(),
|
match_labels: [("app".into(), "demo".into())].into(),
|
||||||
match_expressions: vec![],
|
..Default::default()
|
||||||
}),
|
}),
|
||||||
},
|
},
|
||||||
};
|
};
|
||||||
@@ -273,42 +265,6 @@ impl RHOBAlertingInterpret {
|
|||||||
.apply(&stack, Some(&self.sender.namespace.clone()))
|
.apply(&stack, Some(&self.sender.namespace.clone()))
|
||||||
.await
|
.await
|
||||||
.map_err(|e| InterpretError::new(e.to_string()))?;
|
.map_err(|e| InterpretError::new(e.to_string()))?;
|
||||||
|
|
||||||
let alert_manager_domain = topology
|
|
||||||
.get_domain(&format!("alert-manager-{}", self.sender.namespace.clone()))
|
|
||||||
.await?;
|
|
||||||
let name = format!("{}-alert-manager", self.sender.namespace.clone());
|
|
||||||
let backend_service = format!("alertmanager-operated");
|
|
||||||
let namespace = self.sender.namespace.clone();
|
|
||||||
let alert_manager_ingress = K8sIngressScore {
|
|
||||||
name: fqdn!(&name),
|
|
||||||
host: fqdn!(&alert_manager_domain),
|
|
||||||
backend_service: fqdn!(&backend_service),
|
|
||||||
port: 9093,
|
|
||||||
path: Some("/".to_string()),
|
|
||||||
path_type: Some(PathType::Prefix),
|
|
||||||
namespace: Some(fqdn!(&namespace)),
|
|
||||||
ingress_class_name: Some("openshift-default".to_string()),
|
|
||||||
};
|
|
||||||
|
|
||||||
let prometheus_domain = topology
|
|
||||||
.get_domain(&format!("prometheus-{}", self.sender.namespace.clone()))
|
|
||||||
.await?;
|
|
||||||
let name = format!("{}-prometheus", self.sender.namespace.clone());
|
|
||||||
let backend_service = format!("prometheus-operated");
|
|
||||||
let prometheus_ingress = K8sIngressScore {
|
|
||||||
name: fqdn!(&name),
|
|
||||||
host: fqdn!(&prometheus_domain),
|
|
||||||
backend_service: fqdn!(&backend_service),
|
|
||||||
port: 9090,
|
|
||||||
path: Some("/".to_string()),
|
|
||||||
path_type: Some(PathType::Prefix),
|
|
||||||
namespace: Some(fqdn!(&namespace)),
|
|
||||||
ingress_class_name: Some("openshift-default".to_string()),
|
|
||||||
};
|
|
||||||
|
|
||||||
alert_manager_ingress.interpret(inventory, topology).await?;
|
|
||||||
prometheus_ingress.interpret(inventory, topology).await?;
|
|
||||||
info!("installed rhob monitoring stack",);
|
info!("installed rhob monitoring stack",);
|
||||||
Ok(Outcome::success(format!(
|
Ok(Outcome::success(format!(
|
||||||
"successfully deployed rhob-prometheus {:#?}",
|
"successfully deployed rhob-prometheus {:#?}",
|
||||||
@@ -316,6 +272,31 @@ impl RHOBAlertingInterpret {
|
|||||||
)))
|
)))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
async fn install_alert_manager(
|
||||||
|
&self,
|
||||||
|
client: &Arc<K8sClient>,
|
||||||
|
) -> Result<Outcome, InterpretError> {
|
||||||
|
let am = Alertmanager {
|
||||||
|
metadata: ObjectMeta {
|
||||||
|
name: Some(self.sender.namespace.clone()),
|
||||||
|
labels: Some(std::collections::BTreeMap::from([(
|
||||||
|
"alertmanagerConfig".to_string(),
|
||||||
|
"enabled".to_string(),
|
||||||
|
)])),
|
||||||
|
namespace: Some(self.sender.namespace.clone()),
|
||||||
|
..Default::default()
|
||||||
|
},
|
||||||
|
spec: AlertmanagerSpec::default(),
|
||||||
|
};
|
||||||
|
client
|
||||||
|
.apply(&am, Some(&self.sender.namespace.clone()))
|
||||||
|
.await
|
||||||
|
.map_err(|e| InterpretError::new(e.to_string()))?;
|
||||||
|
Ok(Outcome::success(format!(
|
||||||
|
"successfully deployed service monitor {:#?}",
|
||||||
|
am.metadata.name
|
||||||
|
)))
|
||||||
|
}
|
||||||
async fn install_monitors(
|
async fn install_monitors(
|
||||||
&self,
|
&self,
|
||||||
mut monitors: Vec<ServiceMonitor>,
|
mut monitors: Vec<ServiceMonitor>,
|
||||||
@@ -398,12 +379,7 @@ impl RHOBAlertingInterpret {
|
|||||||
)))
|
)))
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn install_grafana<T: Topology + K8sclient + Ingress>(
|
async fn install_grafana(&self, client: &Arc<K8sClient>) -> Result<Outcome, InterpretError> {
|
||||||
&self,
|
|
||||||
inventory: &Inventory,
|
|
||||||
topology: &T,
|
|
||||||
client: &Arc<K8sClient>,
|
|
||||||
) -> Result<Outcome, InterpretError> {
|
|
||||||
let mut label = BTreeMap::new();
|
let mut label = BTreeMap::new();
|
||||||
label.insert("dashboards".to_string(), "grafana".to_string());
|
label.insert("dashboards".to_string(), "grafana".to_string());
|
||||||
let labels = LabelSelector {
|
let labels = LabelSelector {
|
||||||
@@ -489,23 +465,6 @@ impl RHOBAlertingInterpret {
|
|||||||
.apply(&grafana, Some(&self.sender.namespace.clone()))
|
.apply(&grafana, Some(&self.sender.namespace.clone()))
|
||||||
.await
|
.await
|
||||||
.map_err(|e| InterpretError::new(e.to_string()))?;
|
.map_err(|e| InterpretError::new(e.to_string()))?;
|
||||||
let domain = topology
|
|
||||||
.get_domain(&format!("grafana-{}", self.sender.namespace.clone()))
|
|
||||||
.await?;
|
|
||||||
let name = format!("{}-grafana", self.sender.namespace.clone());
|
|
||||||
let backend_service = format!("grafana-{}-service", self.sender.namespace.clone());
|
|
||||||
let grafana_ingress = K8sIngressScore {
|
|
||||||
name: fqdn!(&name),
|
|
||||||
host: fqdn!(&domain),
|
|
||||||
backend_service: fqdn!(&backend_service),
|
|
||||||
port: 3000,
|
|
||||||
path: Some("/".to_string()),
|
|
||||||
path_type: Some(PathType::Prefix),
|
|
||||||
namespace: Some(fqdn!(&namespace)),
|
|
||||||
ingress_class_name: Some("openshift-default".to_string()),
|
|
||||||
};
|
|
||||||
|
|
||||||
grafana_ingress.interpret(inventory, topology).await?;
|
|
||||||
Ok(Outcome::success(format!(
|
Ok(Outcome::success(format!(
|
||||||
"successfully deployed grafana instance {:#?}",
|
"successfully deployed grafana instance {:#?}",
|
||||||
grafana.metadata.name
|
grafana.metadata.name
|
||||||
|
|||||||