Compare commits
15 Commits
feat/cnpgO
...
feat/rebui
| Author | SHA1 | Date | |
|---|---|---|---|
| 07e610c54a | |||
| 03e98a51e3 | |||
| 22875fe8f3 | |||
| c6f859f973 | |||
| bbf28a1a28 | |||
| bfdb11b217 | |||
| d5fadf4f44 | |||
| 50bd5c5bba | |||
| 43a17811cc | |||
| 29c82db70d | |||
| 8ee3f8a4ad | |||
| d3634a6313 | |||
| a0a8d5277c | |||
| 43b04edbae | |||
| 755a4b7749 |
46
Cargo.lock
generated
46
Cargo.lock
generated
@@ -690,6 +690,23 @@ dependencies = [
|
||||
"tokio",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "brocade-switch"
|
||||
version = "0.1.0"
|
||||
dependencies = [
|
||||
"async-trait",
|
||||
"brocade",
|
||||
"env_logger",
|
||||
"harmony",
|
||||
"harmony_cli",
|
||||
"harmony_macros",
|
||||
"harmony_types",
|
||||
"log",
|
||||
"serde",
|
||||
"tokio",
|
||||
"url",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "brotli"
|
||||
version = "8.0.2"
|
||||
@@ -2479,6 +2496,19 @@ dependencies = [
|
||||
"tokio",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "harmony_inventory_builder"
|
||||
version = "0.1.0"
|
||||
dependencies = [
|
||||
"cidr",
|
||||
"harmony",
|
||||
"harmony_cli",
|
||||
"harmony_macros",
|
||||
"harmony_types",
|
||||
"tokio",
|
||||
"url",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "harmony_macros"
|
||||
version = "0.1.0"
|
||||
@@ -2544,6 +2574,7 @@ dependencies = [
|
||||
name = "harmony_types"
|
||||
version = "0.1.0"
|
||||
dependencies = [
|
||||
"log",
|
||||
"rand 0.9.2",
|
||||
"serde",
|
||||
"url",
|
||||
@@ -6049,6 +6080,21 @@ version = "0.5.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "8f50febec83f5ee1df3015341d8bd429f2d1cc62bcba7ea2076759d315084683"
|
||||
|
||||
[[package]]
|
||||
name = "test-score"
|
||||
version = "0.1.0"
|
||||
dependencies = [
|
||||
"base64 0.22.1",
|
||||
"env_logger",
|
||||
"harmony",
|
||||
"harmony_cli",
|
||||
"harmony_macros",
|
||||
"harmony_types",
|
||||
"log",
|
||||
"tokio",
|
||||
"url",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "thiserror"
|
||||
version = "1.0.69"
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
use std::net::{IpAddr, Ipv4Addr};
|
||||
|
||||
use brocade::BrocadeOptions;
|
||||
use brocade::{BrocadeOptions, ssh};
|
||||
use harmony_secret::{Secret, SecretManager};
|
||||
use harmony_types::switch::PortLocation;
|
||||
use serde::{Deserialize, Serialize};
|
||||
@@ -16,23 +16,28 @@ async fn main() {
|
||||
env_logger::Builder::from_env(env_logger::Env::default().default_filter_or("info")).init();
|
||||
|
||||
// let ip = IpAddr::V4(Ipv4Addr::new(10, 0, 0, 250)); // old brocade @ ianlet
|
||||
let ip = IpAddr::V4(Ipv4Addr::new(192, 168, 55, 101)); // brocade @ sto1
|
||||
let ip = IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)); // brocade @ sto1
|
||||
// let ip = IpAddr::V4(Ipv4Addr::new(192, 168, 4, 11)); // brocade @ st
|
||||
let switch_addresses = vec![ip];
|
||||
|
||||
let config = SecretManager::get_or_prompt::<BrocadeSwitchAuth>()
|
||||
.await
|
||||
.unwrap();
|
||||
// let config = SecretManager::get_or_prompt::<BrocadeSwitchAuth>()
|
||||
// .await
|
||||
// .unwrap();
|
||||
|
||||
let brocade = brocade::init(
|
||||
&switch_addresses,
|
||||
22,
|
||||
&config.username,
|
||||
&config.password,
|
||||
Some(BrocadeOptions {
|
||||
// &config.username,
|
||||
// &config.password,
|
||||
"admin",
|
||||
"password",
|
||||
BrocadeOptions {
|
||||
dry_run: true,
|
||||
ssh: ssh::SshOptions {
|
||||
port: 2222,
|
||||
..Default::default()
|
||||
}),
|
||||
},
|
||||
..Default::default()
|
||||
},
|
||||
)
|
||||
.await
|
||||
.expect("Brocade client failed to connect");
|
||||
@@ -54,6 +59,7 @@ async fn main() {
|
||||
}
|
||||
|
||||
println!("--------------");
|
||||
todo!();
|
||||
let channel_name = "1";
|
||||
brocade.clear_port_channel(channel_name).await.unwrap();
|
||||
|
||||
|
||||
@@ -140,7 +140,7 @@ impl BrocadeClient for FastIronClient {
|
||||
|
||||
async fn configure_interfaces(
|
||||
&self,
|
||||
_interfaces: Vec<(String, PortOperatingMode)>,
|
||||
_interfaces: &Vec<(String, PortOperatingMode)>,
|
||||
) -> Result<(), Error> {
|
||||
todo!()
|
||||
}
|
||||
|
||||
@@ -14,11 +14,12 @@ use async_trait::async_trait;
|
||||
use harmony_types::net::MacAddress;
|
||||
use harmony_types::switch::{PortDeclaration, PortLocation};
|
||||
use regex::Regex;
|
||||
use serde::Serialize;
|
||||
|
||||
mod fast_iron;
|
||||
mod network_operating_system;
|
||||
mod shell;
|
||||
mod ssh;
|
||||
pub mod ssh;
|
||||
|
||||
#[derive(Default, Clone, Debug)]
|
||||
pub struct BrocadeOptions {
|
||||
@@ -118,7 +119,7 @@ impl fmt::Display for InterfaceType {
|
||||
}
|
||||
|
||||
/// Defines the primary configuration mode of a switch interface, representing mutually exclusive roles.
|
||||
#[derive(Debug, PartialEq, Eq, Clone)]
|
||||
#[derive(Debug, PartialEq, Eq, Clone, Serialize)]
|
||||
pub enum PortOperatingMode {
|
||||
/// The interface is explicitly configured for Brocade fabric roles (ISL or Trunk enabled).
|
||||
Fabric,
|
||||
@@ -141,12 +142,11 @@ pub enum InterfaceStatus {
|
||||
|
||||
pub async fn init(
|
||||
ip_addresses: &[IpAddr],
|
||||
port: u16,
|
||||
username: &str,
|
||||
password: &str,
|
||||
options: Option<BrocadeOptions>,
|
||||
options: BrocadeOptions,
|
||||
) -> Result<Box<dyn BrocadeClient + Send + Sync>, Error> {
|
||||
let shell = BrocadeShell::init(ip_addresses, port, username, password, options).await?;
|
||||
let shell = BrocadeShell::init(ip_addresses, username, password, options).await?;
|
||||
|
||||
let version_info = shell
|
||||
.with_session(ExecutionMode::Regular, |session| {
|
||||
@@ -208,7 +208,7 @@ pub trait BrocadeClient: std::fmt::Debug {
|
||||
/// Configures a set of interfaces to be operated with a specified mode (access ports, ISL, etc.).
|
||||
async fn configure_interfaces(
|
||||
&self,
|
||||
interfaces: Vec<(String, PortOperatingMode)>,
|
||||
interfaces: &Vec<(String, PortOperatingMode)>,
|
||||
) -> Result<(), Error>;
|
||||
|
||||
/// Scans the existing configuration to find the next available (unused)
|
||||
|
||||
@@ -187,7 +187,7 @@ impl BrocadeClient for NetworkOperatingSystemClient {
|
||||
|
||||
async fn configure_interfaces(
|
||||
&self,
|
||||
interfaces: Vec<(String, PortOperatingMode)>,
|
||||
interfaces: &Vec<(String, PortOperatingMode)>,
|
||||
) -> Result<(), Error> {
|
||||
info!("[Brocade] Configuring {} interface(s)...", interfaces.len());
|
||||
|
||||
@@ -204,9 +204,12 @@ impl BrocadeClient for NetworkOperatingSystemClient {
|
||||
PortOperatingMode::Trunk => {
|
||||
commands.push("switchport".into());
|
||||
commands.push("switchport mode trunk".into());
|
||||
commands.push("no spanning-tree shutdown".into());
|
||||
commands.push("switchport trunk allowed vlan all".into());
|
||||
commands.push("no switchport trunk tag native-vlan".into());
|
||||
commands.push("spanning-tree shutdown".into());
|
||||
commands.push("no fabric isl enable".into());
|
||||
commands.push("no fabric trunk enable".into());
|
||||
commands.push("no shutdown".into());
|
||||
}
|
||||
PortOperatingMode::Access => {
|
||||
commands.push("switchport".into());
|
||||
|
||||
@@ -16,7 +16,6 @@ use tokio::time::timeout;
|
||||
#[derive(Debug)]
|
||||
pub struct BrocadeShell {
|
||||
ip: IpAddr,
|
||||
port: u16,
|
||||
username: String,
|
||||
password: String,
|
||||
options: BrocadeOptions,
|
||||
@@ -27,33 +26,31 @@ pub struct BrocadeShell {
|
||||
impl BrocadeShell {
|
||||
pub async fn init(
|
||||
ip_addresses: &[IpAddr],
|
||||
port: u16,
|
||||
username: &str,
|
||||
password: &str,
|
||||
options: Option<BrocadeOptions>,
|
||||
options: BrocadeOptions,
|
||||
) -> Result<Self, Error> {
|
||||
let ip = ip_addresses
|
||||
.first()
|
||||
.ok_or_else(|| Error::ConfigurationError("No IP addresses provided".to_string()))?;
|
||||
|
||||
let base_options = options.unwrap_or_default();
|
||||
let options = ssh::try_init_client(username, password, ip, base_options).await?;
|
||||
let brocade_ssh_client_options =
|
||||
ssh::try_init_client(username, password, ip, options).await?;
|
||||
|
||||
Ok(Self {
|
||||
ip: *ip,
|
||||
port,
|
||||
username: username.to_string(),
|
||||
password: password.to_string(),
|
||||
before_all_commands: vec![],
|
||||
after_all_commands: vec![],
|
||||
options,
|
||||
options: brocade_ssh_client_options,
|
||||
})
|
||||
}
|
||||
|
||||
pub async fn open_session(&self, mode: ExecutionMode) -> Result<BrocadeSession, Error> {
|
||||
BrocadeSession::open(
|
||||
self.ip,
|
||||
self.port,
|
||||
self.options.ssh.port,
|
||||
&self.username,
|
||||
&self.password,
|
||||
self.options.clone(),
|
||||
|
||||
@@ -2,6 +2,7 @@ use std::borrow::Cow;
|
||||
use std::sync::Arc;
|
||||
|
||||
use async_trait::async_trait;
|
||||
use log::debug;
|
||||
use russh::client::Handler;
|
||||
use russh::kex::DH_G1_SHA1;
|
||||
use russh::kex::ECDH_SHA2_NISTP256;
|
||||
@@ -10,29 +11,43 @@ use russh_keys::key::SSH_RSA;
|
||||
use super::BrocadeOptions;
|
||||
use super::Error;
|
||||
|
||||
#[derive(Default, Clone, Debug)]
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct SshOptions {
|
||||
pub preferred_algorithms: russh::Preferred,
|
||||
pub port: u16,
|
||||
}
|
||||
|
||||
impl Default for SshOptions {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
preferred_algorithms: Default::default(),
|
||||
port: 22,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl SshOptions {
|
||||
fn ecdhsa_sha2_nistp256() -> Self {
|
||||
fn ecdhsa_sha2_nistp256(port: u16) -> Self {
|
||||
Self {
|
||||
preferred_algorithms: russh::Preferred {
|
||||
kex: Cow::Borrowed(&[ECDH_SHA2_NISTP256]),
|
||||
key: Cow::Borrowed(&[SSH_RSA]),
|
||||
..Default::default()
|
||||
},
|
||||
port,
|
||||
..Default::default()
|
||||
}
|
||||
}
|
||||
|
||||
fn legacy() -> Self {
|
||||
fn legacy(port: u16) -> Self {
|
||||
Self {
|
||||
preferred_algorithms: russh::Preferred {
|
||||
kex: Cow::Borrowed(&[DH_G1_SHA1]),
|
||||
key: Cow::Borrowed(&[SSH_RSA]),
|
||||
..Default::default()
|
||||
},
|
||||
port,
|
||||
..Default::default()
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -57,18 +72,21 @@ pub async fn try_init_client(
|
||||
ip: &std::net::IpAddr,
|
||||
base_options: BrocadeOptions,
|
||||
) -> Result<BrocadeOptions, Error> {
|
||||
let mut default = SshOptions::default();
|
||||
default.port = base_options.ssh.port;
|
||||
let ssh_options = vec![
|
||||
SshOptions::default(),
|
||||
SshOptions::ecdhsa_sha2_nistp256(),
|
||||
SshOptions::legacy(),
|
||||
default,
|
||||
SshOptions::ecdhsa_sha2_nistp256(base_options.ssh.port),
|
||||
SshOptions::legacy(base_options.ssh.port),
|
||||
];
|
||||
|
||||
for ssh in ssh_options {
|
||||
let opts = BrocadeOptions {
|
||||
ssh,
|
||||
ssh: ssh.clone(),
|
||||
..base_options.clone()
|
||||
};
|
||||
let client = create_client(*ip, 22, username, password, &opts).await;
|
||||
debug!("Creating client {ip}:{} {username}", ssh.port);
|
||||
let client = create_client(*ip, ssh.port, username, password, &opts).await;
|
||||
|
||||
match client {
|
||||
Ok(_) => {
|
||||
|
||||
BIN
data/pxe/okd/http_files/harmony_inventory_agent
(Stored with Git LFS)
BIN
data/pxe/okd/http_files/harmony_inventory_agent
(Stored with Git LFS)
Binary file not shown.
BIN
empty_database.sqlite
Normal file
BIN
empty_database.sqlite
Normal file
Binary file not shown.
19
examples/brocade_switch/Cargo.toml
Normal file
19
examples/brocade_switch/Cargo.toml
Normal file
@@ -0,0 +1,19 @@
|
||||
[package]
|
||||
name = "brocade-switch"
|
||||
edition = "2024"
|
||||
version.workspace = true
|
||||
readme.workspace = true
|
||||
license.workspace = true
|
||||
|
||||
[dependencies]
|
||||
harmony = { path = "../../harmony" }
|
||||
harmony_cli = { path = "../../harmony_cli" }
|
||||
harmony_macros = { path = "../../harmony_macros" }
|
||||
harmony_types = { path = "../../harmony_types" }
|
||||
tokio.workspace = true
|
||||
url.workspace = true
|
||||
async-trait.workspace = true
|
||||
serde.workspace = true
|
||||
log.workspace = true
|
||||
env_logger.workspace = true
|
||||
brocade = { path = "../../brocade" }
|
||||
157
examples/brocade_switch/src/main.rs
Normal file
157
examples/brocade_switch/src/main.rs
Normal file
@@ -0,0 +1,157 @@
|
||||
use std::str::FromStr;
|
||||
|
||||
use async_trait::async_trait;
|
||||
use brocade::{BrocadeOptions, PortOperatingMode};
|
||||
use harmony::{
|
||||
data::Version,
|
||||
infra::brocade::BrocadeSwitchClient,
|
||||
interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
|
||||
inventory::Inventory,
|
||||
score::Score,
|
||||
topology::{
|
||||
HostNetworkConfig, PortConfig, PreparationError, PreparationOutcome, Switch, SwitchClient,
|
||||
SwitchError, Topology,
|
||||
},
|
||||
};
|
||||
use harmony_macros::ip;
|
||||
use harmony_types::{id::Id, net::MacAddress, switch::PortLocation};
|
||||
use log::{debug, info};
|
||||
use serde::Serialize;
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() {
|
||||
let switch_score = BrocadeSwitchScore {
|
||||
port_channels_to_clear: vec![
|
||||
Id::from_str("17").unwrap(),
|
||||
Id::from_str("19").unwrap(),
|
||||
Id::from_str("18").unwrap(),
|
||||
],
|
||||
ports_to_configure: vec![
|
||||
(PortLocation(2, 0, 17), PortOperatingMode::Trunk),
|
||||
(PortLocation(2, 0, 19), PortOperatingMode::Trunk),
|
||||
(PortLocation(1, 0, 18), PortOperatingMode::Trunk),
|
||||
],
|
||||
};
|
||||
harmony_cli::run(
|
||||
Inventory::autoload(),
|
||||
SwitchTopology::new().await,
|
||||
vec![Box::new(switch_score)],
|
||||
None,
|
||||
)
|
||||
.await
|
||||
.unwrap();
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, Serialize)]
|
||||
struct BrocadeSwitchScore {
|
||||
port_channels_to_clear: Vec<Id>,
|
||||
ports_to_configure: Vec<PortConfig>,
|
||||
}
|
||||
|
||||
impl<T: Topology + Switch> Score<T> for BrocadeSwitchScore {
|
||||
fn name(&self) -> String {
|
||||
"BrocadeSwitchScore".to_string()
|
||||
}
|
||||
|
||||
#[doc(hidden)]
|
||||
fn create_interpret(&self) -> Box<dyn Interpret<T>> {
|
||||
Box::new(BrocadeSwitchInterpret {
|
||||
score: self.clone(),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
struct BrocadeSwitchInterpret {
|
||||
score: BrocadeSwitchScore,
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl<T: Topology + Switch> Interpret<T> for BrocadeSwitchInterpret {
|
||||
async fn execute(
|
||||
&self,
|
||||
_inventory: &Inventory,
|
||||
topology: &T,
|
||||
) -> Result<Outcome, InterpretError> {
|
||||
info!("Applying switch configuration {:?}", self.score);
|
||||
debug!(
|
||||
"Clearing port channel {:?}",
|
||||
self.score.port_channels_to_clear
|
||||
);
|
||||
topology
|
||||
.clear_port_channel(&self.score.port_channels_to_clear)
|
||||
.await
|
||||
.map_err(|e| InterpretError::new(e.to_string()))?;
|
||||
debug!("Configuring interfaces {:?}", self.score.ports_to_configure);
|
||||
topology
|
||||
.configure_interface(&self.score.ports_to_configure)
|
||||
.await
|
||||
.map_err(|e| InterpretError::new(e.to_string()))?;
|
||||
Ok(Outcome::success("switch configured".to_string()))
|
||||
}
|
||||
fn get_name(&self) -> InterpretName {
|
||||
InterpretName::Custom("BrocadeSwitchInterpret")
|
||||
}
|
||||
fn get_version(&self) -> Version {
|
||||
todo!()
|
||||
}
|
||||
fn get_status(&self) -> InterpretStatus {
|
||||
todo!()
|
||||
}
|
||||
fn get_children(&self) -> Vec<Id> {
|
||||
todo!()
|
||||
}
|
||||
}
|
||||
|
||||
struct SwitchTopology {
|
||||
client: Box<dyn SwitchClient>,
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl Topology for SwitchTopology {
|
||||
fn name(&self) -> &str {
|
||||
"SwitchTopology"
|
||||
}
|
||||
|
||||
async fn ensure_ready(&self) -> Result<PreparationOutcome, PreparationError> {
|
||||
Ok(PreparationOutcome::Noop)
|
||||
}
|
||||
}
|
||||
|
||||
impl SwitchTopology {
|
||||
async fn new() -> Self {
|
||||
let mut options = BrocadeOptions::default();
|
||||
options.ssh.port = 2222;
|
||||
let client =
|
||||
BrocadeSwitchClient::init(&vec![ip!("127.0.0.1")], &"admin", &"password", options)
|
||||
.await
|
||||
.expect("Failed to connect to switch");
|
||||
|
||||
let client = Box::new(client);
|
||||
Self { client }
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl Switch for SwitchTopology {
|
||||
async fn setup_switch(&self) -> Result<(), SwitchError> {
|
||||
todo!()
|
||||
}
|
||||
|
||||
async fn get_port_for_mac_address(
|
||||
&self,
|
||||
_mac_address: &MacAddress,
|
||||
) -> Result<Option<PortLocation>, SwitchError> {
|
||||
todo!()
|
||||
}
|
||||
|
||||
async fn configure_port_channel(&self, _config: &HostNetworkConfig) -> Result<(), SwitchError> {
|
||||
todo!()
|
||||
}
|
||||
async fn clear_port_channel(&self, ids: &Vec<Id>) -> Result<(), SwitchError> {
|
||||
self.client.clear_port_channel(ids).await
|
||||
}
|
||||
async fn configure_interface(&self, ports: &Vec<PortConfig>) -> Result<(), SwitchError> {
|
||||
self.client.configure_interface(ports).await
|
||||
}
|
||||
}
|
||||
@@ -2,7 +2,7 @@ use harmony::{
|
||||
inventory::Inventory,
|
||||
modules::{
|
||||
dummy::{ErrorScore, PanicScore, SuccessScore},
|
||||
inventory::LaunchDiscoverInventoryAgentScore,
|
||||
inventory::{HarmonyDiscoveryStrategy, LaunchDiscoverInventoryAgentScore},
|
||||
},
|
||||
topology::LocalhostTopology,
|
||||
};
|
||||
@@ -18,6 +18,7 @@ async fn main() {
|
||||
Box::new(PanicScore {}),
|
||||
Box::new(LaunchDiscoverInventoryAgentScore {
|
||||
discovery_timeout: Some(10),
|
||||
discovery_strategy: HarmonyDiscoveryStrategy::MDNS,
|
||||
}),
|
||||
],
|
||||
None,
|
||||
|
||||
15
examples/harmony_inventory_builder/Cargo.toml
Normal file
15
examples/harmony_inventory_builder/Cargo.toml
Normal file
@@ -0,0 +1,15 @@
|
||||
[package]
|
||||
name = "harmony_inventory_builder"
|
||||
edition = "2024"
|
||||
version.workspace = true
|
||||
readme.workspace = true
|
||||
license.workspace = true
|
||||
|
||||
[dependencies]
|
||||
harmony = { path = "../../harmony" }
|
||||
harmony_cli = { path = "../../harmony_cli" }
|
||||
harmony_macros = { path = "../../harmony_macros" }
|
||||
harmony_types = { path = "../../harmony_types" }
|
||||
tokio.workspace = true
|
||||
url.workspace = true
|
||||
cidr.workspace = true
|
||||
11
examples/harmony_inventory_builder/build_docker.sh
Executable file
11
examples/harmony_inventory_builder/build_docker.sh
Executable file
@@ -0,0 +1,11 @@
|
||||
cargo build -p harmony_inventory_builder --release --target x86_64-unknown-linux-musl
|
||||
|
||||
SCRIPT_DIR="$(dirname ${0})"
|
||||
|
||||
cd "${SCRIPT_DIR}/docker/"
|
||||
|
||||
cp ../../../target/x86_64-unknown-linux-musl/release/harmony_inventory_builder .
|
||||
|
||||
docker build . -t hub.nationtech.io/harmony/harmony_inventory_builder
|
||||
|
||||
docker push hub.nationtech.io/harmony/harmony_inventory_builder
|
||||
10
examples/harmony_inventory_builder/docker/Dockerfile
Normal file
10
examples/harmony_inventory_builder/docker/Dockerfile
Normal file
@@ -0,0 +1,10 @@
|
||||
FROM debian:12-slim
|
||||
|
||||
RUN mkdir /app
|
||||
WORKDIR /app/
|
||||
|
||||
COPY harmony_inventory_builder /app/
|
||||
|
||||
ENV RUST_LOG=info
|
||||
|
||||
CMD ["sleep", "infinity"]
|
||||
36
examples/harmony_inventory_builder/src/main.rs
Normal file
36
examples/harmony_inventory_builder/src/main.rs
Normal file
@@ -0,0 +1,36 @@
|
||||
use harmony::{
|
||||
inventory::{HostRole, Inventory},
|
||||
modules::inventory::{DiscoverHostForRoleScore, HarmonyDiscoveryStrategy},
|
||||
topology::LocalhostTopology,
|
||||
};
|
||||
use harmony_macros::cidrv4;
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() {
|
||||
let discover_worker = DiscoverHostForRoleScore {
|
||||
role: HostRole::Worker,
|
||||
number_desired_hosts: 3,
|
||||
discovery_strategy: HarmonyDiscoveryStrategy::SUBNET {
|
||||
cidr: cidrv4!("192.168.0.1/25"),
|
||||
port: 25000,
|
||||
},
|
||||
};
|
||||
|
||||
let discover_control_plane = DiscoverHostForRoleScore {
|
||||
role: HostRole::ControlPlane,
|
||||
number_desired_hosts: 3,
|
||||
discovery_strategy: HarmonyDiscoveryStrategy::SUBNET {
|
||||
cidr: cidrv4!("192.168.0.1/25"),
|
||||
port: 25000,
|
||||
},
|
||||
};
|
||||
|
||||
harmony_cli::run(
|
||||
Inventory::autoload(),
|
||||
LocalhostTopology::new(),
|
||||
vec![Box::new(discover_worker), Box::new(discover_control_plane)],
|
||||
None,
|
||||
)
|
||||
.await
|
||||
.unwrap();
|
||||
}
|
||||
@@ -39,10 +39,10 @@ async fn main() {
|
||||
.expect("Failed to get credentials");
|
||||
|
||||
let switches: Vec<IpAddr> = vec![ip!("192.168.33.101")];
|
||||
let brocade_options = Some(BrocadeOptions {
|
||||
let brocade_options = BrocadeOptions {
|
||||
dry_run: *harmony::config::DRY_RUN,
|
||||
..Default::default()
|
||||
});
|
||||
};
|
||||
let switch_client = BrocadeSwitchClient::init(
|
||||
&switches,
|
||||
&switch_auth.username,
|
||||
|
||||
@@ -4,7 +4,10 @@ use crate::topology::{get_inventory, get_topology};
|
||||
use harmony::{
|
||||
config::secret::SshKeyPair,
|
||||
data::{FileContent, FilePath},
|
||||
modules::okd::{installation::OKDInstallationPipeline, ipxe::OKDIpxeScore},
|
||||
modules::{
|
||||
inventory::HarmonyDiscoveryStrategy,
|
||||
okd::{installation::OKDInstallationPipeline, ipxe::OKDIpxeScore},
|
||||
},
|
||||
score::Score,
|
||||
topology::HAClusterTopology,
|
||||
};
|
||||
@@ -26,7 +29,8 @@ async fn main() {
|
||||
},
|
||||
})];
|
||||
|
||||
scores.append(&mut OKDInstallationPipeline::get_all_scores().await);
|
||||
scores
|
||||
.append(&mut OKDInstallationPipeline::get_all_scores(HarmonyDiscoveryStrategy::MDNS).await);
|
||||
|
||||
harmony_cli::run(inventory, topology, scores, None)
|
||||
.await
|
||||
|
||||
@@ -31,10 +31,10 @@ pub async fn get_topology() -> HAClusterTopology {
|
||||
.expect("Failed to get credentials");
|
||||
|
||||
let switches: Vec<IpAddr> = vec![ip!("192.168.1.101")]; // TODO: Adjust me
|
||||
let brocade_options = Some(BrocadeOptions {
|
||||
let brocade_options = BrocadeOptions {
|
||||
dry_run: *harmony::config::DRY_RUN,
|
||||
..Default::default()
|
||||
});
|
||||
};
|
||||
let switch_client = BrocadeSwitchClient::init(
|
||||
&switches,
|
||||
&switch_auth.username,
|
||||
|
||||
@@ -26,10 +26,10 @@ pub async fn get_topology() -> HAClusterTopology {
|
||||
.expect("Failed to get credentials");
|
||||
|
||||
let switches: Vec<IpAddr> = vec![ip!("192.168.1.101")]; // TODO: Adjust me
|
||||
let brocade_options = Some(BrocadeOptions {
|
||||
let brocade_options = BrocadeOptions {
|
||||
dry_run: *harmony::config::DRY_RUN,
|
||||
..Default::default()
|
||||
});
|
||||
};
|
||||
let switch_client = BrocadeSwitchClient::init(
|
||||
&switches,
|
||||
&switch_auth.username,
|
||||
|
||||
@@ -35,10 +35,10 @@ async fn main() {
|
||||
.expect("Failed to get credentials");
|
||||
|
||||
let switches: Vec<IpAddr> = vec![ip!("192.168.5.101")]; // TODO: Adjust me
|
||||
let brocade_options = Some(BrocadeOptions {
|
||||
let brocade_options = BrocadeOptions {
|
||||
dry_run: *harmony::config::DRY_RUN,
|
||||
..Default::default()
|
||||
});
|
||||
};
|
||||
let switch_client = BrocadeSwitchClient::init(
|
||||
&switches,
|
||||
&switch_auth.username,
|
||||
|
||||
@@ -152,10 +152,10 @@ impl PhysicalHost {
|
||||
pub fn parts_list(&self) -> String {
|
||||
let PhysicalHost {
|
||||
id,
|
||||
category,
|
||||
category: _,
|
||||
network,
|
||||
storage,
|
||||
labels,
|
||||
labels: _,
|
||||
memory_modules,
|
||||
cpus,
|
||||
} = self;
|
||||
@@ -226,8 +226,8 @@ impl PhysicalHost {
|
||||
speed_mhz,
|
||||
manufacturer,
|
||||
part_number,
|
||||
serial_number,
|
||||
rank,
|
||||
serial_number: _,
|
||||
rank: _,
|
||||
} = mem;
|
||||
parts_list.push_str(&format!(
|
||||
"\n{}Gb, {}Mhz, Manufacturer ({}), Part Number ({})",
|
||||
|
||||
@@ -4,6 +4,8 @@ use std::error::Error;
|
||||
use async_trait::async_trait;
|
||||
use derive_new::new;
|
||||
|
||||
use crate::inventory::HostRole;
|
||||
|
||||
use super::{
|
||||
data::Version, executors::ExecutorError, inventory::Inventory, topology::PreparationError,
|
||||
};
|
||||
|
||||
@@ -1,4 +1,6 @@
|
||||
mod repository;
|
||||
use std::fmt;
|
||||
|
||||
pub use repository::*;
|
||||
|
||||
#[derive(Debug, new, Clone)]
|
||||
@@ -69,5 +71,14 @@ pub enum HostRole {
|
||||
Bootstrap,
|
||||
ControlPlane,
|
||||
Worker,
|
||||
Storage,
|
||||
}
|
||||
|
||||
impl fmt::Display for HostRole {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
match self {
|
||||
HostRole::Bootstrap => write!(f, "Bootstrap"),
|
||||
HostRole::ControlPlane => write!(f, "ControlPlane"),
|
||||
HostRole::Worker => write!(f, "Worker"),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
use async_trait::async_trait;
|
||||
use brocade::PortOperatingMode;
|
||||
use harmony_macros::ip;
|
||||
use harmony_types::{
|
||||
id::Id,
|
||||
@@ -8,9 +9,9 @@ use harmony_types::{
|
||||
use log::debug;
|
||||
use log::info;
|
||||
|
||||
use crate::infra::network_manager::OpenShiftNmStateNetworkManager;
|
||||
use crate::topology::PxeOptions;
|
||||
use crate::{data::FileContent, executors::ExecutorError};
|
||||
use crate::{infra::network_manager::OpenShiftNmStateNetworkManager, topology::PortConfig};
|
||||
use crate::{modules::inventory::HarmonyDiscoveryStrategy, topology::PxeOptions};
|
||||
|
||||
use super::{
|
||||
DHCPStaticEntry, DhcpServer, DnsRecord, DnsRecordType, DnsServer, Firewall, HostNetworkConfig,
|
||||
@@ -298,6 +299,13 @@ impl Switch for HAClusterTopology {
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn clear_port_channel(&self, ids: &Vec<Id>) -> Result<(), SwitchError> {
|
||||
todo!()
|
||||
}
|
||||
async fn configure_interface(&self, ports: &Vec<PortConfig>) -> Result<(), SwitchError> {
|
||||
todo!()
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
@@ -521,4 +529,10 @@ impl SwitchClient for DummyInfra {
|
||||
) -> Result<u8, SwitchError> {
|
||||
unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA)
|
||||
}
|
||||
async fn clear_port_channel(&self, ids: &Vec<Id>) -> Result<(), SwitchError> {
|
||||
todo!()
|
||||
}
|
||||
async fn configure_interface(&self, ports: &Vec<PortConfig>) -> Result<(), SwitchError> {
|
||||
todo!()
|
||||
}
|
||||
}
|
||||
|
||||
@@ -7,6 +7,7 @@ use std::{
|
||||
};
|
||||
|
||||
use async_trait::async_trait;
|
||||
use brocade::PortOperatingMode;
|
||||
use derive_new::new;
|
||||
use harmony_types::{
|
||||
id::Id,
|
||||
@@ -214,6 +215,8 @@ impl From<String> for NetworkError {
|
||||
}
|
||||
}
|
||||
|
||||
pub type PortConfig = (PortLocation, PortOperatingMode);
|
||||
|
||||
#[async_trait]
|
||||
pub trait Switch: Send + Sync {
|
||||
async fn setup_switch(&self) -> Result<(), SwitchError>;
|
||||
@@ -224,6 +227,8 @@ pub trait Switch: Send + Sync {
|
||||
) -> Result<Option<PortLocation>, SwitchError>;
|
||||
|
||||
async fn configure_port_channel(&self, config: &HostNetworkConfig) -> Result<(), SwitchError>;
|
||||
async fn clear_port_channel(&self, ids: &Vec<Id>) -> Result<(), SwitchError>;
|
||||
async fn configure_interface(&self, ports: &Vec<PortConfig>) -> Result<(), SwitchError>;
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, PartialEq)]
|
||||
@@ -283,6 +288,9 @@ pub trait SwitchClient: Debug + Send + Sync {
|
||||
channel_name: &str,
|
||||
switch_ports: Vec<PortLocation>,
|
||||
) -> Result<u8, SwitchError>;
|
||||
|
||||
async fn clear_port_channel(&self, ids: &Vec<Id>) -> Result<(), SwitchError>;
|
||||
async fn configure_interface(&self, ports: &Vec<PortConfig>) -> Result<(), SwitchError>;
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
|
||||
@@ -14,7 +14,7 @@ use k8s_openapi::{
|
||||
},
|
||||
apimachinery::pkg::util::intstr::IntOrString,
|
||||
};
|
||||
use kube::Resource;
|
||||
use kube::{Resource, api::DynamicObject};
|
||||
use log::debug;
|
||||
use serde::de::DeserializeOwned;
|
||||
use serde_json::json;
|
||||
|
||||
@@ -1,12 +1,13 @@
|
||||
use async_trait::async_trait;
|
||||
use brocade::{BrocadeClient, BrocadeOptions, InterSwitchLink, InterfaceStatus, PortOperatingMode};
|
||||
use harmony_types::{
|
||||
id::Id,
|
||||
net::{IpAddress, MacAddress},
|
||||
switch::{PortDeclaration, PortLocation},
|
||||
};
|
||||
use option_ext::OptionExt;
|
||||
|
||||
use crate::topology::{SwitchClient, SwitchError};
|
||||
use crate::topology::{PortConfig, SwitchClient, SwitchError};
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct BrocadeSwitchClient {
|
||||
@@ -18,9 +19,9 @@ impl BrocadeSwitchClient {
|
||||
ip_addresses: &[IpAddress],
|
||||
username: &str,
|
||||
password: &str,
|
||||
options: Option<BrocadeOptions>,
|
||||
options: BrocadeOptions,
|
||||
) -> Result<Self, brocade::Error> {
|
||||
let brocade = brocade::init(ip_addresses, 22, username, password, options).await?;
|
||||
let brocade = brocade::init(ip_addresses, username, password, options).await?;
|
||||
Ok(Self { brocade })
|
||||
}
|
||||
}
|
||||
@@ -59,7 +60,7 @@ impl SwitchClient for BrocadeSwitchClient {
|
||||
}
|
||||
|
||||
self.brocade
|
||||
.configure_interfaces(interfaces)
|
||||
.configure_interfaces(&interfaces)
|
||||
.await
|
||||
.map_err(|e| SwitchError::new(e.to_string()))?;
|
||||
|
||||
@@ -111,6 +112,27 @@ impl SwitchClient for BrocadeSwitchClient {
|
||||
|
||||
Ok(channel_id)
|
||||
}
|
||||
async fn clear_port_channel(&self, ids: &Vec<Id>) -> Result<(), SwitchError> {
|
||||
for i in ids {
|
||||
self.brocade
|
||||
.clear_port_channel(&i.to_string())
|
||||
.await
|
||||
.map_err(|e| SwitchError::new(e.to_string()))?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
async fn configure_interface(&self, ports: &Vec<PortConfig>) -> Result<(), SwitchError> {
|
||||
// FIXME hardcoded TenGigabitEthernet = bad
|
||||
let ports = ports
|
||||
.iter()
|
||||
.map(|p| (format!("TenGigabitEthernet {}", p.0), p.1.clone()))
|
||||
.collect();
|
||||
self.brocade
|
||||
.configure_interfaces(&ports)
|
||||
.await
|
||||
.map_err(|e| SwitchError::new(e.to_string()))?;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
@@ -145,6 +167,7 @@ mod tests {
|
||||
|
||||
client.setup().await.unwrap();
|
||||
|
||||
//TODO not sure about this
|
||||
let configured_interfaces = brocade.configured_interfaces.lock().unwrap();
|
||||
assert_that!(*configured_interfaces).contains_exactly(vec![
|
||||
(first_interface.name.clone(), PortOperatingMode::Access),
|
||||
@@ -255,10 +278,10 @@ mod tests {
|
||||
|
||||
async fn configure_interfaces(
|
||||
&self,
|
||||
interfaces: Vec<(String, PortOperatingMode)>,
|
||||
interfaces: &Vec<(String, PortOperatingMode)>,
|
||||
) -> Result<(), Error> {
|
||||
let mut configured_interfaces = self.configured_interfaces.lock().unwrap();
|
||||
*configured_interfaces = interfaces;
|
||||
*configured_interfaces = interfaces.clone();
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@@ -121,7 +121,7 @@ mod test {
|
||||
#[test]
|
||||
fn deployment_to_dynamic_roundtrip() {
|
||||
// Create a sample Deployment with nested structures
|
||||
let mut deployment = Deployment {
|
||||
let deployment = Deployment {
|
||||
metadata: ObjectMeta {
|
||||
name: Some("my-deployment".to_string()),
|
||||
labels: Some({
|
||||
|
||||
@@ -8,7 +8,6 @@ mod tftp;
|
||||
use std::sync::Arc;
|
||||
|
||||
pub use management::*;
|
||||
use opnsense_config_xml::Host;
|
||||
use tokio::sync::RwLock;
|
||||
|
||||
use crate::{executors::ExecutorError, topology::LogicalHost};
|
||||
|
||||
@@ -19,8 +19,11 @@ pub struct DhcpScore {
|
||||
pub host_binding: Vec<HostBinding>,
|
||||
pub next_server: Option<IpAddress>,
|
||||
pub boot_filename: Option<String>,
|
||||
/// Boot filename to be provided to PXE clients identifying as BIOS
|
||||
pub filename: Option<String>,
|
||||
/// Boot filename to be provided to PXE clients identifying as uefi but NOT iPXE
|
||||
pub filename64: Option<String>,
|
||||
/// Boot filename to be provided to PXE clients identifying as iPXE
|
||||
pub filenameipxe: Option<String>,
|
||||
pub dhcp_range: (IpAddress, IpAddress),
|
||||
pub domain: Option<String>,
|
||||
|
||||
@@ -5,11 +5,10 @@ use serde::{Deserialize, Serialize};
|
||||
|
||||
use crate::{
|
||||
data::Version,
|
||||
hardware::PhysicalHost,
|
||||
infra::inventory::InventoryRepositoryFactory,
|
||||
interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
|
||||
inventory::{HostRole, Inventory},
|
||||
modules::inventory::LaunchDiscoverInventoryAgentScore,
|
||||
modules::inventory::{HarmonyDiscoveryStrategy, LaunchDiscoverInventoryAgentScore},
|
||||
score::Score,
|
||||
topology::Topology,
|
||||
};
|
||||
@@ -17,11 +16,13 @@ use crate::{
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct DiscoverHostForRoleScore {
|
||||
pub role: HostRole,
|
||||
pub number_desired_hosts: i16,
|
||||
pub discovery_strategy: HarmonyDiscoveryStrategy,
|
||||
}
|
||||
|
||||
impl<T: Topology> Score<T> for DiscoverHostForRoleScore {
|
||||
fn name(&self) -> String {
|
||||
"DiscoverInventoryAgentScore".to_string()
|
||||
format!("DiscoverHostForRoleScore({:?})", self.role)
|
||||
}
|
||||
|
||||
fn create_interpret(&self) -> Box<dyn Interpret<T>> {
|
||||
@@ -48,13 +49,15 @@ impl<T: Topology> Interpret<T> for DiscoverHostForRoleInterpret {
|
||||
);
|
||||
LaunchDiscoverInventoryAgentScore {
|
||||
discovery_timeout: None,
|
||||
discovery_strategy: self.score.discovery_strategy.clone(),
|
||||
}
|
||||
.interpret(inventory, topology)
|
||||
.await?;
|
||||
|
||||
let host: PhysicalHost;
|
||||
let mut chosen_hosts = vec![];
|
||||
let host_repo = InventoryRepositoryFactory::build().await?;
|
||||
|
||||
let mut assigned_hosts = 0;
|
||||
loop {
|
||||
let all_hosts = host_repo.get_all_hosts().await?;
|
||||
|
||||
@@ -75,16 +78,25 @@ impl<T: Topology> Interpret<T> for DiscoverHostForRoleInterpret {
|
||||
match ans {
|
||||
Ok(choice) => {
|
||||
info!(
|
||||
"Selected {} as the {:?} node.",
|
||||
choice.summary(),
|
||||
self.score.role
|
||||
"Assigned role {:?} for node {}",
|
||||
self.score.role,
|
||||
choice.summary()
|
||||
);
|
||||
host_repo
|
||||
.save_role_mapping(&self.score.role, &choice)
|
||||
.await?;
|
||||
host = choice;
|
||||
chosen_hosts.push(choice);
|
||||
assigned_hosts += 1;
|
||||
|
||||
info!(
|
||||
"Found {assigned_hosts} hosts for role {:?}",
|
||||
self.score.role
|
||||
);
|
||||
|
||||
if assigned_hosts == self.score.number_desired_hosts {
|
||||
break;
|
||||
}
|
||||
}
|
||||
Err(inquire::InquireError::OperationCanceled) => {
|
||||
info!("Refresh requested. Fetching list of discovered hosts again...");
|
||||
continue;
|
||||
@@ -100,8 +112,13 @@ impl<T: Topology> Interpret<T> for DiscoverHostForRoleInterpret {
|
||||
}
|
||||
|
||||
Ok(Outcome::success(format!(
|
||||
"Successfully discovered host {} for role {:?}",
|
||||
host.summary(),
|
||||
"Successfully discovered {} hosts {} for role {:?}",
|
||||
self.score.number_desired_hosts,
|
||||
chosen_hosts
|
||||
.iter()
|
||||
.map(|h| h.summary())
|
||||
.collect::<Vec<String>>()
|
||||
.join(", "),
|
||||
self.score.role
|
||||
)))
|
||||
}
|
||||
|
||||
@@ -1,6 +1,10 @@
|
||||
mod discovery;
|
||||
pub mod inspect;
|
||||
use std::net::Ipv4Addr;
|
||||
|
||||
use cidr::{Ipv4Cidr, Ipv4Inet};
|
||||
pub use discovery::*;
|
||||
use tokio::time::{Duration, timeout};
|
||||
|
||||
use async_trait::async_trait;
|
||||
use harmony_inventory_agent::local_presence::DiscoveryEvent;
|
||||
@@ -24,6 +28,7 @@ use harmony_types::id::Id;
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct LaunchDiscoverInventoryAgentScore {
|
||||
pub discovery_timeout: Option<u64>,
|
||||
pub discovery_strategy: HarmonyDiscoveryStrategy,
|
||||
}
|
||||
|
||||
impl<T: Topology> Score<T> for LaunchDiscoverInventoryAgentScore {
|
||||
@@ -43,6 +48,12 @@ struct DiscoverInventoryAgentInterpret {
|
||||
score: LaunchDiscoverInventoryAgentScore,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub enum HarmonyDiscoveryStrategy {
|
||||
MDNS,
|
||||
SUBNET { cidr: cidr::Ipv4Cidr, port: u16 },
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl<T: Topology> Interpret<T> for DiscoverInventoryAgentInterpret {
|
||||
async fn execute(
|
||||
@@ -57,6 +68,37 @@ impl<T: Topology> Interpret<T> for DiscoverInventoryAgentInterpret {
|
||||
),
|
||||
};
|
||||
|
||||
match self.score.discovery_strategy {
|
||||
HarmonyDiscoveryStrategy::MDNS => self.launch_mdns_discovery().await,
|
||||
HarmonyDiscoveryStrategy::SUBNET { cidr, port } => {
|
||||
self.launch_cidr_discovery(&cidr, port).await
|
||||
}
|
||||
};
|
||||
|
||||
Ok(Outcome::success(
|
||||
"Discovery process completed successfully".to_string(),
|
||||
))
|
||||
}
|
||||
|
||||
fn get_name(&self) -> InterpretName {
|
||||
InterpretName::DiscoverInventoryAgent
|
||||
}
|
||||
|
||||
fn get_version(&self) -> Version {
|
||||
todo!()
|
||||
}
|
||||
|
||||
fn get_status(&self) -> InterpretStatus {
|
||||
todo!()
|
||||
}
|
||||
|
||||
fn get_children(&self) -> Vec<Id> {
|
||||
todo!()
|
||||
}
|
||||
}
|
||||
|
||||
impl DiscoverInventoryAgentInterpret {
|
||||
async fn launch_mdns_discovery(&self) {
|
||||
harmony_inventory_agent::local_presence::discover_agents(
|
||||
self.score.discovery_timeout,
|
||||
|event: DiscoveryEvent| -> Result<(), String> {
|
||||
@@ -112,6 +154,8 @@ impl<T: Topology> Interpret<T> for DiscoverInventoryAgentInterpret {
|
||||
cpus,
|
||||
};
|
||||
|
||||
// FIXME only save the host when it is new or something changed in it.
|
||||
// we currently are saving the host every time it is discovered.
|
||||
let repo = InventoryRepositoryFactory::build()
|
||||
.await
|
||||
.map_err(|e| format!("Could not build repository : {e}"))
|
||||
@@ -132,25 +176,111 @@ impl<T: Topology> Interpret<T> for DiscoverInventoryAgentInterpret {
|
||||
Ok(())
|
||||
},
|
||||
)
|
||||
.await;
|
||||
Ok(Outcome::success(
|
||||
"Discovery process completed successfully".to_string(),
|
||||
))
|
||||
.await
|
||||
}
|
||||
|
||||
fn get_name(&self) -> InterpretName {
|
||||
InterpretName::DiscoverInventoryAgent
|
||||
// async fn launch_cidr_discovery(&self, cidr : &Ipv4Cidr, port: u16) {
|
||||
// todo!("launnch cidr discovery for {cidr} : {port}
|
||||
// - Iterate over all possible addresses in cidr
|
||||
// - make calls in batches of 20 attempting to reach harmony inventory agent on <addr, port> using same as above harmony_inventory_agent::client::get_host_inventory(&address, port)
|
||||
// - Log warn when response is 404, it means the port was used by something else unexpected
|
||||
// - Log error when response is 5xx
|
||||
// - Log debug when no response (timeout 15 seconds)
|
||||
// - Log info when found and response is 2xx
|
||||
// ");
|
||||
// }
|
||||
async fn launch_cidr_discovery(&self, cidr: &Ipv4Cidr, port: u16) {
|
||||
let addrs: Vec<Ipv4Inet> = cidr.iter().collect();
|
||||
let total = addrs.len();
|
||||
info!(
|
||||
"Starting CIDR discovery for {} hosts on {}/{} (port {})",
|
||||
total,
|
||||
cidr.network_length(),
|
||||
cidr,
|
||||
port
|
||||
);
|
||||
|
||||
let batch_size: usize = 20;
|
||||
let timeout_secs = 5;
|
||||
let request_timeout = Duration::from_secs(timeout_secs);
|
||||
|
||||
let mut current_batch = 0;
|
||||
let num_batches = addrs.len() / batch_size;
|
||||
|
||||
for batch in addrs.chunks(batch_size) {
|
||||
current_batch += 1;
|
||||
info!("Starting query batch {current_batch} of {num_batches}, timeout {timeout_secs}");
|
||||
let mut tasks = Vec::with_capacity(batch.len());
|
||||
|
||||
for addr in batch {
|
||||
let addr = addr.address().to_string();
|
||||
let port = port;
|
||||
|
||||
let task = tokio::spawn(async move {
|
||||
match timeout(
|
||||
request_timeout,
|
||||
harmony_inventory_agent::client::get_host_inventory(&addr, port),
|
||||
)
|
||||
.await
|
||||
{
|
||||
Ok(Ok(host)) => {
|
||||
info!("Found and response is 2xx for {addr}:{port}");
|
||||
|
||||
// Reuse the same conversion to PhysicalHost as MDNS flow
|
||||
let harmony_inventory_agent::hwinfo::PhysicalHost {
|
||||
storage_drives,
|
||||
storage_controller,
|
||||
memory_modules,
|
||||
cpus,
|
||||
chipset,
|
||||
network_interfaces,
|
||||
management_interface,
|
||||
host_uuid,
|
||||
} = host;
|
||||
|
||||
let host = PhysicalHost {
|
||||
id: Id::from(host_uuid),
|
||||
category: HostCategory::Server,
|
||||
network: network_interfaces,
|
||||
storage: storage_drives,
|
||||
labels: vec![Label {
|
||||
name: "discovered-by".to_string(),
|
||||
value: "harmony-inventory-agent".to_string(),
|
||||
}],
|
||||
memory_modules,
|
||||
cpus,
|
||||
};
|
||||
|
||||
// Save host to inventory
|
||||
let repo = InventoryRepositoryFactory::build()
|
||||
.await
|
||||
.map_err(|e| format!("Could not build repository : {e}"))
|
||||
.unwrap();
|
||||
if let Err(e) = repo.save(&host).await {
|
||||
log::debug!("Failed to save host {}: {e}", host.id);
|
||||
} else {
|
||||
info!("Saved host id {}, summary : {}", host.id, host.summary());
|
||||
}
|
||||
}
|
||||
Ok(Err(e)) => {
|
||||
log::info!("Error querying inventory agent on {addr}:{port} : {e}");
|
||||
}
|
||||
Err(_) => {
|
||||
// Timeout for this host
|
||||
log::debug!("No response (timeout) for {addr}:{port}");
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
tasks.push(task);
|
||||
}
|
||||
|
||||
fn get_version(&self) -> Version {
|
||||
todo!()
|
||||
// Wait for this batch to complete
|
||||
for t in tasks {
|
||||
let _ = t.await;
|
||||
}
|
||||
}
|
||||
|
||||
fn get_status(&self) -> InterpretStatus {
|
||||
todo!()
|
||||
}
|
||||
|
||||
fn get_children(&self) -> Vec<Id> {
|
||||
todo!()
|
||||
info!("CIDR discovery completed");
|
||||
}
|
||||
}
|
||||
|
||||
@@ -4,7 +4,7 @@ use crate::{
|
||||
infra::inventory::InventoryRepositoryFactory,
|
||||
interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
|
||||
inventory::{HostRole, Inventory},
|
||||
modules::inventory::DiscoverHostForRoleScore,
|
||||
modules::inventory::{DiscoverHostForRoleScore, HarmonyDiscoveryStrategy},
|
||||
score::Score,
|
||||
topology::HAClusterTopology,
|
||||
};
|
||||
@@ -104,6 +104,8 @@ When you can dig them, confirm to continue.
|
||||
bootstrap_host = hosts.into_iter().next().to_owned();
|
||||
DiscoverHostForRoleScore {
|
||||
role: HostRole::Bootstrap,
|
||||
number_desired_hosts: 1,
|
||||
discovery_strategy: HarmonyDiscoveryStrategy::MDNS,
|
||||
}
|
||||
.interpret(inventory, topology)
|
||||
.await?;
|
||||
|
||||
@@ -1,20 +1,10 @@
|
||||
use crate::{
|
||||
data::Version,
|
||||
hardware::PhysicalHost,
|
||||
infra::inventory::InventoryRepositoryFactory,
|
||||
interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
|
||||
inventory::{HostRole, Inventory},
|
||||
modules::{
|
||||
dhcp::DhcpHostBindingScore, http::IPxeMacBootFileScore,
|
||||
inventory::DiscoverHostForRoleScore, okd::templates::BootstrapIpxeTpl,
|
||||
},
|
||||
interpret::Interpret,
|
||||
inventory::HostRole,
|
||||
modules::{inventory::HarmonyDiscoveryStrategy, okd::bootstrap_okd_node::OKDNodeInterpret},
|
||||
score::Score,
|
||||
topology::{HAClusterTopology, HostBinding},
|
||||
topology::HAClusterTopology,
|
||||
};
|
||||
use async_trait::async_trait;
|
||||
use derive_new::new;
|
||||
use harmony_types::id::Id;
|
||||
use log::{debug, info};
|
||||
use serde::Serialize;
|
||||
|
||||
// -------------------------------------------------------------------------------------------------
|
||||
@@ -23,231 +13,23 @@ use serde::Serialize;
|
||||
// - Persist bonding via MachineConfigs (or NNCP) once SCOS is active.
|
||||
// -------------------------------------------------------------------------------------------------
|
||||
|
||||
#[derive(Debug, Clone, Serialize, new)]
|
||||
pub struct OKDSetup03ControlPlaneScore {}
|
||||
#[derive(Debug, Clone, Serialize)]
|
||||
pub struct OKDSetup03ControlPlaneScore {
|
||||
pub discovery_strategy: HarmonyDiscoveryStrategy,
|
||||
}
|
||||
|
||||
impl Score<HAClusterTopology> for OKDSetup03ControlPlaneScore {
|
||||
fn create_interpret(&self) -> Box<dyn Interpret<HAClusterTopology>> {
|
||||
Box::new(OKDSetup03ControlPlaneInterpret::new())
|
||||
// TODO: Implement a step to wait for the control plane nodes to join the cluster
|
||||
// and for the cluster operators to become available. This would be similar to
|
||||
// the `wait-for bootstrap-complete` command.
|
||||
Box::new(OKDNodeInterpret::new(
|
||||
HostRole::ControlPlane,
|
||||
self.discovery_strategy.clone(),
|
||||
))
|
||||
}
|
||||
|
||||
fn name(&self) -> String {
|
||||
"OKDSetup03ControlPlaneScore".to_string()
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct OKDSetup03ControlPlaneInterpret {
|
||||
version: Version,
|
||||
status: InterpretStatus,
|
||||
}
|
||||
|
||||
impl OKDSetup03ControlPlaneInterpret {
|
||||
pub fn new() -> Self {
|
||||
let version = Version::from("1.0.0").unwrap();
|
||||
Self {
|
||||
version,
|
||||
status: InterpretStatus::QUEUED,
|
||||
}
|
||||
}
|
||||
|
||||
/// Ensures that three physical hosts are discovered and available for the ControlPlane role.
|
||||
/// It will trigger discovery if not enough hosts are found.
|
||||
async fn get_nodes(
|
||||
&self,
|
||||
inventory: &Inventory,
|
||||
topology: &HAClusterTopology,
|
||||
) -> Result<Vec<PhysicalHost>, InterpretError> {
|
||||
const REQUIRED_HOSTS: usize = 3;
|
||||
let repo = InventoryRepositoryFactory::build().await?;
|
||||
let mut control_plane_hosts = repo.get_host_for_role(&HostRole::ControlPlane).await?;
|
||||
|
||||
while control_plane_hosts.len() < REQUIRED_HOSTS {
|
||||
info!(
|
||||
"Discovery of {} control plane hosts in progress, current number {}",
|
||||
REQUIRED_HOSTS,
|
||||
control_plane_hosts.len()
|
||||
);
|
||||
// This score triggers the discovery agent for a specific role.
|
||||
DiscoverHostForRoleScore {
|
||||
role: HostRole::ControlPlane,
|
||||
}
|
||||
.interpret(inventory, topology)
|
||||
.await?;
|
||||
control_plane_hosts = repo.get_host_for_role(&HostRole::ControlPlane).await?;
|
||||
}
|
||||
|
||||
if control_plane_hosts.len() < REQUIRED_HOSTS {
|
||||
Err(InterpretError::new(format!(
|
||||
"OKD Requires at least {} control plane hosts, but only found {}. Cannot proceed.",
|
||||
REQUIRED_HOSTS,
|
||||
control_plane_hosts.len()
|
||||
)))
|
||||
} else {
|
||||
// Take exactly the number of required hosts to ensure consistency.
|
||||
Ok(control_plane_hosts
|
||||
.into_iter()
|
||||
.take(REQUIRED_HOSTS)
|
||||
.collect())
|
||||
}
|
||||
}
|
||||
|
||||
/// Configures DHCP host bindings for all control plane nodes.
|
||||
async fn configure_host_binding(
|
||||
&self,
|
||||
inventory: &Inventory,
|
||||
topology: &HAClusterTopology,
|
||||
nodes: &Vec<PhysicalHost>,
|
||||
) -> Result<(), InterpretError> {
|
||||
info!("[ControlPlane] Configuring host bindings for control plane nodes.");
|
||||
|
||||
// Ensure the topology definition matches the number of physical nodes found.
|
||||
if topology.control_plane.len() != nodes.len() {
|
||||
return Err(InterpretError::new(format!(
|
||||
"Mismatch between logical control plane hosts defined in topology ({}) and physical nodes found ({}).",
|
||||
topology.control_plane.len(),
|
||||
nodes.len()
|
||||
)));
|
||||
}
|
||||
|
||||
// Create a binding for each physical host to its corresponding logical host.
|
||||
let bindings: Vec<HostBinding> = topology
|
||||
.control_plane
|
||||
.iter()
|
||||
.zip(nodes.iter())
|
||||
.map(|(logical_host, physical_host)| {
|
||||
info!(
|
||||
"Creating binding: Logical Host '{}' -> Physical Host ID '{}'",
|
||||
logical_host.name, physical_host.id
|
||||
);
|
||||
HostBinding {
|
||||
logical_host: logical_host.clone(),
|
||||
physical_host: physical_host.clone(),
|
||||
}
|
||||
})
|
||||
.collect();
|
||||
|
||||
DhcpHostBindingScore {
|
||||
host_binding: bindings,
|
||||
domain: Some(topology.domain_name.clone()),
|
||||
}
|
||||
.interpret(inventory, topology)
|
||||
.await?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Renders and deploys a per-MAC iPXE boot file for each control plane node.
|
||||
async fn configure_ipxe(
|
||||
&self,
|
||||
inventory: &Inventory,
|
||||
topology: &HAClusterTopology,
|
||||
nodes: &Vec<PhysicalHost>,
|
||||
) -> Result<(), InterpretError> {
|
||||
info!("[ControlPlane] Rendering per-MAC iPXE configurations.");
|
||||
|
||||
// The iPXE script content is the same for all control plane nodes,
|
||||
// pointing to the 'master.ign' ignition file.
|
||||
let content = BootstrapIpxeTpl {
|
||||
http_ip: &topology.http_server.get_ip().to_string(),
|
||||
scos_path: "scos",
|
||||
ignition_http_path: "okd_ignition_files",
|
||||
installation_device: "/dev/sda", // This might need to be configurable per-host in the future
|
||||
ignition_file_name: "master.ign", // Control plane nodes use the master ignition file
|
||||
}
|
||||
.to_string();
|
||||
|
||||
debug!("[ControlPlane] iPXE content template:\n{content}");
|
||||
|
||||
// Create and apply an iPXE boot file for each node.
|
||||
for node in nodes {
|
||||
let mac_address = node.get_mac_address();
|
||||
if mac_address.is_empty() {
|
||||
return Err(InterpretError::new(format!(
|
||||
"Physical host with ID '{}' has no MAC addresses defined.",
|
||||
node.id
|
||||
)));
|
||||
}
|
||||
info!(
|
||||
"[ControlPlane] Applying iPXE config for node ID '{}' with MACs: {:?}",
|
||||
node.id, mac_address
|
||||
);
|
||||
|
||||
IPxeMacBootFileScore {
|
||||
mac_address,
|
||||
content: content.clone(),
|
||||
}
|
||||
.interpret(inventory, topology)
|
||||
.await?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Prompts the user to reboot the target control plane nodes.
|
||||
async fn reboot_targets(&self, nodes: &Vec<PhysicalHost>) -> Result<(), InterpretError> {
|
||||
let node_ids: Vec<String> = nodes.iter().map(|n| n.id.to_string()).collect();
|
||||
info!("[ControlPlane] Requesting reboot for control plane nodes: {node_ids:?}",);
|
||||
|
||||
let confirmation = inquire::Confirm::new(
|
||||
&format!("Please reboot the {} control plane nodes ({}) to apply their PXE configuration. Press enter when ready.", nodes.len(), node_ids.join(", ")),
|
||||
)
|
||||
.prompt()
|
||||
.map_err(|e| InterpretError::new(format!("User prompt failed: {e}")))?;
|
||||
|
||||
if !confirmation {
|
||||
return Err(InterpretError::new(
|
||||
"User aborted the operation.".to_string(),
|
||||
));
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl Interpret<HAClusterTopology> for OKDSetup03ControlPlaneInterpret {
|
||||
fn get_name(&self) -> InterpretName {
|
||||
InterpretName::Custom("OKDSetup03ControlPlane")
|
||||
}
|
||||
|
||||
fn get_version(&self) -> Version {
|
||||
self.version.clone()
|
||||
}
|
||||
|
||||
fn get_status(&self) -> InterpretStatus {
|
||||
self.status.clone()
|
||||
}
|
||||
|
||||
fn get_children(&self) -> Vec<Id> {
|
||||
vec![]
|
||||
}
|
||||
|
||||
async fn execute(
|
||||
&self,
|
||||
inventory: &Inventory,
|
||||
topology: &HAClusterTopology,
|
||||
) -> Result<Outcome, InterpretError> {
|
||||
// 1. Ensure we have 3 physical hosts for the control plane.
|
||||
let nodes = self.get_nodes(inventory, topology).await?;
|
||||
|
||||
// 2. Create DHCP reservations for the control plane nodes.
|
||||
self.configure_host_binding(inventory, topology, &nodes)
|
||||
.await?;
|
||||
|
||||
// 3. Create iPXE files for each control plane node to boot from the master ignition.
|
||||
self.configure_ipxe(inventory, topology, &nodes).await?;
|
||||
|
||||
// 4. Reboot the nodes to start the OS installation.
|
||||
self.reboot_targets(&nodes).await?;
|
||||
|
||||
// TODO: Implement a step to wait for the control plane nodes to join the cluster
|
||||
// and for the cluster operators to become available. This would be similar to
|
||||
// the `wait-for bootstrap-complete` command.
|
||||
info!("[ControlPlane] Provisioning initiated. Monitor the cluster convergence manually.");
|
||||
|
||||
Ok(Outcome::success(
|
||||
"Control plane provisioning has been successfully initiated.".into(),
|
||||
))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,13 +1,9 @@
|
||||
use async_trait::async_trait;
|
||||
use derive_new::new;
|
||||
use harmony_types::id::Id;
|
||||
use log::info;
|
||||
use serde::Serialize;
|
||||
|
||||
use crate::{
|
||||
data::Version,
|
||||
interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
|
||||
inventory::Inventory,
|
||||
interpret::Interpret,
|
||||
inventory::HostRole,
|
||||
modules::{inventory::HarmonyDiscoveryStrategy, okd::bootstrap_okd_node::OKDNodeInterpret},
|
||||
score::Score,
|
||||
topology::HAClusterTopology,
|
||||
};
|
||||
@@ -18,66 +14,20 @@ use crate::{
|
||||
// - Persist bonding via MC/NNCP as required (same approach as masters).
|
||||
// -------------------------------------------------------------------------------------------------
|
||||
|
||||
#[derive(Debug, Clone, Serialize, new)]
|
||||
pub struct OKDSetup04WorkersScore {}
|
||||
#[derive(Debug, Clone, Serialize)]
|
||||
pub struct OKDSetup04WorkersScore {
|
||||
pub discovery_strategy: HarmonyDiscoveryStrategy,
|
||||
}
|
||||
|
||||
impl Score<HAClusterTopology> for OKDSetup04WorkersScore {
|
||||
fn create_interpret(&self) -> Box<dyn Interpret<HAClusterTopology>> {
|
||||
Box::new(OKDSetup04WorkersInterpret::new(self.clone()))
|
||||
Box::new(OKDNodeInterpret::new(
|
||||
HostRole::ControlPlane,
|
||||
self.discovery_strategy.clone(),
|
||||
))
|
||||
}
|
||||
|
||||
fn name(&self) -> String {
|
||||
"OKDSetup04WorkersScore".to_string()
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct OKDSetup04WorkersInterpret {
|
||||
score: OKDSetup04WorkersScore,
|
||||
version: Version,
|
||||
status: InterpretStatus,
|
||||
}
|
||||
|
||||
impl OKDSetup04WorkersInterpret {
|
||||
pub fn new(score: OKDSetup04WorkersScore) -> Self {
|
||||
let version = Version::from("1.0.0").unwrap();
|
||||
Self {
|
||||
version,
|
||||
score,
|
||||
status: InterpretStatus::QUEUED,
|
||||
}
|
||||
}
|
||||
|
||||
async fn render_and_reboot(&self) -> Result<(), InterpretError> {
|
||||
info!("[Workers] Rendering per-MAC PXE for workers and rebooting");
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl Interpret<HAClusterTopology> for OKDSetup04WorkersInterpret {
|
||||
fn get_name(&self) -> InterpretName {
|
||||
InterpretName::Custom("OKDSetup04Workers")
|
||||
}
|
||||
|
||||
fn get_version(&self) -> Version {
|
||||
self.version.clone()
|
||||
}
|
||||
|
||||
fn get_status(&self) -> InterpretStatus {
|
||||
self.status.clone()
|
||||
}
|
||||
|
||||
fn get_children(&self) -> Vec<Id> {
|
||||
vec![]
|
||||
}
|
||||
|
||||
async fn execute(
|
||||
&self,
|
||||
_inventory: &Inventory,
|
||||
_topology: &HAClusterTopology,
|
||||
) -> Result<Outcome, InterpretError> {
|
||||
self.render_and_reboot().await?;
|
||||
Ok(Outcome::success("Workers provisioned".into()))
|
||||
}
|
||||
}
|
||||
|
||||
313
harmony/src/modules/okd/bootstrap_okd_node.rs
Normal file
313
harmony/src/modules/okd/bootstrap_okd_node.rs
Normal file
@@ -0,0 +1,313 @@
|
||||
use async_trait::async_trait;
|
||||
use derive_new::new;
|
||||
use harmony_types::id::Id;
|
||||
use log::{debug, info};
|
||||
use serde::Serialize;
|
||||
|
||||
use crate::{
|
||||
data::Version,
|
||||
hardware::PhysicalHost,
|
||||
infra::inventory::InventoryRepositoryFactory,
|
||||
interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
|
||||
inventory::{HostRole, Inventory},
|
||||
modules::{
|
||||
dhcp::DhcpHostBindingScore,
|
||||
http::IPxeMacBootFileScore,
|
||||
inventory::{DiscoverHostForRoleScore, HarmonyDiscoveryStrategy},
|
||||
okd::{
|
||||
okd_node::{BootstrapRole, ControlPlaneRole, OKDRoleProperties, WorkerRole},
|
||||
templates::BootstrapIpxeTpl,
|
||||
},
|
||||
},
|
||||
score::Score,
|
||||
topology::{HAClusterTopology, HostBinding, LogicalHost},
|
||||
};
|
||||
|
||||
#[derive(Debug, Clone, Serialize, new)]
|
||||
pub struct OKDNodeInstallationScore {
|
||||
host_role: HostRole,
|
||||
discovery_strategy: HarmonyDiscoveryStrategy,
|
||||
}
|
||||
|
||||
impl Score<HAClusterTopology> for OKDNodeInstallationScore {
|
||||
fn name(&self) -> String {
|
||||
"OKDNodeScore".to_string()
|
||||
}
|
||||
|
||||
fn create_interpret(&self) -> Box<dyn Interpret<HAClusterTopology>> {
|
||||
Box::new(OKDNodeInterpret::new(
|
||||
self.host_role.clone(),
|
||||
self.discovery_strategy.clone(),
|
||||
))
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct OKDNodeInterpret {
|
||||
host_role: HostRole,
|
||||
discovery_strategy: HarmonyDiscoveryStrategy,
|
||||
}
|
||||
|
||||
impl OKDNodeInterpret {
|
||||
pub fn new(host_role: HostRole, discovery_strategy: HarmonyDiscoveryStrategy) -> Self {
|
||||
Self {
|
||||
host_role,
|
||||
discovery_strategy,
|
||||
}
|
||||
}
|
||||
|
||||
fn okd_role_properties(&self, role: &HostRole) -> &'static dyn OKDRoleProperties {
|
||||
match role {
|
||||
HostRole::Bootstrap => &BootstrapRole,
|
||||
HostRole::ControlPlane => &ControlPlaneRole,
|
||||
HostRole::Worker => &WorkerRole,
|
||||
}
|
||||
}
|
||||
|
||||
async fn get_nodes(
|
||||
&self,
|
||||
inventory: &Inventory,
|
||||
topology: &HAClusterTopology,
|
||||
) -> Result<Vec<PhysicalHost>, InterpretError> {
|
||||
let repo = InventoryRepositoryFactory::build().await?;
|
||||
|
||||
let mut hosts = repo.get_host_for_role(&self.host_role).await?;
|
||||
|
||||
let okd_host_properties = self.okd_role_properties(&self.host_role);
|
||||
|
||||
let required_hosts: i16 = okd_host_properties.required_hosts();
|
||||
|
||||
info!(
|
||||
"Discovery of {} {} hosts in progress, current number {}",
|
||||
required_hosts,
|
||||
self.host_role,
|
||||
hosts.len()
|
||||
);
|
||||
// This score triggers the discovery agent for a specific role.
|
||||
DiscoverHostForRoleScore {
|
||||
role: self.host_role.clone(),
|
||||
number_desired_hosts: required_hosts,
|
||||
discovery_strategy: self.discovery_strategy.clone(),
|
||||
}
|
||||
.interpret(inventory, topology)
|
||||
.await?;
|
||||
|
||||
hosts = repo.get_host_for_role(&self.host_role).await?;
|
||||
|
||||
if hosts.len() < required_hosts.try_into().unwrap_or(0) {
|
||||
Err(InterpretError::new(format!(
|
||||
"OKD Requires at least {} {} hosts, but only found {}. Cannot proceed.",
|
||||
required_hosts,
|
||||
self.host_role,
|
||||
hosts.len()
|
||||
)))
|
||||
} else {
|
||||
// Take exactly the number of required hosts to ensure consistency.
|
||||
Ok(hosts
|
||||
.into_iter()
|
||||
.take(required_hosts.try_into().unwrap())
|
||||
.collect())
|
||||
}
|
||||
}
|
||||
|
||||
/// Configures DHCP host bindings for all nodes.
|
||||
async fn configure_host_binding(
|
||||
&self,
|
||||
inventory: &Inventory,
|
||||
topology: &HAClusterTopology,
|
||||
nodes: &Vec<PhysicalHost>,
|
||||
) -> Result<(), InterpretError> {
|
||||
info!(
|
||||
"[{}] Configuring host bindings for {} plane nodes.",
|
||||
self.host_role, self.host_role,
|
||||
);
|
||||
|
||||
let host_properties = self.okd_role_properties(&self.host_role);
|
||||
|
||||
self.validate_host_node_match(nodes, host_properties.logical_hosts(topology))?;
|
||||
|
||||
let bindings: Vec<HostBinding> =
|
||||
self.host_bindings(nodes, host_properties.logical_hosts(topology));
|
||||
|
||||
DhcpHostBindingScore {
|
||||
host_binding: bindings,
|
||||
domain: Some(topology.domain_name.clone()),
|
||||
}
|
||||
.interpret(inventory, topology)
|
||||
.await?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// Ensure the topology definition matches the number of physical nodes found.
|
||||
fn validate_host_node_match(
|
||||
&self,
|
||||
nodes: &Vec<PhysicalHost>,
|
||||
hosts: &Vec<LogicalHost>,
|
||||
) -> Result<(), InterpretError> {
|
||||
if hosts.len() != nodes.len() {
|
||||
return Err(InterpretError::new(format!(
|
||||
"Mismatch between logical hosts defined in topology ({}) and physical nodes found ({}).",
|
||||
hosts.len(),
|
||||
nodes.len()
|
||||
)));
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// Create a binding for each physical host to its corresponding logical host.
|
||||
fn host_bindings(
|
||||
&self,
|
||||
nodes: &Vec<PhysicalHost>,
|
||||
hosts: &Vec<LogicalHost>,
|
||||
) -> Vec<HostBinding> {
|
||||
hosts
|
||||
.iter()
|
||||
.zip(nodes.iter())
|
||||
.map(|(logical_host, physical_host)| {
|
||||
info!(
|
||||
"Creating binding: Logical Host '{}' -> Physical Host ID '{}'",
|
||||
logical_host.name, physical_host.id
|
||||
);
|
||||
HostBinding {
|
||||
logical_host: logical_host.clone(),
|
||||
physical_host: physical_host.clone(),
|
||||
}
|
||||
})
|
||||
.collect()
|
||||
}
|
||||
|
||||
/// Renders and deploys a per-MAC iPXE boot file for each node.
|
||||
async fn configure_ipxe(
|
||||
&self,
|
||||
inventory: &Inventory,
|
||||
topology: &HAClusterTopology,
|
||||
nodes: &Vec<PhysicalHost>,
|
||||
) -> Result<(), InterpretError> {
|
||||
info!(
|
||||
"[{}] Rendering per-MAC iPXE configurations.",
|
||||
self.host_role
|
||||
);
|
||||
|
||||
let okd_role_properties = self.okd_role_properties(&self.host_role);
|
||||
// The iPXE script content is the same for all nodes of a given role,
|
||||
// pointing to that role's ignition file (e.g. 'master.ign' for the control plane).
|
||||
let content = BootstrapIpxeTpl {
|
||||
http_ip: &topology.http_server.get_ip().to_string(),
|
||||
scos_path: "scos",
|
||||
ignition_http_path: "okd_ignition_files",
|
||||
//TODO must be refactored to not only use /dev/sda
|
||||
installation_device: "/dev/sda", // This might need to be configurable per-host in the future
|
||||
ignition_file_name: okd_role_properties.ignition_file(),
|
||||
}
|
||||
.to_string();
|
||||
|
||||
debug!("[{}] iPXE content template:\n{content}", self.host_role);
|
||||
|
||||
// Create and apply an iPXE boot file for each node.
|
||||
for node in nodes {
|
||||
let mac_address = node.get_mac_address();
|
||||
if mac_address.is_empty() {
|
||||
return Err(InterpretError::new(format!(
|
||||
"Physical host with ID '{}' has no MAC addresses defined.",
|
||||
node.id
|
||||
)));
|
||||
}
|
||||
info!(
|
||||
"[{}] Applying iPXE config for node ID '{}' with MACs: {:?}",
|
||||
self.host_role, node.id, mac_address
|
||||
);
|
||||
|
||||
IPxeMacBootFileScore {
|
||||
mac_address,
|
||||
content: content.clone(),
|
||||
}
|
||||
.interpret(inventory, topology)
|
||||
.await?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Prompts the user to reboot the target nodes for this role.
|
||||
async fn reboot_targets(&self, nodes: &Vec<PhysicalHost>) -> Result<(), InterpretError> {
|
||||
let node_ids: Vec<String> = nodes.iter().map(|n| n.id.to_string()).collect();
|
||||
info!(
|
||||
"[{}] Requesting reboot for control plane nodes: {node_ids:?}",
|
||||
self.host_role
|
||||
);
|
||||
|
||||
let confirmation = inquire::Confirm::new(
|
||||
&format!("Please reboot the {} {} nodes ({}) to apply their PXE configuration. Press enter when ready.", nodes.len(), self.host_role, node_ids.join(", ")),
|
||||
)
|
||||
.prompt()
|
||||
.map_err(|e| InterpretError::new(format!("User prompt failed: {e}")))?;
|
||||
|
||||
if !confirmation {
|
||||
return Err(InterpretError::new(
|
||||
"User aborted the operation.".to_string(),
|
||||
));
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl Interpret<HAClusterTopology> for OKDNodeInterpret {
|
||||
async fn execute(
|
||||
&self,
|
||||
inventory: &Inventory,
|
||||
topology: &HAClusterTopology,
|
||||
) -> Result<Outcome, InterpretError> {
|
||||
// 1. Ensure we have the specified number of physical hosts.
|
||||
let nodes = self.get_nodes(inventory, topology).await?;
|
||||
|
||||
// 2. Create DHCP reservations for the nodes.
|
||||
self.configure_host_binding(inventory, topology, &nodes)
|
||||
.await?;
|
||||
|
||||
// 3. Create iPXE files for each node to boot from the ignition.
|
||||
self.configure_ipxe(inventory, topology, &nodes).await?;
|
||||
|
||||
// 4. Reboot the nodes to start the OS installation.
|
||||
self.reboot_targets(&nodes).await?;
|
||||
// TODO: Implement a step to validate that the installation of the nodes is
|
||||
// complete and wait for the cluster operators to become available.
|
||||
//
|
||||
// The OpenShift installer only provides two wait commands which currently need to be
|
||||
// run manually:
|
||||
// - `openshift-install wait-for bootstrap-complete`
|
||||
// - `openshift-install wait-for install-complete`
|
||||
//
|
||||
// There is no installer command that waits specifically for worker node
|
||||
// provisioning. Worker nodes join asynchronously (via ignition + CSR approval),
|
||||
// and the cluster becomes fully functional only once all nodes are Ready and the
|
||||
// cluster operators report Available=True.
|
||||
info!(
|
||||
"[{}] Provisioning initiated. Monitor the cluster convergence manually.",
|
||||
self.host_role
|
||||
);
|
||||
|
||||
Ok(Outcome::success(format!(
|
||||
"{} provisioning has been successfully initiated.",
|
||||
self.host_role
|
||||
)))
|
||||
}
|
||||
|
||||
fn get_name(&self) -> InterpretName {
|
||||
InterpretName::Custom("OKDNodeSetup".into())
|
||||
}
|
||||
|
||||
fn get_version(&self) -> Version {
|
||||
todo!()
|
||||
}
|
||||
|
||||
fn get_status(&self) -> InterpretStatus {
|
||||
todo!()
|
||||
}
|
||||
|
||||
fn get_children(&self) -> Vec<Id> {
|
||||
todo!()
|
||||
}
|
||||
}
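As a possible follow-up to the TODO above, a minimal sketch of an automated validation step could simply shell out to the installer's own wait command. This is not part of the change set; the helper name, the `--dir` argument, and the mapping into `InterpretError` are assumptions — only the two `openshift-install wait-for` subcommands come from the comment in `execute`.

```rust
use std::process::Command;

/// Hypothetical helper: block until `openshift-install wait-for install-complete`
/// succeeds for the given installation directory. Not part of this change.
fn wait_for_install_complete(install_dir: &str) -> Result<(), InterpretError> {
    let status = Command::new("openshift-install")
        .args(["wait-for", "install-complete", "--dir", install_dir])
        .status()
        .map_err(|e| InterpretError::new(format!("failed to spawn openshift-install: {e}")))?;

    if status.success() {
        Ok(())
    } else {
        Err(InterpretError::new(format!(
            "openshift-install wait-for install-complete exited with {status}"
        )))
    }
}
```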
|
||||
@@ -251,14 +251,15 @@ impl<T: Topology + NetworkManager + Switch> Interpret<T> for HostNetworkConfigur
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use assertor::*;
|
||||
use brocade::PortOperatingMode;
|
||||
use harmony_types::{net::MacAddress, switch::PortLocation};
|
||||
use lazy_static::lazy_static;
|
||||
|
||||
use crate::{
|
||||
hardware::HostCategory,
|
||||
topology::{
|
||||
HostNetworkConfig, NetworkError, PreparationError, PreparationOutcome, SwitchError,
|
||||
SwitchPort,
|
||||
HostNetworkConfig, NetworkError, PortConfig, PreparationError, PreparationOutcome,
|
||||
SwitchError, SwitchPort,
|
||||
},
|
||||
};
|
||||
use std::{
|
||||
@@ -692,5 +693,14 @@ mod tests {
|
||||
|
||||
Ok(())
|
||||
}
|
||||
async fn clear_port_channel(&self, ids: &Vec<Id>) -> Result<(), SwitchError> {
|
||||
todo!()
|
||||
}
|
||||
async fn configure_interface(
|
||||
&self,
|
||||
port_config: &Vec<PortConfig>,
|
||||
) -> Result<(), SwitchError> {
|
||||
todo!()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -48,11 +48,14 @@
|
||||
//! - internal_domain: Internal cluster domain (e.g., cluster.local or harmony.mcd).
|
||||
|
||||
use crate::{
|
||||
modules::okd::{
|
||||
modules::{
|
||||
inventory::HarmonyDiscoveryStrategy,
|
||||
okd::{
|
||||
OKDSetup01InventoryScore, OKDSetup02BootstrapScore, OKDSetup03ControlPlaneScore,
|
||||
OKDSetup04WorkersScore, OKDSetup05SanityCheckScore, OKDSetupPersistNetworkBondScore,
|
||||
bootstrap_06_installation_report::OKDSetup06InstallationReportScore,
|
||||
},
|
||||
},
|
||||
score::Score,
|
||||
topology::HAClusterTopology,
|
||||
};
|
||||
@@ -60,13 +63,19 @@ use crate::{
|
||||
pub struct OKDInstallationPipeline;
|
||||
|
||||
impl OKDInstallationPipeline {
|
||||
pub async fn get_all_scores() -> Vec<Box<dyn Score<HAClusterTopology>>> {
|
||||
pub async fn get_all_scores(
|
||||
discovery_strategy: HarmonyDiscoveryStrategy,
|
||||
) -> Vec<Box<dyn Score<HAClusterTopology>>> {
|
||||
vec![
|
||||
Box::new(OKDSetup01InventoryScore::new()),
|
||||
Box::new(OKDSetup02BootstrapScore::new()),
|
||||
Box::new(OKDSetup03ControlPlaneScore::new()),
|
||||
Box::new(OKDSetup03ControlPlaneScore {
|
||||
discovery_strategy: discovery_strategy.clone(),
|
||||
}),
|
||||
Box::new(OKDSetupPersistNetworkBondScore::new()),
|
||||
Box::new(OKDSetup04WorkersScore::new()),
|
||||
Box::new(OKDSetup04WorkersScore {
|
||||
discovery_strategy: discovery_strategy.clone(),
|
||||
}),
|
||||
Box::new(OKDSetup05SanityCheckScore::new()),
|
||||
Box::new(OKDSetup06InstallationReportScore::new()),
|
||||
]
|
||||
|
||||
@@ -6,12 +6,14 @@ mod bootstrap_05_sanity_check;
|
||||
mod bootstrap_06_installation_report;
|
||||
pub mod bootstrap_dhcp;
|
||||
pub mod bootstrap_load_balancer;
|
||||
pub mod bootstrap_okd_node;
|
||||
mod bootstrap_persist_network_bond;
|
||||
pub mod dhcp;
|
||||
pub mod dns;
|
||||
pub mod installation;
|
||||
pub mod ipxe;
|
||||
pub mod load_balancer;
|
||||
pub mod okd_node;
|
||||
pub mod templates;
|
||||
pub mod upgrade;
|
||||
pub use bootstrap_01_prepare::*;
|
||||
|
||||
54
harmony/src/modules/okd/okd_node.rs
Normal file
@@ -0,0 +1,54 @@
|
||||
use crate::topology::{HAClusterTopology, LogicalHost};
|
||||
|
||||
pub trait OKDRoleProperties {
|
||||
fn ignition_file(&self) -> &'static str;
|
||||
fn required_hosts(&self) -> i16;
|
||||
fn logical_hosts<'a>(&self, t: &'a HAClusterTopology) -> &'a Vec<LogicalHost>;
|
||||
}
|
||||
|
||||
pub struct BootstrapRole;
|
||||
pub struct ControlPlaneRole;
|
||||
pub struct WorkerRole;
|
||||
pub struct StorageRole;
|
||||
|
||||
impl OKDRoleProperties for BootstrapRole {
|
||||
fn ignition_file(&self) -> &'static str {
|
||||
"bootstrap.ign"
|
||||
}
|
||||
|
||||
fn required_hosts(&self) -> i16 {
|
||||
1
|
||||
}
|
||||
|
||||
fn logical_hosts<'a>(&self, t: &'a HAClusterTopology) -> &'a Vec<LogicalHost> {
|
||||
todo!()
|
||||
}
|
||||
}
|
||||
|
||||
impl OKDRoleProperties for ControlPlaneRole {
|
||||
fn ignition_file(&self) -> &'static str {
|
||||
"master.ign"
|
||||
}
|
||||
|
||||
fn required_hosts(&self) -> i16 {
|
||||
3
|
||||
}
|
||||
|
||||
fn logical_hosts<'a>(&self, t: &'a HAClusterTopology) -> &'a Vec<LogicalHost> {
|
||||
&t.control_plane
|
||||
}
|
||||
}
|
||||
|
||||
impl OKDRoleProperties for WorkerRole {
|
||||
fn ignition_file(&self) -> &'static str {
|
||||
"worker.ign"
|
||||
}
|
||||
|
||||
fn required_hosts(&self) -> i16 {
|
||||
2
|
||||
}
|
||||
|
||||
fn logical_hosts<'a>(&self, t: &'a HAClusterTopology) -> &'a Vec<LogicalHost> {
|
||||
&t.workers
|
||||
}
|
||||
}
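`okd_node.rs` declares a `StorageRole` marker type but does not implement `OKDRoleProperties` for it. A hedged sketch of what such an impl might look like, mirroring the existing impls; the ignition file, the host count, and the `todo!()` for logical hosts are assumptions, not part of this change.

```rust
impl OKDRoleProperties for StorageRole {
    fn ignition_file(&self) -> &'static str {
        // Assumption: storage nodes join with the worker ignition config.
        "worker.ign"
    }

    fn required_hosts(&self) -> i16 {
        // Assumption: a typical replicated storage setup wants three hosts.
        3
    }

    fn logical_hosts<'a>(&self, t: &'a HAClusterTopology) -> &'a Vec<LogicalHost> {
        // The topology has no dedicated storage host list in this change,
        // so this mirrors the BootstrapRole placeholder.
        todo!()
    }
}
```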
|
||||
11
harmony_inventory_agent/build_docker.sh
Executable file
@@ -0,0 +1,11 @@
|
||||
cargo build -p harmony_inventory_agent --release --target x86_64-unknown-linux-musl
|
||||
|
||||
SCRIPT_DIR="$(dirname "${0}")"
|
||||
|
||||
cd "${SCRIPT_DIR}/docker/"
|
||||
|
||||
cp ../../target/x86_64-unknown-linux-musl/release/harmony_inventory_agent .
|
||||
|
||||
docker build . -t hub.nationtech.io/harmony/harmony_inventory_agent
|
||||
|
||||
docker push hub.nationtech.io/harmony/harmony_inventory_agent
|
||||
1
harmony_inventory_agent/docker/.gitignore
vendored
Normal file
@@ -0,0 +1 @@
|
||||
harmony_inventory_agent
|
||||
17
harmony_inventory_agent/docker/Dockerfile
Normal file
@@ -0,0 +1,17 @@
|
||||
FROM debian:12-slim
|
||||
|
||||
# Install packages required to make these commands available: lspci, lsmod, dmidecode, smartctl, ip
|
||||
RUN apt-get update && \
|
||||
apt-get install -y --no-install-recommends pciutils kmod dmidecode smartmontools iproute2 && \
|
||||
rm -rf /var/lib/apt/lists/*
|
||||
|
||||
|
||||
RUN mkdir /app
|
||||
WORKDIR /app/
|
||||
|
||||
COPY harmony_inventory_agent /app/
|
||||
|
||||
ENV RUST_LOG=info
|
||||
|
||||
CMD [ "/app/harmony_inventory_agent" ]
|
||||
|
||||
117
harmony_inventory_agent/harmony-inventory-agent-daemonset.yaml
Normal file
@@ -0,0 +1,117 @@
|
||||
apiVersion: v1
|
||||
kind: Namespace
|
||||
metadata:
|
||||
name: harmony-inventory-agent
|
||||
labels:
|
||||
pod-security.kubernetes.io/enforce: privileged
|
||||
pod-security.kubernetes.io/audit: privileged
|
||||
pod-security.kubernetes.io/warn: privileged
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: harmony-inventory-agent
|
||||
namespace: harmony-inventory-agent
|
||||
---
|
||||
# Grant the built-in "privileged" SCC to the SA
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: Role
|
||||
metadata:
|
||||
name: use-privileged-scc
|
||||
namespace: harmony-inventory-agent
|
||||
rules:
|
||||
- apiGroups: ["security.openshift.io"]
|
||||
resources: ["securitycontextconstraints"]
|
||||
resourceNames: ["privileged"]
|
||||
verbs: ["use"]
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: RoleBinding
|
||||
metadata:
|
||||
name: use-privileged-scc
|
||||
namespace: harmony-inventory-agent
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: harmony-inventory-agent
|
||||
namespace: harmony-inventory-agent
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: Role
|
||||
name: use-privileged-scc
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
kind: DaemonSet
|
||||
metadata:
|
||||
name: harmony-inventory-agent
|
||||
namespace: harmony-inventory-agent
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
app: harmony-inventory-agent
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: harmony-inventory-agent
|
||||
spec:
|
||||
serviceAccountName: harmony-inventory-agent
|
||||
hostNetwork: true
|
||||
dnsPolicy: ClusterFirstWithHostNet
|
||||
tolerations:
|
||||
- key: "node-role.kubernetes.io/master"
|
||||
operator: "Exists"
|
||||
effect: "NoSchedule"
|
||||
containers:
|
||||
- name: inventory-agent
|
||||
image: hub.nationtech.io/harmony/harmony_inventory_agent
|
||||
imagePullPolicy: Always
|
||||
env:
|
||||
- name: RUST_LOG
|
||||
value: "harmony_inventory_agent=trace,info"
|
||||
resources:
|
||||
limits:
|
||||
cpu: 200m
|
||||
memory: 256Mi
|
||||
requests:
|
||||
cpu: 100m
|
||||
memory: 128Mi
|
||||
securityContext:
|
||||
privileged: true
|
||||
# optional: leave the rest unset since privileged SCC allows it
|
||||
#
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: harmony-inventory-builder
|
||||
namespace: harmony-inventory-agent
|
||||
spec:
|
||||
replicas: 1
|
||||
strategy: {}
|
||||
selector:
|
||||
matchLabels:
|
||||
app: harmony-inventory-builder
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: harmony-inventory-builder
|
||||
spec:
|
||||
serviceAccountName: harmony-inventory-agent
|
||||
hostNetwork: true
|
||||
dnsPolicy: ClusterFirstWithHostNet
|
||||
containers:
|
||||
- name: inventory-agent
|
||||
image: hub.nationtech.io/harmony/harmony_inventory_builder
|
||||
imagePullPolicy: Always
|
||||
env:
|
||||
- name: RUST_LOG
|
||||
value: "harmony_inventory_builder=trace,info"
|
||||
resources:
|
||||
limits:
|
||||
cpu: 200m
|
||||
memory: 256Mi
|
||||
requests:
|
||||
cpu: 100m
|
||||
memory: 128Mi
|
||||
securityContext:
|
||||
privileged: true
|
||||
# optional: leave the rest unset since privileged SCC allows it
|
||||
@@ -1,5 +1,5 @@
|
||||
use harmony_types::net::MacAddress;
|
||||
use log::{debug, warn};
|
||||
use log::{debug, trace, warn};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use serde_json::Value;
|
||||
use std::fs;
|
||||
@@ -121,20 +121,48 @@ pub struct ManagementInterface {
|
||||
|
||||
impl PhysicalHost {
|
||||
pub fn gather() -> Result<Self, String> {
|
||||
trace!("Start gathering physical host information");
|
||||
let mut sys = System::new_all();
|
||||
trace!("System new_all called");
|
||||
sys.refresh_all();
|
||||
trace!("System refresh_all called");
|
||||
|
||||
Self::all_tools_available()?;
|
||||
|
||||
trace!("All tools_available success");
|
||||
|
||||
let storage_drives = Self::gather_storage_drives()?;
|
||||
trace!("got storage drives");
|
||||
|
||||
let storage_controller = Self::gather_storage_controller()?;
|
||||
trace!("got storage controller");
|
||||
|
||||
let memory_modules = Self::gather_memory_modules()?;
|
||||
trace!("got memory_modules");
|
||||
|
||||
let cpus = Self::gather_cpus(&sys)?;
|
||||
trace!("got cpus");
|
||||
|
||||
let chipset = Self::gather_chipset()?;
|
||||
trace!("got chipsets");
|
||||
|
||||
let network_interfaces = Self::gather_network_interfaces()?;
|
||||
trace!("got network_interfaces");
|
||||
|
||||
let management_interface = Self::gather_management_interface()?;
|
||||
trace!("got management_interface");
|
||||
|
||||
let host_uuid = Self::get_host_uuid()?;
|
||||
|
||||
Ok(Self {
|
||||
storage_drives: Self::gather_storage_drives()?,
|
||||
storage_controller: Self::gather_storage_controller()?,
|
||||
memory_modules: Self::gather_memory_modules()?,
|
||||
cpus: Self::gather_cpus(&sys)?,
|
||||
chipset: Self::gather_chipset()?,
|
||||
network_interfaces: Self::gather_network_interfaces()?,
|
||||
management_interface: Self::gather_management_interface()?,
|
||||
host_uuid: Self::get_host_uuid()?,
|
||||
storage_drives,
|
||||
storage_controller,
|
||||
memory_modules,
|
||||
cpus,
|
||||
chipset,
|
||||
network_interfaces,
|
||||
management_interface,
|
||||
host_uuid,
|
||||
})
|
||||
}
|
||||
|
||||
@@ -208,6 +236,8 @@ impl PhysicalHost {
|
||||
));
|
||||
}
|
||||
|
||||
debug!("All tools found!");
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@@ -231,7 +261,10 @@ impl PhysicalHost {
|
||||
fn gather_storage_drives() -> Result<Vec<StorageDrive>, String> {
|
||||
let mut drives = Vec::new();
|
||||
|
||||
trace!("Starting storage drive discovery using lsblk");
|
||||
|
||||
// Use lsblk with JSON output for robust parsing
|
||||
trace!("Executing 'lsblk -d -o NAME,MODEL,SERIAL,SIZE,ROTA,WWN -n -e 7 --json'");
|
||||
let output = Command::new("lsblk")
|
||||
.args([
|
||||
"-d",
|
||||
@@ -245,13 +278,18 @@ impl PhysicalHost {
|
||||
.output()
|
||||
.map_err(|e| format!("Failed to execute lsblk: {}", e))?;
|
||||
|
||||
trace!(
|
||||
"lsblk command executed successfully (status: {:?})",
|
||||
output.status
|
||||
);
|
||||
|
||||
if !output.status.success() {
|
||||
return Err(format!(
|
||||
"lsblk command failed: {}",
|
||||
String::from_utf8_lossy(&output.stderr)
|
||||
));
|
||||
let stderr_str = String::from_utf8_lossy(&output.stderr);
|
||||
debug!("lsblk command failed: {stderr_str}");
|
||||
return Err(format!("lsblk command failed: {stderr_str}"));
|
||||
}
|
||||
|
||||
trace!("Parsing lsblk JSON output");
|
||||
let json: Value = serde_json::from_slice(&output.stdout)
|
||||
.map_err(|e| format!("Failed to parse lsblk JSON output: {}", e))?;
|
||||
|
||||
@@ -260,6 +298,8 @@ impl PhysicalHost {
|
||||
.and_then(|v| v.as_array())
|
||||
.ok_or("Invalid lsblk JSON: missing 'blockdevices' array")?;
|
||||
|
||||
trace!("Found {} blockdevices in lsblk output", blockdevices.len());
|
||||
|
||||
for device in blockdevices {
|
||||
let name = device
|
||||
.get("name")
|
||||
@@ -268,52 +308,72 @@ impl PhysicalHost {
|
||||
.to_string();
|
||||
|
||||
if name.is_empty() {
|
||||
trace!("Skipping unnamed device entry: {:?}", device);
|
||||
continue;
|
||||
}
|
||||
|
||||
trace!("Inspecting block device: {name}");
|
||||
|
||||
// Extract metadata fields
|
||||
let model = device
|
||||
.get("model")
|
||||
.and_then(|v| v.as_str())
|
||||
.map(|s| s.trim().to_string())
|
||||
.unwrap_or_default();
|
||||
trace!("Model for {name}: '{}'", model);
|
||||
|
||||
let serial = device
|
||||
.get("serial")
|
||||
.and_then(|v| v.as_str())
|
||||
.map(|s| s.trim().to_string())
|
||||
.unwrap_or_default();
|
||||
trace!("Serial for {name}: '{}'", serial);
|
||||
|
||||
let size_str = device
|
||||
.get("size")
|
||||
.and_then(|v| v.as_str())
|
||||
.ok_or("Missing 'size' in lsblk device")?;
|
||||
trace!("Reported size for {name}: {}", size_str);
|
||||
let size_bytes = Self::parse_size(size_str)?;
|
||||
trace!("Parsed size for {name}: {} bytes", size_bytes);
|
||||
|
||||
let rotational = device
|
||||
.get("rota")
|
||||
.and_then(|v| v.as_bool())
|
||||
.ok_or("Missing 'rota' in lsblk device")?;
|
||||
trace!("Rotational flag for {name}: {}", rotational);
|
||||
|
||||
let wwn = device
|
||||
.get("wwn")
|
||||
.and_then(|v| v.as_str())
|
||||
.map(|s| s.trim().to_string())
|
||||
.filter(|s| !s.is_empty() && s != "null");
|
||||
trace!("WWN for {name}: {:?}", wwn);
|
||||
|
||||
let device_path = Path::new("/sys/block").join(&name);
|
||||
trace!("Sysfs path for {name}: {:?}", device_path);
|
||||
|
||||
trace!("Reading logical block size for {name}");
|
||||
let logical_block_size = Self::read_sysfs_u32(
|
||||
&device_path.join("queue/logical_block_size"),
|
||||
)
|
||||
.map_err(|e| format!("Failed to read logical block size for {}: {}", name, e))?;
|
||||
trace!("Logical block size for {name}: {}", logical_block_size);
|
||||
|
||||
trace!("Reading physical block size for {name}");
|
||||
let physical_block_size = Self::read_sysfs_u32(
|
||||
&device_path.join("queue/physical_block_size"),
|
||||
)
|
||||
.map_err(|e| format!("Failed to read physical block size for {}: {}", name, e))?;
|
||||
trace!("Physical block size for {name}: {}", physical_block_size);
|
||||
|
||||
trace!("Determining interface type for {name}");
|
||||
let interface_type = Self::get_interface_type(&name, &device_path)?;
|
||||
trace!("Interface type for {name}: {}", interface_type);
|
||||
|
||||
trace!("Getting SMART status for {name}");
|
||||
let smart_status = Self::get_smart_status(&name)?;
|
||||
trace!("SMART status for {name}: {:?}", smart_status);
|
||||
|
||||
let mut drive = StorageDrive {
|
||||
name: name.clone(),
|
||||
@@ -330,19 +390,31 @@ impl PhysicalHost {
|
||||
|
||||
// Enhance with additional sysfs info if available
|
||||
if device_path.exists() {
|
||||
trace!("Enhancing drive {name} with extra sysfs metadata");
|
||||
if drive.model.is_empty() {
|
||||
trace!("Reading model from sysfs for {name}");
|
||||
drive.model = Self::read_sysfs_string(&device_path.join("device/model"))
|
||||
.unwrap_or(format!("Failed to read model for {}", name));
|
||||
.unwrap_or_else(|_| format!("Failed to read model for {}", name));
|
||||
}
|
||||
if drive.serial.is_empty() {
|
||||
trace!("Reading serial from sysfs for {name}");
|
||||
drive.serial = Self::read_sysfs_string(&device_path.join("device/serial"))
|
||||
.unwrap_or(format!("Failed to read serial for {}", name));
|
||||
.unwrap_or_else(|_| format!("Failed to read serial for {}", name));
|
||||
}
|
||||
} else {
|
||||
trace!(
|
||||
"Sysfs path {:?} not found for drive {name}, skipping extra metadata",
|
||||
device_path
|
||||
);
|
||||
}
|
||||
|
||||
debug!("Discovered storage drive: {drive:?}");
|
||||
drives.push(drive);
|
||||
}
|
||||
|
||||
debug!("Discovered total {} storage drives", drives.len());
|
||||
trace!("All discovered dives: {drives:?}");
|
||||
|
||||
Ok(drives)
|
||||
}
|
||||
|
||||
@@ -418,6 +490,8 @@ impl PhysicalHost {
|
||||
}
|
||||
}
|
||||
|
||||
debug!("Found storage controller {controller:?}");
|
||||
|
||||
Ok(controller)
|
||||
}
|
||||
|
||||
@@ -486,6 +560,7 @@ impl PhysicalHost {
|
||||
}
|
||||
}
|
||||
|
||||
debug!("Found memory modules {modules:?}");
|
||||
Ok(modules)
|
||||
}
|
||||
|
||||
@@ -501,22 +576,30 @@ impl PhysicalHost {
|
||||
frequency_mhz: global_cpu.frequency(),
|
||||
});
|
||||
|
||||
debug!("Found cpus {cpus:?}");
|
||||
|
||||
Ok(cpus)
|
||||
}
|
||||
|
||||
fn gather_chipset() -> Result<Chipset, String> {
|
||||
Ok(Chipset {
|
||||
let chipset = Chipset {
|
||||
name: Self::read_dmi("baseboard-product-name")?,
|
||||
vendor: Self::read_dmi("baseboard-manufacturer")?,
|
||||
})
|
||||
};
|
||||
|
||||
debug!("Found chipset {chipset:?}");
|
||||
|
||||
Ok(chipset)
|
||||
}
|
||||
|
||||
fn gather_network_interfaces() -> Result<Vec<NetworkInterface>, String> {
|
||||
let mut interfaces = Vec::new();
|
||||
let sys_net_path = Path::new("/sys/class/net");
|
||||
trace!("Reading /sys/class/net");
|
||||
|
||||
let entries = fs::read_dir(sys_net_path)
|
||||
.map_err(|e| format!("Failed to read /sys/class/net: {}", e))?;
|
||||
trace!("Got entries {entries:?}");
|
||||
|
||||
for entry in entries {
|
||||
let entry = entry.map_err(|e| format!("Failed to read directory entry: {}", e))?;
|
||||
@@ -525,6 +608,7 @@ impl PhysicalHost {
|
||||
.into_string()
|
||||
.map_err(|_| "Invalid UTF-8 in interface name")?;
|
||||
let iface_path = entry.path();
|
||||
trace!("Inspecting interface {iface_name} path {iface_path:?}");
|
||||
|
||||
// Skip virtual interfaces
|
||||
if iface_name.starts_with("lo")
|
||||
@@ -535,70 +619,101 @@ impl PhysicalHost {
|
||||
|| iface_name.starts_with("tun")
|
||||
|| iface_name.starts_with("wg")
|
||||
{
|
||||
trace!(
|
||||
"Skipping interface {iface_name} because it appears to be virtual/unsupported"
|
||||
);
|
||||
continue;
|
||||
}
|
||||
|
||||
// Check if it's a physical interface by looking for device directory
|
||||
if !iface_path.join("device").exists() {
|
||||
trace!(
|
||||
"Skipping interface {iface_name} since {iface_path:?}/device does not exist"
|
||||
);
|
||||
continue;
|
||||
}
|
||||
|
||||
trace!("Reading MAC address for {iface_name}");
|
||||
let mac_address = Self::read_sysfs_string(&iface_path.join("address"))
|
||||
.map_err(|e| format!("Failed to read MAC address for {}: {}", iface_name, e))?;
|
||||
let mac_address = MacAddress::try_from(mac_address).map_err(|e| e.to_string())?;
|
||||
trace!("MAC address for {iface_name}: {mac_address}");
|
||||
|
||||
let speed_mbps = if iface_path.join("speed").exists() {
|
||||
match Self::read_sysfs_u32(&iface_path.join("speed")) {
|
||||
Ok(speed) => Some(speed),
|
||||
let speed_path = iface_path.join("speed");
|
||||
let speed_mbps = if speed_path.exists() {
|
||||
trace!("Reading speed for {iface_name} from {:?}", speed_path);
|
||||
match Self::read_sysfs_u32(&speed_path) {
|
||||
Ok(speed) => {
|
||||
trace!("Speed for {iface_name}: {speed} Mbps");
|
||||
Some(speed)
|
||||
}
|
||||
Err(e) => {
|
||||
debug!(
|
||||
"Failed to read speed for {}: {} . This is expected to fail on wifi interfaces.",
|
||||
"Failed to read speed for {}: {} (this may be expected on Wi‑Fi interfaces)",
|
||||
iface_name, e
|
||||
);
|
||||
None
|
||||
}
|
||||
}
|
||||
} else {
|
||||
trace!("Speed file not found for {iface_name}, skipping");
|
||||
None
|
||||
};
|
||||
|
||||
trace!("Reading operstate for {iface_name}");
|
||||
let operstate = Self::read_sysfs_string(&iface_path.join("operstate"))
|
||||
.map_err(|e| format!("Failed to read operstate for {}: {}", iface_name, e))?;
|
||||
trace!("Operstate for {iface_name}: {operstate}");
|
||||
|
||||
trace!("Reading MTU for {iface_name}");
|
||||
let mtu = Self::read_sysfs_u32(&iface_path.join("mtu"))
|
||||
.map_err(|e| format!("Failed to read MTU for {}: {}", iface_name, e))?;
|
||||
trace!("MTU for {iface_name}: {mtu}");
|
||||
|
||||
trace!("Reading driver for {iface_name}");
|
||||
let driver =
|
||||
Self::read_sysfs_symlink_basename(&iface_path.join("device/driver/module"))
|
||||
.map_err(|e| format!("Failed to read driver for {}: {}", iface_name, e))?;
|
||||
trace!("Driver for {iface_name}: {driver}");
|
||||
|
||||
trace!("Reading firmware version for {iface_name}");
|
||||
let firmware_version = Self::read_sysfs_opt_string(
|
||||
&iface_path.join("device/firmware_version"),
|
||||
)
|
||||
.map_err(|e| format!("Failed to read firmware version for {}: {}", iface_name, e))?;
|
||||
trace!("Firmware version for {iface_name}: {firmware_version:?}");
|
||||
|
||||
// Get IP addresses using ip command with JSON output
|
||||
trace!("Fetching IP addresses for {iface_name}");
|
||||
let (ipv4_addresses, ipv6_addresses) = Self::get_interface_ips_json(&iface_name)
|
||||
.map_err(|e| format!("Failed to get IP addresses for {}: {}", iface_name, e))?;
|
||||
trace!("Interface {iface_name} has IPv4: {ipv4_addresses:?}, IPv6: {ipv6_addresses:?}");
|
||||
|
||||
interfaces.push(NetworkInterface {
|
||||
name: iface_name,
|
||||
let is_up = operstate == "up";
|
||||
trace!("Constructing NetworkInterface for {iface_name} (is_up={is_up})");
|
||||
|
||||
let iface = NetworkInterface {
|
||||
name: iface_name.clone(),
|
||||
mac_address,
|
||||
speed_mbps,
|
||||
is_up: operstate == "up",
|
||||
is_up,
|
||||
mtu,
|
||||
ipv4_addresses,
|
||||
ipv6_addresses,
|
||||
driver,
|
||||
firmware_version,
|
||||
});
|
||||
};
|
||||
|
||||
debug!("Discovered interface: {iface:?}");
|
||||
interfaces.push(iface);
|
||||
}
|
||||
|
||||
debug!("Discovered total {} network interfaces", interfaces.len());
|
||||
trace!("Interfaces collected: {interfaces:?}");
|
||||
Ok(interfaces)
|
||||
}
|
||||
|
||||
fn gather_management_interface() -> Result<Option<ManagementInterface>, String> {
|
||||
if Path::new("/dev/ipmi0").exists() {
|
||||
let mgmt = if Path::new("/dev/ipmi0").exists() {
|
||||
Ok(Some(ManagementInterface {
|
||||
kind: "IPMI".to_string(),
|
||||
address: None,
|
||||
@@ -612,11 +727,16 @@ impl PhysicalHost {
|
||||
}))
|
||||
} else {
|
||||
Ok(None)
|
||||
}
|
||||
};
|
||||
|
||||
debug!("Found management interface {mgmt:?}");
|
||||
mgmt
|
||||
}
|
||||
|
||||
fn get_host_uuid() -> Result<String, String> {
|
||||
Self::read_dmi("system-uuid")
|
||||
let uuid = Self::read_dmi("system-uuid");
|
||||
debug!("Found uuid {uuid:?}");
|
||||
uuid
|
||||
}
|
||||
|
||||
// Helper methods
|
||||
@@ -709,7 +829,8 @@ impl PhysicalHost {
|
||||
Ok("Ramdisk".to_string())
|
||||
} else {
|
||||
// Try to determine from device path
|
||||
let subsystem = Self::read_sysfs_string(&device_path.join("device/subsystem"))?;
|
||||
let subsystem = Self::read_sysfs_string(&device_path.join("device/subsystem"))
|
||||
.unwrap_or(String::new());
|
||||
Ok(subsystem
|
||||
.split('/')
|
||||
.next_back()
|
||||
@@ -779,6 +900,8 @@ impl PhysicalHost {
|
||||
size.map(|s| s as u64)
|
||||
}
|
||||
|
||||
// FIXME when scanning an interface that is part of a bond/bridge we won't get an address on the
|
||||
// interface, we should be looking at the bond/bridge device. For example, br-ex on k8s nodes.
|
||||
fn get_interface_ips_json(iface_name: &str) -> Result<(Vec<String>, Vec<String>), String> {
|
||||
let mut ipv4 = Vec::new();
|
||||
let mut ipv6 = Vec::new();
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
use log::{debug, error, info, warn};
|
||||
use log::{debug, error, info, trace, warn};
|
||||
use mdns_sd::{ServiceDaemon, ServiceInfo};
|
||||
use std::collections::HashMap;
|
||||
|
||||
@@ -12,6 +12,7 @@ use crate::{
|
||||
/// This function is synchronous and non-blocking. It spawns a background Tokio task
|
||||
/// to handle the mDNS advertisement for the lifetime of the application.
|
||||
pub fn advertise(service_port: u16) -> Result<(), PresenceError> {
|
||||
trace!("starting advertisement process for port {service_port}");
|
||||
let host_id = match PhysicalHost::gather() {
|
||||
Ok(host) => Some(host.host_uuid),
|
||||
Err(e) => {
|
||||
@@ -20,11 +21,15 @@ pub fn advertise(service_port: u16) -> Result<(), PresenceError> {
|
||||
}
|
||||
};
|
||||
|
||||
trace!("Found host id {host_id:?}");
|
||||
|
||||
let instance_name = format!(
|
||||
"inventory-agent-{}",
|
||||
host_id.clone().unwrap_or("unknown".to_string())
|
||||
);
|
||||
|
||||
trace!("Found host id {host_id:?}, name : {instance_name}");
|
||||
|
||||
let spawned_msg = format!("Spawned local presence advertisement task for '{instance_name}'.");
|
||||
|
||||
tokio::spawn(async move {
|
||||
|
||||
@@ -28,7 +28,7 @@ async fn inventory() -> impl Responder {
|
||||
async fn main() -> std::io::Result<()> {
|
||||
env_logger::init();
|
||||
|
||||
let port = env::var("HARMONY_INVENTORY_AGENT_PORT").unwrap_or_else(|_| "8080".to_string());
|
||||
let port = env::var("HARMONY_INVENTORY_AGENT_PORT").unwrap_or_else(|_| "25000".to_string());
|
||||
let port = port
|
||||
.parse::<u16>()
|
||||
.expect(&format!("Invalid port number, cannot parse to u16 {port}"));
|
||||
|
||||
@@ -135,15 +135,17 @@ pub fn ingress_path(input: TokenStream) -> TokenStream {
|
||||
|
||||
#[proc_macro]
|
||||
pub fn cidrv4(input: TokenStream) -> TokenStream {
|
||||
let input = parse_macro_input!(input as LitStr);
|
||||
let cidr_str = input.value();
|
||||
let lit = parse_macro_input!(input as LitStr);
|
||||
|
||||
if cidr_str.parse::<cidr::Ipv4Cidr>().is_ok() {
|
||||
let expanded = quote! { #cidr_str.parse::<cidr::Ipv4Cidr>().unwrap() };
|
||||
return TokenStream::from(expanded);
|
||||
}
|
||||
// This is the IMPORTANT part:
|
||||
// we re-emit the *string literal itself*
|
||||
let expanded = quote! {
|
||||
#lit
|
||||
.parse::<cidr::Ipv4Cidr>()
|
||||
.expect("Invalid IPv4 CIDR literal")
|
||||
};
|
||||
|
||||
panic!("Invalid IPv4 CIDR : {}", cidr_str);
|
||||
TokenStream::from(expanded)
|
||||
}
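For context, a usage sketch of the `cidrv4!` macro after this change. The generated code parses the re-emitted string literal with `parse::<cidr::Ipv4Cidr>().expect(...)`, as shown in the branch above; the `use harmony_macros::cidrv4;` import path is an assumption.

```rust
use harmony_macros::cidrv4;

fn main() {
    // Expands roughly to `"192.168.1.0/24".parse::<cidr::Ipv4Cidr>().expect("Invalid IPv4 CIDR literal")`.
    let lan = cidrv4!("192.168.1.0/24");
    println!("network: {lan}");
}
```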
|
||||
|
||||
/// Creates a `harmony_types::net::Url::Url` from a string literal.
|
||||
|
||||
@@ -9,3 +9,4 @@ license.workspace = true
|
||||
serde.workspace = true
|
||||
url.workspace = true
|
||||
rand.workspace = true
|
||||
log.workspace = true
|
||||
|
||||
@@ -1,3 +1,5 @@
|
||||
use log::trace;
|
||||
use serde::Serialize;
|
||||
use std::{fmt, str::FromStr};
|
||||
|
||||
/// Simple error type for port parsing failures.
|
||||
@@ -21,7 +23,7 @@ impl fmt::Display for PortParseError {
|
||||
/// Represents the atomic, physical location of a switch port: `<Stack>/<Module>/<Port>`.
|
||||
///
|
||||
/// Example: `1/1/1`
|
||||
#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Clone)]
|
||||
#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Clone, Serialize)]
|
||||
pub struct PortLocation(pub u8, pub u8, pub u8);
|
||||
|
||||
impl fmt::Display for PortLocation {
|
||||
@@ -70,6 +72,12 @@ impl FromStr for PortLocation {
|
||||
pub enum PortDeclaration {
|
||||
/// A single switch port defined by its location. Example: `PortDeclaration::Single(1/1/1)`
|
||||
Single(PortLocation),
|
||||
/// A Named port, often used for virtual ports such as PortChannels. Example
|
||||
/// ```rust
|
||||
/// # use harmony_types::switch::PortDeclaration;
|
||||
/// PortDeclaration::Named("1".to_string());
|
||||
/// ```
|
||||
Named(String),
|
||||
/// A strictly sequential range defined by two endpoints using the hyphen separator (`-`).
|
||||
/// All ports between the endpoints (inclusive) are implicitly included.
|
||||
/// Example: `PortDeclaration::Range(1/1/1, 1/1/4)`
|
||||
@@ -130,8 +138,25 @@ impl PortDeclaration {
|
||||
return Ok(PortDeclaration::Set(start_port, end_port));
|
||||
}
|
||||
|
||||
let location = PortLocation::from_str(port_str)?;
|
||||
Ok(PortDeclaration::Single(location))
|
||||
match PortLocation::from_str(port_str) {
|
||||
Ok(loc) => Ok(PortDeclaration::Single(loc)),
|
||||
Err(e) => {
|
||||
let segments: Vec<&str> = port_str.split('/').collect();
|
||||
let segment_count = segments.len();
|
||||
|
||||
// Logic:
|
||||
// If it has 3 segments but failed (e.g., "1/A/1"), it's an InvalidSegment.
|
||||
// If it has MORE than 3 segments (e.g., "1/1/1/1" or "1/1/1/"), it's an InvalidFormat.
|
||||
if segment_count >= 3 {
|
||||
return Err(e);
|
||||
}
|
||||
|
||||
// Otherwise, it's something else entirely (e.g., "eth0", "vlan10"),
|
||||
// so we treat it as a Named port.
|
||||
trace!("Falling back on named port for: {port_str}");
|
||||
Ok(PortDeclaration::Named(port_str.to_string()))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
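A simplified usage sketch of the intent behind this fallback, using only items shown in the diff (`PortLocation::from_str`, the `Single` and `Named` variants). Unlike the real code, it does not preserve the three-segment error case; it is an illustration, not the crate's parsing entry point.

```rust
use std::str::FromStr;

use harmony_types::switch::{PortDeclaration, PortLocation};

fn classify(port_str: &str) -> PortDeclaration {
    // A well-formed "<stack>/<module>/<port>" becomes Single;
    // anything else (e.g. "po1", "vlan10") is treated as a Named port.
    match PortLocation::from_str(port_str) {
        Ok(loc) => PortDeclaration::Single(loc),
        Err(_) => PortDeclaration::Named(port_str.to_string()),
    }
}

fn main() {
    assert!(matches!(classify("1/1/1"), PortDeclaration::Single(_)));
    assert!(matches!(classify("po1"), PortDeclaration::Named(_)));
}
```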
|
||||
|
||||
@@ -141,6 +166,7 @@ impl fmt::Display for PortDeclaration {
|
||||
PortDeclaration::Single(port) => write!(f, "{port}"),
|
||||
PortDeclaration::Range(start, end) => write!(f, "{start}-{end}"),
|
||||
PortDeclaration::Set(start, end) => write!(f, "{start}*{end}"),
|
||||
PortDeclaration::Named(name) => write!(f, "{name}"),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -106,11 +106,37 @@ pub struct HAProxy {
|
||||
pub groups: MaybeString,
|
||||
pub users: MaybeString,
|
||||
pub cpus: MaybeString,
|
||||
pub resolvers: MaybeString,
|
||||
pub resolvers: HAProxyResolvers,
|
||||
pub mailers: MaybeString,
|
||||
pub maintenance: Maintenance,
|
||||
}
|
||||
|
||||
#[derive(Default, PartialEq, Debug, YaSerialize, YaDeserialize)]
|
||||
pub struct HAProxyResolvers {
|
||||
#[yaserde(rename = "resolver")]
|
||||
pub resolver: Option<Resolver>,
|
||||
}
|
||||
|
||||
#[derive(Default, PartialEq, Debug, YaSerialize, YaDeserialize)]
|
||||
pub struct Resolver {
|
||||
pub id: String,
|
||||
pub enabled: i32,
|
||||
pub name: String,
|
||||
pub description: MaybeString,
|
||||
pub nameservers: String,
|
||||
pub parse_resolv_conf: String,
|
||||
pub resolve_retries: i32,
|
||||
pub timeout_resolve: String,
|
||||
pub timeout_retry: String,
|
||||
pub accepted_payload_size: MaybeString,
|
||||
pub hold_valid: MaybeString,
|
||||
pub hold_obsolete: MaybeString,
|
||||
pub hold_refused: MaybeString,
|
||||
pub hold_nx: MaybeString,
|
||||
pub hold_timeout: MaybeString,
|
||||
pub hold_other: MaybeString,
|
||||
}
|
||||
|
||||
#[derive(Default, PartialEq, Debug, YaSerialize, YaDeserialize)]
|
||||
pub struct Maintenance {
|
||||
#[yaserde(rename = "cronjobs")]
|
||||
|
||||
@@ -136,6 +136,7 @@ pub struct Rule {
|
||||
pub updated: Option<Updated>,
|
||||
pub created: Option<Created>,
|
||||
pub disabled: Option<MaybeString>,
|
||||
pub log: Option<u32>,
|
||||
}
|
||||
|
||||
#[derive(Default, PartialEq, Debug, YaSerialize, YaDeserialize)]
|
||||
@@ -195,7 +196,7 @@ pub struct System {
|
||||
pub disablechecksumoffloading: u8,
|
||||
pub disablesegmentationoffloading: u8,
|
||||
pub disablelargereceiveoffloading: u8,
|
||||
pub ipv6allow: u8,
|
||||
pub ipv6allow: Option<u8>,
|
||||
pub powerd_ac_mode: String,
|
||||
pub powerd_battery_mode: String,
|
||||
pub powerd_normal_mode: String,
|
||||
@@ -216,7 +217,7 @@ pub struct System {
|
||||
pub maximumfrags: Option<MaybeString>,
|
||||
pub aliasesresolveinterval: Option<MaybeString>,
|
||||
pub maximumtableentries: Option<MaybeString>,
|
||||
pub language: String,
|
||||
pub language: Option<String>,
|
||||
pub dnsserver: Option<MaybeString>,
|
||||
pub dns1gw: Option<String>,
|
||||
pub dns2gw: Option<String>,
|
||||
@@ -226,6 +227,7 @@ pub struct System {
|
||||
pub dns6gw: Option<String>,
|
||||
pub dns7gw: Option<String>,
|
||||
pub dns8gw: Option<String>,
|
||||
pub prefer_ipv4: Option<String>,
|
||||
pub dnsallowoverride: u8,
|
||||
pub dnsallowoverride_exclude: Option<MaybeString>,
|
||||
}
|
||||
@@ -329,6 +331,7 @@ pub struct Range {
|
||||
pub struct StaticMap {
|
||||
pub mac: String,
|
||||
pub ipaddr: String,
|
||||
pub cid: Option<MaybeString>,
|
||||
pub hostname: String,
|
||||
pub descr: Option<MaybeString>,
|
||||
pub winsserver: MaybeString,
|
||||
@@ -764,9 +767,19 @@ pub struct Jobs {
|
||||
pub struct Job {
|
||||
#[yaserde(attribute = true)]
|
||||
pub uuid: MaybeString,
|
||||
#[yaserde(rename = "name")]
|
||||
pub name: MaybeString,
|
||||
pub name: Option<MaybeString>,
|
||||
// Add other fields as needed
|
||||
pub origin: Option<MaybeString>,
|
||||
pub enabled: Option<MaybeString>,
|
||||
pub minutes: Option<MaybeString>,
|
||||
pub hours: Option<MaybeString>,
|
||||
pub days: Option<MaybeString>,
|
||||
pub months: Option<MaybeString>,
|
||||
pub weekdays: Option<MaybeString>,
|
||||
pub who: Option<MaybeString>,
|
||||
pub command: Option<MaybeString>,
|
||||
pub parameters: Option<MaybeString>,
|
||||
pub description: Option<MaybeString>,
|
||||
}
|
||||
|
||||
#[derive(Default, PartialEq, Debug, YaSerialize, YaDeserialize)]
|
||||
@@ -895,28 +908,28 @@ pub struct Proxy {
|
||||
#[derive(Default, PartialEq, Debug, YaSerialize, YaDeserialize)]
|
||||
pub struct ProxyGeneral {
|
||||
pub enabled: i8,
|
||||
pub error_pages: String,
|
||||
pub error_pages: Option<MaybeString>,
|
||||
#[yaserde(rename = "icpPort")]
|
||||
pub icp_port: MaybeString,
|
||||
pub logging: Logging,
|
||||
#[yaserde(rename = "alternateDNSservers")]
|
||||
pub alternate_dns_servers: MaybeString,
|
||||
#[yaserde(rename = "dnsV4First")]
|
||||
pub dns_v4_first: i8,
|
||||
pub dns_v4_first: Option<MaybeString>,
|
||||
#[yaserde(rename = "forwardedForHandling")]
|
||||
pub forwarded_for_handling: String,
|
||||
pub forwarded_for_handling: Option<MaybeString>,
|
||||
#[yaserde(rename = "uriWhitespaceHandling")]
|
||||
pub uri_whitespace_handling: String,
|
||||
pub uri_whitespace_handling: Option<MaybeString>,
|
||||
#[yaserde(rename = "enablePinger")]
|
||||
pub enable_pinger: i8,
|
||||
#[yaserde(rename = "useViaHeader")]
|
||||
pub use_via_header: i8,
|
||||
pub use_via_header: Option<MaybeString>,
|
||||
#[yaserde(rename = "suppressVersion")]
|
||||
pub suppress_version: i32,
|
||||
pub suppress_version: Option<MaybeString>,
|
||||
#[yaserde(rename = "connecttimeout")]
|
||||
pub connect_timeout: MaybeString,
|
||||
pub connect_timeout: Option<MaybeString>,
|
||||
#[yaserde(rename = "VisibleEmail")]
|
||||
pub visible_email: String,
|
||||
pub visible_email: Option<MaybeString>,
|
||||
#[yaserde(rename = "VisibleHostname")]
|
||||
pub visible_hostname: MaybeString,
|
||||
pub cache: Cache,
|
||||
@@ -953,7 +966,7 @@ pub struct LocalCache {
|
||||
pub cache_mem: i32,
|
||||
pub maximum_object_size: MaybeString,
|
||||
pub maximum_object_size_in_memory: MaybeString,
|
||||
pub memory_cache_mode: String,
|
||||
pub memory_cache_mode: MaybeString,
|
||||
pub size: i32,
|
||||
pub l1: i32,
|
||||
pub l2: i32,
|
||||
@@ -965,13 +978,13 @@ pub struct LocalCache {
|
||||
pub struct Traffic {
|
||||
pub enabled: i32,
|
||||
#[yaserde(rename = "maxDownloadSize")]
|
||||
pub max_download_size: i32,
|
||||
pub max_download_size: MaybeString,
|
||||
#[yaserde(rename = "maxUploadSize")]
|
||||
pub max_upload_size: i32,
|
||||
pub max_upload_size: MaybeString,
|
||||
#[yaserde(rename = "OverallBandwidthTrotteling")]
|
||||
pub overall_bandwidth_trotteling: i32,
|
||||
pub overall_bandwidth_trotteling: MaybeString,
|
||||
#[yaserde(rename = "perHostTrotteling")]
|
||||
pub per_host_trotteling: i32,
|
||||
pub per_host_trotteling: MaybeString,
|
||||
}
|
||||
|
||||
#[derive(Default, PartialEq, Debug, YaSerialize, YaDeserialize)]
|
||||
@@ -988,7 +1001,7 @@ pub struct ParentProxy {
|
||||
|
||||
#[derive(Default, PartialEq, Debug, YaSerialize, YaDeserialize)]
|
||||
pub struct Forward {
|
||||
pub interfaces: String,
|
||||
pub interfaces: MaybeString,
|
||||
pub port: i32,
|
||||
pub sslbumpport: i32,
|
||||
pub sslbump: i32,
|
||||
@@ -1033,9 +1046,9 @@ pub struct Acl {
|
||||
pub google_apps: MaybeString,
|
||||
pub youtube: MaybeString,
|
||||
#[yaserde(rename = "safePorts")]
|
||||
pub safe_ports: String,
|
||||
pub safe_ports: MaybeString,
|
||||
#[yaserde(rename = "sslPorts")]
|
||||
pub ssl_ports: String,
|
||||
pub ssl_ports: MaybeString,
|
||||
#[yaserde(rename = "remoteACLs")]
|
||||
pub remote_acls: RemoteAcls,
|
||||
}
|
||||
@@ -1051,9 +1064,9 @@ pub struct RemoteAcls {
|
||||
pub struct Icap {
|
||||
pub enable: i32,
|
||||
#[yaserde(rename = "RequestURL")]
|
||||
pub request_url: String,
|
||||
pub request_url: MaybeString,
|
||||
#[yaserde(rename = "ResponseURL")]
|
||||
pub response_url: String,
|
||||
pub response_url: MaybeString,
|
||||
#[yaserde(rename = "SendClientIP")]
|
||||
pub send_client_ip: i32,
|
||||
#[yaserde(rename = "SendUsername")]
|
||||
@@ -1061,7 +1074,7 @@ pub struct Icap {
|
||||
#[yaserde(rename = "EncodeUsername")]
|
||||
pub encode_username: i32,
|
||||
#[yaserde(rename = "UsernameHeader")]
|
||||
pub username_header: String,
|
||||
pub username_header: MaybeString,
|
||||
#[yaserde(rename = "EnablePreview")]
|
||||
pub enable_preview: i32,
|
||||
#[yaserde(rename = "PreviewSize")]
|
||||
@@ -1076,9 +1089,9 @@ pub struct Authentication {
|
||||
pub method: MaybeString,
|
||||
#[yaserde(rename = "authEnforceGroup")]
|
||||
pub auth_enforce_group: MaybeString,
|
||||
pub realm: String,
|
||||
pub credentialsttl: i32, // This field is already in snake_case
|
||||
pub children: i32,
|
||||
pub realm: MaybeString,
|
||||
pub credentialsttl: MaybeString, // This field is already in snake_case
|
||||
pub children: MaybeString,
|
||||
}
|
||||
|
||||
#[derive(Default, PartialEq, Debug, YaSerialize, YaDeserialize)]
|
||||
@@ -1140,6 +1153,7 @@ pub struct UnboundGeneral {
|
||||
pub local_zone_type: String,
|
||||
pub outgoing_interface: MaybeString,
|
||||
pub enable_wpad: MaybeString,
|
||||
pub safesearch: MaybeString,
|
||||
}
|
||||
|
||||
#[derive(Default, PartialEq, Debug, YaSerialize, YaDeserialize)]
|
||||
@@ -1193,15 +1207,15 @@ pub struct Acls {
|
||||
|
||||
#[derive(Default, PartialEq, Debug, YaSerialize, YaDeserialize)]
|
||||
pub struct Dnsbl {
|
||||
pub enabled: i32,
|
||||
pub safesearch: MaybeString,
|
||||
pub enabled: Option<i32>,
|
||||
pub safesearch: Option<MaybeString>,
|
||||
#[yaserde(rename = "type")]
|
||||
pub r#type: MaybeString,
|
||||
pub lists: MaybeString,
|
||||
pub whitelists: MaybeString,
|
||||
pub blocklists: MaybeString,
|
||||
pub wildcards: MaybeString,
|
||||
pub address: MaybeString,
|
||||
pub r#type: Option<MaybeString>,
|
||||
pub lists: Option<MaybeString>,
|
||||
pub whitelists: Option<MaybeString>,
|
||||
pub blocklists: Option<MaybeString>,
|
||||
pub wildcards: Option<MaybeString>,
|
||||
pub address: Option<MaybeString>,
|
||||
pub nxdomain: Option<i32>,
|
||||
}
|
||||
|
||||
@@ -1229,6 +1243,7 @@ pub struct Host {
|
||||
pub ttl: Option<MaybeString>,
|
||||
pub server: String,
|
||||
pub description: Option<String>,
|
||||
pub txtdata: MaybeString,
|
||||
}
|
||||
|
||||
impl Host {
|
||||
@@ -1244,6 +1259,7 @@ impl Host {
|
||||
ttl: Some(MaybeString::default()),
|
||||
mx: MaybeString::default(),
|
||||
description: None,
|
||||
txtdata: MaybeString::default(),
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1293,6 +1309,7 @@ pub struct WireguardServerItem {
|
||||
pub peers: String,
|
||||
pub endpoint: MaybeString,
|
||||
pub peer_dns: MaybeString,
|
||||
pub debug: Option<MaybeString>,
|
||||
}
|
||||
|
||||
#[derive(Default, PartialEq, Debug, YaSerialize, YaDeserialize)]
|
||||
@@ -1477,6 +1494,7 @@ pub struct Ppp {
|
||||
pub ports: Option<MaybeString>,
|
||||
pub username: Option<MaybeString>,
|
||||
pub password: Option<MaybeString>,
|
||||
pub provider: Option<MaybeString>,
|
||||
}
|
||||
|
||||
#[derive(Default, PartialEq, Debug, YaSerialize, YaDeserialize)]
|
||||
|
||||
@@ -86,10 +86,7 @@ impl<'a> DhcpConfigLegacyISC<'a> {
|
||||
mac,
|
||||
ipaddr: ipaddr.to_string(),
|
||||
hostname,
|
||||
descr: Default::default(),
|
||||
winsserver: Default::default(),
|
||||
dnsserver: Default::default(),
|
||||
ntpserver: Default::default(),
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
existing_mappings.push(static_map);
|
||||
@@ -126,9 +123,7 @@ impl<'a> DhcpConfigLegacyISC<'a> {
|
||||
ipaddr: entry["ipaddr"].as_str().unwrap_or_default().to_string(),
|
||||
hostname: entry["hostname"].as_str().unwrap_or_default().to_string(),
|
||||
descr: entry["descr"].as_str().map(MaybeString::from),
|
||||
winsserver: MaybeString::default(),
|
||||
dnsserver: MaybeString::default(),
|
||||
ntpserver: MaybeString::default(),
|
||||
..Default::default()
|
||||
})
|
||||
.collect();
|
||||
|
||||
|
||||
@@ -612,6 +612,7 @@
|
||||
<local_zone_type>transparent</local_zone_type>
|
||||
<outgoing_interface/>
|
||||
<enable_wpad>0</enable_wpad>
|
||||
<safesearch/>
|
||||
</general>
|
||||
<advanced>
|
||||
<hideidentity>0</hideidentity>
|
||||
|
||||
@@ -2003,6 +2003,7 @@
|
||||
<cacheflush/>
|
||||
<local_zone_type>transparent</local_zone_type>
|
||||
<outgoing_interface/>
|
||||
<safesearch/>
|
||||
<enable_wpad/>
|
||||
</general>
|
||||
<advanced>
|
||||
@@ -2071,6 +2072,7 @@
|
||||
<mx/>
|
||||
<server>192.168.20.161</server>
|
||||
<description>Some app local</description>
|
||||
<txtdata/>
|
||||
</host>
|
||||
<host uuid="dd593e95-02bc-476f-8610-fa1ee454e950">
|
||||
<enabled>1</enabled>
|
||||
@@ -2081,6 +2083,7 @@
|
||||
<mx/>
|
||||
<server>192.168.20.161</server>
|
||||
<description>Some app local</description>
|
||||
<txtdata/>
|
||||
</host>
|
||||
<host uuid="e1606f96-dd38-471f-a3d7-ad25e41e810d">
|
||||
<enabled>1</enabled>
|
||||
@@ -2091,6 +2094,7 @@
|
||||
<mx/>
|
||||
<server>192.168.20.161</server>
|
||||
<description>Some app local</description>
|
||||
<txtdata/>
|
||||
</host>
|
||||
</hosts>
|
||||
<aliases/>
|
||||
@@ -2117,6 +2121,7 @@
|
||||
<endpoint/>
|
||||
<peer_dns/>
|
||||
<carp_depend_on/>
|
||||
<debug/>
|
||||
<peers>03031aec-2e84-462e-9eab-57762dde667a,98e6ca3d-1de9-449b-be80-77022221b509,67c0ace5-e802-4d2b-a536-f8b7a2db6f99,74b60fff-7844-4097-9966-f1c2b1ad29ff,3de82ad5-bc1b-4b91-9598-f906e58ac937,a95e6b5e-24a4-40b5-bb41-b79e784f6f1c,6c9a12c6-c1ca-4c14-866b-975406a30590,c33b308b-7125-4688-9561-989ace8787b5,e43f004a-23bf-4027-8fb0-953fbb40479f</peers>
|
||||
</server>
|
||||
</servers>
|
||||
|
||||
@@ -614,6 +614,7 @@
|
||||
<local_zone_type>transparent</local_zone_type>
|
||||
<outgoing_interface/>
|
||||
<enable_wpad>0</enable_wpad>
|
||||
<safesearch/>
|
||||
</general>
|
||||
<advanced>
|
||||
<hideidentity>0</hideidentity>
|
||||
|
||||
@@ -750,6 +750,7 @@
|
||||
<local_zone_type>transparent</local_zone_type>
|
||||
<outgoing_interface/>
|
||||
<enable_wpad>0</enable_wpad>
|
||||
<safesearch/>
|
||||
</general>
|
||||
<advanced>
|
||||
<hideidentity>0</hideidentity>
|
||||
|
||||
@@ -709,6 +709,7 @@
|
||||
<local_zone_type>transparent</local_zone_type>
|
||||
<outgoing_interface/>
|
||||
<enable_wpad>0</enable_wpad>
|
||||
<safesearch/>
|
||||
</general>
|
||||
<advanced>
|
||||
<hideidentity>0</hideidentity>
|
||||
|
||||
@@ -951,6 +951,7 @@
|
||||
<local_zone_type>transparent</local_zone_type>
|
||||
<outgoing_interface/>
|
||||
<enable_wpad/>
|
||||
<safesearch/>
|
||||
</general>
|
||||
<advanced>
|
||||
<hideidentity>0</hideidentity>
|
||||
|
||||
@@ -808,6 +808,7 @@
|
||||
<local_zone_type>transparent</local_zone_type>
|
||||
<outgoing_interface/>
|
||||
<enable_wpad/>
|
||||
<safesearch/>
|
||||
</general>
|
||||
<advanced>
|
||||
<hideidentity/>
|
||||
|
||||
@@ -726,6 +726,7 @@
|
||||
<local_zone_type>transparent</local_zone_type>
|
||||
<outgoing_interface/>
|
||||
<enable_wpad/>
|
||||
<safesearch/>
|
||||
</general>
|
||||
<advanced>
|
||||
<hideidentity>0</hideidentity>
|
||||
@@ -793,6 +794,7 @@
|
||||
<mx/>
|
||||
<server>192.168.20.161</server>
|
||||
<description>Some app local</description>
|
||||
<txtdata/>
|
||||
</host>
|
||||
<host uuid="dd593e95-02bc-476f-8610-fa1ee454e950">
|
||||
<enabled>1</enabled>
|
||||
@@ -803,6 +805,7 @@
|
||||
<mx/>
|
||||
<server>192.168.20.161</server>
|
||||
<description>Some app local</description>
|
||||
<txtdata/>
|
||||
</host>
|
||||
<host uuid="e1606f96-dd38-471f-a3d7-ad25e41e810d">
|
||||
<enabled>1</enabled>
|
||||
@@ -813,6 +816,7 @@
|
||||
<mx/>
|
||||
<server>192.168.20.161</server>
|
||||
<description>Some app local</description>
|
||||
<txtdata/>
|
||||
</host>
|
||||
</hosts>
|
||||
<aliases/>
|
||||
@@ -840,6 +844,7 @@
|
||||
<peers>03031aec-2e84-462e-9eab-57762dde667a,98e6ca3d-1de9-449b-be80-77022221b509,67c0ace5-e802-4d2b-a536-f8b7a2db6f99,74b60fff-7844-4097-9966-f1c2b1ad29ff,3de82ad5-bc1b-4b91-9598-f906e58ac937,a95e6b5e-24a4-40b5-bb41-b79e784f6f1c,6c9a12c6-c1ca-4c14-866b-975406a30590,c33b308b-7125-4688-9561-989ace8787b5,e43f004a-23bf-4027-8fb0-953fbb40479f</peers>
|
||||
<endpoint/>
|
||||
<peer_dns/>
|
||||
<debug/>
|
||||
</server>
|
||||
</servers>
|
||||
</server>
|
||||
|
||||
@@ -718,6 +718,7 @@
|
||||
<local_zone_type>transparent</local_zone_type>
|
||||
<outgoing_interface/>
|
||||
<enable_wpad/>
|
||||
<safesearch/>
|
||||
</general>
|
||||
<advanced>
|
||||
<hideidentity>0</hideidentity>
|
||||
@@ -785,6 +786,7 @@
|
||||
<mx/>
|
||||
<server>192.168.20.161</server>
|
||||
<description>Some app local</description>
|
||||
<txtdata/>
|
||||
</host>
|
||||
<host uuid="dd593e95-02bc-476f-8610-fa1ee454e950">
|
||||
<enabled>1</enabled>
|
||||
@@ -795,6 +797,7 @@
|
||||
<mx/>
|
||||
<server>192.168.20.161</server>
|
||||
<description>Some app local</description>
|
||||
<txtdata/>
|
||||
</host>
|
||||
<host uuid="e1606f96-dd38-471f-a3d7-ad25e41e810d">
|
||||
<enabled>1</enabled>
|
||||
@@ -805,6 +808,7 @@
|
||||
<mx/>
|
||||
<server>192.168.20.161</server>
|
||||
<description>Some app local</description>
|
||||
<txtdata/>
|
||||
</host>
|
||||
</hosts>
|
||||
<aliases/>
|
||||
@@ -832,6 +836,7 @@
|
||||
<gateway/>
|
||||
<carp_depend_on/>
|
||||
<peers>03031aec-2e84-462e-9eab-57762dde667a,98e6ca3d-1de9-449b-be80-77022221b509,67c0ace5-e802-4d2b-a536-f8b7a2db6f99,74b60fff-7844-4097-9966-f1c2b1ad29ff,3de82ad5-bc1b-4b91-9598-f906e58ac937,a95e6b5e-24a4-40b5-bb41-b79e784f6f1c,6c9a12c6-c1ca-4c14-866b-975406a30590,c33b308b-7125-4688-9561-989ace8787b5,e43f004a-23bf-4027-8fb0-953fbb40479f</peers>
|
||||
<debug/>
|
||||
</server>
|
||||
</servers>
|
||||
</server>
|
||||
|
||||
@@ -869,6 +869,7 @@
|
||||
<local_zone_type>transparent</local_zone_type>
|
||||
<outgoing_interface/>
|
||||
<enable_wpad/>
|
||||
<safesearch/>
|
||||
</general>
|
||||
<advanced>
|
||||
<hideidentity/>
|
||||
|
||||
@@ -862,6 +862,7 @@
|
||||
<local_zone_type>transparent</local_zone_type>
|
||||
<outgoing_interface/>
|
||||
<enable_wpad/>
|
||||
<safesearch/>
|
||||
</general>
|
||||
<advanced>
|
||||
<hideidentity/>
|
||||
|
||||
@@ -869,6 +869,7 @@
|
||||
<local_zone_type>transparent</local_zone_type>
|
||||
<outgoing_interface/>
|
||||
<enable_wpad/>
|
||||
<safesearch/>
|
||||
</general>
|
||||
<advanced>
|
||||
<hideidentity/>
|
||||