Merge pull request 'refact: Move basic types to harmony_types crate to avoid external dependencies.' (#126) from refact/harmony_types into feat/inventory_persistence
All checks were successful
Run Check Script / check (pull_request) Successful in 1m15s
Reviewed-on: https://git.nationtech.io/NationTech/harmony/pulls/126 Reviewed-by: Ian Letourneau <ian@noma.to>
This commit is contained in:
commit
ea1380f98a
11 Cargo.lock (generated)
@ -1671,6 +1671,7 @@ dependencies = [
|
||||
"env_logger",
|
||||
"harmony",
|
||||
"harmony_cli",
|
||||
"harmony_types",
|
||||
"logging",
|
||||
"tokio",
|
||||
"url",
|
||||
@ -1732,6 +1733,7 @@ dependencies = [
|
||||
"harmony",
|
||||
"harmony_cli",
|
||||
"harmony_macros",
|
||||
"harmony_types",
|
||||
"tokio",
|
||||
"url",
|
||||
]
|
||||
@ -1743,6 +1745,7 @@ dependencies = [
|
||||
"cidr",
|
||||
"harmony",
|
||||
"harmony_cli",
|
||||
"harmony_types",
|
||||
"tokio",
|
||||
"url",
|
||||
]
|
||||
@ -2293,7 +2296,6 @@ dependencies = [
|
||||
"opnsense-config",
|
||||
"opnsense-config-xml",
|
||||
"pretty_assertions",
|
||||
"rand 0.9.1",
|
||||
"reqwest 0.11.27",
|
||||
"russh",
|
||||
"rust-ipmi",
|
||||
@ -2364,9 +2366,12 @@ version = "0.1.0"
|
||||
dependencies = [
|
||||
"actix-web",
|
||||
"env_logger",
|
||||
"harmony_macros",
|
||||
"harmony_types",
|
||||
"local-ip-address",
|
||||
"log",
|
||||
"mdns-sd 0.14.1 (git+https://github.com/jggc/mdns-sd.git?branch=patch-1)",
|
||||
"reqwest 0.12.20",
|
||||
"serde",
|
||||
"serde_json",
|
||||
"sysinfo",
|
||||
@ -2437,7 +2442,9 @@ dependencies = [
|
||||
name = "harmony_types"
|
||||
version = "0.1.0"
|
||||
dependencies = [
|
||||
"rand 0.9.1",
|
||||
"serde",
|
||||
"url",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@ -3849,7 +3856,7 @@ dependencies = [
|
||||
"env_logger",
|
||||
"log",
|
||||
"pretty_assertions",
|
||||
"rand 0.8.5",
|
||||
"rand 0.9.1",
|
||||
"serde",
|
||||
"thiserror 2.0.14",
|
||||
"tokio",
|
||||
|
@ -36,7 +36,7 @@ tokio = { version = "1.40", features = [
|
||||
cidr = { features = ["serde"], version = "0.2" }
|
||||
russh = "0.45"
|
||||
russh-keys = "0.45"
|
||||
rand = "0.8"
|
||||
rand = "0.9"
|
||||
url = "2.5"
|
||||
kube = { version = "1.1.0", features = [
|
||||
"config",
|
||||
@ -67,3 +67,4 @@ serde = { version = "1.0.209", features = ["derive", "rc"] }
|
||||
serde_json = "1.0.127"
|
||||
askama = "0.14"
|
||||
sqlx = { version = "0.8", features = ["runtime-tokio", "sqlite" ] }
|
||||
reqwest = { version = "0.12", features = ["blocking", "stream", "rustls-tls", "http2", "json"], default-features = false }
|
||||
|
BIN data/pxe/okd/http_files/harmony_inventory_agent (Stored with Git LFS)
Binary file not shown.
@ -7,8 +7,9 @@ license.workspace = true
|
||||
|
||||
[dependencies]
|
||||
env_logger.workspace = true
|
||||
harmony = { version = "0.1.0", path = "../../harmony" }
|
||||
harmony_cli = { version = "0.1.0", path = "../../harmony_cli" }
|
||||
harmony = { path = "../../harmony" }
|
||||
harmony_cli = { path = "../../harmony_cli" }
|
||||
harmony_types = { path = "../../harmony_types" }
|
||||
logging = "0.1.0"
|
||||
tokio.workspace = true
|
||||
url.workspace = true
|
||||
|
@ -1,15 +1,16 @@
|
||||
use std::{path::PathBuf, str::FromStr, sync::Arc};
|
||||
|
||||
use harmony::{
|
||||
data::Id,
|
||||
inventory::Inventory,
|
||||
modules::{
|
||||
application::{ApplicationScore, RustWebFramework, RustWebapp, features::Monitoring},
|
||||
monitoring::alert_channel::webhook_receiver::WebhookReceiver,
|
||||
tenant::TenantScore,
|
||||
},
|
||||
topology::{K8sAnywhereTopology, Url, tenant::TenantConfig},
|
||||
topology::{K8sAnywhereTopology, tenant::TenantConfig},
|
||||
};
|
||||
use harmony_types::id::Id;
|
||||
use harmony_types::net::Url;
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() {
|
||||
|
@ -2,8 +2,9 @@ use harmony::{
|
||||
data::Version,
|
||||
inventory::Inventory,
|
||||
modules::lamp::{LAMPConfig, LAMPScore},
|
||||
topology::{K8sAnywhereTopology, Url},
|
||||
topology::K8sAnywhereTopology,
|
||||
};
|
||||
use harmony_types::net::Url;
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() {
|
||||
|
@ -6,8 +6,9 @@ readme.workspace = true
|
||||
license.workspace = true
|
||||
|
||||
[dependencies]
|
||||
harmony = { version = "0.1.0", path = "../../harmony" }
|
||||
harmony_cli = { version = "0.1.0", path = "../../harmony_cli" }
|
||||
harmony_macros = { version = "0.1.0", path = "../../harmony_macros" }
|
||||
harmony = { path = "../../harmony" }
|
||||
harmony_cli = { path = "../../harmony_cli" }
|
||||
harmony_macros = { path = "../../harmony_macros" }
|
||||
harmony_types = { path = "../../harmony_types" }
|
||||
tokio.workspace = true
|
||||
url.workspace = true
|
||||
|
@ -22,8 +22,9 @@ use harmony::{
|
||||
k8s::pvc::high_pvc_fill_rate_over_two_days,
|
||||
},
|
||||
},
|
||||
topology::{K8sAnywhereTopology, Url},
|
||||
topology::K8sAnywhereTopology,
|
||||
};
|
||||
use harmony_types::net::Url;
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() {
|
||||
|
@ -7,7 +7,8 @@ license.workspace = true
|
||||
|
||||
[dependencies]
|
||||
cidr.workspace = true
|
||||
harmony = { version = "0.1.0", path = "../../harmony" }
|
||||
harmony_cli = { version = "0.1.0", path = "../../harmony_cli" }
|
||||
harmony = { path = "../../harmony" }
|
||||
harmony_cli = { path = "../../harmony_cli" }
|
||||
harmony_types = { path = "../../harmony_types" }
|
||||
tokio.workspace = true
|
||||
url.workspace = true
|
||||
|
@ -1,7 +1,6 @@
|
||||
use std::{collections::HashMap, str::FromStr};
|
||||
|
||||
use harmony::{
|
||||
data::Id,
|
||||
inventory::Inventory,
|
||||
modules::{
|
||||
monitoring::{
|
||||
@ -19,10 +18,12 @@ use harmony::{
|
||||
tenant::TenantScore,
|
||||
},
|
||||
topology::{
|
||||
K8sAnywhereTopology, Url,
|
||||
K8sAnywhereTopology,
|
||||
tenant::{ResourceLimits, TenantConfig, TenantNetworkPolicy},
|
||||
},
|
||||
};
|
||||
use harmony_types::id::Id;
|
||||
use harmony_types::net::Url;
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() {
|
||||
|
@ -18,9 +18,10 @@ use harmony::{
|
||||
},
|
||||
tftp::TftpScore,
|
||||
},
|
||||
topology::{LogicalHost, UnmanagedRouter, Url},
|
||||
topology::{LogicalHost, UnmanagedRouter},
|
||||
};
|
||||
use harmony_macros::{ip, mac_address};
|
||||
use harmony_types::net::Url;
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() {
|
||||
@ -86,8 +87,7 @@ async fn main() {
|
||||
let inventory = Inventory {
|
||||
location: Location::new("I am mobile".to_string(), "earth".to_string()),
|
||||
switch: SwitchGroup::from([]),
|
||||
firewall: FirewallGroup::from([PhysicalHost::empty(HostCategory::Firewall)
|
||||
.management(Arc::new(OPNSenseManagementInterface::new()))]),
|
||||
firewall_mgmt: Box::new(OPNSenseManagementInterface::new()),
|
||||
storage_host: vec![],
|
||||
worker_host: vec![
|
||||
PhysicalHost::empty(HostCategory::Server)
|
||||
|
@ -69,8 +69,7 @@ pub fn get_inventory() -> Inventory {
|
||||
"testopnsense".to_string(),
|
||||
),
|
||||
switch: SwitchGroup::from([]),
|
||||
firewall: FirewallGroup::from([PhysicalHost::empty(HostCategory::Firewall)
|
||||
.management(Arc::new(OPNSenseManagementInterface::new()))]),
|
||||
firewall_mgmt: Box::new(OPNSenseManagementInterface::new()),
|
||||
storage_host: vec![],
|
||||
worker_host: vec![],
|
||||
control_plane_host: vec![],
|
||||
|
@ -15,9 +15,10 @@ use harmony::{
|
||||
opnsense::OPNsenseShellCommandScore,
|
||||
tftp::TftpScore,
|
||||
},
|
||||
topology::{LogicalHost, UnmanagedRouter, Url},
|
||||
topology::{LogicalHost, UnmanagedRouter},
|
||||
};
|
||||
use harmony_macros::{ip, mac_address};
|
||||
use harmony_types::net::Url;
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() {
|
||||
@ -62,8 +63,7 @@ async fn main() {
|
||||
"wk".to_string(),
|
||||
),
|
||||
switch: SwitchGroup::from([]),
|
||||
firewall: FirewallGroup::from([PhysicalHost::empty(HostCategory::Firewall)
|
||||
.management(Arc::new(OPNSenseManagementInterface::new()))]),
|
||||
firewall_mgmt: Box::new(OPNSenseManagementInterface::new()),
|
||||
storage_host: vec![],
|
||||
worker_host: vec![],
|
||||
control_plane_host: vec![
|
||||
|
@ -11,8 +11,9 @@ use harmony::{
|
||||
discord_alert_channel::DiscordWebhook, webhook_receiver::WebhookReceiver,
|
||||
},
|
||||
},
|
||||
topology::{K8sAnywhereTopology, Url},
|
||||
topology::K8sAnywhereTopology,
|
||||
};
|
||||
use harmony_types::net::Url;
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() {
|
||||
|
@ -1,11 +1,11 @@
|
||||
use std::str::FromStr;
|
||||
|
||||
use harmony::{
|
||||
data::Id,
|
||||
inventory::Inventory,
|
||||
modules::tenant::TenantScore,
|
||||
topology::{K8sAnywhereTopology, tenant::TenantConfig},
|
||||
};
|
||||
use harmony_types::id::Id;
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() {
|
||||
|
@ -9,7 +9,6 @@ license.workspace = true
|
||||
testing = []
|
||||
|
||||
[dependencies]
|
||||
rand = "0.9"
|
||||
hex = "0.4"
|
||||
reqwest = { version = "0.11", features = ["blocking", "json", "rustls-tls"], default-features = false }
|
||||
russh = "0.45.0"
|
||||
|
@ -12,4 +12,12 @@ lazy_static! {
        std::env::var("HARMONY_REGISTRY_PROJECT").unwrap_or_else(|_| "harmony".to_string());
    pub static ref DRY_RUN: bool =
        std::env::var("HARMONY_DRY_RUN").is_ok_and(|value| value.parse().unwrap_or(false));
    pub static ref DEFAULT_DATABASE_URL: String = "sqlite://harmony.sqlite".to_string();
    pub static ref DATABASE_URL: String = std::env::var("HARMONY_DATABASE_URL")
        .map(|value| if value.is_empty() {
            (*DEFAULT_DATABASE_URL).clone()
        } else {
            value
        })
        .unwrap_or((*DEFAULT_DATABASE_URL).clone());
}
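For clarity, the DATABASE_URL lookup above can be restated as a plain function. This is a sketch only, with resolve_database_url as a hypothetical helper name; it assumes nothing beyond the lazy_static block shown:

// Mirrors the lazy_static logic: a missing or empty HARMONY_DATABASE_URL falls back to the default.
fn resolve_database_url(env_value: Option<String>) -> String {
    const DEFAULT: &str = "sqlite://harmony.sqlite";
    match env_value {
        Some(value) if !value.is_empty() => value,
        _ => DEFAULT.to_string(),
    }
}

// Usage (values invented for the example):
assert_eq!(resolve_database_url(None), "sqlite://harmony.sqlite");
assert_eq!(resolve_database_url(Some(String::new())), "sqlite://harmony.sqlite");
assert_eq!(resolve_database_url(Some("sqlite:///tmp/test.sqlite".into())), "sqlite:///tmp/test.sqlite");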
@ -1,6 +1,4 @@
|
||||
mod file;
|
||||
mod id;
|
||||
mod version;
|
||||
pub use file::*;
|
||||
pub use id::*;
|
||||
pub use version::*;
|
||||
|
@ -1,8 +1,7 @@
|
||||
use std::fmt;
|
||||
|
||||
use async_trait::async_trait;
|
||||
|
||||
use super::topology::IpAddress;
|
||||
use harmony_types::net::IpAddress;
|
||||
|
||||
#[derive(Debug)]
|
||||
pub enum ExecutorError {
|
||||
|
@ -1,24 +1,24 @@
|
||||
use std::{str::FromStr, sync::Arc};
|
||||
use std::sync::Arc;
|
||||
|
||||
use derive_new::new;
|
||||
use harmony_inventory_agent::hwinfo::{CPU, MemoryModule, NetworkInterface, StorageDrive};
|
||||
use harmony_types::net::MacAddress;
|
||||
use serde::{Deserialize, Serialize, Serializer, ser::SerializeStruct};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use serde_value::Value;
|
||||
|
||||
pub type HostGroup = Vec<PhysicalHost>;
|
||||
pub type SwitchGroup = Vec<Switch>;
|
||||
pub type FirewallGroup = Vec<PhysicalHost>;
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
#[derive(Debug, Clone, Serialize)]
|
||||
pub struct PhysicalHost {
|
||||
pub id: Id,
|
||||
pub category: HostCategory,
|
||||
pub network: Vec<NetworkInterface>,
|
||||
pub management: Arc<dyn ManagementInterface>,
|
||||
pub storage: Vec<Storage>,
|
||||
pub storage: Vec<StorageDrive>,
|
||||
pub labels: Vec<Label>,
|
||||
pub memory_size: Option<u64>,
|
||||
pub cpu_count: Option<u64>,
|
||||
pub memory_modules: Vec<MemoryModule>,
|
||||
pub cpus: Vec<CPU>,
|
||||
}
|
||||
|
||||
impl PhysicalHost {
|
||||
@ -29,12 +29,128 @@ impl PhysicalHost {
|
||||
network: vec![],
|
||||
storage: vec![],
|
||||
labels: vec![],
|
||||
management: Arc::new(ManualManagementInterface {}),
|
||||
memory_size: None,
|
||||
cpu_count: None,
|
||||
memory_modules: vec![],
|
||||
cpus: vec![],
|
||||
}
|
||||
}
|
||||
|
||||
pub fn summary(&self) -> String {
|
||||
let mut parts = Vec::new();
|
||||
|
||||
// Part 1: System Model (from labels) or Category as a fallback
|
||||
let model = self
|
||||
.labels
|
||||
.iter()
|
||||
.find(|l| l.name == "system-product-name" || l.name == "model")
|
||||
.map(|l| l.value.clone())
|
||||
.unwrap_or_else(|| self.category.to_string());
|
||||
parts.push(model);
|
||||
|
||||
// Part 2: CPU Information
|
||||
if !self.cpus.is_empty() {
|
||||
let cpu_count = self.cpus.len();
|
||||
let total_cores = self.cpus.iter().map(|c| c.cores).sum::<u32>();
|
||||
let total_threads = self.cpus.iter().map(|c| c.threads).sum::<u32>();
|
||||
let model_name = &self.cpus[0].model;
|
||||
|
||||
let cpu_summary = if cpu_count > 1 {
|
||||
format!(
|
||||
"{}x {} ({}c/{}t)",
|
||||
cpu_count, model_name, total_cores, total_threads
|
||||
)
|
||||
} else {
|
||||
format!("{} ({}c/{}t)", model_name, total_cores, total_threads)
|
||||
};
|
||||
parts.push(cpu_summary);
|
||||
}
|
||||
|
||||
// Part 3: Memory Information
|
||||
if !self.memory_modules.is_empty() {
|
||||
let total_mem_bytes = self
|
||||
.memory_modules
|
||||
.iter()
|
||||
.map(|m| m.size_bytes)
|
||||
.sum::<u64>();
|
||||
let total_mem_gb = (total_mem_bytes as f64 / (1024.0 * 1024.0 * 1024.0)).round() as u64;
|
||||
|
||||
// Find the most common speed among modules
|
||||
let mut speeds = std::collections::HashMap::new();
|
||||
for module in &self.memory_modules {
|
||||
if let Some(speed) = module.speed_mhz {
|
||||
*speeds.entry(speed).or_insert(0) += 1;
|
||||
}
|
||||
}
|
||||
let common_speed = speeds
|
||||
.into_iter()
|
||||
.max_by_key(|&(_, count)| count)
|
||||
.map(|(speed, _)| speed);
|
||||
|
||||
if let Some(speed) = common_speed {
|
||||
parts.push(format!("{} GB RAM @ {}MHz", total_mem_gb, speed));
|
||||
} else {
|
||||
parts.push(format!("{} GB RAM", total_mem_gb));
|
||||
}
|
||||
}
|
||||
|
||||
// Part 4: Storage Information
|
||||
if !self.storage.is_empty() {
|
||||
let total_storage_bytes = self.storage.iter().map(|d| d.size_bytes).sum::<u64>();
|
||||
let drive_count = self.storage.len();
|
||||
let first_drive_model = &self.storage[0].model;
|
||||
|
||||
// Helper to format bytes into TB or GB
|
||||
let format_storage = |bytes: u64| {
|
||||
let tb = bytes as f64 / (1024.0 * 1024.0 * 1024.0 * 1024.0);
|
||||
if tb >= 1.0 {
|
||||
format!("{:.2} TB", tb)
|
||||
} else {
|
||||
let gb = bytes as f64 / (1024.0 * 1024.0 * 1024.0);
|
||||
format!("{:.0} GB", gb)
|
||||
}
|
||||
};
|
||||
|
||||
let storage_summary = if drive_count > 1 {
|
||||
format!(
|
||||
"{} Storage ({}x {})",
|
||||
format_storage(total_storage_bytes),
|
||||
drive_count,
|
||||
first_drive_model
|
||||
)
|
||||
} else {
|
||||
format!(
|
||||
"{} Storage ({})",
|
||||
format_storage(total_storage_bytes),
|
||||
first_drive_model
|
||||
)
|
||||
};
|
||||
parts.push(storage_summary);
|
||||
}
|
||||
|
||||
// Part 5: Network Information
|
||||
// Prioritize an "up" interface with an IPv4 address
|
||||
let best_nic = self
|
||||
.network
|
||||
.iter()
|
||||
.find(|n| n.is_up && !n.ipv4_addresses.is_empty())
|
||||
.or_else(|| self.network.first());
|
||||
|
||||
if let Some(nic) = best_nic {
|
||||
let speed = nic
|
||||
.speed_mbps
|
||||
.map(|s| format!("{}Gbps", s / 1000))
|
||||
.unwrap_or_else(|| "N/A".to_string());
|
||||
let mac = nic.mac_address.to_string();
|
||||
let nic_summary = if let Some(ip) = nic.ipv4_addresses.first() {
|
||||
format!("NIC: {} ({}, {})", speed, ip, mac)
|
||||
} else {
|
||||
format!("NIC: {} ({})", speed, mac)
|
||||
};
|
||||
parts.push(nic_summary);
|
||||
}
|
||||
|
||||
parts.join(" | ")
|
||||
}
|
||||
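To make the intent of summary() concrete: it joins whichever parts it could recover with " | ". For a hypothetical dual-socket server the result would look roughly like this (all values invented for illustration):

// println!("{}", host.summary()) might print, for example:
// PowerEdge R640 | 2x Xeon Gold 6130 (32c/64t) | 256 GB RAM @ 2666MHz | 3.49 TB Storage (2x SAMSUNG MZ7LH1T9) | NIC: 10Gbps (10.0.0.12, aa:bb:cc:dd:ee:ff)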
|
||||
pub fn cluster_mac(&self) -> MacAddress {
|
||||
self.network
|
||||
.first()
|
||||
@ -42,37 +158,17 @@ impl PhysicalHost {
|
||||
.mac_address
|
||||
}
|
||||
|
||||
pub fn cpu(mut self, cpu_count: Option<u64>) -> Self {
|
||||
self.cpu_count = cpu_count;
|
||||
self
|
||||
}
|
||||
|
||||
pub fn memory_size(mut self, memory_size: Option<u64>) -> Self {
|
||||
self.memory_size = memory_size;
|
||||
self
|
||||
}
|
||||
|
||||
pub fn storage(
|
||||
mut self,
|
||||
connection: StorageConnectionType,
|
||||
kind: StorageKind,
|
||||
size: u64,
|
||||
serial: String,
|
||||
) -> Self {
|
||||
self.storage.push(Storage {
|
||||
connection,
|
||||
kind,
|
||||
size,
|
||||
serial,
|
||||
});
|
||||
self
|
||||
}
|
||||
|
||||
pub fn mac_address(mut self, mac_address: MacAddress) -> Self {
|
||||
self.network.push(NetworkInterface {
|
||||
name: None,
|
||||
name: String::new(),
|
||||
mac_address,
|
||||
speed: None,
|
||||
speed_mbps: None,
|
||||
is_up: false,
|
||||
mtu: 0,
|
||||
ipv4_addresses: vec![],
|
||||
ipv6_addresses: vec![],
|
||||
driver: String::new(),
|
||||
firmware_version: None,
|
||||
});
|
||||
self
|
||||
}
|
||||
@ -81,57 +177,52 @@ impl PhysicalHost {
|
||||
self.labels.push(Label { name, value });
|
||||
self
|
||||
}
|
||||
|
||||
pub fn management(mut self, management: Arc<dyn ManagementInterface>) -> Self {
|
||||
self.management = management;
|
||||
self
|
||||
}
|
||||
}
|
||||
|
||||
// Custom Serialize implementation for PhysicalHost
|
||||
impl Serialize for PhysicalHost {
|
||||
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
|
||||
where
|
||||
S: Serializer,
|
||||
{
|
||||
// Determine the number of fields
|
||||
let mut num_fields = 5; // category, network, storage, labels, management
|
||||
if self.memory_size.is_some() {
|
||||
num_fields += 1;
|
||||
}
|
||||
if self.cpu_count.is_some() {
|
||||
num_fields += 1;
|
||||
}
|
||||
|
||||
// Create a serialization structure
|
||||
let mut state = serializer.serialize_struct("PhysicalHost", num_fields)?;
|
||||
|
||||
// Serialize the standard fields
|
||||
state.serialize_field("category", &self.category)?;
|
||||
state.serialize_field("network", &self.network)?;
|
||||
state.serialize_field("storage", &self.storage)?;
|
||||
state.serialize_field("labels", &self.labels)?;
|
||||
|
||||
// Serialize optional fields
|
||||
if let Some(memory) = self.memory_size {
|
||||
state.serialize_field("memory_size", &memory)?;
|
||||
}
|
||||
if let Some(cpu) = self.cpu_count {
|
||||
state.serialize_field("cpu_count", &cpu)?;
|
||||
}
|
||||
|
||||
let mgmt_data = self.management.serialize_management();
|
||||
// pub management: Arc<dyn ManagementInterface>,
|
||||
|
||||
// Handle management interface - either as a field or flattened
|
||||
state.serialize_field("management", &mgmt_data)?;
|
||||
|
||||
state.end()
|
||||
}
|
||||
}
|
||||
// impl Serialize for PhysicalHost {
|
||||
// fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
|
||||
// where
|
||||
// S: Serializer,
|
||||
// {
|
||||
// // Determine the number of fields
|
||||
// let mut num_fields = 5; // category, network, storage, labels, management
|
||||
// if self.memory_modules.is_some() {
|
||||
// num_fields += 1;
|
||||
// }
|
||||
// if self.cpus.is_some() {
|
||||
// num_fields += 1;
|
||||
// }
|
||||
//
|
||||
// // Create a serialization structure
|
||||
// let mut state = serializer.serialize_struct("PhysicalHost", num_fields)?;
|
||||
//
|
||||
// // Serialize the standard fields
|
||||
// state.serialize_field("category", &self.category)?;
|
||||
// state.serialize_field("network", &self.network)?;
|
||||
// state.serialize_field("storage", &self.storage)?;
|
||||
// state.serialize_field("labels", &self.labels)?;
|
||||
//
|
||||
// // Serialize optional fields
|
||||
// if let Some(memory) = self.memory_modules {
|
||||
// state.serialize_field("memory_size", &memory)?;
|
||||
// }
|
||||
// if let Some(cpu) = self.cpus {
|
||||
// state.serialize_field("cpu_count", &cpu)?;
|
||||
// }
|
||||
//
|
||||
// let mgmt_data = self.management.serialize_management();
|
||||
// // pub management: Arc<dyn ManagementInterface>,
|
||||
//
|
||||
// // Handle management interface - either as a field or flattened
|
||||
// state.serialize_field("management", &mgmt_data)?;
|
||||
//
|
||||
// state.end()
|
||||
// }
|
||||
// }
|
||||
|
||||
impl<'de> Deserialize<'de> for PhysicalHost {
|
||||
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
|
||||
fn deserialize<D>(_deserializer: D) -> Result<Self, D::Error>
|
||||
where
|
||||
D: serde::Deserializer<'de>,
|
||||
{
|
||||
@ -189,61 +280,10 @@ pub enum HostCategory {
|
||||
Switch,
|
||||
}
|
||||
|
||||
#[derive(Debug, new, Clone, Serialize)]
|
||||
pub struct NetworkInterface {
|
||||
pub name: Option<String>,
|
||||
pub mac_address: MacAddress,
|
||||
pub speed: Option<u64>,
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
use harmony_macros::mac_address;
|
||||
|
||||
use crate::data::Id;
|
||||
#[cfg(test)]
|
||||
impl NetworkInterface {
|
||||
pub fn dummy() -> Self {
|
||||
Self {
|
||||
name: Some(String::new()),
|
||||
mac_address: mac_address!("00:00:00:00:00:00"),
|
||||
speed: Some(0),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, new, Clone, Serialize)]
|
||||
pub enum StorageConnectionType {
|
||||
Sata3g,
|
||||
Sata6g,
|
||||
Sas6g,
|
||||
Sas12g,
|
||||
PCIE,
|
||||
}
|
||||
#[derive(Debug, Clone, Serialize)]
|
||||
pub enum StorageKind {
|
||||
SSD,
|
||||
NVME,
|
||||
HDD,
|
||||
}
|
||||
#[derive(Debug, new, Clone, Serialize)]
|
||||
pub struct Storage {
|
||||
pub connection: StorageConnectionType,
|
||||
pub kind: StorageKind,
|
||||
pub size: u64,
|
||||
pub serial: String,
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
impl Storage {
|
||||
pub fn dummy() -> Self {
|
||||
Self {
|
||||
connection: StorageConnectionType::Sata3g,
|
||||
kind: StorageKind::SSD,
|
||||
size: 0,
|
||||
serial: String::new(),
|
||||
}
|
||||
}
|
||||
}
|
||||
use harmony_types::id::Id;
|
||||
|
||||
#[derive(Debug, Clone, Serialize)]
|
||||
pub struct Switch {
|
||||
@ -274,117 +314,43 @@ impl Location {
|
||||
}
|
||||
}
|
||||
|
||||
impl std::fmt::Display for HostCategory {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
match self {
|
||||
HostCategory::Server => write!(f, "Server"),
|
||||
HostCategory::Firewall => write!(f, "Firewall"),
|
||||
HostCategory::Switch => write!(f, "Switch"),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl std::fmt::Display for Label {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
write!(f, "{}: {}", self.name, self.value)
|
||||
}
|
||||
}
|
||||
|
||||
impl std::fmt::Display for Location {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
write!(f, "Address: {}, Name: {}", self.address, self.name)
|
||||
}
|
||||
}
|
||||
|
||||
impl std::fmt::Display for PhysicalHost {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
write!(f, "{}", self.summary())
|
||||
}
|
||||
}
|
||||
|
||||
impl std::fmt::Display for Switch {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
write!(f, "Switch with {} interfaces", self._interface.len())
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::sync::Arc;
|
||||
|
||||
// Mock implementation of ManagementInterface
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
struct MockHPIlo {
|
||||
ip: String,
|
||||
username: String,
|
||||
password: String,
|
||||
firmware_version: String,
|
||||
}
|
||||
|
||||
impl ManagementInterface for MockHPIlo {
|
||||
fn boot_to_pxe(&self) {}
|
||||
|
||||
fn get_supported_protocol_names(&self) -> String {
|
||||
String::new()
|
||||
}
|
||||
}
|
||||
|
||||
// Another mock implementation
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
struct MockDellIdrac {
|
||||
hostname: String,
|
||||
port: u16,
|
||||
api_token: String,
|
||||
}
|
||||
|
||||
impl ManagementInterface for MockDellIdrac {
|
||||
fn boot_to_pxe(&self) {}
|
||||
|
||||
fn get_supported_protocol_names(&self) -> String {
|
||||
String::new()
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_serialize_physical_host_with_hp_ilo() {
|
||||
// Create a PhysicalHost with HP iLO management
|
||||
let host = PhysicalHost {
|
||||
id: Id::empty(),
|
||||
category: HostCategory::Server,
|
||||
network: vec![NetworkInterface::dummy()],
|
||||
management: Arc::new(MockHPIlo {
|
||||
ip: "192.168.1.100".to_string(),
|
||||
username: "admin".to_string(),
|
||||
password: "password123".to_string(),
|
||||
firmware_version: "2.5.0".to_string(),
|
||||
}),
|
||||
storage: vec![Storage::dummy()],
|
||||
labels: vec![Label::new("datacenter".to_string(), "us-east".to_string())],
|
||||
memory_size: Some(64_000_000),
|
||||
cpu_count: Some(16),
|
||||
};
|
||||
|
||||
// Serialize to JSON
|
||||
let json = serde_json::to_string(&host).expect("Failed to serialize host");
|
||||
|
||||
// Check that the serialized JSON contains the HP iLO details
|
||||
assert!(json.contains("192.168.1.100"));
|
||||
assert!(json.contains("admin"));
|
||||
assert!(json.contains("password123"));
|
||||
assert!(json.contains("firmware_version"));
|
||||
assert!(json.contains("2.5.0"));
|
||||
|
||||
// Parse back to verify structure (not the exact management interface)
|
||||
let parsed: serde_json::Value = serde_json::from_str(&json).expect("Failed to parse JSON");
|
||||
|
||||
// Verify basic structure
|
||||
assert_eq!(parsed["cpu_count"], 16);
|
||||
assert_eq!(parsed["memory_size"], 64_000_000);
|
||||
assert_eq!(parsed["network"][0]["name"], "");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_serialize_physical_host_with_dell_idrac() {
|
||||
// Create a PhysicalHost with Dell iDRAC management
|
||||
let host = PhysicalHost {
|
||||
id: Id::empty(),
|
||||
category: HostCategory::Server,
|
||||
network: vec![NetworkInterface::dummy()],
|
||||
management: Arc::new(MockDellIdrac {
|
||||
hostname: "idrac-server01".to_string(),
|
||||
port: 443,
|
||||
api_token: "abcdef123456".to_string(),
|
||||
}),
|
||||
storage: vec![Storage::dummy()],
|
||||
labels: vec![Label::new("env".to_string(), "production".to_string())],
|
||||
memory_size: Some(128_000_000),
|
||||
cpu_count: Some(32),
|
||||
};
|
||||
|
||||
// Serialize to JSON
|
||||
let json = serde_json::to_string(&host).expect("Failed to serialize host");
|
||||
|
||||
// Check that the serialized JSON contains the Dell iDRAC details
|
||||
assert!(json.contains("idrac-server01"));
|
||||
assert!(json.contains("443"));
|
||||
assert!(json.contains("abcdef123456"));
|
||||
|
||||
// Parse back to verify structure
|
||||
let parsed: serde_json::Value = serde_json::from_str(&json).expect("Failed to parse JSON");
|
||||
|
||||
// Verify basic structure
|
||||
assert_eq!(parsed["cpu_count"], 32);
|
||||
assert_eq!(parsed["memory_size"], 128_000_000);
|
||||
assert_eq!(parsed["storage"][0]["path"], serde_json::Value::Null);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_different_management_implementations_produce_valid_json() {
|
||||
@ -393,31 +359,20 @@ mod tests {
|
||||
id: Id::empty(),
|
||||
category: HostCategory::Server,
|
||||
network: vec![],
|
||||
management: Arc::new(MockHPIlo {
|
||||
ip: "10.0.0.1".to_string(),
|
||||
username: "root".to_string(),
|
||||
password: "secret".to_string(),
|
||||
firmware_version: "3.0.0".to_string(),
|
||||
}),
|
||||
storage: vec![],
|
||||
labels: vec![],
|
||||
memory_size: None,
|
||||
cpu_count: None,
|
||||
memory_modules: vec![],
|
||||
cpus: vec![],
|
||||
};
|
||||
|
||||
let host2 = PhysicalHost {
|
||||
id: Id::empty(),
|
||||
category: HostCategory::Server,
|
||||
network: vec![],
|
||||
management: Arc::new(MockDellIdrac {
|
||||
hostname: "server02-idrac".to_string(),
|
||||
port: 8443,
|
||||
api_token: "token123".to_string(),
|
||||
}),
|
||||
storage: vec![],
|
||||
labels: vec![],
|
||||
memory_size: None,
|
||||
cpu_count: None,
|
||||
memory_modules: vec![],
|
||||
cpus: vec![],
|
||||
};
|
||||
|
||||
// Both should serialize successfully
|
||||
@ -427,8 +382,5 @@ mod tests {
|
||||
// Both JSONs should be valid and parseable
|
||||
let _: serde_json::Value = serde_json::from_str(&json1).expect("Invalid JSON for host1");
|
||||
let _: serde_json::Value = serde_json::from_str(&json2).expect("Invalid JSON for host2");
|
||||
|
||||
// The JSONs should be different because they contain different management interfaces
|
||||
assert_ne!(json1, json2);
|
||||
}
|
||||
}
|
||||
|
@ -1,13 +1,11 @@
|
||||
use harmony_types::id::Id;
|
||||
use std::error::Error;
|
||||
|
||||
use async_trait::async_trait;
|
||||
use derive_new::new;
|
||||
|
||||
use super::{
|
||||
data::{Id, Version},
|
||||
executors::ExecutorError,
|
||||
inventory::Inventory,
|
||||
topology::PreparationError,
|
||||
data::Version, executors::ExecutorError, inventory::Inventory, topology::PreparationError,
|
||||
};
|
||||
|
||||
pub enum InterpretName {
|
||||
|
@ -18,6 +18,8 @@ impl InventoryFilter {
|
||||
use derive_new::new;
|
||||
use log::info;
|
||||
|
||||
use crate::hardware::{ManagementInterface, ManualManagementInterface};
|
||||
|
||||
use super::{
|
||||
filter::Filter,
|
||||
hardware::{FirewallGroup, HostGroup, Location, SwitchGroup},
|
||||
@ -30,7 +32,7 @@ pub struct Inventory {
|
||||
// Firewall is really just a host but with somewhat specialized hardware
|
||||
// I'm not entirely sure it belongs to its own category but it helps make things easier and
|
||||
// clearer for now so let's try it this way.
|
||||
pub firewall: FirewallGroup,
|
||||
pub firewall_mgmt: Box<dyn ManagementInterface>,
|
||||
pub worker_host: HostGroup,
|
||||
pub storage_host: HostGroup,
|
||||
pub control_plane_host: HostGroup,
|
||||
@ -41,7 +43,7 @@ impl Inventory {
|
||||
Self {
|
||||
location: Location::new("Empty".to_string(), "location".to_string()),
|
||||
switch: vec![],
|
||||
firewall: vec![],
|
||||
firewall_mgmt: Box::new(ManualManagementInterface {}),
|
||||
worker_host: vec![],
|
||||
storage_host: vec![],
|
||||
control_plane_host: vec![],
|
||||
@ -52,7 +54,7 @@ impl Inventory {
|
||||
Self {
|
||||
location: Location::test_building(),
|
||||
switch: SwitchGroup::new(),
|
||||
firewall: FirewallGroup::new(),
|
||||
firewall_mgmt: Box::new(ManualManagementInterface {}),
|
||||
worker_host: HostGroup::new(),
|
||||
storage_host: HostGroup::new(),
|
||||
control_plane_host: HostGroup::new(),
|
||||
|
@ -1,3 +1,4 @@
|
||||
use harmony_types::id::Id;
|
||||
use std::collections::BTreeMap;
|
||||
|
||||
use async_trait::async_trait;
|
||||
@ -5,7 +6,6 @@ use serde::Serialize;
|
||||
use serde_value::Value;
|
||||
|
||||
use super::{
|
||||
data::Id,
|
||||
instrumentation::{self, HarmonyEvent},
|
||||
interpret::{Interpret, InterpretError, Outcome},
|
||||
inventory::Inventory,
|
||||
|
@ -1,6 +1,7 @@
|
||||
use async_trait::async_trait;
|
||||
use harmony_macros::ip;
|
||||
use harmony_types::net::MacAddress;
|
||||
use harmony_types::net::Url;
|
||||
use log::debug;
|
||||
use log::info;
|
||||
|
||||
@ -26,7 +27,6 @@ use super::Router;
|
||||
use super::TftpServer;
|
||||
|
||||
use super::Topology;
|
||||
use super::Url;
|
||||
use super::k8s::K8sClient;
|
||||
use std::sync::Arc;
|
||||
|
||||
|
@ -1,8 +1,8 @@
|
||||
use crate::{data::FileContent, executors::ExecutorError};
|
||||
use async_trait::async_trait;
|
||||
|
||||
use super::{IpAddress, Url};
|
||||
|
||||
use harmony_types::net::IpAddress;
|
||||
use harmony_types::net::Url;
|
||||
#[async_trait]
|
||||
pub trait HttpServer: Send + Sync {
|
||||
async fn serve_files(&self, url: &Url) -> Result<(), ExecutorError>;
|
||||
|
@ -4,8 +4,9 @@ use async_trait::async_trait;
|
||||
use log::debug;
|
||||
use serde::Serialize;
|
||||
|
||||
use super::{IpAddress, LogicalHost};
|
||||
use super::LogicalHost;
|
||||
use crate::executors::ExecutorError;
|
||||
use harmony_types::net::IpAddress;
|
||||
|
||||
impl std::fmt::Debug for dyn LoadBalancer {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
|
@ -1,4 +1,5 @@
|
||||
mod ha_cluster;
|
||||
use harmony_types::net::IpAddress;
|
||||
mod host_binding;
|
||||
mod http;
|
||||
pub mod installable;
|
||||
@ -32,7 +33,6 @@ use super::{
|
||||
instrumentation::{self, HarmonyEvent},
|
||||
};
|
||||
use std::error::Error;
|
||||
use std::net::IpAddr;
|
||||
|
||||
/// Represents a logical view of an infrastructure environment providing specific capabilities.
|
||||
///
|
||||
@ -196,35 +196,6 @@ pub trait MultiTargetTopology: Topology {
|
||||
fn current_target(&self) -> DeploymentTarget;
|
||||
}
|
||||
|
||||
pub type IpAddress = IpAddr;
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub enum Url {
|
||||
LocalFolder(String),
|
||||
Url(url::Url),
|
||||
}
|
||||
|
||||
impl Serialize for Url {
|
||||
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
|
||||
where
|
||||
S: serde::Serializer,
|
||||
{
|
||||
match self {
|
||||
Url::LocalFolder(path) => serializer.serialize_str(path),
|
||||
Url::Url(url) => serializer.serialize_str(url.as_str()),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl std::fmt::Display for Url {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
match self {
|
||||
Url::LocalFolder(path) => write!(f, "{}", path),
|
||||
Url::Url(url) => write!(f, "{}", url),
|
||||
}
|
||||
}
|
||||
}
|
||||
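The IpAddress alias and the Url enum deleted here presumably now live in harmony_types::net, since the rest of this diff imports them from there. A minimal sketch of the relocated definitions, assuming they were moved over unchanged:

// harmony_types/src/net.rs (assumed location and contents)
pub type IpAddress = std::net::IpAddr;

#[derive(Debug, Clone)]
pub enum Url {
    LocalFolder(String),
    Url(url::Url),
}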
|
||||
/// Represents a logical member of a cluster that provides one or more services.
|
||||
///
|
||||
/// A LogicalHost can represent various roles within the infrastructure, such as:
|
||||
@ -263,7 +234,8 @@ impl LogicalHost {
|
||||
///
|
||||
/// ```
|
||||
/// use std::str::FromStr;
|
||||
/// use harmony::topology::{IpAddress, LogicalHost};
|
||||
/// use harmony::topology::{LogicalHost};
|
||||
/// use harmony_types::net::IpAddress;
|
||||
///
|
||||
/// let start_ip = IpAddress::from_str("192.168.0.20").unwrap();
|
||||
/// let hosts = LogicalHost::create_hosts(3, start_ip, "worker");
|
||||
@ -319,7 +291,7 @@ fn increment_ip(ip: IpAddress, increment: u32) -> Option<IpAddress> {
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use harmony_types::net::Url;
|
||||
use serde_json;
|
||||
|
||||
#[test]
|
||||
|
@ -1,12 +1,12 @@
|
||||
use std::{net::Ipv4Addr, str::FromStr, sync::Arc};
|
||||
|
||||
use async_trait::async_trait;
|
||||
use harmony_types::net::MacAddress;
|
||||
use harmony_types::net::{IpAddress, MacAddress};
|
||||
use serde::Serialize;
|
||||
|
||||
use crate::executors::ExecutorError;
|
||||
|
||||
use super::{IpAddress, LogicalHost, k8s::K8sClient};
|
||||
use super::{LogicalHost, k8s::K8sClient};
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct DHCPStaticEntry {
|
||||
|
@ -4,11 +4,12 @@ use async_trait::async_trait;
|
||||
use log::debug;
|
||||
|
||||
use crate::{
|
||||
data::{Id, Version},
|
||||
data::Version,
|
||||
interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
|
||||
inventory::Inventory,
|
||||
topology::{Topology, installable::Installable},
|
||||
};
|
||||
use harmony_types::id::Id;
|
||||
|
||||
#[async_trait]
|
||||
pub trait AlertSender: Send + Sync + std::fmt::Debug {
|
||||
|
@ -2,7 +2,7 @@ pub mod k8s;
|
||||
mod manager;
|
||||
pub mod network_policy;
|
||||
|
||||
use crate::data::Id;
|
||||
use harmony_types::id::Id;
|
||||
pub use manager::*;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::str::FromStr;
|
||||
|
@ -1,7 +1,7 @@
|
||||
use crate::executors::ExecutorError;
|
||||
use async_trait::async_trait;
|
||||
|
||||
use super::{IpAddress, Url};
|
||||
use harmony_types::net::{IpAddress, Url};
|
||||
|
||||
#[async_trait]
|
||||
pub trait TftpServer: Send + Sync {
|
||||
|
@ -3,11 +3,9 @@ use std::sync::Arc;
|
||||
|
||||
use russh::{client, keys::key};
|
||||
|
||||
use crate::{
|
||||
domain::executors::{ExecutorError, SshClient},
|
||||
topology::IpAddress,
|
||||
};
|
||||
use crate::domain::executors::{ExecutorError, SshClient};
|
||||
|
||||
use harmony_types::net::IpAddress;
|
||||
pub struct RusshClient;
|
||||
|
||||
#[async_trait]
|
||||
|
@ -1,6 +1,6 @@
|
||||
use crate::hardware::ManagementInterface;
|
||||
use crate::topology::IpAddress;
|
||||
use derive_new::new;
|
||||
use harmony_types::net::IpAddress;
|
||||
use harmony_types::net::MacAddress;
|
||||
use log::info;
|
||||
use serde::Serialize;
|
||||
|
@ -1 +1,17 @@
mod sqlite;
use crate::{
    config::DATABASE_URL,
    infra::inventory::sqlite::SqliteInventoryRepository,
    inventory::{InventoryRepository, RepoError},
};

pub mod sqlite;

pub struct InventoryRepositoryFactory;

impl InventoryRepositoryFactory {
    pub async fn build() -> Result<Box<dyn InventoryRepository>, RepoError> {
        Ok(Box::new(
            SqliteInventoryRepository::new(&(*DATABASE_URL)).await?,
        ))
    }
}
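A short call-site sketch for the factory above; it only assumes the save method used later in this diff, and the error handling is illustrative:

// Build the SQLite-backed repository and persist a discovered host.
let repo = InventoryRepositoryFactory::build()
    .await
    .expect("failed to open inventory repository");
repo.save(&host).await.expect("failed to persist host");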
@ -1,10 +1,10 @@
|
||||
use crate::{
|
||||
data::Id,
|
||||
hardware::PhysicalHost,
|
||||
inventory::{InventoryRepository, RepoError},
|
||||
};
|
||||
use async_trait::async_trait;
|
||||
use log::{info, warn};
|
||||
use harmony_types::id::Id;
|
||||
use log::info;
|
||||
use sqlx::{Pool, Sqlite, SqlitePool};
|
||||
|
||||
/// A thread-safe, connection-pooled repository using SQLite.
|
||||
@ -19,11 +19,7 @@ impl SqliteInventoryRepository {
            .await
            .map_err(|e| RepoError::ConnectionFailed(e.to_string()))?;

        todo!("make sure migrations are up to date");
        info!(
            "SQLite inventory repository initialized at '{}'",
            database_url,
        );
        info!("SQLite inventory repository initialized at '{database_url}'");
        Ok(Self { pool })
    }
}
@ -50,7 +46,7 @@ impl InventoryRepository for SqliteInventoryRepository {
    }

    async fn get_latest_by_id(&self, host_id: &str) -> Result<Option<PhysicalHost>, RepoError> {
        let row = sqlx::query_as!(
        let _row = sqlx::query_as!(
            DbHost,
            r#"SELECT id, version_id, data as "data: Json<PhysicalHost>" FROM physical_hosts WHERE id = ? ORDER BY version_id DESC LIMIT 1"#,
            host_id
|
@ -4,10 +4,11 @@ use log::info;
|
||||
|
||||
use crate::{
|
||||
executors::ExecutorError,
|
||||
topology::{DHCPStaticEntry, DhcpServer, IpAddress, LogicalHost, PxeOptions},
|
||||
topology::{DHCPStaticEntry, DhcpServer, LogicalHost, PxeOptions},
|
||||
};
|
||||
|
||||
use super::OPNSenseFirewall;
|
||||
use harmony_types::net::IpAddress;
|
||||
|
||||
#[async_trait]
|
||||
impl DhcpServer for OPNSenseFirewall {
|
||||
|
@ -1,11 +1,11 @@
|
||||
use crate::infra::opnsense::Host;
|
||||
use crate::infra::opnsense::IpAddress;
|
||||
use crate::infra::opnsense::LogicalHost;
|
||||
use crate::{
|
||||
executors::ExecutorError,
|
||||
topology::{DnsRecord, DnsServer},
|
||||
};
|
||||
use async_trait::async_trait;
|
||||
use harmony_types::net::IpAddress;
|
||||
|
||||
use super::OPNSenseFirewall;
|
||||
|
||||
|
@ -1,9 +1,10 @@
|
||||
use crate::{
|
||||
executors::ExecutorError,
|
||||
topology::{Firewall, FirewallRule, IpAddress, LogicalHost},
|
||||
topology::{Firewall, FirewallRule, LogicalHost},
|
||||
};
|
||||
|
||||
use super::OPNSenseFirewall;
|
||||
use harmony_types::net::IpAddress;
|
||||
|
||||
impl Firewall for OPNSenseFirewall {
|
||||
fn add_rule(&mut self, _rule: FirewallRule) -> Result<(), ExecutorError> {
|
||||
|
@ -1,13 +1,11 @@
|
||||
use async_trait::async_trait;
|
||||
use log::info;
|
||||
|
||||
use crate::{
|
||||
data::FileContent,
|
||||
executors::ExecutorError,
|
||||
topology::{HttpServer, IpAddress, Url},
|
||||
};
|
||||
use crate::{data::FileContent, executors::ExecutorError, topology::HttpServer};
|
||||
|
||||
use super::OPNSenseFirewall;
|
||||
use harmony_types::net::IpAddress;
|
||||
use harmony_types::net::Url;
|
||||
const OPNSENSE_HTTP_ROOT_PATH: &str = "/usr/local/http";
|
||||
|
||||
#[async_trait]
|
||||
|
@ -6,10 +6,11 @@ use uuid::Uuid;
|
||||
use crate::{
|
||||
executors::ExecutorError,
|
||||
topology::{
|
||||
BackendServer, HealthCheck, HttpMethod, HttpStatusCode, IpAddress, LoadBalancer,
|
||||
LoadBalancerService, LogicalHost,
|
||||
BackendServer, HealthCheck, HttpMethod, HttpStatusCode, LoadBalancer, LoadBalancerService,
|
||||
LogicalHost,
|
||||
},
|
||||
};
|
||||
use harmony_types::net::IpAddress;
|
||||
|
||||
use super::OPNSenseFirewall;
|
||||
|
||||
|
@ -11,10 +11,8 @@ pub use management::*;
|
||||
use opnsense_config_xml::Host;
|
||||
use tokio::sync::RwLock;
|
||||
|
||||
use crate::{
|
||||
executors::ExecutorError,
|
||||
topology::{IpAddress, LogicalHost},
|
||||
};
|
||||
use crate::{executors::ExecutorError, topology::LogicalHost};
|
||||
use harmony_types::net::IpAddress;
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct OPNSenseFirewall {
|
||||
|
@ -1,10 +1,9 @@
|
||||
use async_trait::async_trait;
|
||||
use log::info;
|
||||
|
||||
use crate::{
|
||||
executors::ExecutorError,
|
||||
topology::{IpAddress, TftpServer, Url},
|
||||
};
|
||||
use crate::{executors::ExecutorError, topology::TftpServer};
|
||||
use harmony_types::net::IpAddress;
|
||||
use harmony_types::net::Url;
|
||||
|
||||
use super::OPNSenseFirewall;
|
||||
|
||||
|
@ -4,13 +4,14 @@ use serde::Serialize;
|
||||
use std::str::FromStr;
|
||||
|
||||
use crate::{
|
||||
data::{Id, Version},
|
||||
data::Version,
|
||||
interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
|
||||
inventory::Inventory,
|
||||
modules::helm::chart::{HelmChartScore, HelmRepository},
|
||||
score::Score,
|
||||
topology::{HelmCommand, K8sclient, Topology},
|
||||
};
|
||||
use harmony_types::id::Id;
|
||||
|
||||
use super::ArgoApplication;
|
||||
|
||||
|
@ -11,7 +11,7 @@ use crate::{
|
||||
alert_channel::webhook_receiver::WebhookReceiver, ntfy::ntfy::NtfyScore,
|
||||
},
|
||||
score::Score,
|
||||
topology::{HelmCommand, K8sclient, Topology, Url, tenant::TenantManager},
|
||||
topology::{HelmCommand, K8sclient, Topology, tenant::TenantManager},
|
||||
};
|
||||
use crate::{
|
||||
modules::prometheus::prometheus::PrometheusApplicationMonitoring,
|
||||
@ -19,6 +19,7 @@ use crate::{
|
||||
};
|
||||
use async_trait::async_trait;
|
||||
use base64::{Engine as _, engine::general_purpose};
|
||||
use harmony_types::net::Url;
|
||||
use log::{debug, info};
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
|
@ -13,12 +13,13 @@ use async_trait::async_trait;
|
||||
use serde::Serialize;
|
||||
|
||||
use crate::{
|
||||
data::{Id, Version},
|
||||
data::Version,
|
||||
instrumentation::{self, HarmonyEvent},
|
||||
interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
|
||||
inventory::Inventory,
|
||||
topology::Topology,
|
||||
};
|
||||
use harmony_types::id::Id;
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
pub enum ApplicationFeatureStatus {
|
||||
|
@ -15,10 +15,8 @@ use serde::Serialize;
|
||||
use tar::Archive;
|
||||
|
||||
use crate::config::{REGISTRY_PROJECT, REGISTRY_URL};
|
||||
use crate::{
|
||||
score::Score,
|
||||
topology::{Topology, Url},
|
||||
};
|
||||
use crate::{score::Score, topology::Topology};
|
||||
use harmony_types::net::Url;
|
||||
|
||||
use super::{Application, ApplicationFeature, ApplicationInterpret, HelmPackage, OCICompliant};
|
||||
|
||||
|
@ -1,5 +1,6 @@
|
||||
use async_trait::async_trait;
|
||||
use derive_new::new;
|
||||
use harmony_types::id::Id;
|
||||
use log::info;
|
||||
use serde::Serialize;
|
||||
|
||||
@ -7,10 +8,11 @@ use crate::{
|
||||
domain::{data::Version, interpret::InterpretStatus},
|
||||
interpret::{Interpret, InterpretError, InterpretName, Outcome},
|
||||
inventory::Inventory,
|
||||
topology::{DHCPStaticEntry, DhcpServer, HostBinding, IpAddress, PxeOptions, Topology},
|
||||
topology::{DHCPStaticEntry, DhcpServer, HostBinding, PxeOptions, Topology},
|
||||
};
|
||||
|
||||
use crate::domain::score::Score;
|
||||
use harmony_types::net::IpAddress;
|
||||
|
||||
#[derive(Debug, new, Clone, Serialize)]
|
||||
pub struct DhcpScore {
|
||||
@ -135,7 +137,7 @@ impl<T: DhcpServer> Interpret<T> for DhcpInterpret {
|
||||
self.status.clone()
|
||||
}
|
||||
|
||||
fn get_children(&self) -> Vec<crate::domain::data::Id> {
|
||||
fn get_children(&self) -> Vec<Id> {
|
||||
todo!()
|
||||
}
|
||||
|
||||
|
@ -1,5 +1,6 @@
|
||||
use async_trait::async_trait;
|
||||
use derive_new::new;
|
||||
use harmony_types::id::Id;
|
||||
use log::info;
|
||||
use serde::Serialize;
|
||||
|
||||
@ -91,7 +92,7 @@ impl<T: Topology + DnsServer> Interpret<T> for DnsInterpret {
|
||||
self.status.clone()
|
||||
}
|
||||
|
||||
fn get_children(&self) -> Vec<crate::domain::data::Id> {
|
||||
fn get_children(&self) -> Vec<Id> {
|
||||
todo!()
|
||||
}
|
||||
|
||||
|
@ -1,4 +1,5 @@
|
||||
use async_trait::async_trait;
|
||||
use harmony_types::id::Id;
|
||||
use serde::Serialize;
|
||||
|
||||
use crate::{
|
||||
@ -67,7 +68,7 @@ impl<T: Topology> Interpret<T> for DummyInterpret {
|
||||
self.status.clone()
|
||||
}
|
||||
|
||||
fn get_children(&self) -> Vec<crate::domain::data::Id> {
|
||||
fn get_children(&self) -> Vec<Id> {
|
||||
todo!()
|
||||
}
|
||||
|
||||
@ -113,7 +114,7 @@ impl<T: Topology> Interpret<T> for PanicInterpret {
|
||||
InterpretStatus::QUEUED
|
||||
}
|
||||
|
||||
fn get_children(&self) -> Vec<crate::domain::data::Id> {
|
||||
fn get_children(&self) -> Vec<Id> {
|
||||
todo!()
|
||||
}
|
||||
|
||||
|
@ -1,9 +1,10 @@
|
||||
use crate::data::{Id, Version};
|
||||
use crate::data::Version;
|
||||
use crate::interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome};
|
||||
use crate::inventory::Inventory;
|
||||
use crate::score::Score;
|
||||
use crate::topology::{HelmCommand, Topology};
|
||||
use async_trait::async_trait;
|
||||
use harmony_types::id::Id;
|
||||
use helm_wrapper_rs;
|
||||
use helm_wrapper_rs::blocking::{DefaultHelmExecutor, HelmExecutor};
|
||||
use log::{debug, info, warn};
|
||||
|
@ -8,11 +8,12 @@ use std::process::{Command, Output};
|
||||
use temp_dir::{self, TempDir};
|
||||
use temp_file::TempFile;
|
||||
|
||||
use crate::data::{Id, Version};
|
||||
use crate::data::Version;
|
||||
use crate::interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome};
|
||||
use crate::inventory::Inventory;
|
||||
use crate::score::Score;
|
||||
use crate::topology::{HelmCommand, K8sclient, Topology};
|
||||
use harmony_types::id::Id;
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct HelmCommandExecutor {
|
||||
|
@ -3,12 +3,14 @@ use derive_new::new;
|
||||
use serde::Serialize;
|
||||
|
||||
use crate::{
|
||||
data::{FileContent, Id, Version},
|
||||
data::{FileContent, Version},
|
||||
interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
|
||||
inventory::Inventory,
|
||||
score::Score,
|
||||
topology::{HttpServer, Topology, Url},
|
||||
topology::{HttpServer, Topology},
|
||||
};
|
||||
use harmony_types::id::Id;
|
||||
use harmony_types::net::Url;
|
||||
|
||||
/// Configure an HTTP server that is provided by the Topology
|
||||
///
|
||||
|
@ -1,15 +1,18 @@
|
||||
use async_trait::async_trait;
|
||||
use harmony_inventory_agent::local_presence::DiscoveryEvent;
|
||||
use log::{debug, info};
|
||||
use log::{debug, info, trace};
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use crate::{
|
||||
data::{Id, Version},
|
||||
data::Version,
|
||||
hardware::{HostCategory, Label, PhysicalHost},
|
||||
infra::inventory::InventoryRepositoryFactory,
|
||||
interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
|
||||
inventory::Inventory,
|
||||
score::Score,
|
||||
topology::Topology,
|
||||
};
|
||||
use harmony_types::id::Id;
|
||||
|
||||
/// This launches an harmony_inventory_agent discovery process
|
||||
/// This will allow us to register/update hosts running harmony_inventory_agent
|
||||
@ -40,20 +43,89 @@ struct DiscoverInventoryAgentInterpret {
|
||||
impl<T: Topology> Interpret<T> for DiscoverInventoryAgentInterpret {
|
||||
async fn execute(
|
||||
&self,
|
||||
inventory: &Inventory,
|
||||
topology: &T,
|
||||
_inventory: &Inventory,
|
||||
_topology: &T,
|
||||
) -> Result<Outcome, InterpretError> {
|
||||
harmony_inventory_agent::local_presence::discover_agents(
|
||||
self.score.discovery_timeout,
|
||||
|event: DiscoveryEvent| {
|
||||
println!("Discovery event {event:?}");
|
||||
|event: DiscoveryEvent| -> Result<(), String> {
|
||||
debug!("Discovery event {event:?}");
|
||||
match event {
|
||||
DiscoveryEvent::ServiceResolved(service) => info!("Found instance {service:?}"),
|
||||
_ => debug!("Unhandled event {event:?}"),
|
||||
DiscoveryEvent::ServiceResolved(service) => {
|
||||
let service_name = service.fullname.clone();
|
||||
info!("Found service {service_name}");
|
||||
|
||||
let address = match service.get_addresses().iter().next() {
|
||||
Some(address) => address,
|
||||
None => {
|
||||
return Err(format!(
|
||||
"Could not find address for service {service_name}"
|
||||
));
|
||||
}
|
||||
},
|
||||
};
|
||||
|
||||
let address = address.to_string();
|
||||
let port = service.get_port();
|
||||
|
||||
tokio::task::spawn(async move {
|
||||
info!("Getting inventory for host {address} at port {port}");
|
||||
let host =
|
||||
harmony_inventory_agent::client::get_host_inventory(&address, port)
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
trace!("Found host information {host:?}");
|
||||
// TODO its useless to have two distinct host types but requires a bit much
|
||||
// refactoring to do it now
|
||||
let harmony_inventory_agent::hwinfo::PhysicalHost {
|
||||
storage_drives,
|
||||
storage_controller,
|
||||
memory_modules,
|
||||
cpus,
|
||||
chipset,
|
||||
network_interfaces,
|
||||
management_interface,
|
||||
host_uuid,
|
||||
} = host;
|
||||
|
||||
let host = PhysicalHost {
|
||||
id: Id::from(host_uuid),
|
||||
category: HostCategory::Server,
|
||||
network: network_interfaces,
|
||||
storage: storage_drives,
|
||||
labels: vec![Label {
|
||||
name: "discovered-by".to_string(),
|
||||
value: "harmony-inventory-agent".to_string(),
|
||||
}],
|
||||
memory_modules,
|
||||
cpus,
|
||||
};
|
||||
|
||||
let repo = InventoryRepositoryFactory::build()
|
||||
.await
|
||||
.map_err(|e| format!("Could not build repository : {e}"))
|
||||
.unwrap();
|
||||
repo.save(&host)
|
||||
.await
|
||||
.map_err(|e| format!("Could not save host : {e}"))
|
||||
.unwrap();
|
||||
info!(
|
||||
"Saved new host id {}, summary : {}",
|
||||
host.id,
|
||||
host.summary()
|
||||
);
|
||||
todo!()
|
||||
});
|
||||
}
|
||||
_ => debug!("Unhandled event {event:?}"),
|
||||
};
|
||||
Ok(())
|
||||
},
|
||||
)
|
||||
.await;
|
||||
Ok(Outcome {
|
||||
status: InterpretStatus::SUCCESS,
|
||||
message: "Discovery process completed successfully".to_string(),
|
||||
})
|
||||
}
|
||||
|
||||
fn get_name(&self) -> InterpretName {
|
||||
|
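Condensed, the new discovery callback does this: take the resolved service's first address and port, fetch that agent's hardware report, map it into the repository's PhysicalHost, and persist it. A hedged sketch of that flow, where map_to_physical_host is a hypothetical helper standing in for the inline field mapping shown above:

async fn register_discovered_host(address: &str, port: u16) -> Result<(), String> {
    // Client helper added in this PR (harmony_inventory_agent/src/client.rs).
    let hw = harmony_inventory_agent::client::get_host_inventory(address, port).await?;
    // Field-by-field mapping into hardware::PhysicalHost, as written inline in the interpret above.
    let host = map_to_physical_host(hw);
    let repo = InventoryRepositoryFactory::build()
        .await
        .map_err(|e| format!("Could not build repository : {e}"))?;
    repo.save(&host)
        .await
        .map_err(|e| format!("Could not save host : {e}"))
}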
@ -3,12 +3,13 @@ use derive_new::new;
|
||||
use serde::Serialize;
|
||||
|
||||
use crate::{
|
||||
data::{Id, Version},
|
||||
data::Version,
|
||||
interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
|
||||
inventory::Inventory,
|
||||
score::Score,
|
||||
topology::Topology,
|
||||
};
|
||||
use harmony_types::id::Id;
|
||||
|
||||
#[derive(Debug, new, Clone, Serialize)]
|
||||
pub struct IpxeScore {
|
||||
|
@ -6,12 +6,13 @@ use serde::Serialize;
|
||||
|
||||
use crate::{
|
||||
config::HARMONY_DATA_DIR,
|
||||
data::{Id, Version},
|
||||
data::Version,
|
||||
interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
|
||||
inventory::Inventory,
|
||||
score::Score,
|
||||
topology::Topology,
|
||||
};
|
||||
use harmony_types::id::Id;
|
||||
|
||||
#[derive(Debug, Clone, Serialize)]
|
||||
pub struct K3DInstallationScore {
|
||||
|
@ -5,12 +5,13 @@ use log::info;
|
||||
use serde::{Serialize, de::DeserializeOwned};
|
||||
|
||||
use crate::{
|
||||
data::{Id, Version},
|
||||
data::Version,
|
||||
interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
|
||||
inventory::Inventory,
|
||||
score::Score,
|
||||
topology::{K8sclient, Topology},
|
||||
};
|
||||
use harmony_types::id::Id;
|
||||
|
||||
#[derive(Debug, Clone, Serialize)]
|
||||
pub struct K8sResourceScore<K: Resource + std::fmt::Debug> {
|
||||
|
@ -3,6 +3,7 @@ use dockerfile_builder::instruction::{CMD, COPY, ENV, EXPOSE, FROM, RUN, WORKDIR
|
||||
use dockerfile_builder::{Dockerfile, instruction_builder::EnvBuilder};
|
||||
use fqdn::fqdn;
|
||||
use harmony_macros::ingress_path;
|
||||
use harmony_types::net::Url;
|
||||
use non_blank_string_rs::NonBlankString;
|
||||
use serde_json::json;
|
||||
use std::collections::HashMap;
|
||||
@ -18,13 +19,14 @@ use crate::config::{REGISTRY_PROJECT, REGISTRY_URL};
|
||||
use crate::modules::k8s::ingress::K8sIngressScore;
|
||||
use crate::topology::HelmCommand;
|
||||
use crate::{
|
||||
data::{Id, Version},
|
||||
data::Version,
|
||||
interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
|
||||
inventory::Inventory,
|
||||
modules::k8s::deployment::K8sDeploymentScore,
|
||||
score::Score,
|
||||
topology::{K8sclient, Topology, Url},
|
||||
topology::{K8sclient, Topology},
|
||||
};
|
||||
use harmony_types::id::Id;
|
||||
|
||||
use super::helm::chart::HelmChartScore;
|
||||
|
||||
|
@ -3,12 +3,13 @@ use log::info;
|
||||
use serde::Serialize;
|
||||
|
||||
use crate::{
|
||||
data::{Id, Version},
|
||||
data::Version,
|
||||
interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
|
||||
inventory::Inventory,
|
||||
score::Score,
|
||||
topology::{LoadBalancer, LoadBalancerService, Topology},
|
||||
};
|
||||
use harmony_types::id::Id;
|
||||
|
||||
#[derive(Debug, Clone, Serialize)]
|
||||
pub struct LoadBalancerScore {
@ -20,8 +20,9 @@ use crate::{
},
prometheus::prometheus::{Prometheus, PrometheusReceiver},
},
topology::{Url, oberservability::monitoring::AlertReceiver},
topology::oberservability::monitoring::AlertReceiver,
};
use harmony_types::net::Url;

#[derive(Debug, Clone, Serialize)]
pub struct DiscordWebhook {
@ -19,8 +19,9 @@ use crate::{
},
prometheus::prometheus::{Prometheus, PrometheusReceiver},
},
topology::{Url, oberservability::monitoring::AlertReceiver},
topology::oberservability::monitoring::AlertReceiver,
};
use harmony_types::net::Url;

#[derive(Debug, Clone, Serialize)]
pub struct WebhookReceiver {
@ -4,7 +4,7 @@ use async_trait::async_trait;
use serde::Serialize;

use crate::{
data::{Id, Version},
data::Version,
interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
inventory::Inventory,
modules::{
@ -15,6 +15,7 @@ use crate::{
score::Score,
topology::{PreparationOutcome, Topology, oberservability::monitoring::AlertReceiver},
};
use harmony_types::id::Id;

#[derive(Debug, Clone, Serialize)]
pub struct ApplicationMonitoringScore {
@ -6,13 +6,14 @@ use serde::Serialize;
use strum::{Display, EnumString};

use crate::{
data::{Id, Version},
data::Version,
interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
inventory::Inventory,
modules::monitoring::ntfy::helm::ntfy_helm_chart::ntfy_helm_chart_score,
score::Score,
topology::{HelmCommand, K8sclient, MultiTargetTopology, Topology, k8s::K8sClient},
};
use harmony_types::id::Id;

#[derive(Debug, Clone, Serialize)]
pub struct NtfyScore {
@ -1,17 +1,19 @@
use askama::Template;
use async_trait::async_trait;
use derive_new::new;
use harmony_types::net::Url;
use serde::Serialize;
use std::net::IpAddr;

use crate::{
data::{FileContent, FilePath, Id, Version},
data::{FileContent, FilePath, Version},
interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
inventory::Inventory,
modules::{dhcp::DhcpScore, http::StaticFilesHttpScore, tftp::TftpScore},
score::Score,
topology::{DhcpServer, HttpServer, Router, TftpServer, Topology, Url},
topology::{DhcpServer, HttpServer, Router, TftpServer, Topology},
};
use harmony_types::id::Id;

#[derive(Debug, new, Clone, Serialize)]
pub struct OkdIpxeScore {
@ -5,12 +5,13 @@ use serde::Serialize;
use tokio::sync::RwLock;

use crate::{
data::{Id, Version},
data::Version,
interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
inventory::Inventory,
score::Score,
topology::Topology,
};
use harmony_types::id::Id;

#[derive(Debug, Clone)]
pub struct OPNsenseShellCommandScore {
@ -24,7 +24,7 @@ use crate::modules::monitoring::kube_prometheus::crd::service_monitor::{
use crate::topology::oberservability::monitoring::AlertReceiver;
use crate::topology::{K8sclient, Topology, k8s::K8sClient};
use crate::{
data::{Id, Version},
data::Version,
interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
inventory::Inventory,
modules::monitoring::kube_prometheus::crd::{
@ -37,6 +37,7 @@ use crate::{
},
score::Score,
};
use harmony_types::id::Id;

use super::prometheus::PrometheusApplicationMonitoring;

@ -9,12 +9,13 @@ use serde::{Deserialize, Serialize};
use tokio::time::sleep;

use crate::{
data::{Id, Version},
data::Version,
interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
inventory::Inventory,
score::Score,
topology::{K8sclient, Topology, k8s::K8sClient},
};
use harmony_types::id::Id;

#[derive(Debug, Clone, Serialize)]
pub struct CephRemoveOsd {
@ -3,15 +3,15 @@ use std::{sync::Arc, time::Duration};
use async_trait::async_trait;
use log::debug;
use serde::Serialize;
use tokio::time::Instant;

use crate::{
data::{Id, Version},
data::Version,
interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
inventory::Inventory,
score::Score,
topology::{K8sclient, Topology, k8s::K8sClient},
};
use harmony_types::id::Id;

#[derive(Clone, Debug, Serialize)]
pub struct CephVerifyClusterHealth {
@ -5,7 +5,7 @@ use async_trait::async_trait;
use serde::Serialize;

use crate::{
data::{Id, Version},
data::Version,
interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
inventory::Inventory,
score::Score,
@ -14,6 +14,7 @@ use crate::{
tenant::{TenantConfig, TenantManager},
},
};
use harmony_types::id::Id;

#[derive(Debug, Serialize, Clone)]
pub struct TenantScore {
@ -3,12 +3,14 @@ use derive_new::new;
use serde::Serialize;

use crate::{
data::{Id, Version},
data::Version,
interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
inventory::Inventory,
score::Score,
topology::{Router, TftpServer, Topology, Url},
topology::{Router, TftpServer, Topology},
};
use harmony_types::id::Id;
use harmony_types::net::Url;

#[derive(Debug, new, Clone, Serialize)]
pub struct TftpScore {
@ -12,6 +12,9 @@ log.workspace = true
env_logger.workspace = true
tokio.workspace = true
thiserror.workspace = true
reqwest.workspace = true
# mdns-sd = "0.14.1"
mdns-sd = { git = "https://github.com/jggc/mdns-sd.git", branch = "patch-1" }
local-ip-address = "0.6.5"
harmony_types = { path = "../harmony_types" }
harmony_macros = { path = "../harmony_macros" }
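The pattern repeated across the hunks above is that Id and Url now come from the shared harmony_types crate instead of the harmony crate's own data and topology modules. A minimal sketch of the new imports (the folder path below is only a placeholder):

use harmony_types::id::Id;
use harmony_types::net::Url;

fn main() {
    let id = Id::default();
    // Placeholder path; Url::LocalFolder simply wraps whatever string it is given.
    let url = Url::LocalFolder("/srv/http_files".to_string());
    println!("{id} {url}");
}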
15
harmony_inventory_agent/src/client.rs
Normal file
@ -0,0 +1,15 @@
use crate::hwinfo::PhysicalHost;

pub async fn get_host_inventory(host: &str, port: u16) -> Result<PhysicalHost, String> {
    let url = format!("http://{host}:{port}/inventory");
    let client = reqwest::Client::new();
    let response = client
        .get(url)
        .send()
        .await
        .map_err(|e| format!("Failed to download file: {e}"))?;

    let host = response.json().await.map_err(|e| e.to_string())?;

    Ok(host)
}
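For reference, a minimal sketch of a call site for get_host_inventory; the address, port, and the tokio runtime setup are placeholder assumptions, not defaults provided by the crate:

use harmony_inventory_agent::client::get_host_inventory;

#[tokio::main]
async fn main() {
    // Placeholder host/port: point this at wherever an inventory agent is listening.
    match get_host_inventory("192.168.1.50", 8080).await {
        Ok(host) => println!("host uuid: {}", host.host_uuid),
        Err(e) => eprintln!("inventory fetch failed: {e}"),
    }
}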
@ -1,3 +1,4 @@
use harmony_types::net::MacAddress;
use log::{debug, warn};
use serde::{Deserialize, Serialize};
use serde_json::Value;
@ -18,7 +19,7 @@ pub struct PhysicalHost {
pub host_uuid: String,
}

#[derive(Serialize, Deserialize, Debug)]
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct StorageDrive {
pub name: String,
pub model: String,
@ -32,13 +33,30 @@ pub struct StorageDrive {
pub smart_status: Option<String>,
}

impl StorageDrive {
pub fn dummy() -> Self {
Self {
name: String::new(),
model: String::new(),
serial: String::new(),
size_bytes: 0,
logical_block_size: 0,
physical_block_size: 0,
rotational: false,
wwn: None,
interface_type: String::new(),
smart_status: None,
}
}
}

#[derive(Serialize, Deserialize, Debug)]
pub struct StorageController {
pub name: String,
pub driver: String,
}

#[derive(Serialize, Deserialize, Debug)]
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct MemoryModule {
pub size_bytes: u64,
pub speed_mhz: Option<u32>,
@ -48,7 +66,7 @@ pub struct MemoryModule {
pub rank: Option<u8>,
}

#[derive(Serialize, Deserialize, Debug)]
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct CPU {
pub model: String,
pub vendor: String,
@ -63,10 +81,10 @@ pub struct Chipset {
pub vendor: String,
}

#[derive(Serialize, Deserialize, Debug)]
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct NetworkInterface {
pub name: String,
pub mac_address: String,
pub mac_address: MacAddress,
pub speed_mbps: Option<u32>,
pub is_up: bool,
pub mtu: u32,
@ -76,6 +94,24 @@ pub struct NetworkInterface {
pub firmware_version: Option<String>,
}

impl NetworkInterface {
pub fn dummy() -> Self {
use harmony_macros::mac_address;

Self {
name: String::new(),
mac_address: mac_address!("00:00:00:00:00:00"),
speed_mbps: Some(0),
is_up: false,
mtu: 0,
ipv4_addresses: vec![],
ipv6_addresses: vec![],
driver: String::new(),
firmware_version: None,
}
}
}

#[derive(Serialize, Deserialize, Debug)]
pub struct ManagementInterface {
pub kind: String,
@ -509,6 +545,7 @@ impl PhysicalHost {

let mac_address = Self::read_sysfs_string(&iface_path.join("address"))
.map_err(|e| format!("Failed to read MAC address for {}: {}", iface_name, e))?;
let mac_address = MacAddress::try_from(mac_address).map_err(|e| e.to_string())?;

let speed_mbps = if iface_path.join("speed").exists() {
match Self::read_sysfs_u32(&iface_path.join("speed")) {
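As a quick illustration of the additions above, a hypothetical snippet exercising the new dummy() constructors, the typed mac_address field, and the Clone derives (assuming harmony_inventory_agent is a dependency of the caller):

use harmony_inventory_agent::hwinfo::{NetworkInterface, StorageDrive};

fn main() {
    let nic = NetworkInterface::dummy();
    let drive = StorageDrive::dummy();
    // mac_address is now a harmony_types MacAddress, and both structs derive Clone.
    let _nic_copy = nic.clone();
    let _drive_copy = drive.clone();
    println!("{}", nic.mac_address);
}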
@ -1,2 +1,3 @@
mod hwinfo;
pub mod client;
pub mod hwinfo;
pub mod local_presence;
@ -1,10 +1,13 @@
use log::{debug, error};
use mdns_sd::{ServiceDaemon, ServiceEvent};

use crate::local_presence::SERVICE_NAME;

pub type DiscoveryEvent = ServiceEvent;

pub fn discover_agents(timeout: Option<u64>, on_event: impl Fn(DiscoveryEvent) + Send + 'static)
pub async fn discover_agents<F>(timeout: Option<u64>, on_event: F)
where
F: FnOnce(DiscoveryEvent) -> Result<(), String> + Send + 'static + Copy,
{
// Create a new mDNS daemon.
let mdns = ServiceDaemon::new().expect("Failed to create mDNS daemon");
@ -13,23 +16,24 @@ pub fn discover_agents(timeout: Option<u64>, on_event: impl Fn(DiscoveryEvent) +
// The receiver will be a stream of events.
let receiver = mdns.browse(SERVICE_NAME).expect("Failed to browse");

std::thread::spawn(move || {
tokio::task::spawn_blocking(move || {
while let Ok(event) = receiver.recv() {
on_event(event.clone());
if let Err(e) = on_event(event.clone()) {
error!("Event callback failed : {e}");
}
match event {
ServiceEvent::ServiceResolved(resolved) => {
println!("Resolved a new service: {}", resolved.fullname);
debug!("Resolved a new service: {}", resolved.fullname);
}
other_event => {
println!("Received other event: {:?}", &other_event);
debug!("Received other event: {:?}", &other_event);
}
}
}
});

if let Some(timeout) = timeout {
// Gracefully shutdown the daemon.
std::thread::sleep(std::time::Duration::from_secs(timeout));
tokio::time::sleep(std::time::Duration::from_secs(timeout)).await;
mdns.shutdown().unwrap();
}
}
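For reference, a minimal sketch of how the new async signature might be called. The module path of discover_agents is not shown in this diff, so the import is omitted, and the 10-second timeout is an arbitrary value:

async fn browse_for_agents() {
    // A non-capturing closure is Copy, so it satisfies the FnOnce + Copy bound.
    discover_agents(Some(10), |event| {
        println!("discovery event: {event:?}");
        Ok(())
    })
    .await;
}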
@ -6,4 +6,6 @@ readme.workspace = true
license.workspace = true

[dependencies]
serde = { version = "1.0.209", features = ["derive"] }
serde.workspace = true
url.workspace = true
rand.workspace = true
72
harmony_types/src/id.rs
Normal file
@ -0,0 +1,72 @@
use rand::distr::Alphanumeric;
use rand::distr::SampleString;
use std::str::FromStr;
use std::time::SystemTime;
use std::time::UNIX_EPOCH;

use serde::{Deserialize, Serialize};

/// A unique identifier designed for ease of use.
///
/// You can pass it any String to use as an Id, or you can use the default format with `Id::default()`.
///
/// The default format looks like this:
///
/// `462d4c_g2COgai`
///
/// The first part is the unix timestamp in hexadecimal, which makes Ids easily sortable by creation time.
/// The second part is a series of 7 random characters.
///
/// **It is not meant to be very secure or unique**; it is suitable for generating up to 10 000 items per
/// second with a reasonable collision rate of 0.000014% as calculated by this calculator: https://kevingal.com/apps/collision.html
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct Id {
    value: String,
}

impl Id {
    pub fn empty() -> Self {
        Id {
            value: String::new(),
        }
    }
}

impl FromStr for Id {
    type Err = ();

    fn from_str(s: &str) -> Result<Self, Self::Err> {
        Ok(Id {
            value: s.to_string(),
        })
    }
}

impl From<String> for Id {
    fn from(value: String) -> Self {
        Self { value }
    }
}

impl std::fmt::Display for Id {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.write_str(&self.value)
    }
}

impl Default for Id {
    fn default() -> Self {
        let start = SystemTime::now();
        let since_the_epoch = start
            .duration_since(UNIX_EPOCH)
            .expect("Time went backwards");
        let timestamp = since_the_epoch.as_secs();

        let hex_timestamp = format!("{:x}", timestamp & 0xffffff);

        let random_part: String = Alphanumeric.sample_string(&mut rand::rng(), 7);

        let value = format!("{}_{}", hex_timestamp, random_part);
        Self { value }
    }
}
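A small sketch of how Id behaves, based on the doc comment and impls above; the printed value differs on every run because the suffix is random:

use harmony_types::id::Id;
use std::str::FromStr;

fn main() {
    let generated = Id::default(); // e.g. "462d4c_g2COgai": hex timestamp prefix + 7 random characters
    let parsed = Id::from_str("my-host-01").unwrap(); // any string is accepted verbatim
    assert_eq!(parsed, Id::from("my-host-01".to_string()));
    println!("{generated}");
}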
@ -1,28 +1,2 @@
pub mod net {
use serde::Serialize;

#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, Serialize)]
pub struct MacAddress(pub [u8; 6]);

impl MacAddress {
#[cfg(test)]
pub fn dummy() -> Self {
Self([0, 0, 0, 0, 0, 0])
}
}

impl From<&MacAddress> for String {
fn from(value: &MacAddress) -> Self {
format!(
"{:02x}:{:02x}:{:02x}:{:02x}:{:02x}:{:02x}",
value.0[0], value.0[1], value.0[2], value.0[3], value.0[4], value.0[5]
)
}
}

impl std::fmt::Display for MacAddress {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.write_fmt(format_args!("MacAddress {}", String::from(self)))
}
}
}
pub mod id;
pub mod net;
79
harmony_types/src/net.rs
Normal file
@ -0,0 +1,79 @@
use serde::{Deserialize, Serialize};

#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub struct MacAddress(pub [u8; 6]);

impl MacAddress {
    #[cfg(test)]
    pub fn dummy() -> Self {
        Self([0, 0, 0, 0, 0, 0])
    }
}

impl From<&MacAddress> for String {
    fn from(value: &MacAddress) -> Self {
        format!(
            "{:02x}:{:02x}:{:02x}:{:02x}:{:02x}:{:02x}",
            value.0[0], value.0[1], value.0[2], value.0[3], value.0[4], value.0[5]
        )
    }
}

impl std::fmt::Display for MacAddress {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.write_fmt(format_args!("MacAddress {}", String::from(self)))
    }
}

impl TryFrom<String> for MacAddress {
    type Error = std::io::Error;

    fn try_from(value: String) -> Result<Self, Self::Error> {
        let parts: Vec<&str> = value.split(':').collect();
        if parts.len() != 6 {
            return Err(std::io::Error::new(
                std::io::ErrorKind::InvalidInput,
                "Invalid MAC address format: expected 6 colon-separated hex pairs",
            ));
        }
        let mut bytes = [0u8; 6];
        for (i, part) in parts.iter().enumerate() {
            bytes[i] = u8::from_str_radix(part, 16).map_err(|_| {
                std::io::Error::new(
                    std::io::ErrorKind::InvalidInput,
                    format!("Invalid hex value in part {}: '{}'", i, part),
                )
            })?;
        }
        Ok(MacAddress(bytes))
    }
}

pub type IpAddress = std::net::IpAddr;

#[derive(Debug, Clone)]
pub enum Url {
    LocalFolder(String),
    Url(url::Url),
}

impl Serialize for Url {
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: serde::Serializer,
    {
        match self {
            Url::LocalFolder(path) => serializer.serialize_str(path),
            Url::Url(url) => serializer.serialize_str(url.as_str()),
        }
    }
}

impl std::fmt::Display for Url {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            Url::LocalFolder(path) => write!(f, "{}", path),
            Url::Url(url) => write!(f, "{}", url),
        }
    }
}
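A quick sketch of the MacAddress round-trip, based on the TryFrom and From impls above:

use harmony_types::net::MacAddress;

fn main() {
    let mac = MacAddress::try_from("aa:bb:cc:00:11:22".to_string()).expect("valid MAC");
    assert_eq!(String::from(&mac), "aa:bb:cc:00:11:22");

    // Malformed input is rejected with a std::io::Error.
    assert!(MacAddress::try_from("not-a-mac".to_string()).is_err());
}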
8
migrations/20250830163356_Physical_hosts.sql
Normal file
@ -0,0 +1,8 @@
-- Add migration script here
CREATE TABLE IF NOT EXISTS physical_hosts (
    version_id TEXT PRIMARY KEY NOT NULL,
    id TEXT NOT NULL,
    data JSON NOT NULL
);
CREATE INDEX IF NOT EXISTS idx_host_id_time
    ON physical_hosts (id, version_id DESC);
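A hedged sketch of reading the newest inventory snapshot for one host from this table with sqlx (already a workspace dependency); the database URL and the host id value are placeholders, and error handling is minimal:

use sqlx::sqlite::SqlitePoolOptions;

#[tokio::main]
async fn main() -> Result<(), sqlx::Error> {
    // Placeholder database URL.
    let pool = SqlitePoolOptions::new().connect("sqlite://harmony.db").await?;

    // version_id embeds a hex timestamp prefix (see harmony_types::id::Id), so ordering
    // by it descending returns the most recent row first, matching idx_host_id_time.
    let latest: Option<(String, String)> = sqlx::query_as(
        "SELECT version_id, data FROM physical_hosts WHERE id = ? ORDER BY version_id DESC LIMIT 1",
    )
    .bind("my-host-01")
    .fetch_optional(&pool)
    .await?;

    if let Some((version, data)) = latest {
        println!("latest snapshot {version}: {data}");
    }
    Ok(())
}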