Compare commits
9 Commits
feat/worke...
c89c30e8f2
| Author | SHA1 | Date |
|---|---|---|
|  | c89c30e8f2 |  |
|  | 148504439e |  |
|  | ca026e1d9e |  |
|  | 325d7891be |  |
|  | 4ea1af8d72 |  |
|  | cab4eb19ed |  |
|  | b5beda8efe |  |
|  | 4f7b0541f4 |  |
|  | adc14c052d |  |
@@ -1,7 +1,6 @@
 use async_trait::async_trait;
 use harmony_macros::ip;
 use harmony_types::{
-    id::Id,
     net::{MacAddress, Url},
     switch::PortLocation,
 };
@@ -1,182 +0,0 @@
-use k8s_openapi::Resource as K8sResource;
-use kube::api::{ApiResource, DynamicObject, GroupVersionKind};
-use kube::core::TypeMeta;
-use serde::Serialize;
-use serde::de::DeserializeOwned;
-use serde_json::Value;
-
-/// Convert a typed Kubernetes resource `K` into a `DynamicObject`.
-///
-/// Requirements:
-/// - `K` must be a k8s_openapi resource (provides static GVK via `Resource`).
-/// - `K` must have standard Kubernetes shape (metadata + payload fields).
-///
-/// Notes:
-/// - We set `types` (apiVersion/kind) and copy `metadata`.
-/// - We place the remaining top-level fields into `obj.data` as JSON.
-/// - Scope is not encoded on the object itself; you still need the corresponding
-///   `DynamicResource` (derived from K::group/version/kind) when constructing an Api.
-///
-/// Example usage:
-///     let dyn_obj = kube_resource_to_dynamic(secret)?;
-///     let api: Api<DynamicObject> = Api::namespaced_with(client, "ns", &dr);
-///     api.patch(&dyn_obj.name_any(), &PatchParams::apply("mgr"), &Patch::Apply(dyn_obj)).await?;
-pub fn kube_resource_to_dynamic<K>(res: &K) -> Result<DynamicObject, String>
-where
-    K: K8sResource + Serialize + DeserializeOwned,
-{
-    // Serialize the typed resource to JSON so we can split metadata and payload
-    let mut v = serde_json::to_value(res).map_err(|e| format!("Failed to serialize : {e}"))?;
-    let obj = v
-        .as_object_mut()
-        .ok_or_else(|| "expected object JSON".to_string())?;
-
-    // Extract and parse metadata into kube::core::ObjectMeta
-    let metadata_value = obj
-        .remove("metadata")
-        .ok_or_else(|| "missing metadata".to_string())?;
-    let metadata: kube::core::ObjectMeta = serde_json::from_value(metadata_value)
-        .map_err(|e| format!("Failed to deserialize : {e}"))?;
-
-    // Name is required for DynamicObject::new; prefer metadata.name
-    let name = metadata
-        .name
-        .clone()
-        .ok_or_else(|| "metadata.name is required".to_string())?;
-
-    // Remaining fields (spec/status/data/etc.) become the dynamic payload
-    let payload = Value::Object(obj.clone());
-
-    // Construct the DynamicObject
-    let mut dyn_obj = DynamicObject::new(
-        &name,
-        &ApiResource::from_gvk(&GroupVersionKind::gvk(K::GROUP, K::VERSION, K::KIND)),
-    );
-    dyn_obj.types = Some(TypeMeta {
-        api_version: api_version_for::<K>(),
-        kind: K::KIND.into(),
-    });
-
-    // Preserve namespace/labels/annotations/etc.
-    dyn_obj.metadata = metadata;
-
-    // Attach payload
-    dyn_obj.data = payload;
-
-    Ok(dyn_obj)
-}
-
-/// Helper: compute apiVersion string ("group/version" or "v1" for core).
-fn api_version_for<K>() -> String
-where
-    K: K8sResource,
-{
-    let group = K::GROUP;
-    let version = K::VERSION;
-    if group.is_empty() {
-        version.to_string() // core/v1 => "v1"
-    } else {
-        format!("{}/{}", group, version)
-    }
-}
-
-#[cfg(test)]
-mod test {
-    use super::*;
-    use k8s_openapi::api::{
-        apps::v1::{Deployment, DeploymentSpec},
-        core::v1::{PodTemplateSpec, Secret},
-    };
-    use kube::api::ObjectMeta;
-    use pretty_assertions::assert_eq;
-
-    #[test]
-    fn secret_to_dynamic_roundtrip() {
-        // Create a sample Secret resource
-        let mut secret = Secret {
-            metadata: ObjectMeta {
-                name: Some("my-secret".to_string()),
-                ..Default::default()
-            },
-            type_: Some("kubernetes.io/service-account-token".to_string()),
-            ..Default::default()
-        };
-
-        // Convert to DynamicResource
-        let dynamic: DynamicObject =
-            kube_resource_to_dynamic(&secret).expect("Failed to convert Secret to DynamicResource");
-
-        // Serialize both the original and dynamic resources to Value
-        let original_value = serde_json::to_value(&secret).expect("Failed to serialize Secret");
-        let dynamic_value =
-            serde_json::to_value(&dynamic).expect("Failed to serialize DynamicResource");
-
-        // Assert that they are identical
-        assert_eq!(original_value, dynamic_value);
-
-        secret.metadata.namespace = Some("false".to_string());
-        let modified_value = serde_json::to_value(&secret).expect("Failed to serialize Secret");
-        assert_ne!(modified_value, dynamic_value);
-    }
-
-    #[test]
-    fn deployment_to_dynamic_roundtrip() {
-        // Create a sample Deployment with nested structures
-        let mut deployment = Deployment {
-            metadata: ObjectMeta {
-                name: Some("my-deployment".to_string()),
-                labels: Some({
-                    let mut map = std::collections::BTreeMap::new();
-                    map.insert("app".to_string(), "nginx".to_string());
-                    map
-                }),
-                ..Default::default()
-            },
-            spec: Some(DeploymentSpec {
-                replicas: Some(3),
-                selector: Default::default(),
-                template: PodTemplateSpec {
-                    metadata: Some(ObjectMeta {
-                        labels: Some({
-                            let mut map = std::collections::BTreeMap::new();
-                            map.insert("app".to_string(), "nginx".to_string());
-                            map
-                        }),
-                        ..Default::default()
-                    }),
-                    spec: Some(Default::default()), // PodSpec with empty containers for simplicity
-                },
-                ..Default::default()
-            }),
-            ..Default::default()
-        };
-
-        let dynamic = kube_resource_to_dynamic(&deployment).expect("Failed to convert Deployment");
-
-        let original_value = serde_json::to_value(&deployment).unwrap();
-        let dynamic_value = serde_json::to_value(&dynamic).unwrap();
-
-        assert_eq!(original_value, dynamic_value);
-
-        assert_eq!(
-            dynamic.data.get("spec").unwrap().get("replicas").unwrap(),
-            3
-        );
-        assert_eq!(
-            dynamic
-                .data
-                .get("spec")
-                .unwrap()
-                .get("template")
-                .unwrap()
-                .get("metadata")
-                .unwrap()
-                .get("labels")
-                .unwrap()
-                .get("app")
-                .unwrap()
-                .as_str()
-                .unwrap(),
-            "nginx".to_string()
-        );
-    }
-}
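Note on the deletion above: k8s_openapi types serialize with their `apiVersion` and `kind` included, so a plain serde round-trip can stand in for the removed helper. A minimal sketch of that alternative (assuming the `kube`, `k8s_openapi`, and `serde_json` crates; this is illustrative, not code from this change):

```rust
use k8s_openapi::api::core::v1::Secret;
use kube::api::{DynamicObject, ObjectMeta};

// Sketch: convert a typed resource to a DynamicObject via a serde round-trip.
// Works because k8s_openapi types emit apiVersion/kind when serialized.
fn to_dynamic(secret: &Secret) -> Result<DynamicObject, serde_json::Error> {
    serde_json::from_value(serde_json::to_value(secret)?)
}

fn main() {
    let secret = Secret {
        metadata: ObjectMeta {
            name: Some("my-secret".to_string()),
            ..Default::default()
        },
        ..Default::default()
    };
    let dynamic = to_dynamic(&secret).expect("conversion failed");
    // The TypeMeta is carried over from the typed resource's serialization.
    assert_eq!(
        dynamic.types.as_ref().map(|t| t.kind.as_str()),
        Some("Secret")
    );
}
```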
@@ -3,7 +3,6 @@ pub mod executors;
 pub mod hp_ilo;
 pub mod intel_amt;
 pub mod inventory;
-pub mod kube;
 pub mod network_manager;
 pub mod opnsense;
 mod sqlx;
@@ -135,6 +135,8 @@ impl OpenShiftNmStateNetworkManager {
                 description: Some(format!("Member of bond {bond_name}")),
                 r#type: nmstate::InterfaceType::Ethernet,
                 state: "up".to_string(),
+                mtu: Some(switch_port.interface.mtu),
+                mac_address: Some(switch_port.interface.mac_address.to_string()),
                 ipv4: Some(nmstate::IpStackSpec {
                     enabled: Some(false),
                     ..Default::default()
@@ -160,7 +162,7 @@ impl OpenShiftNmStateNetworkManager {

         interfaces.push(nmstate::Interface {
             name: bond_name.to_string(),
-            description: Some(format!("HARMONY - Network bond for host {host}")),
+            description: Some(format!("Network bond for host {host}")),
             r#type: nmstate::InterfaceType::Bond,
             state: "up".to_string(),
             copy_mac_from,
@@ -239,7 +241,7 @@ impl OpenShiftNmStateNetworkManager {
             .and_then(|network_state| network_state.status.current_state.as_ref())
             .map_or(&interfaces, |current_state| &current_state.interfaces)
             .iter()
-            .filter(|i| i.r#type == nmstate::InterfaceType::Bond)
+            .filter(|i| i.r#type == nmstate::InterfaceType::Bond && i.link_aggregation.is_some())
             .collect();

         let used_ids: HashSet<u32> = existing_bonds
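The tightened filter keeps only interfaces that are bond-typed and actually carry a `link_aggregation` section, so `used_ids` is built from real bonds. A minimal sketch of picking the lowest free bond id from such a set (the helper name is illustrative, not from this change):

```rust
use std::collections::HashSet;

// Illustrative helper: lowest bond id not already taken.
fn next_bond_id(used_ids: &HashSet<u32>) -> u32 {
    (0u32..)
        .find(|id| !used_ids.contains(id))
        .expect("bond ids exhausted")
}

fn main() {
    let used: HashSet<u32> = [0, 1, 3].into_iter().collect();
    assert_eq!(next_bond_id(&used), 2); // bond2 is the first gap
}
```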
@@ -1,21 +1,15 @@
 use async_trait::async_trait;
 use derive_new::new;
 use harmony_types::id::Id;
-use log::{debug, info};
+use log::info;
 use serde::Serialize;

 use crate::{
     data::Version,
-    hardware::PhysicalHost,
-    infra::inventory::InventoryRepositoryFactory,
     interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
-    inventory::{HostRole, Inventory},
-    modules::{
-        dhcp::DhcpHostBindingScore, http::IPxeMacBootFileScore,
-        inventory::DiscoverHostForRoleScore, okd::templates::BootstrapIpxeTpl,
-    },
+    inventory::Inventory,
     score::Score,
-    topology::{HAClusterTopology, HostBinding},
+    topology::HAClusterTopology,
 };

 // -------------------------------------------------------------------------------------------------
@@ -58,159 +52,6 @@ impl OKDSetup04WorkersInterpret {
         info!("[Workers] Rendering per-MAC PXE for workers and rebooting");
         Ok(())
     }
-
-    /// Ensures that three physical hosts are discovered and available for the ControlPlane role.
-    /// It will trigger discovery if not enough hosts are found.
-    async fn get_nodes(
-        &self,
-        inventory: &Inventory,
-        topology: &HAClusterTopology,
-    ) -> Result<Vec<PhysicalHost>, InterpretError> {
-        const REQUIRED_HOSTS: usize = 2;
-        let repo = InventoryRepositoryFactory::build().await?;
-        let mut control_plane_hosts = repo.get_host_for_role(&HostRole::Worker).await?;
-
-        while control_plane_hosts.len() < REQUIRED_HOSTS {
-            info!(
-                "Discovery of {} control plane hosts in progress, current number {}",
-                REQUIRED_HOSTS,
-                control_plane_hosts.len()
-            );
-            // This score triggers the discovery agent for a specific role.
-            DiscoverHostForRoleScore {
-                role: HostRole::Worker,
-            }
-            .interpret(inventory, topology)
-            .await?;
-            control_plane_hosts = repo.get_host_for_role(&HostRole::Worker).await?;
-        }
-
-        if control_plane_hosts.len() < REQUIRED_HOSTS {
-            Err(InterpretError::new(format!(
-                "OKD Requires at least {} control plane hosts, but only found {}. Cannot proceed.",
-                REQUIRED_HOSTS,
-                control_plane_hosts.len()
-            )))
-        } else {
-            // Take exactly the number of required hosts to ensure consistency.
-            Ok(control_plane_hosts
-                .into_iter()
-                .take(REQUIRED_HOSTS)
-                .collect())
-        }
-    }
-
-    /// Configures DHCP host bindings for all control plane nodes.
-    async fn configure_host_binding(
-        &self,
-        inventory: &Inventory,
-        topology: &HAClusterTopology,
-        nodes: &Vec<PhysicalHost>,
-    ) -> Result<(), InterpretError> {
-        info!("[Worker] Configuring host bindings for worker nodes.");
-
-        // Ensure the topology definition matches the number of physical nodes found.
-        if topology.control_plane.len() != nodes.len() {
-            return Err(InterpretError::new(format!(
-                "Mismatch between logical control plane hosts defined in topology ({}) and physical nodes found ({}).",
-                topology.control_plane.len(),
-                nodes.len()
-            )));
-        }
-
-        // Create a binding for each physical host to its corresponding logical host.
-        let bindings: Vec<HostBinding> = topology
-            .control_plane
-            .iter()
-            .zip(nodes.iter())
-            .map(|(logical_host, physical_host)| {
-                info!(
-                    "Creating binding: Logical Host '{}' -> Physical Host ID '{}'",
-                    logical_host.name, physical_host.id
-                );
-                HostBinding {
-                    logical_host: logical_host.clone(),
-                    physical_host: physical_host.clone(),
-                }
-            })
-            .collect();
-
-        DhcpHostBindingScore {
-            host_binding: bindings,
-            domain: Some(topology.domain_name.clone()),
-        }
-        .interpret(inventory, topology)
-        .await?;
-
-        Ok(())
-    }
-
-    /// Renders and deploys a per-MAC iPXE boot file for each control plane node.
-    async fn configure_ipxe(
-        &self,
-        inventory: &Inventory,
-        topology: &HAClusterTopology,
-        nodes: &Vec<PhysicalHost>,
-    ) -> Result<(), InterpretError> {
-        info!("[Worker] Rendering per-MAC iPXE configurations.");
-
-        // The iPXE script content is the same for all control plane nodes,
-        // pointing to the 'master.ign' ignition file.
-        let content = BootstrapIpxeTpl {
-            http_ip: &topology.http_server.get_ip().to_string(),
-            scos_path: "scos",
-            ignition_http_path: "okd_ignition_files",
-            installation_device: "/dev/sda", // This might need to be configurable per-host in the future
-            ignition_file_name: "worker.ign", // Worker nodes use the worker ignition file
-        }
-        .to_string();
-
-        debug!("[Worker] iPXE content template:\n{content}");
-
-        // Create and apply an iPXE boot file for each node.
-        for node in nodes {
-            let mac_address = node.get_mac_address();
-            if mac_address.is_empty() {
-                return Err(InterpretError::new(format!(
-                    "Physical host with ID '{}' has no MAC addresses defined.",
-                    node.id
-                )));
-            }
-            info!(
-                "[Worker] Applying iPXE config for node ID '{}' with MACs: {:?}",
-                node.id, mac_address
-            );
-
-            IPxeMacBootFileScore {
-                mac_address,
-                content: content.clone(),
-            }
-            .interpret(inventory, topology)
-            .await?;
-        }
-
-        Ok(())
-    }
-
-    /// Prompts the user to reboot the target control plane nodes.
-    async fn reboot_targets(&self, nodes: &Vec<PhysicalHost>) -> Result<(), InterpretError> {
-        let node_ids: Vec<String> = nodes.iter().map(|n| n.id.to_string()).collect();
-        info!("[Worker] Requesting reboot for control plane nodes: {node_ids:?}",);
-
-        let confirmation = inquire::Confirm::new(
-            &format!("Please reboot the {} worker nodes ({}) to apply their PXE configuration. Press enter when ready.", nodes.len(), node_ids.join(", ")),
-        )
-        .prompt()
-        .map_err(|e| InterpretError::new(format!("User prompt failed: {e}")))?;
-
-        if !confirmation {
-            return Err(InterpretError::new(
-                "User aborted the operation.".to_string(),
-            ));
-        }
-
-        Ok(())
-    }
 }

 #[async_trait]
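The removed `get_nodes` looped until the inventory reported enough hosts, triggering discovery on each pass and then taking exactly the required count (its doc comment said three control-plane hosts, though the code required two workers). A synchronous sketch of that control flow, with illustrative names rather than the crate's API:

```rust
// Sketch of the removed discovery loop: keep discovering until `required`
// hosts are known, then take exactly that many for consistency.
fn wait_for_hosts(
    required: usize,
    mut known: Vec<String>,
    mut discover: impl FnMut() -> Option<String>,
) -> Result<Vec<String>, String> {
    while known.len() < required {
        match discover() {
            Some(host) => known.push(host),
            None => return Err(format!("only found {} of {required} hosts", known.len())),
        }
    }
    Ok(known.into_iter().take(required).collect())
}

fn main() {
    let mut pending = vec!["worker-1".to_string(), "worker-0".to_string()];
    let hosts = wait_for_hosts(2, Vec::new(), move || pending.pop()).unwrap();
    assert_eq!(hosts.len(), 2);
}
```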
@@ -233,23 +74,10 @@ impl Interpret<HAClusterTopology> for OKDSetup04WorkersInterpret {

     async fn execute(
         &self,
-        inventory: &Inventory,
-        topology: &HAClusterTopology,
+        _inventory: &Inventory,
+        _topology: &HAClusterTopology,
     ) -> Result<Outcome, InterpretError> {
         self.render_and_reboot().await?;
-        // 1. Ensure we have 2 physical hosts for the worker nodes.
-        let nodes = self.get_nodes(inventory, topology).await?;
-
-        // 2. Create DHCP reservations for the worker nodes.
-        self.configure_host_binding(inventory, topology, &nodes)
-            .await?;
-
-        // 3. Create iPXE files for each worker node to boot from the worker ignition.
-        self.configure_ipxe(inventory, topology, &nodes).await?;
-
-        // 4. Reboot the nodes to start the OS installation.
-        self.reboot_targets(&nodes).await?;
-
         Ok(Outcome::success("Workers provisioned".into()))
     }
 }
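Underscore-prefixing the now-unused `inventory` and `topology` parameters keeps the `Interpret` trait signature intact while silencing unused-variable warnings, the standard Rust idiom sketched below with hypothetical names:

```rust
trait Step {
    fn run(&self, input: &str) -> String;
}

struct Noop;

impl Step for Noop {
    // `_input` is required by the trait but intentionally unused here;
    // the leading underscore suppresses the unused-variable warning.
    fn run(&self, _input: &str) -> String {
        "done".to_string()
    }
}

fn main() {
    assert_eq!(Noop.run("ignored"), "done");
}
```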
@@ -417,7 +417,6 @@ pub struct EthernetSpec {
 #[serde(rename_all = "kebab-case")]
 pub struct BondSpec {
     pub mode: String,
-    #[serde(alias = "port")]
     pub ports: Vec<String>,
     #[serde(skip_serializing_if = "Option::is_none")]
     pub options: Option<BTreeMap<String, Value>>,
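Dropping `#[serde(alias = "port")]` means input that uses the singular `port` key no longer deserializes into `ports`. A self-contained sketch of what the alias allowed, using a simplified struct and assuming the serde and serde_json crates:

```rust
use serde::Deserialize;

#[derive(Deserialize, Debug)]
#[serde(rename_all = "kebab-case")]
struct BondSpec {
    mode: String,
    #[serde(alias = "port")] // with the alias, both key spellings below parse
    ports: Vec<String>,
}

fn main() {
    let a: BondSpec = serde_json::from_str(r#"{"mode":"802.3ad","port":["eno1"]}"#).unwrap();
    let b: BondSpec = serde_json::from_str(r#"{"mode":"802.3ad","ports":["eno1"]}"#).unwrap();
    // Removing the alias turns the "port" form into a deserialization error.
    assert_eq!(a.ports, b.ports);
}
```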
@@ -1,6 +1,6 @@
 use async_trait::async_trait;
 use harmony_types::id::Id;
-use log::{info, warn};
+use log::{debug, info, warn};
 use serde::Serialize;

 use crate::{
@@ -150,6 +150,15 @@ impl HostNetworkConfigurationInterpret {
         ];

         for config in configs {
+            let host = self
+                .score
+                .hosts
+                .iter()
+                .find(|h| h.id == config.host_id)
+                .unwrap();
+
+            println!("[Host] {host}");
+
             if config.switch_ports.is_empty() {
                 report.push(format!(
                     "⏭️ Host {}: SKIPPED (No matching switch ports found)",
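The inserted lookup uses `.unwrap()`, which panics if a config references a host id missing from `score.hosts`. A hedged alternative, as a standalone sketch with illustrative types, that surfaces the mismatch as an error instead:

```rust
struct Host {
    id: u32,
}

// Return an error rather than panicking when the id is absent.
fn find_host(hosts: &[Host], id: u32) -> Result<&Host, String> {
    hosts
        .iter()
        .find(|h| h.id == id)
        .ok_or_else(|| format!("no host with id {id}"))
}

fn main() {
    let hosts = vec![Host { id: 1 }, Host { id: 2 }];
    assert!(find_host(&hosts, 2).is_ok());
    assert!(find_host(&hosts, 9).is_err());
}
```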
@@ -1,6 +1,6 @@
 use serde::{Deserialize, Serialize};

-#[derive(Copy, Clone, PartialEq, Eq, Hash, Serialize, Deserialize, PartialOrd, Ord)]
+#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, Serialize, Deserialize, PartialOrd, Ord)]
 pub struct MacAddress(pub [u8; 6]);

 impl MacAddress {
@@ -19,14 +19,6 @@ impl From<&MacAddress> for String {
     }
 }

-impl std::fmt::Debug for MacAddress {
-    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
-        f.debug_tuple("MacAddress")
-            .field(&String::from(self))
-            .finish()
-    }
-}
-
 impl std::fmt::Display for MacAddress {
     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
         f.write_str(&String::from(self))
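Deriving `Debug` replaces the removed manual impl, and the output changes: the derived form prints the raw byte tuple, while the old impl went through `String::from`. A standalone sketch of the difference, assuming `String::from` renders colon-separated hex as the `Display` impl suggests:

```rust
#[derive(Copy, Clone, Debug)]
struct MacAddress(pub [u8; 6]);

// Stand-in for the crate's From<&MacAddress> for String (assumed format).
impl From<&MacAddress> for String {
    fn from(mac: &MacAddress) -> String {
        mac.0
            .iter()
            .map(|b| format!("{b:02x}"))
            .collect::<Vec<_>>()
            .join(":")
    }
}

fn main() {
    let mac = MacAddress([0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff]);
    println!("{mac:?}"); // derived Debug: MacAddress([170, 187, 204, 221, 238, 255])
    println!("{}", String::from(&mac)); // old manual style: aa:bb:cc:dd:ee:ff
}
```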