Compare commits

...

6 Commits

Author SHA1 Message Date
a353249eec feat(okd installation): Process works nicely all the way up to setting the bootstrap host binding in opnsense automatically! Next step : generate the mac address boot file for bootstrap host, install ignition files and the cluster will booooooooot
Some checks failed
Run Check Script / check (pull_request) Failing after 33s
2025-09-02 00:39:52 -04:00
f6e665f990 feat: Can now select a bootstrap host and save the role mapping to database
Some checks failed
Run Check Script / check (pull_request) Failing after 33s
2025-09-02 00:10:20 -04:00
241980ebec wip: OKd installation, some cleanup of unused and some refactoring
All checks were successful
Run Check Script / check (pull_request) Successful in 1m13s
2025-09-01 23:36:35 -04:00
35a459f63c wip: OKD Installation full process automation underway, ready to test bootstrapping very soon
Some checks failed
Run Check Script / check (pull_request) Failing after 30s
2025-09-01 23:21:44 -04:00
f076d36297 wip: bootstrap step of okd installation required some refactoring, its getting there
Some checks failed
Run Check Script / check (pull_request) Failing after 30s
2025-09-01 19:14:31 -04:00
138e414727 feat(opnsense-config): dnsmasq dhcp static mappings
Some checks failed
Run Check Script / check (pull_request) Failing after 31s
2025-09-01 17:34:19 -04:00
36 changed files with 2933 additions and 438 deletions

View File

@@ -0,0 +1,20 @@
{
"db_name": "SQLite",
"query": "SELECT host_id FROM host_role_mapping WHERE role = ?",
"describe": {
"columns": [
{
"name": "host_id",
"ordinal": 0,
"type_info": "Text"
}
],
"parameters": {
"Right": 1
},
"nullable": [
false
]
},
"hash": "2ea29df2326f7c84bd4100ad510a3fd4878dc2e217dc83f9bf45a402dfd62a91"
}

View File

@@ -0,0 +1,32 @@
{
"db_name": "SQLite",
"query": "\n SELECT\n p1.id,\n p1.version_id,\n p1.data as \"data: Json<PhysicalHost>\"\n FROM\n physical_hosts p1\n INNER JOIN (\n SELECT\n id,\n MAX(version_id) AS max_version\n FROM\n physical_hosts\n GROUP BY\n id\n ) p2 ON p1.id = p2.id AND p1.version_id = p2.max_version\n ",
"describe": {
"columns": [
{
"name": "id",
"ordinal": 0,
"type_info": "Text"
},
{
"name": "version_id",
"ordinal": 1,
"type_info": "Text"
},
{
"name": "data: Json<PhysicalHost>",
"ordinal": 2,
"type_info": "Blob"
}
],
"parameters": {
"Right": 0
},
"nullable": [
false,
false,
false
]
},
"hash": "8d247918eca10a88b784ee353db090c94a222115c543231f2140cba27bd0f067"
}

View File

@@ -0,0 +1,12 @@
{
"db_name": "SQLite",
"query": "\n INSERT INTO host_role_mapping (host_id, role)\n VALUES (?, ?)\n ",
"describe": {
"columns": [],
"parameters": {
"Right": 2
},
"nullable": []
},
"hash": "df7a7c9cfdd0972e2e0ce7ea444ba8bc9d708a4fb89d5593a0be2bbebde62aff"
}
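
The first and third cached query files above reference a host_role_mapping table whose migration is not included in this excerpt. A minimal sketch of the schema those queries imply, limited to the columns they actually use (host_id and role as TEXT); anything beyond that is an assumption, and the real project presumably ships this as a sqlx migration file:

use sqlx::SqlitePool;

// Hypothetical helper creating the assumed minimal schema.
async fn ensure_host_role_mapping_table(pool: &SqlitePool) -> Result<(), sqlx::Error> {
    sqlx::query(
        "CREATE TABLE IF NOT EXISTS host_role_mapping (
            host_id TEXT NOT NULL,
            role    TEXT NOT NULL
        )",
    )
    .execute(pool)
    .await?;
    Ok(())
}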

Cargo.lock generated
View File

@@ -1775,6 +1775,24 @@ dependencies = [
"url", "url",
] ]
[[package]]
name = "example-okd-install"
version = "0.1.0"
dependencies = [
"cidr",
"env_logger",
"harmony",
"harmony_cli",
"harmony_macros",
"harmony_secret",
"harmony_secret_derive",
"harmony_types",
"log",
"serde",
"tokio",
"url",
]
[[package]] [[package]]
name = "example-opnsense" name = "example-opnsense"
version = "0.1.0" version = "0.1.0"
@@ -2285,6 +2303,7 @@ dependencies = [
"helm-wrapper-rs", "helm-wrapper-rs",
"hex", "hex",
"http 1.3.1", "http 1.3.1",
"inquire",
"k3d-rs", "k3d-rs",
"k8s-openapi", "k8s-openapi",
"kube", "kube",
@@ -7049,7 +7068,7 @@ checksum = "cfe53a6657fd280eaa890a3bc59152892ffa3e30101319d168b781ed6529b049"
[[package]] [[package]]
name = "yaserde" name = "yaserde"
version = "0.12.0" version = "0.12.0"
source = "git+https://github.com/jggc/yaserde.git#c94ca32b6505f9c9a668702a1b1f1f88c6374301" source = "git+https://github.com/jggc/yaserde.git#adfdb1c5f4d054f114e5bd0ea7bda9c07a369def"
dependencies = [ dependencies = [
"log", "log",
"xml-rs", "xml-rs",
@@ -7058,7 +7077,7 @@ dependencies = [
[[package]] [[package]]
name = "yaserde_derive" name = "yaserde_derive"
version = "0.12.0" version = "0.12.0"
source = "git+https://github.com/jggc/yaserde.git#c94ca32b6505f9c9a668702a1b1f1f88c6374301" source = "git+https://github.com/jggc/yaserde.git#adfdb1c5f4d054f114e5bd0ea7bda9c07a369def"
dependencies = [ dependencies = [
"heck", "heck",
"log", "log",

View File

@@ -2,7 +2,7 @@ use harmony::{
inventory::Inventory, inventory::Inventory,
modules::{ modules::{
dummy::{ErrorScore, PanicScore, SuccessScore}, dummy::{ErrorScore, PanicScore, SuccessScore},
inventory::DiscoverInventoryAgentScore, inventory::LaunchDiscoverInventoryAgentScore,
}, },
topology::LocalhostTopology, topology::LocalhostTopology,
}; };
@@ -16,7 +16,7 @@ async fn main() {
Box::new(SuccessScore {}), Box::new(SuccessScore {}),
Box::new(ErrorScore {}), Box::new(ErrorScore {}),
Box::new(PanicScore {}), Box::new(PanicScore {}),
Box::new(DiscoverInventoryAgentScore { Box::new(LaunchDiscoverInventoryAgentScore {
discovery_timeout: Some(10), discovery_timeout: Some(10),
}), }),
], ],

View File

@@ -5,16 +5,15 @@ use std::{
use cidr::Ipv4Cidr; use cidr::Ipv4Cidr;
use harmony::{ use harmony::{
hardware::{FirewallGroup, HostCategory, Location, PhysicalHost, SwitchGroup}, hardware::{HostCategory, Location, PhysicalHost, SwitchGroup},
infra::opnsense::OPNSenseManagementInterface, infra::opnsense::OPNSenseManagementInterface,
inventory::Inventory, inventory::Inventory,
modules::{ modules::{
http::StaticFilesHttpScore, http::StaticFilesHttpScore,
ipxe::IpxeScore,
okd::{ okd::{
bootstrap_dhcp::OKDBootstrapDhcpScore, bootstrap_dhcp::OKDBootstrapDhcpScore,
bootstrap_load_balancer::OKDBootstrapLoadBalancerScore, dhcp::OKDDhcpScore, bootstrap_load_balancer::OKDBootstrapLoadBalancerScore, dhcp::OKDDhcpScore,
dns::OKDDnsScore, dns::OKDDnsScore, ipxe::OkdIpxeScore,
}, },
tftp::TftpScore, tftp::TftpScore,
}, },
@@ -131,7 +130,16 @@ async fn main() {
)), )),
files: vec![], files: vec![],
}; };
let ipxe_score = IpxeScore::new();
let kickstart_filename = "inventory.kickstart".to_string();
let cluster_pubkey_filename = "cluster_ssh_key.pub".to_string();
let harmony_inventory_agent = "harmony_inventory_agent".to_string();
let ipxe_score = OkdIpxeScore {
kickstart_filename,
harmony_inventory_agent,
cluster_pubkey_filename,
};
harmony_tui::run( harmony_tui::run(
inventory, inventory,

View File

@@ -0,0 +1,21 @@
[package]
name = "example-okd-install"
edition = "2024"
version.workspace = true
readme.workspace = true
license.workspace = true
publish = false
[dependencies]
harmony = { path = "../../harmony" }
harmony_cli = { path = "../../harmony_cli" }
harmony_types = { path = "../../harmony_types" }
harmony_secret = { path = "../../harmony_secret" }
harmony_secret_derive = { path = "../../harmony_secret_derive" }
cidr = { workspace = true }
tokio = { workspace = true }
harmony_macros = { path = "../../harmony_macros" }
log = { workspace = true }
env_logger = { workspace = true }
url = { workspace = true }
serde.workspace = true

View File

@@ -0,0 +1,20 @@
mod topology;
use crate::topology::{get_inventory, get_topology};
use harmony::modules::okd::{installation::OKDInstallationScore, ipxe::OkdIpxeScore};
#[tokio::main]
async fn main() {
let inventory = get_inventory();
let topology = get_topology().await;
let kickstart_filename = "inventory.kickstart".to_string();
let cluster_pubkey_filename = "cluster_ssh_key.pub".to_string();
let harmony_inventory_agent = "harmony_inventory_agent".to_string();
let okd_install = Box::new(OKDInstallationScore {});
harmony_cli::run(inventory, topology, vec![okd_install], None)
.await
.unwrap();
}

View File

@@ -0,0 +1,77 @@
use cidr::Ipv4Cidr;
use harmony::{
hardware::{FirewallGroup, HostCategory, Location, PhysicalHost, SwitchGroup},
infra::opnsense::OPNSenseManagementInterface,
inventory::Inventory,
topology::{HAClusterTopology, LogicalHost, UnmanagedRouter},
};
use harmony_macros::{ip, ipv4};
use harmony_secret::{Secret, SecretManager};
use serde::{Deserialize, Serialize};
use std::{net::IpAddr, sync::Arc};
#[derive(Secret, Serialize, Deserialize, Debug, PartialEq)]
struct OPNSenseFirewallConfig {
username: String,
password: String,
}
pub async fn get_topology() -> HAClusterTopology {
let firewall = harmony::topology::LogicalHost {
ip: ip!("192.168.1.1"),
name: String::from("opnsense-1"),
};
let config = SecretManager::get::<OPNSenseFirewallConfig>().await;
let config = config.unwrap();
let opnsense = Arc::new(
harmony::infra::opnsense::OPNSenseFirewall::new(
firewall,
None,
&config.username,
&config.password,
)
.await,
);
let lan_subnet = ipv4!("192.168.1.0");
let gateway_ipv4 = ipv4!("192.168.1.1");
let gateway_ip = IpAddr::V4(gateway_ipv4);
harmony::topology::HAClusterTopology {
domain_name: "demo.harmony.mcd".to_string(),
router: Arc::new(UnmanagedRouter::new(
gateway_ip,
Ipv4Cidr::new(lan_subnet, 24).unwrap(),
)),
load_balancer: opnsense.clone(),
firewall: opnsense.clone(),
tftp_server: opnsense.clone(),
http_server: opnsense.clone(),
dhcp_server: opnsense.clone(),
dns_server: opnsense.clone(),
control_plane: vec![LogicalHost {
ip: ip!("192.168.1.20"),
name: "cp0".to_string(),
}],
bootstrap_host: LogicalHost {
ip: ip!("192.168.1.20"),
name: "bootstrap".to_string(),
},
workers: vec![],
switch: vec![],
}
}
pub fn get_inventory() -> Inventory {
Inventory {
location: Location::new(
"Some virtual machine or maybe a physical machine if you're cool".to_string(),
"testopnsense".to_string(),
),
switch: SwitchGroup::from([]),
firewall_mgmt: Box::new(OPNSenseManagementInterface::new()),
storage_host: vec![],
worker_host: vec![],
control_plane_host: vec![],
}
}

View File

@@ -0,0 +1,7 @@
-----BEGIN OPENSSH PRIVATE KEY-----
b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAAAMwAAAAtzc2gtZW
QyNTUxOQAAACAcemw8pbwuvHFaYynxBbS0Cf3ThYuj1Utr7CDqjwySHAAAAJikacCNpGnA
jQAAAAtzc2gtZWQyNTUxOQAAACAcemw8pbwuvHFaYynxBbS0Cf3ThYuj1Utr7CDqjwySHA
AAAECiiKk4V6Q5cVs6axDM4sjAzZn/QCZLQekmYQXS9XbEYxx6bDylvC68cVpjKfEFtLQJ
/dOFi6PVS2vsIOqPDJIcAAAAEGplYW5nYWJAbGlsaWFuZTIBAgMEBQ==
-----END OPENSSH PRIVATE KEY-----

View File

@@ -0,0 +1 @@
ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIBx6bDylvC68cVpjKfEFtLQJ/dOFi6PVS2vsIOqPDJIc jeangab@liliane2

View File

@@ -70,6 +70,7 @@ harmony_inventory_agent = { path = "../harmony_inventory_agent" }
harmony_secret_derive = { version = "0.1.0", path = "../harmony_secret_derive" } harmony_secret_derive = { version = "0.1.0", path = "../harmony_secret_derive" }
askama.workspace = true askama.workspace = true
sqlx.workspace = true sqlx.workspace = true
inquire.workspace = true
[dev-dependencies] [dev-dependencies]
pretty_assertions.workspace = true pretty_assertions.workspace = true

View File

@@ -10,7 +10,7 @@ pub type HostGroup = Vec<PhysicalHost>;
pub type SwitchGroup = Vec<Switch>; pub type SwitchGroup = Vec<Switch>;
pub type FirewallGroup = Vec<PhysicalHost>; pub type FirewallGroup = Vec<PhysicalHost>;
#[derive(Debug, Clone, Serialize)] #[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PhysicalHost { pub struct PhysicalHost {
pub id: Id, pub id: Id,
pub category: HostCategory, pub category: HostCategory,
@@ -173,6 +173,10 @@ impl PhysicalHost {
self self
} }
pub fn get_mac_address(&self) -> Vec<MacAddress> {
self.network.iter().map(|nic| nic.mac_address).collect()
}
pub fn label(mut self, name: String, value: String) -> Self { pub fn label(mut self, name: String, value: String) -> Self {
self.labels.push(Label { name, value }); self.labels.push(Label { name, value });
self self
@@ -221,14 +225,6 @@ impl PhysicalHost {
// } // }
// } // }
impl<'de> Deserialize<'de> for PhysicalHost {
fn deserialize<D>(_deserializer: D) -> Result<Self, D::Error>
where
D: serde::Deserializer<'de>,
{
todo!()
}
}
#[derive(new, Serialize)] #[derive(new, Serialize)]
pub struct ManualManagementInterface; pub struct ManualManagementInterface;
@@ -273,7 +269,7 @@ where
} }
} }
#[derive(Debug, Clone, Serialize)] #[derive(Debug, Clone, Serialize, Deserialize)]
pub enum HostCategory { pub enum HostCategory {
Server, Server,
Firewall, Firewall,
@@ -291,7 +287,7 @@ pub struct Switch {
_management_interface: NetworkInterface, _management_interface: NetworkInterface,
} }
#[derive(Debug, new, Clone, Serialize)] #[derive(Debug, new, Clone, Serialize, Deserialize)]
pub struct Label { pub struct Label {
pub name: String, pub name: String,
pub value: String, pub value: String,

View File

@@ -17,6 +17,7 @@ impl InventoryFilter {
use derive_new::new; use derive_new::new;
use log::info; use log::info;
use serde::{Deserialize, Serialize};
use crate::hardware::{ManagementInterface, ManualManagementInterface}; use crate::hardware::{ManagementInterface, ManualManagementInterface};
@@ -61,3 +62,11 @@ impl Inventory {
} }
} }
} }
#[derive(Debug, Serialize, Deserialize, sqlx::Type)]
pub enum HostRole {
Bootstrap,
ControlPlane,
Worker,
Storage,
}

View File

@@ -1,6 +1,6 @@
use async_trait::async_trait; use async_trait::async_trait;
use crate::hardware::PhysicalHost; use crate::{hardware::PhysicalHost, interpret::InterpretError, inventory::HostRole};
/// Errors that can occur within the repository layer. /// Errors that can occur within the repository layer.
#[derive(thiserror::Error, Debug)] #[derive(thiserror::Error, Debug)]
@@ -15,6 +15,12 @@ pub enum RepoError {
ConnectionFailed(String), ConnectionFailed(String),
} }
impl From<RepoError> for InterpretError {
fn from(value: RepoError) -> Self {
InterpretError::new(format!("Interpret error : {value}"))
}
}
// --- Trait and Implementation --- // --- Trait and Implementation ---
/// Defines the contract for inventory persistence. /// Defines the contract for inventory persistence.
@@ -22,4 +28,11 @@ pub enum RepoError {
pub trait InventoryRepository: Send + Sync + 'static { pub trait InventoryRepository: Send + Sync + 'static {
async fn save(&self, host: &PhysicalHost) -> Result<(), RepoError>; async fn save(&self, host: &PhysicalHost) -> Result<(), RepoError>;
async fn get_latest_by_id(&self, host_id: &str) -> Result<Option<PhysicalHost>, RepoError>; async fn get_latest_by_id(&self, host_id: &str) -> Result<Option<PhysicalHost>, RepoError>;
async fn get_all_hosts(&self) -> Result<Vec<PhysicalHost>, RepoError>;
async fn get_host_for_role(&self, role: HostRole) -> Result<Vec<PhysicalHost>, RepoError>;
async fn save_role_mapping(
&self,
role: &HostRole,
host: &PhysicalHost,
) -> Result<(), RepoError>;
} }
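
A short sketch of how the two new repository methods are meant to be combined, mirroring the flow used later in the OKD installation interpret. InventoryRepositoryFactory, HostRole and the RepoError-to-InterpretError conversion are the ones introduced elsewhere in this PR; error handling is simplified:

use crate::{
    hardware::PhysicalHost,
    infra::inventory::InventoryRepositoryFactory,
    interpret::InterpretError,
    inventory::{HostRole, InventoryRepository},
};

// Persist a role choice for a discovered host, then read it back by role.
async fn assign_bootstrap(host: &PhysicalHost) -> Result<(), InterpretError> {
    let repo = InventoryRepositoryFactory::build().await?;
    repo.save_role_mapping(&HostRole::Bootstrap, host).await?;
    let bootstrap_hosts = repo.get_host_for_role(HostRole::Bootstrap).await?;
    log::info!("{} host(s) mapped to the Bootstrap role", bootstrap_hosts.len());
    Ok(())
}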

View File

@@ -161,6 +161,14 @@ impl DhcpServer for HAClusterTopology {
self.dhcp_server.set_pxe_options(options).await self.dhcp_server.set_pxe_options(options).await
} }
async fn set_dhcp_range(
&self,
start: &IpAddress,
end: &IpAddress,
) -> Result<(), ExecutorError> {
self.dhcp_server.set_dhcp_range(start, end).await
}
fn get_ip(&self) -> IpAddress { fn get_ip(&self) -> IpAddress {
self.dhcp_server.get_ip() self.dhcp_server.get_ip()
} }
@@ -298,6 +306,13 @@ impl DhcpServer for DummyInfra {
async fn set_pxe_options(&self, _options: PxeOptions) -> Result<(), ExecutorError> { async fn set_pxe_options(&self, _options: PxeOptions) -> Result<(), ExecutorError> {
unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA) unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA)
} }
async fn set_dhcp_range(
&self,
start: &IpAddress,
end: &IpAddress,
) -> Result<(), ExecutorError> {
unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA)
}
fn get_ip(&self) -> IpAddress { fn get_ip(&self) -> IpAddress {
unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA) unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA)
} }

View File

@@ -59,6 +59,8 @@ pub trait DhcpServer: Send + Sync + std::fmt::Debug {
async fn remove_static_mapping(&self, mac: &MacAddress) -> Result<(), ExecutorError>; async fn remove_static_mapping(&self, mac: &MacAddress) -> Result<(), ExecutorError>;
async fn list_static_mappings(&self) -> Vec<(MacAddress, IpAddress)>; async fn list_static_mappings(&self) -> Vec<(MacAddress, IpAddress)>;
async fn set_pxe_options(&self, pxe_options: PxeOptions) -> Result<(), ExecutorError>; async fn set_pxe_options(&self, pxe_options: PxeOptions) -> Result<(), ExecutorError>;
async fn set_dhcp_range(&self, start: &IpAddress, end: &IpAddress)
-> Result<(), ExecutorError>;
fn get_ip(&self) -> IpAddress; fn get_ip(&self) -> IpAddress;
fn get_host(&self) -> LogicalHost; fn get_host(&self) -> LogicalHost;
async fn commit_config(&self) -> Result<(), ExecutorError>; async fn commit_config(&self) -> Result<(), ExecutorError>;

View File

@@ -1,6 +1,6 @@
use crate::{ use crate::{
hardware::PhysicalHost, hardware::PhysicalHost,
inventory::{InventoryRepository, RepoError}, inventory::{HostRole, InventoryRepository, RepoError},
}; };
use async_trait::async_trait; use async_trait::async_trait;
use harmony_types::id::Id; use harmony_types::id::Id;
@@ -46,20 +46,103 @@ impl InventoryRepository for SqliteInventoryRepository {
} }
async fn get_latest_by_id(&self, host_id: &str) -> Result<Option<PhysicalHost>, RepoError> { async fn get_latest_by_id(&self, host_id: &str) -> Result<Option<PhysicalHost>, RepoError> {
let _row = sqlx::query_as!( let row = sqlx::query_as!(
DbHost, DbHost,
r#"SELECT id, version_id, data as "data: Json<PhysicalHost>" FROM physical_hosts WHERE id = ? ORDER BY version_id DESC LIMIT 1"#, r#"SELECT id, version_id, data as "data: Json<PhysicalHost>" FROM physical_hosts WHERE id = ? ORDER BY version_id DESC LIMIT 1"#,
host_id host_id
) )
.fetch_optional(&self.pool) .fetch_optional(&self.pool)
.await?; .await?;
todo!()
Ok(row.map(|r| r.data.0))
}
async fn get_all_hosts(&self) -> Result<Vec<PhysicalHost>, RepoError> {
let db_hosts = sqlx::query_as!(
DbHost,
r#"
SELECT
p1.id,
p1.version_id,
p1.data as "data: Json<PhysicalHost>"
FROM
physical_hosts p1
INNER JOIN (
SELECT
id,
MAX(version_id) AS max_version
FROM
physical_hosts
GROUP BY
id
) p2 ON p1.id = p2.id AND p1.version_id = p2.max_version
"#
)
.fetch_all(&self.pool)
.await?;
let hosts = db_hosts.into_iter().map(|row| row.data.0).collect();
Ok(hosts)
}
async fn save_role_mapping(
&self,
role: &HostRole,
host: &PhysicalHost,
) -> Result<(), RepoError> {
let host_id = host.id.to_string();
sqlx::query!(
r#"
INSERT INTO host_role_mapping (host_id, role)
VALUES (?, ?)
"#,
host_id,
role
)
.execute(&self.pool)
.await?;
info!("Saved role mapping for host '{}' as '{:?}'", host.id, role);
Ok(())
}
async fn get_host_for_role(&self, role: HostRole) -> Result<Vec<PhysicalHost>, RepoError> {
struct HostIdRow {
host_id: String,
}
let role_str = format!("{:?}", role);
let host_id_rows = sqlx::query_as!(
HostIdRow,
"SELECT host_id FROM host_role_mapping WHERE role = ?",
role_str
)
.fetch_all(&self.pool)
.await?;
let mut hosts = Vec::with_capacity(host_id_rows.len());
for row in host_id_rows {
match self.get_latest_by_id(&row.host_id).await? {
Some(host) => hosts.push(host),
None => {
log::warn!(
"Found a role mapping for host_id '{}', but the host does not exist in the physical_hosts table. This may indicate a data integrity issue.",
row.host_id
);
}
}
}
Ok(hosts)
} }
} }
use sqlx::types::Json; use sqlx::types::Json;
struct DbHost { struct DbHost {
data: Json<PhysicalHost>, data: Json<PhysicalHost>,
id: Id, id: String,
version_id: Id, version_id: String,
} }

View File

@@ -68,4 +68,19 @@ impl DhcpServer for OPNSenseFirewall {
ExecutorError::UnexpectedError(format!("Failed to set_pxe_options : {dhcp_error}")) ExecutorError::UnexpectedError(format!("Failed to set_pxe_options : {dhcp_error}"))
}) })
} }
async fn set_dhcp_range(
&self,
start: &IpAddress,
end: &IpAddress,
) -> Result<(), ExecutorError> {
let mut writable_opnsense = self.opnsense_config.write().await;
writable_opnsense
.dhcp()
.set_dhcp_range(&start.to_string(), &end.to_string())
.await
.map_err(|dhcp_error| {
ExecutorError::UnexpectedError(format!("Failed to set_dhcp_range : {dhcp_error}"))
})
}
} }

View File

@@ -1,7 +1,7 @@
use async_trait::async_trait; use async_trait::async_trait;
use derive_new::new; use derive_new::new;
use harmony_types::id::Id; use harmony_types::id::Id;
use log::info; use log::{info, trace};
use serde::Serialize; use serde::Serialize;
use crate::{ use crate::{
@@ -22,6 +22,7 @@ pub struct DhcpScore {
pub filename: Option<String>, pub filename: Option<String>,
pub filename64: Option<String>, pub filename64: Option<String>,
pub filenameipxe: Option<String>, pub filenameipxe: Option<String>,
pub dhcp_range: (IpAddress, IpAddress),
} }
impl<T: Topology + DhcpServer> Score<T> for DhcpScore { impl<T: Topology + DhcpServer> Score<T> for DhcpScore {
@@ -52,48 +53,6 @@ impl DhcpInterpret {
status: InterpretStatus::QUEUED, status: InterpretStatus::QUEUED,
} }
} }
async fn add_static_entries<D: DhcpServer>(
&self,
_inventory: &Inventory,
dhcp_server: &D,
) -> Result<Outcome, InterpretError> {
let dhcp_entries: Vec<DHCPStaticEntry> = self
.score
.host_binding
.iter()
.map(|binding| {
let ip = match binding.logical_host.ip {
std::net::IpAddr::V4(ipv4) => ipv4,
std::net::IpAddr::V6(_) => {
unimplemented!("DHCPStaticEntry only supports ipv4 at the moment")
}
};
DHCPStaticEntry {
name: binding.logical_host.name.clone(),
mac: binding.physical_host.cluster_mac(),
ip,
}
})
.collect();
info!("DHCPStaticEntry : {:?}", dhcp_entries);
info!("DHCP server : {:?}", dhcp_server);
let number_new_entries = dhcp_entries.len();
for entry in dhcp_entries.into_iter() {
match dhcp_server.add_static_mapping(&entry).await {
Ok(_) => info!("Successfully registered DHCPStaticEntry {}", entry),
Err(_) => todo!(),
}
}
Ok(Outcome::new(
InterpretStatus::SUCCESS,
format!("Dhcp Interpret registered {} entries", number_new_entries),
))
}
async fn set_pxe_options<D: DhcpServer>( async fn set_pxe_options<D: DhcpServer>(
&self, &self,
@@ -124,7 +83,7 @@ impl DhcpInterpret {
} }
#[async_trait] #[async_trait]
impl<T: DhcpServer> Interpret<T> for DhcpInterpret { impl<T: Topology + DhcpServer> Interpret<T> for DhcpInterpret {
fn get_name(&self) -> InterpretName { fn get_name(&self) -> InterpretName {
InterpretName::OPNSenseDHCP InterpretName::OPNSenseDHCP
} }
@@ -149,8 +108,15 @@ impl<T: DhcpServer> Interpret<T> for DhcpInterpret {
info!("Executing DhcpInterpret on inventory {inventory:?}"); info!("Executing DhcpInterpret on inventory {inventory:?}");
self.set_pxe_options(inventory, topology).await?; self.set_pxe_options(inventory, topology).await?;
topology
.set_dhcp_range(&self.score.dhcp_range.0, &self.score.dhcp_range.1)
.await?;
self.add_static_entries(inventory, topology).await?; DhcpHostBindingScore {
host_binding: self.score.host_binding.clone(),
}
.interpret(inventory, topology)
.await?;
topology.commit_config().await?; topology.commit_config().await?;
@@ -160,3 +126,113 @@ impl<T: DhcpServer> Interpret<T> for DhcpInterpret {
)) ))
} }
} }
#[derive(Debug, new, Clone, Serialize)]
pub struct DhcpHostBindingScore {
pub host_binding: Vec<HostBinding>,
}
impl<T: Topology + DhcpServer> Score<T> for DhcpHostBindingScore {
fn create_interpret(&self) -> Box<dyn Interpret<T>> {
Box::new(DhcpHostBindingInterpret {
score: self.clone(),
})
}
fn name(&self) -> String {
"DhcpHostBindingScore".to_string()
}
}
// https://docs.opnsense.org/manual/dhcp.html#advanced-settings
#[derive(Debug, Clone)]
pub struct DhcpHostBindingInterpret {
score: DhcpHostBindingScore,
}
impl DhcpHostBindingInterpret {
async fn add_static_entries<D: DhcpServer>(
&self,
_inventory: &Inventory,
dhcp_server: &D,
) -> Result<Outcome, InterpretError> {
let dhcp_entries: Vec<DHCPStaticEntry> = self
.score
.host_binding
.iter()
.map(|binding| {
let ip = match binding.logical_host.ip {
std::net::IpAddr::V4(ipv4) => ipv4,
std::net::IpAddr::V6(_) => {
unimplemented!("DHCPStaticEntry only supports ipv4 at the moment")
}
};
DHCPStaticEntry {
name: binding.logical_host.name.clone(),
mac: binding.physical_host.cluster_mac(),
ip,
}
})
.collect();
info!("DHCPStaticEntry : {:?}", dhcp_entries);
trace!("DHCP server : {:?}", dhcp_server);
let number_new_entries = dhcp_entries.len();
for entry in dhcp_entries.into_iter() {
match dhcp_server.add_static_mapping(&entry).await {
Ok(_) => info!("Successfully registered DHCPStaticEntry {}", entry),
Err(_) => todo!(),
}
}
Ok(Outcome::new(
InterpretStatus::SUCCESS,
format!("Dhcp Interpret registered {} entries", number_new_entries),
))
}
}
#[async_trait]
impl<T: DhcpServer> Interpret<T> for DhcpHostBindingInterpret {
fn get_name(&self) -> InterpretName {
InterpretName::Custom("DhcpHostBindingInterpret")
}
fn get_version(&self) -> crate::domain::data::Version {
Version::from("1.0.0").unwrap()
}
fn get_status(&self) -> InterpretStatus {
todo!()
}
fn get_children(&self) -> Vec<Id> {
todo!()
}
async fn execute(
&self,
inventory: &Inventory,
topology: &T,
) -> Result<Outcome, InterpretError> {
info!(
"Executing DhcpHostBindingInterpret on {} bindings",
self.score.host_binding.len()
);
self.add_static_entries(inventory, topology).await?;
topology.commit_config().await?;
Ok(Outcome::new(
InterpretStatus::SUCCESS,
format!(
"Dhcp Host Binding Interpret execution successful on {} hosts",
self.score.host_binding.len()
),
))
}
}

View File

@@ -3,14 +3,14 @@ use derive_new::new;
use serde::Serialize; use serde::Serialize;
use crate::{ use crate::{
data::{FileContent, Version}, data::{FileContent, FilePath, Version},
interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome}, interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
inventory::Inventory, inventory::Inventory,
score::Score, score::Score,
topology::{HttpServer, Topology}, topology::{HttpServer, Topology},
}; };
use harmony_types::id::Id;
use harmony_types::net::Url; use harmony_types::net::Url;
use harmony_types::{id::Id, net::MacAddress};
/// Configure an HTTP server that is provided by the Topology /// Configure an HTTP server that is provided by the Topology
/// ///
@@ -91,3 +91,33 @@ impl<T: Topology + HttpServer> Interpret<T> for StaticFilesHttpInterpret {
todo!() todo!()
} }
} }
#[derive(Debug, new, Clone, Serialize)]
pub struct IPxeMacBootFileScore {
pub content: String,
pub mac_address: Vec<MacAddress>,
}
impl<T: Topology + HttpServer> Score<T> for IPxeMacBootFileScore {
fn name(&self) -> String {
"IPxeMacBootFileScore".to_string()
}
fn create_interpret(&self) -> Box<dyn Interpret<T>> {
StaticFilesHttpScore {
folder_to_serve: None,
files: self
.mac_address
.iter()
.map(|mac| FileContent {
path: FilePath::Relative(format!(
"byMAC/01-{}.ipxe",
mac.to_string().replace(":", "-")
)),
content: self.content.clone(),
})
.collect(),
}
.create_interpret()
}
}
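
For reference, the per-MAC boot file name this score derives follows the usual iPXE/PXELINUX "01-<mac>" convention. A small std-only sketch of the same mapping (plain &str is used to avoid assuming anything about MacAddress beyond the usual colon-separated display form):

fn ipxe_boot_file_name(mac: &str) -> String {
    // Mirrors IPxeMacBootFileScore: "byMAC/01-" + MAC with ':' turned into '-' + ".ipxe"
    format!("byMAC/01-{}.ipxe", mac.replace(':', "-"))
}

// ipxe_boot_file_name("52:54:00:ab:cd:ef") == "byMAC/01-52-54-00-ab-cd-ef.ipxe"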

View File

@@ -18,11 +18,11 @@ use harmony_types::id::Id;
/// This will allow us to register/update hosts running harmony_inventory_agent /// This will allow us to register/update hosts running harmony_inventory_agent
/// from LAN in the Harmony inventory /// from LAN in the Harmony inventory
#[derive(Debug, Clone, Serialize, Deserialize)] #[derive(Debug, Clone, Serialize, Deserialize)]
pub struct DiscoverInventoryAgentScore { pub struct LaunchDiscoverInventoryAgentScore {
pub discovery_timeout: Option<u64>, pub discovery_timeout: Option<u64>,
} }
impl<T: Topology> Score<T> for DiscoverInventoryAgentScore { impl<T: Topology> Score<T> for LaunchDiscoverInventoryAgentScore {
fn name(&self) -> String { fn name(&self) -> String {
"DiscoverInventoryAgentScore".to_string() "DiscoverInventoryAgentScore".to_string()
} }
@@ -36,7 +36,7 @@ impl<T: Topology> Score<T> for DiscoverInventoryAgentScore {
#[derive(Debug)] #[derive(Debug)]
struct DiscoverInventoryAgentInterpret { struct DiscoverInventoryAgentInterpret {
score: DiscoverInventoryAgentScore, score: LaunchDiscoverInventoryAgentScore,
} }
#[async_trait] #[async_trait]
@@ -46,6 +46,13 @@ impl<T: Topology> Interpret<T> for DiscoverInventoryAgentInterpret {
_inventory: &Inventory, _inventory: &Inventory,
_topology: &T, _topology: &T,
) -> Result<Outcome, InterpretError> { ) -> Result<Outcome, InterpretError> {
match self.score.discovery_timeout {
Some(timeout) => info!("Discovery agent will wait for {timeout} seconds"),
None => info!(
"Discovery agent will wait forever in the background, go on and enjoy this delicious inventory."
),
};
harmony_inventory_agent::local_presence::discover_agents( harmony_inventory_agent::local_presence::discover_agents(
self.score.discovery_timeout, self.score.discovery_timeout,
|event: DiscoveryEvent| -> Result<(), String> { |event: DiscoveryEvent| -> Result<(), String> {

View File

@@ -1,67 +0,0 @@
use async_trait::async_trait;
use derive_new::new;
use serde::Serialize;
use crate::{
data::Version,
interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
inventory::Inventory,
score::Score,
topology::Topology,
};
use harmony_types::id::Id;
#[derive(Debug, new, Clone, Serialize)]
pub struct IpxeScore {
//files_to_serve: Url,
}
impl<T: Topology> Score<T> for IpxeScore {
fn create_interpret(&self) -> Box<dyn Interpret<T>> {
Box::new(IpxeInterpret::new(self.clone()))
}
fn name(&self) -> String {
"IpxeScore".to_string()
}
}
#[derive(Debug, new, Clone)]
pub struct IpxeInterpret {
_score: IpxeScore,
}
#[async_trait]
impl<T: Topology> Interpret<T> for IpxeInterpret {
async fn execute(
&self,
_inventory: &Inventory,
_topology: &T,
) -> Result<Outcome, InterpretError> {
/*
let http_server = &topology.http_server;
http_server.ensure_initialized().await?;
Ok(Outcome::success(format!(
"Http Server running and serving files from {}",
self.score.files_to_serve
)))
*/
todo!();
}
fn get_name(&self) -> InterpretName {
InterpretName::Ipxe
}
fn get_version(&self) -> Version {
todo!()
}
fn get_status(&self) -> InterpretStatus {
todo!()
}
fn get_children(&self) -> Vec<Id> {
todo!()
}
}

View File

@@ -6,7 +6,6 @@ pub mod dummy;
pub mod helm; pub mod helm;
pub mod http; pub mod http;
pub mod inventory; pub mod inventory;
pub mod ipxe;
pub mod k3d; pub mod k3d;
pub mod k8s; pub mod k8s;
pub mod lamp; pub mod lamp;

View File

@@ -37,21 +37,23 @@ impl OKDBootstrapDhcpScore {
.clone(), .clone(),
}); });
// TODO refactor this so it is not copy pasted from dhcp.rs // TODO refactor this so it is not copy pasted from dhcp.rs
Self { todo!("Add dhcp range")
dhcp_score: DhcpScore::new( // Self {
host_binding, // dhcp_score: DhcpScore::new(
// TODO : we should add a tftp server to the topology instead of relying on the // host_binding,
// router address, this is leaking implementation details // // TODO : we should add a tftp server to the topology instead of relying on the
Some(topology.router.get_gateway()), // // router address, this is leaking implementation details
None, // To allow UEFI boot we cannot provide a legacy file // Some(topology.router.get_gateway()),
Some("undionly.kpxe".to_string()), // None, // To allow UEFI boot we cannot provide a legacy file
Some("ipxe.efi".to_string()), // Some("undionly.kpxe".to_string()),
Some(format!( // Some("ipxe.efi".to_string()),
"http://{}:8080/boot.ipxe", // Some(format!(
topology.router.get_gateway() // "http://{}:8080/boot.ipxe",
)), // topology.router.get_gateway()
), // )),
} // (self.),
// ),
// }
} }
} }

View File

@@ -1,3 +1,6 @@
use std::net::Ipv4Addr;
use harmony_types::net::IpAddress;
use serde::Serialize; use serde::Serialize;
use crate::{ use crate::{
@@ -44,6 +47,16 @@ impl OKDDhcpScore {
}) })
}); });
let dhcp_server_ip = match topology.dhcp_server.get_ip() {
std::net::IpAddr::V4(ipv4_addr) => ipv4_addr,
std::net::IpAddr::V6(_ipv6_addr) => todo!("Support ipv6 someday"),
};
// TODO this could overflow, we should use proper subnet maths here instead of an ip
// address and guessing the subnet size from there
let start = Ipv4Addr::from(u32::from(dhcp_server_ip) + 100);
let end = Ipv4Addr::from(u32::from(dhcp_server_ip) + 150);
Self { Self {
// TODO : we should add a tftp server to the topology instead of relying on the // TODO : we should add a tftp server to the topology instead of relying on the
// router address, this is leaking implementation details // router address, this is leaking implementation details
@@ -57,6 +70,7 @@ impl OKDDhcpScore {
"http://{}:8080/boot.ipxe", "http://{}:8080/boot.ipxe",
topology.router.get_gateway() topology.router.get_gateway()
)), )),
dhcp_range: (IpAddress::from(start), IpAddress::from(end)),
}, },
} }
} }
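
The TODO above (and the identical one in the PXE module further down) flags that the +100/+150 offsets can run past the subnet. A std-only sketch of the same computation with explicit subnet bounds; the clamping strategy is illustrative, not a decision made in this PR:

use std::net::Ipv4Addr;

// Derive a DHCP range from the server address and prefix length, kept strictly
// inside the subnet (network and broadcast addresses excluded).
fn dhcp_range(server_ip: Ipv4Addr, prefix_len: u32) -> (Ipv4Addr, Ipv4Addr) {
    assert!((1..=30).contains(&prefix_len), "need at least 4 addresses in the subnet");
    let ip = u32::from(server_ip);
    let mask = u32::MAX << (32 - prefix_len);
    let network = ip & mask;
    let broadcast = network | !mask;
    let start = ip.saturating_add(100).clamp(network + 1, broadcast - 1);
    let end = ip.saturating_add(150).clamp(start, broadcast - 1);
    (Ipv4Addr::from(start), Ipv4Addr::from(end))
}

// dhcp_range(Ipv4Addr::new(192, 168, 1, 1), 24) == (192.168.1.101, 192.168.1.151)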

View File

@@ -44,24 +44,28 @@
//! which must be configured on host AND switch to connect properly. //! which must be configured on host AND switch to connect properly.
//! //!
//! Configuration knobs //! Configuration knobs
//! - lan_cidr: CIDR to scan/allow for discovery endpoints.
//! - public_domain: External wildcard/apps domain (e.g., apps.example.com). //! - public_domain: External wildcard/apps domain (e.g., apps.example.com).
//! - internal_domain: Internal cluster domain (e.g., cluster.local or harmony.mcd). //! - internal_domain: Internal cluster domain (e.g., cluster.local or harmony.mcd).
use async_trait::async_trait; use async_trait::async_trait;
use derive_new::new; use derive_new::new;
use harmony_macros::ip;
use harmony_types::id::Id; use harmony_types::id::Id;
use log::info; use log::{error, info, warn};
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use crate::{ use crate::{
data::Version, data::Version,
hardware::PhysicalHost,
infra::inventory::InventoryRepositoryFactory,
instrumentation::{HarmonyEvent, instrument}, instrumentation::{HarmonyEvent, instrument},
interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome}, interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
inventory::Inventory, inventory::{HostRole, Inventory},
modules::{
dhcp::DhcpHostBindingScore, http::IPxeMacBootFileScore,
inventory::LaunchDiscoverInventoryAgentScore,
},
score::Score, score::Score,
topology::{DnsRecord, DnsRecordType, DnsServer, Topology}, topology::{HAClusterTopology, HostBinding},
}; };
// ------------------------------------------------------------------------------------------------- // -------------------------------------------------------------------------------------------------
@@ -69,17 +73,10 @@ use crate::{
// ------------------------------------------------------------------------------------------------- // -------------------------------------------------------------------------------------------------
#[derive(Debug, Clone, Serialize, Deserialize, new)] #[derive(Debug, Clone, Serialize, Deserialize, new)]
pub struct OKDInstallationScore { pub struct OKDInstallationScore {}
/// The LAN CIDR where discovery endpoints live (e.g., 192.168.10.0/24)
pub lan_cidr: String,
/// Public external domain (e.g., example.com). Used for api/apps wildcard, etc.
pub public_domain: String,
/// Internal cluster domain (e.g., harmony.mcd). Used for internal svc/ingress and DNS.
pub internal_domain: String,
}
impl<T: Topology + DnsServer + 'static> Score<T> for OKDInstallationScore { impl Score<HAClusterTopology> for OKDInstallationScore {
fn create_interpret(&self) -> Box<dyn Interpret<T>> { fn create_interpret(&self) -> Box<dyn Interpret<HAClusterTopology>> {
Box::new(OKDInstallationInterpret::new(self.clone())) Box::new(OKDInstallationInterpret::new(self.clone()))
} }
@@ -109,86 +106,74 @@ impl OKDInstallationInterpret {
} }
} }
async fn run_inventory_phase<T: Topology + DnsServer>( async fn run_inventory_phase(
&self, &self,
inventory: &Inventory, inventory: &Inventory,
topology: &T, topology: &HAClusterTopology,
) -> Result<(), InterpretError> { ) -> Result<(), InterpretError> {
// 1) Prepare DNS and DHCP lease registration (optional) // 1) Prepare DNS and DHCP lease registration (optional)
let dns_score = OKDSetup01InventoryDnsScore::new(
self.score.internal_domain.clone(),
self.score.public_domain.clone(),
Some(true), // register_dhcp_leases
);
dns_score.interpret(inventory, topology).await?;
// 2) Serve default iPXE + Kickstart and poll discovery // 2) Serve default iPXE + Kickstart and poll discovery
let discovery_score = OKDSetup01InventoryScore::new(self.score.lan_cidr.clone()); let discovery_score = OKDSetup01InventoryScore::new();
discovery_score.interpret(inventory, topology).await?; discovery_score.interpret(inventory, topology).await?;
Ok(()) Ok(())
} }
async fn run_bootstrap_phase<T: Topology + DnsServer>( async fn run_bootstrap_phase(
&self, &self,
inventory: &Inventory, inventory: &Inventory,
topology: &T, topology: &HAClusterTopology,
) -> Result<(), InterpretError> { ) -> Result<(), InterpretError> {
// Select and provision bootstrap // Select and provision bootstrap
let bootstrap_score = OKDSetup02BootstrapScore::new( let bootstrap_score = OKDSetup02BootstrapScore::new();
self.score.public_domain.clone(),
self.score.internal_domain.clone(),
);
bootstrap_score.interpret(inventory, topology).await?; bootstrap_score.interpret(inventory, topology).await?;
Ok(()) Ok(())
} }
async fn run_control_plane_phase<T: Topology + DnsServer>( async fn run_control_plane_phase(
&self, &self,
inventory: &Inventory, inventory: &Inventory,
topology: &T, topology: &HAClusterTopology,
) -> Result<(), InterpretError> { ) -> Result<(), InterpretError> {
let control_plane_score = OKDSetup03ControlPlaneScore::new(); let control_plane_score = OKDSetup03ControlPlaneScore::new();
control_plane_score.interpret(inventory, topology).await?; control_plane_score.interpret(inventory, topology).await?;
Ok(()) Ok(())
} }
async fn run_workers_phase<T: Topology + DnsServer>( async fn run_workers_phase(
&self, &self,
inventory: &Inventory, inventory: &Inventory,
topology: &T, topology: &HAClusterTopology,
) -> Result<(), InterpretError> { ) -> Result<(), InterpretError> {
let workers_score = OKDSetup04WorkersScore::new(); let workers_score = OKDSetup04WorkersScore::new();
workers_score.interpret(inventory, topology).await?; workers_score.interpret(inventory, topology).await?;
Ok(()) Ok(())
} }
async fn run_sanity_phase<T: Topology + DnsServer>( async fn run_sanity_phase(
&self, &self,
inventory: &Inventory, inventory: &Inventory,
topology: &T, topology: &HAClusterTopology,
) -> Result<(), InterpretError> { ) -> Result<(), InterpretError> {
let sanity_score = OKDSetup05SanityCheckScore::new(); let sanity_score = OKDSetup05SanityCheckScore::new();
sanity_score.interpret(inventory, topology).await?; sanity_score.interpret(inventory, topology).await?;
Ok(()) Ok(())
} }
async fn run_report_phase<T: Topology + DnsServer>( async fn run_report_phase(
&self, &self,
inventory: &Inventory, inventory: &Inventory,
topology: &T, topology: &HAClusterTopology,
) -> Result<(), InterpretError> { ) -> Result<(), InterpretError> {
let report_score = OKDSetup06InstallationReportScore::new( let report_score = OKDSetup06InstallationReportScore::new();
self.score.public_domain.clone(),
self.score.internal_domain.clone(),
);
report_score.interpret(inventory, topology).await?; report_score.interpret(inventory, topology).await?;
Ok(()) Ok(())
} }
} }
#[async_trait] #[async_trait]
impl<T: Topology + DnsServer> Interpret<T> for OKDInstallationInterpret { impl Interpret<HAClusterTopology> for OKDInstallationInterpret {
fn get_name(&self) -> InterpretName { fn get_name(&self) -> InterpretName {
InterpretName::Custom("OKDInstallationInterpret") InterpretName::Custom("OKDInstallationInterpret")
} }
@@ -208,14 +193,11 @@ impl<T: Topology + DnsServer> Interpret<T> for OKDInstallationInterpret {
async fn execute( async fn execute(
&self, &self,
inventory: &Inventory, inventory: &Inventory,
topology: &T, topology: &HAClusterTopology,
) -> Result<Outcome, InterpretError> { ) -> Result<Outcome, InterpretError> {
instrument(HarmonyEvent::HarmonyStarted).ok(); instrument(HarmonyEvent::HarmonyStarted).ok();
info!( info!("Starting OKD installation pipeline",);
"Starting OKD installation pipeline for public_domain={} internal_domain={} lan_cidr={}",
self.score.public_domain, self.score.internal_domain, self.score.lan_cidr
);
self.run_inventory_phase(inventory, topology).await?; self.run_inventory_phase(inventory, topology).await?;
@@ -238,111 +220,6 @@ impl<T: Topology + DnsServer> Interpret<T> for OKDInstallationInterpret {
} }
} }
// -------------------------------------------------------------------------------------------------
// Step 01: Inventory DNS setup
// - Keep DHCP simple; optionally register dynamic leases into DNS.
// - Ensure base records for internal/public domains (api/api-int/apps wildcard).
// -------------------------------------------------------------------------------------------------
#[derive(Debug, Clone, Serialize, new)]
struct OKDSetup01InventoryDnsScore {
internal_domain: String,
public_domain: String,
register_dhcp_leases: Option<bool>,
}
impl<T: Topology + DnsServer> Score<T> for OKDSetup01InventoryDnsScore {
fn create_interpret(&self) -> Box<dyn Interpret<T>> {
Box::new(OKDSetup01InventoryDnsInterpret::new(self.clone()))
}
fn name(&self) -> String {
"OKDSetup01InventoryDnsScore".to_string()
}
}
#[derive(Debug, Clone)]
struct OKDSetup01InventoryDnsInterpret {
score: OKDSetup01InventoryDnsScore,
version: Version,
status: InterpretStatus,
}
impl OKDSetup01InventoryDnsInterpret {
pub fn new(score: OKDSetup01InventoryDnsScore) -> Self {
let version = Version::from("1.0.0").unwrap();
Self {
version,
score,
status: InterpretStatus::QUEUED,
}
}
async fn ensure_dns<T: DnsServer>(&self, dns: &T) -> Result<(), InterpretError> {
// Minimal records placeholders; real IPs are set elsewhere in the flow.
// We register the names early to ensure resolvability for clients relying on DNS.
let mut records: Vec<DnsRecord> = vec![
DnsRecord {
value: ip!("0.0.0.0"),
host: "api".to_string(),
domain: self.score.internal_domain.clone(),
record_type: DnsRecordType::A,
},
DnsRecord {
value: ip!("0.0.0.0"),
host: "api-int".to_string(),
domain: self.score.internal_domain.clone(),
record_type: DnsRecordType::A,
},
DnsRecord {
value: ip!("0.0.0.0"),
host: "*.apps.".to_string(),
domain: self.score.internal_domain.clone(),
record_type: DnsRecordType::A,
},
];
dns.ensure_hosts_registered(records.drain(..).collect())
.await?;
if let Some(register) = self.score.register_dhcp_leases {
dns.register_dhcp_leases(register).await?;
}
dns.commit_config().await?;
Ok(())
}
}
#[async_trait]
impl<T: Topology + DnsServer> Interpret<T> for OKDSetup01InventoryDnsInterpret {
fn get_name(&self) -> InterpretName {
InterpretName::Custom("OKDSetup01InventoryDns")
}
fn get_version(&self) -> Version {
self.version.clone()
}
fn get_status(&self) -> InterpretStatus {
self.status.clone()
}
fn get_children(&self) -> Vec<Id> {
vec![]
}
async fn execute(
&self,
_inventory: &Inventory,
topology: &T,
) -> Result<Outcome, InterpretError> {
info!("Ensuring base DNS and DHCP lease registration for discovery phase");
self.ensure_dns(topology).await?;
Ok(Outcome::new(
InterpretStatus::SUCCESS,
"Inventory DNS prepared".into(),
))
}
}
// ------------------------------------------------------------------------------------------------- // -------------------------------------------------------------------------------------------------
// Step 01: Inventory (default PXE + Kickstart in RAM + Rust agent) // Step 01: Inventory (default PXE + Kickstart in RAM + Rust agent)
// - This score exposes/ensures the default inventory assets and waits for discoveries. // - This score exposes/ensures the default inventory assets and waits for discoveries.
@@ -350,12 +227,10 @@ impl<T: Topology + DnsServer> Interpret<T> for OKDSetup01InventoryDnsInterpret {
// ------------------------------------------------------------------------------------------------- // -------------------------------------------------------------------------------------------------
#[derive(Debug, Clone, Serialize, new)] #[derive(Debug, Clone, Serialize, new)]
struct OKDSetup01InventoryScore { struct OKDSetup01InventoryScore {}
lan_cidr: String,
}
impl<T: Topology> Score<T> for OKDSetup01InventoryScore { impl Score<HAClusterTopology> for OKDSetup01InventoryScore {
fn create_interpret(&self) -> Box<dyn Interpret<T>> { fn create_interpret(&self) -> Box<dyn Interpret<HAClusterTopology>> {
Box::new(OKDSetup01InventoryInterpret::new(self.clone())) Box::new(OKDSetup01InventoryInterpret::new(self.clone()))
} }
@@ -380,35 +255,10 @@ impl OKDSetup01InventoryInterpret {
status: InterpretStatus::QUEUED, status: InterpretStatus::QUEUED,
} }
} }
async fn ensure_inventory_assets<T: Topology>(
&self,
topology: &T,
) -> Result<(), InterpretError> {
// Placeholder: push or verify iPXE default, Kickstart, and Rust inventory agent are hosted.
// Real implementation: publish to the PXE/HTTP server via the topology.
info!(
"[Inventory] Ensuring default iPXE, Kickstart, and inventory agent are available for LAN {}",
self.score.lan_cidr
);
// topology.publish_http_asset(…) ?
Ok(())
}
async fn discover_nodes(&self) -> Result<usize, InterpretError> {
// Placeholder: implement Harmony discovery logic (scan/pull/push mode).
// Returns number of newly discovered nodes.
info!(
"[Inventory] Scanning for inventory agents in {}",
self.score.lan_cidr
);
// In practice, this would query harmony_composer or a local registry store.
Ok(3)
}
} }
#[async_trait] #[async_trait]
impl<T: Topology> Interpret<T> for OKDSetup01InventoryInterpret { impl Interpret<HAClusterTopology> for OKDSetup01InventoryInterpret {
fn get_name(&self) -> InterpretName { fn get_name(&self) -> InterpretName {
InterpretName::Custom("OKDSetup01Inventory") InterpretName::Custom("OKDSetup01Inventory")
} }
@@ -427,16 +277,99 @@ impl<T: Topology> Interpret<T> for OKDSetup01InventoryInterpret {
async fn execute( async fn execute(
&self, &self,
_inventory: &Inventory, inventory: &Inventory,
topology: &T, topology: &HAClusterTopology,
) -> Result<Outcome, InterpretError> { ) -> Result<Outcome, InterpretError> {
self.ensure_inventory_assets(topology).await?; info!(
let count = self.discover_nodes().await?; "Launching discovery agent, make sure that your nodes are successfully PXE booted and running inventory agent. They should answer on `http://<node_ip>:8080/inventory`"
info!("[Inventory] Discovered {count} nodes"); );
LaunchDiscoverInventoryAgentScore {
discovery_timeout: None,
}
.interpret(inventory, topology)
.await?;
let bootstrap_host: PhysicalHost;
let host_repo = InventoryRepositoryFactory::build().await?;
loop {
let all_hosts = host_repo.get_all_hosts().await?;
if all_hosts.is_empty() {
warn!("No discovered hosts found yet. Waiting for hosts to appear...");
// Sleep to avoid spamming the user and logs while waiting for nodes.
tokio::time::sleep(std::time::Duration::from_secs(5)).await;
continue;
}
let ans = inquire::Select::new(
"Select the node to be used as the bootstrap node:",
all_hosts,
)
.with_help_message("Press Esc to refresh the list of discovered hosts")
.prompt();
match ans {
Ok(choice) => {
info!("Selected {} as the bootstrap node.", choice.summary());
host_repo
.save_role_mapping(&HostRole::Bootstrap, &choice)
.await?;
bootstrap_host = choice;
break;
}
Err(inquire::InquireError::OperationCanceled) => {
info!("Refresh requested. Fetching list of discovered hosts again...");
continue;
}
Err(e) => {
error!("Failed to select bootstrap node: {}", e);
return Err(InterpretError::new(format!(
"Could not select host : {}",
e.to_string()
)));
}
}
}
Ok(Outcome::new( Ok(Outcome::new(
InterpretStatus::SUCCESS, InterpretStatus::SUCCESS,
format!("Inventory phase complete. Nodes discovered: {count}"), format!(
"Found and assigned bootstrap node: {}",
bootstrap_host.summary()
),
)) ))
// info!(
// "Launching discovery agent, make sure that your nodes are successfully PXE booted and running inventory agent. They should answer on `http://<node_ip>:8080/inventory`"
// );
// LaunchDiscoverInventoryAgentScore {
// discovery_timeout: Some(60),
// }
// .interpret(inventory, topology)
// .await?;
//
// // TODO write a loop
// let bootstrap_host: PhysicalHost;
// let mut found_bootstrap_host = false;
// let host_repo = InventoryRepositoryFactory::build().await?;
// while !found_bootstrap_host {
// let all_hosts = host_repo.get_all_hosts().await?;
// // TODO use inquire to select among the current hosts, tell the user to cancel if he
// // wants to update the list. I believe inquire::Select is the correct option here
// //
// // The options are all_hosts, all_hosts is of type Vec<PhysicalHost> and PhysicalHost
// // has a human friendly `summary() -> String` method that is perfect to have the user
// // choose
// //
// // once the user has chosen one, call host_repo.save_role_mapping(Role::Bootstrap,
// // host.id).await?;
// bootstrap_host = todo!();
// }
//
// Ok(Outcome::new(
// InterpretStatus::SUCCESS,
// format!("Found bootstrap node : {}", bootstrap_host.summary()),
// ))
} }
} }
@@ -449,13 +382,10 @@ impl<T: Topology> Interpret<T> for OKDSetup01InventoryInterpret {
// ------------------------------------------------------------------------------------------------- // -------------------------------------------------------------------------------------------------
#[derive(Debug, Clone, Serialize, new)] #[derive(Debug, Clone, Serialize, new)]
struct OKDSetup02BootstrapScore { struct OKDSetup02BootstrapScore {}
public_domain: String,
internal_domain: String,
}
impl<T: Topology> Score<T> for OKDSetup02BootstrapScore { impl Score<HAClusterTopology> for OKDSetup02BootstrapScore {
fn create_interpret(&self) -> Box<dyn Interpret<T>> { fn create_interpret(&self) -> Box<dyn Interpret<HAClusterTopology>> {
Box::new(OKDSetup02BootstrapInterpret::new(self.clone())) Box::new(OKDSetup02BootstrapInterpret::new(self.clone()))
} }
@@ -481,9 +411,57 @@ impl OKDSetup02BootstrapInterpret {
} }
} }
async fn render_per_mac_pxe(&self) -> Result<(), InterpretError> { async fn get_bootstrap_node(
&self,
_inventory: &Inventory,
) -> Result<PhysicalHost, InterpretError> {
let repo = InventoryRepositoryFactory::build().await?;
match repo
.get_host_for_role(HostRole::Bootstrap)
.await?
.into_iter()
.next()
{
Some(host) => Ok(host),
None => Err(InterpretError::new(
"No bootstrap node available".to_string(),
)),
}
}
async fn configure_host_binding(
&self,
inventory: &Inventory,
topology: &HAClusterTopology,
) -> Result<(), InterpretError> {
let binding = HostBinding {
logical_host: topology.bootstrap_host.clone(),
physical_host: self.get_bootstrap_node(inventory).await?,
};
info!("Configuring host binding for bootstrap node {binding:?}");
DhcpHostBindingScore {
host_binding: vec![binding],
}
.interpret(inventory, topology)
.await?;
Ok(())
}
async fn render_per_mac_pxe(
&self,
inventory: &Inventory,
topology: &HAClusterTopology,
) -> Result<(), InterpretError> {
// Placeholder: use Harmony templates to emit {MAC}.ipxe selecting SCOS live + bootstrap ignition. // Placeholder: use Harmony templates to emit {MAC}.ipxe selecting SCOS live + bootstrap ignition.
info!("[Bootstrap] Rendering per-MAC PXE for bootstrap node"); info!("[Bootstrap] Rendering per-MAC PXE for bootstrap node");
let bootstrap_node = self.get_bootstrap_node(inventory).await?;
IPxeMacBootFileScore {
mac_address: bootstrap_node.get_mac_address(),
content: todo!("template for bootstrap node"),
}
.interpret(inventory, topology)
.await?;
Ok(()) Ok(())
} }
@@ -501,7 +479,7 @@ impl OKDSetup02BootstrapInterpret {
} }
#[async_trait] #[async_trait]
impl<T: Topology> Interpret<T> for OKDSetup02BootstrapInterpret { impl Interpret<HAClusterTopology> for OKDSetup02BootstrapInterpret {
fn get_name(&self) -> InterpretName { fn get_name(&self) -> InterpretName {
InterpretName::Custom("OKDSetup02Bootstrap") InterpretName::Custom("OKDSetup02Bootstrap")
} }
@@ -520,10 +498,11 @@ impl<T: Topology> Interpret<T> for OKDSetup02BootstrapInterpret {
async fn execute( async fn execute(
&self, &self,
_inventory: &Inventory, inventory: &Inventory,
_topology: &T, topology: &HAClusterTopology,
) -> Result<Outcome, InterpretError> { ) -> Result<Outcome, InterpretError> {
self.render_per_mac_pxe().await?; self.configure_host_binding(inventory, topology).await?;
self.render_per_mac_pxe(inventory, topology).await?;
self.reboot_target().await?; self.reboot_target().await?;
self.wait_for_bootstrap_complete().await?; self.wait_for_bootstrap_complete().await?;
@@ -543,8 +522,8 @@ impl<T: Topology> Interpret<T> for OKDSetup02BootstrapInterpret {
#[derive(Debug, Clone, Serialize, new)] #[derive(Debug, Clone, Serialize, new)]
struct OKDSetup03ControlPlaneScore {} struct OKDSetup03ControlPlaneScore {}
impl<T: Topology> Score<T> for OKDSetup03ControlPlaneScore { impl Score<HAClusterTopology> for OKDSetup03ControlPlaneScore {
fn create_interpret(&self) -> Box<dyn Interpret<T>> { fn create_interpret(&self) -> Box<dyn Interpret<HAClusterTopology>> {
Box::new(OKDSetup03ControlPlaneInterpret::new(self.clone())) Box::new(OKDSetup03ControlPlaneInterpret::new(self.clone()))
} }
@@ -583,7 +562,7 @@ impl OKDSetup03ControlPlaneInterpret {
} }
#[async_trait] #[async_trait]
impl<T: Topology> Interpret<T> for OKDSetup03ControlPlaneInterpret { impl Interpret<HAClusterTopology> for OKDSetup03ControlPlaneInterpret {
fn get_name(&self) -> InterpretName { fn get_name(&self) -> InterpretName {
InterpretName::Custom("OKDSetup03ControlPlane") InterpretName::Custom("OKDSetup03ControlPlane")
} }
@@ -603,7 +582,7 @@ impl<T: Topology> Interpret<T> for OKDSetup03ControlPlaneInterpret {
async fn execute( async fn execute(
&self, &self,
_inventory: &Inventory, _inventory: &Inventory,
_topology: &T, _topology: &HAClusterTopology,
) -> Result<Outcome, InterpretError> { ) -> Result<Outcome, InterpretError> {
self.render_and_reboot().await?; self.render_and_reboot().await?;
self.persist_network_bond().await?; self.persist_network_bond().await?;
@@ -623,8 +602,8 @@ impl<T: Topology> Interpret<T> for OKDSetup03ControlPlaneInterpret {
#[derive(Debug, Clone, Serialize, new)]
struct OKDSetup04WorkersScore {}
-impl<T: Topology> Score<T> for OKDSetup04WorkersScore {
-    fn create_interpret(&self) -> Box<dyn Interpret<T>> {
impl Score<HAClusterTopology> for OKDSetup04WorkersScore {
    fn create_interpret(&self) -> Box<dyn Interpret<HAClusterTopology>> {
        Box::new(OKDSetup04WorkersInterpret::new(self.clone()))
    }
@@ -657,7 +636,7 @@ impl OKDSetup04WorkersInterpret {
}
#[async_trait]
-impl<T: Topology> Interpret<T> for OKDSetup04WorkersInterpret {
impl Interpret<HAClusterTopology> for OKDSetup04WorkersInterpret {
    fn get_name(&self) -> InterpretName {
        InterpretName::Custom("OKDSetup04Workers")
    }
@@ -677,7 +656,7 @@ impl<T: Topology> Interpret<T> for OKDSetup04WorkersInterpret {
    async fn execute(
        &self,
        _inventory: &Inventory,
-        _topology: &T,
        _topology: &HAClusterTopology,
    ) -> Result<Outcome, InterpretError> {
        self.render_and_reboot().await?;
        Ok(Outcome::new(
@@ -695,8 +674,8 @@ impl<T: Topology> Interpret<T> for OKDSetup04WorkersInterpret {
#[derive(Debug, Clone, Serialize, new)]
struct OKDSetup05SanityCheckScore {}
-impl<T: Topology> Score<T> for OKDSetup05SanityCheckScore {
-    fn create_interpret(&self) -> Box<dyn Interpret<T>> {
impl Score<HAClusterTopology> for OKDSetup05SanityCheckScore {
    fn create_interpret(&self) -> Box<dyn Interpret<HAClusterTopology>> {
        Box::new(OKDSetup05SanityCheckInterpret::new(self.clone()))
    }
@@ -729,7 +708,7 @@ impl OKDSetup05SanityCheckInterpret {
}
#[async_trait]
-impl<T: Topology> Interpret<T> for OKDSetup05SanityCheckInterpret {
impl Interpret<HAClusterTopology> for OKDSetup05SanityCheckInterpret {
    fn get_name(&self) -> InterpretName {
        InterpretName::Custom("OKDSetup05SanityCheck")
    }
@@ -749,7 +728,7 @@ impl<T: Topology> Interpret<T> for OKDSetup05SanityCheckInterpret {
    async fn execute(
        &self,
        _inventory: &Inventory,
-        _topology: &T,
        _topology: &HAClusterTopology,
    ) -> Result<Outcome, InterpretError> {
        self.run_checks().await?;
        Ok(Outcome::new(
@@ -765,13 +744,10 @@ impl<T: Topology> Interpret<T> for OKDSetup05SanityCheckInterpret {
// -------------------------------------------------------------------------------------------------
#[derive(Debug, Clone, Serialize, new)]
-struct OKDSetup06InstallationReportScore {
-    public_domain: String,
-    internal_domain: String,
-}
struct OKDSetup06InstallationReportScore {}
-impl<T: Topology> Score<T> for OKDSetup06InstallationReportScore {
-    fn create_interpret(&self) -> Box<dyn Interpret<T>> {
impl Score<HAClusterTopology> for OKDSetup06InstallationReportScore {
    fn create_interpret(&self) -> Box<dyn Interpret<HAClusterTopology>> {
        Box::new(OKDSetup06InstallationReportInterpret::new(self.clone()))
    }
@@ -798,16 +774,13 @@ impl OKDSetup06InstallationReportInterpret {
    }
    async fn generate(&self) -> Result<(), InterpretError> {
-        info!(
-            "[Report] Generating installation report for {} / {}",
-            self.score.public_domain, self.score.internal_domain
-        );
        info!("[Report] Generating OKD installation report",);
        Ok(())
    }
}
#[async_trait]
-impl<T: Topology> Interpret<T> for OKDSetup06InstallationReportInterpret {
impl Interpret<HAClusterTopology> for OKDSetup06InstallationReportInterpret {
    fn get_name(&self) -> InterpretName {
        InterpretName::Custom("OKDSetup06InstallationReport")
    }
@@ -827,7 +800,7 @@ impl<T: Topology> Interpret<T> for OKDSetup06InstallationReportInterpret {
    async fn execute(
        &self,
        _inventory: &Inventory,
-        _topology: &T,
        _topology: &HAClusterTopology,
    ) -> Result<Outcome, InterpretError> {
        self.generate().await?;
        Ok(Outcome::new(
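For context, every step in this file follows the same Score to Interpret pairing against the concrete HAClusterTopology. A minimal sketch of how such scores are driven; the run_okd_scores helper and the way Inventory and HAClusterTopology values are obtained are assumptions for illustration, not part of this changeset:

// Illustrative sketch only: drive a sequence of setup scores against one topology.
async fn run_okd_scores(
    scores: Vec<Box<dyn Score<HAClusterTopology>>>,
    inventory: &Inventory,
    topology: &HAClusterTopology,
) -> Result<(), InterpretError> {
    for score in scores {
        // Each score builds its interpret, which performs the actual work.
        let interpret = score.create_interpret();
        interpret.execute(inventory, topology).await?;
    }
    Ok(())
}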
View File
@@ -1,9 +1,9 @@
use askama::Template;
use async_trait::async_trait;
use derive_new::new;
-use harmony_types::net::Url;
use harmony_types::net::{IpAddress, Url};
use serde::Serialize;
-use std::net::IpAddr;
use std::net::{IpAddr, Ipv4Addr};
use crate::{
    data::{FileContent, FilePath, Version},
@@ -46,6 +46,16 @@ impl<T: Topology + DhcpServer + TftpServer + HttpServer + Router> Interpret<T> f
    ) -> Result<Outcome, InterpretError> {
        let gateway_ip = topology.get_gateway();
        let dhcp_server_ip = match DhcpServer::get_ip(topology) {
            std::net::IpAddr::V4(ipv4_addr) => ipv4_addr,
            std::net::IpAddr::V6(_ipv6_addr) => todo!("Support ipv6 someday"),
        };
        // TODO this could overflow, we should use proper subnet maths here instead of an ip
        // address and guessing the subnet size from there
        let start = Ipv4Addr::from(u32::from(dhcp_server_ip) + 100);
        let end = Ipv4Addr::from(u32::from(dhcp_server_ip) + 150);
        let scores: Vec<Box<dyn Score<T>>> = vec![
            Box::new(DhcpScore {
                host_binding: vec![],
@@ -54,6 +64,7 @@ impl<T: Topology + DhcpServer + TftpServer + HttpServer + Router> Interpret<T> f
                filename: Some("undionly.kpxe".to_string()),
                filename64: Some("ipxe.efi".to_string()),
                filenameipxe: Some(format!("http://{gateway_ip}:8080/boot.ipxe").to_string()),
                dhcp_range: (IpAddress::from(start), IpAddress::from(end)),
            }),
            Box::new(TftpScore {
                files_to_serve: Url::LocalFolder("./data/pxe/okd/tftpboot/".to_string()),
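The TODO above flags that adding fixed offsets to the DHCP server address can run past the end of the subnet. A minimal sketch of the suggested subnet math, using only the standard library; the function name and the assumption that a prefix length shorter than /31 is known are illustrative, not part of this changeset:

use std::net::Ipv4Addr;

// Clamp a DHCP range inside the subnet described by `server_ip`/`prefix_len`.
fn dhcp_range_in_subnet(server_ip: Ipv4Addr, prefix_len: u8, first: u32, last: u32) -> (Ipv4Addr, Ipv4Addr) {
    let mask = if prefix_len == 0 { 0 } else { u32::MAX << (32 - prefix_len) };
    let network = u32::from(server_ip) & mask;
    let broadcast = network | !mask;
    // Keep the range strictly between network + 1 and broadcast - 1.
    let start = network.saturating_add(first).clamp(network + 1, broadcast - 1);
    let end = network.saturating_add(last).clamp(start, broadcast - 1);
    (Ipv4Addr::from(start), Ipv4Addr::from(end))
}

// Example: a /24 with the server at .1 still yields .100 through .150,
// but a smaller subnet is clamped instead of overflowing:
// dhcp_range_in_subnet(Ipv4Addr::new(192, 168, 1, 1), 24, 100, 150)
//     == (Ipv4Addr::new(192, 168, 1, 100), Ipv4Addr::new(192, 168, 1, 150))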
View File
@@ -9,6 +9,7 @@ use config::INFISICAL_ENVIRONMENT;
use config::INFISICAL_PROJECT_ID;
use config::INFISICAL_URL;
use config::SECRET_STORE;
use log::debug;
use serde::{Serialize, de::DeserializeOwned};
use std::fmt;
use store::InfisicalSecretStore;
@@ -101,6 +102,7 @@ impl SecretManager {
    /// Retrieves and deserializes a secret.
    pub async fn get<T: Secret>() -> Result<T, SecretStoreError> {
        let manager = get_secret_manager().await;
        debug!("Getting secret ns {} key {}", &manager.namespace, T::KEY);
        let raw_value = manager.store.get_raw(&manager.namespace, T::KEY).await?;
        serde_json::from_slice(&raw_value).map_err(|e| SecretStoreError::Deserialization {
            key: T::KEY.to_string(),
View File
@@ -1,5 +1,5 @@
use async_trait::async_trait;
-use log::info;
use log::{debug, info};
use std::path::{Path, PathBuf};
use crate::{SecretStore, SecretStoreError};
@@ -24,7 +24,7 @@ impl SecretStore for LocalFileSecretStore {
            .join("secrets");
        let file_path = Self::get_file_path(&data_dir, ns, key);
-        info!(
        debug!(
            "LOCAL_STORE: Getting key '{key}' from namespace '{ns}' at {}",
            file_path.display()
        );
View File
@@ -48,6 +48,12 @@ impl From<String> for Id {
    }
}
impl From<Id> for String {
    fn from(value: Id) -> Self {
        value.to_string()
    }
}
impl std::fmt::Display for Id {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.write_str(&self.value)
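A quick round-trip showing how the new conversion complements the existing From<String> and Display impls; the module path for Id is assumed for illustration:

// Sketch: String -> Id -> String round-trip using the impls shown above.
let id = Id::from("bootstrap-host".to_string());
assert_eq!(id.to_string(), "bootstrap-host");
let back: String = id.into(); // uses the new From<Id> for String impl
assert_eq!(back, "bootstrap-host");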
View File
@@ -0,0 +1,5 @@
CREATE TABLE IF NOT EXISTS host_role_mapping (
id INTEGER PRIMARY KEY AUTOINCREMENT,
host_id TEXT NOT NULL,
role TEXT NOT NULL
);
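Given this migration, reading a role assignment back with sqlx might look like the following sketch; the SqlitePool parameter and the helper name are assumptions, not taken from this changeset:

use sqlx::SqlitePool;

// Illustrative only: fetch the ids of hosts that were assigned a given role.
async fn hosts_with_role(pool: &SqlitePool, role: &str) -> Result<Vec<String>, sqlx::Error> {
    let host_ids: Vec<String> =
        sqlx::query_scalar("SELECT host_id FROM host_role_mapping WHERE role = ?")
            .bind(role)
            .fetch_all(pool)
            .await?;
    Ok(host_ids)
}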
View File
@@ -36,6 +36,27 @@ pub struct DnsMasq {
pub dhcp_options: Vec<DhcpOptions>,
pub dhcp_boot: Vec<DhcpBoot>,
pub dhcp_tags: Vec<RawXml>,
pub hosts: Vec<DnsmasqHost>,
}
#[derive(Default, PartialEq, Debug, YaSerialize, YaDeserialize, Clone)]
#[yaserde(rename = "hosts")]
pub struct DnsmasqHost {
#[yaserde(attribute = true)]
pub uuid: String,
pub host: String,
pub domain: MaybeString,
pub local: MaybeString,
pub ip: MaybeString,
pub cnames: MaybeString,
pub client_id: MaybeString,
pub hwaddr: MaybeString,
pub lease_time: MaybeString,
pub ignore: Option<u8>,
pub set_tag: MaybeString,
pub descr: MaybeString,
pub comments: MaybeString,
pub aliases: MaybeString,
}
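To eyeball the XML this new struct produces, a small serialization sketch with yaserde; all values are illustrative:

// Sketch only: serialize a DnsmasqHost and inspect the generated element names.
let host = DnsmasqHost {
    uuid: "3f2b6c9e-0000-0000-0000-000000000000".to_string(),
    host: "bootstrap".to_string(),
    ip: MaybeString::from("192.168.1.10"),
    hwaddr: MaybeString::from("00:11:22:33:44:55"),
    local: MaybeString::from("1"),
    ignore: Some(0),
    ..Default::default()
};
let xml = yaserde::ser::to_string(&host).expect("serialization should succeed");
println!("{xml}");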
// Represents the <dhcp> element and its nested fields.
View File
@@ -226,6 +226,7 @@ mod tests {
"src/tests/data/config-full-ncd0.xml", "src/tests/data/config-full-ncd0.xml",
"src/tests/data/config-full-25.7.xml", "src/tests/data/config-full-25.7.xml",
"src/tests/data/config-full-25.7-dummy-dnsmasq-options.xml", "src/tests/data/config-full-25.7-dummy-dnsmasq-options.xml",
"src/tests/data/config-25.7-dnsmasq-static-host.xml",
] { ] {
let mut test_file_path = PathBuf::from(env!("CARGO_MANIFEST_DIR")); let mut test_file_path = PathBuf::from(env!("CARGO_MANIFEST_DIR"));
test_file_path.push(path); test_file_path.push(path);
View File
@@ -1,9 +1,12 @@
// dnsmasq.rs
use crate::modules::dhcp::DhcpError;
-use log::{debug, info};
use log::{debug, info, warn};
use opnsense_config_xml::dnsmasq::{DhcpRange, DnsMasq, DnsmasqHost}; // Assuming DhcpRange is defined in opnsense_config_xml::dnsmasq
use opnsense_config_xml::{MaybeString, StaticMap};
use std::collections::HashSet;
use std::net::Ipv4Addr;
use std::sync::Arc;
use uuid::Uuid;
use opnsense_config_xml::OPNsense;
@@ -25,74 +28,156 @@ impl<'a> DhcpConfigDnsMasq<'a> {
}
}
-/// Removes a static mapping by its MAC address.
-/// Static mappings are stored in the <dhcpd> section of the config, shared with the ISC module.
-pub fn remove_static_mapping(&mut self, mac: &str) {
-let lan_dhcpd = self.get_lan_dhcpd();
-lan_dhcpd
-.staticmaps
-.retain(|static_entry| static_entry.mac != mac);
/// Removes a MAC address from a static mapping.
/// If the mapping has no other MAC addresses associated with it, the entire host entry is removed.
pub fn remove_static_mapping(&mut self, mac_to_remove: &str) {
let dnsmasq = self.get_dnsmasq();
// Update hwaddr fields for hosts that contain the MAC, removing it from the comma-separated list.
for host in dnsmasq.hosts.iter_mut() {
let mac = host.hwaddr.content_string();
let original_macs: Vec<&str> = mac.split(',').collect();
if original_macs
.iter()
.any(|m| m.eq_ignore_ascii_case(mac_to_remove))
{
let updated_macs: Vec<&str> = original_macs
.into_iter()
.filter(|m| !m.eq_ignore_ascii_case(mac_to_remove))
.collect();
host.hwaddr = updated_macs.join(",").into();
}
}
-}
-/// Retrieves a mutable reference to the LAN interface's DHCP configuration.
-/// This is located in the shared <dhcpd> section of the config.
-fn get_lan_dhcpd(&mut self) -> &mut opnsense_config_xml::DhcpInterface {
-&mut self
-.opnsense
-.dhcpd
-.elements
-.iter_mut()
-.find(|(name, _config)| name == "lan")
-.expect("Interface lan should have dhcpd activated")
-.1
-}
// Remove any host entries that no longer have any MAC addresses.
dnsmasq
.hosts
.retain(|host_entry| !host_entry.hwaddr.content_string().is_empty());
}
/// Retrieves a mutable reference to the DnsMasq configuration.
/// This is located in the <dnsmasq> section of the OPNsense config.
fn get_dnsmasq(&mut self) -> &mut DnsMasq {
self.opnsense
.dnsmasq
.as_mut()
.expect("Dnsmasq config must be initialized")
}
-/// Adds a new static DHCP mapping.
-/// Validates the MAC address and checks for existing mappings to prevent conflicts.
/// Adds or updates a static DHCP mapping.
///
/// This function implements specific logic to handle existing entries:
/// - If no host exists for the given IP or hostname, a new entry is created.
/// - If exactly one host exists for the IP and/or hostname, the new MAC is appended to it.
/// - It will panic if the IP and hostname exist but point to two different host entries,
/// as this represents an unresolvable conflict.
/// - It will also panic if multiple entries are found for the IP or hostname, indicating an
/// ambiguous state.
pub fn add_static_mapping(
&mut self,
mac: &str,
ipaddr: Ipv4Addr,
hostname: &str,
) -> Result<(), DhcpError> {
-let mac = mac.to_string();
-let hostname = hostname.to_string();
-let lan_dhcpd = self.get_lan_dhcpd();
-let existing_mappings: &mut Vec<StaticMap> = &mut lan_dhcpd.staticmaps;
-if !Self::is_valid_mac(&mac) {
-return Err(DhcpError::InvalidMacAddress(mac));
-}
-// TODO: Validate that the IP address is within a configured DHCP range.
if !Self::is_valid_mac(mac) {
return Err(DhcpError::InvalidMacAddress(mac.to_string()));
}
let ip_str = ipaddr.to_string();
let hosts = &mut self.get_dnsmasq().hosts;
-if existing_mappings
-.iter()
-.any(|m| m.ipaddr == ipaddr.to_string() && m.mac == mac)
-{
-info!("Mapping already exists for {} [{}], skipping", ipaddr, mac);
-return Ok(());
-}
-if existing_mappings
-.iter()
-.any(|m| m.ipaddr == ipaddr.to_string())
let ip_indices: Vec<usize> = hosts
.iter()
.enumerate()
.filter(|(_, h)| h.ip.content_string() == ip_str)
.map(|(i, _)| i)
.collect();
let hostname_indices: Vec<usize> = hosts
.iter()
.enumerate()
.filter(|(_, h)| h.host == hostname)
.map(|(i, _)| i)
.collect();
let ip_set: HashSet<usize> = ip_indices.iter().cloned().collect();
let hostname_set: HashSet<usize> = hostname_indices.iter().cloned().collect();
if !ip_indices.is_empty()
&& !hostname_indices.is_empty()
&& ip_set.intersection(&hostname_set).count() == 0
-{
-return Err(DhcpError::IpAddressAlreadyMapped(ipaddr.to_string()));
-}
-if existing_mappings.iter().any(|m| m.mac == mac) {
-return Err(DhcpError::MacAddressAlreadyMapped(mac));
-}
-let static_map = StaticMap {
-mac,
-ipaddr: ipaddr.to_string(),
-hostname: hostname,
{
panic!(
"Configuration conflict: IP {} and hostname '{}' exist, but in different static host entries.",
ipaddr, hostname
);
}
let mut all_indices: Vec<&usize> = ip_set.union(&hostname_set).collect();
all_indices.sort();
match all_indices.len() {
0 => {
info!(
"Creating new static host for {} ({}) with MAC {}",
hostname, ipaddr, mac
);
let new_host = DnsmasqHost {
uuid: Uuid::new_v4().to_string(),
host: hostname.to_string(),
ip: ip_str.into(),
hwaddr: mac.to_string().into(),
local: MaybeString::from("1"),
ignore: Some(0),
-..Default::default()
-};
..Default::default()
};
hosts.push(new_host);
}
1 => {
let host_index = *all_indices[0];
let host_to_modify = &mut hosts[host_index];
let host_to_modify_ip = host_to_modify.ip.content_string();
if host_to_modify_ip != ip_str {
warn!(
"Hostname '{}' already exists with a different IP ({}). Appending MAC {}.",
hostname, host_to_modify_ip, mac
);
} else if host_to_modify.host != hostname {
warn!(
"IP {} already exists with a different hostname ('{}'). Appending MAC {}.",
ipaddr, host_to_modify.host, mac
);
}
if !host_to_modify
.hwaddr
.content_string()
.split(',')
.any(|m| m.eq_ignore_ascii_case(mac))
{
info!(
"Appending MAC {} to existing static host for {} ({})",
mac, host_to_modify.host, host_to_modify_ip
);
let mut updated_macs = host_to_modify.hwaddr.content_string().to_string();
updated_macs.push(',');
updated_macs.push_str(mac);
host_to_modify.hwaddr = updated_macs.into();
} else {
info!(
"MAC {} already present in static host entry for {} ({}). No changes made.",
mac, host_to_modify.host, host_to_modify_ip
);
}
}
_ => {
panic!(
"Configuration conflict: Found multiple host entries matching IP {} and/or hostname '{}'. Cannot resolve automatically.",
ipaddr, hostname
);
}
}
-existing_mappings.push(static_map);
Ok(())
}
@@ -110,6 +195,8 @@ impl<'a> DhcpConfigDnsMasq<'a> {
/// Retrieves the list of current static mappings by shelling out to `configctl`.
/// This provides the real-time state from the running system.
pub async fn get_static_mappings(&self) -> Result<Vec<StaticMap>, Error> {
// Note: This command is for the 'dhcpd' service. If dnsmasq uses a different command
// or key, this will need to be adjusted.
let list_static_output = self
.opnsense_shell
.exec("configctl dhcpd list static")
@@ -117,6 +204,8 @@ impl<'a> DhcpConfigDnsMasq<'a> {
let value: serde_json::Value = serde_json::from_str(&list_static_output)
.unwrap_or_else(|_| panic!("Got invalid json from configctl {list_static_output}"));
// The JSON output key might be 'dhcpd' even when dnsmasq is the backend.
let static_maps = value["dhcpd"]
.as_array()
.ok_or(Error::Command(format!(
@@ -135,6 +224,36 @@ impl<'a> DhcpConfigDnsMasq<'a> {
Ok(static_maps)
}
pub async fn set_dhcp_range(&mut self, start: &str, end: &str) -> Result<(), DhcpError> {
let dnsmasq = self.get_dnsmasq();
let ranges = &mut dnsmasq.dhcp_ranges;
// Assuming DnsMasq has dhcp_ranges: Vec<DhcpRange>
// Find existing range for "lan" interface
if let Some(range) = ranges
.iter_mut()
.find(|r| r.interface == Some("lan".to_string()))
{
// Update existing range
range.start_addr = Some(start.to_string());
range.end_addr = Some(end.to_string());
} else {
// Create new range
let new_range = DhcpRange {
uuid: Some(Uuid::new_v4().to_string()),
interface: Some("lan".to_string()),
start_addr: Some(start.to_string()),
end_addr: Some(end.to_string()),
domain_type: Some("range".to_string()),
nosync: Some(0),
..Default::default()
};
ranges.push(new_range);
}
Ok(())
}
pub async fn set_pxe_options(
&self,
tftp_ip: Option<String>,
@@ -142,9 +261,9 @@ impl<'a> DhcpConfigDnsMasq<'a> {
efi_filename: String,
ipxe_filename: String,
) -> Result<(), DhcpError> {
-// As of writing this opnsense does not support negative tags, and the dnsmasq config is a
-// bit complicated anyways. So we are writing directly a dnsmasq config file to
-// /usr/local/etc/dnsmasq.conf.d
// OPNsense does not support negative tags via its API for dnsmasq, and the required
// logic is complex. Therefore, we write a configuration file directly to the
// dnsmasq.conf.d directory to achieve the desired PXE boot behavior.
let tftp_str = tftp_ip.map_or(String::new(), |i| format!(",{i},{i}"));
let config = format!(
@@ -185,3 +304,274 @@ dhcp-boot=tag:bios,{bios_filename}{tftp_str}
Ok(())
}
}
#[cfg(test)]
mod test {
use crate::config::DummyOPNSenseShell;
use super::*;
use opnsense_config_xml::OPNsense;
use std::net::Ipv4Addr;
use std::sync::Arc;
/// Helper function to create a DnsmasqHost with minimal boilerplate.
fn create_host(uuid: &str, host: &str, ip: &str, hwaddr: &str) -> DnsmasqHost {
DnsmasqHost {
uuid: uuid.to_string(),
host: host.to_string(),
ip: ip.into(),
hwaddr: hwaddr.into(),
local: MaybeString::from("1"),
ignore: Some(0),
..Default::default()
}
}
/// Helper to set up the test environment with an initial OPNsense configuration.
fn setup_test_env(initial_hosts: Vec<DnsmasqHost>) -> DhcpConfigDnsMasq<'static> {
let opnsense_config = Box::leak(Box::new(OPNsense {
dnsmasq: Some(DnsMasq {
hosts: initial_hosts,
..Default::default()
}),
..Default::default()
}));
DhcpConfigDnsMasq::new(opnsense_config, Arc::new(DummyOPNSenseShell {}))
}
#[test]
fn test_add_first_static_mapping() {
let mut dhcp_config = setup_test_env(vec![]);
let ip = Ipv4Addr::new(192, 168, 1, 10);
let mac = "00:11:22:33:44:55";
let hostname = "new-host";
dhcp_config.add_static_mapping(mac, ip, hostname).unwrap();
let hosts = &dhcp_config.opnsense.dnsmasq.as_ref().unwrap().hosts;
assert_eq!(hosts.len(), 1);
let host = &hosts[0];
assert_eq!(host.host, hostname);
assert_eq!(host.ip, ip.to_string().into());
assert_eq!(host.hwaddr.content_string(), mac);
assert!(Uuid::parse_str(&host.uuid).is_ok());
}
#[test]
fn test_add_mac_to_existing_host_by_ip_and_hostname() {
let initial_host = create_host(
"uuid-1",
"existing-host",
"192.168.1.20",
"AA:BB:CC:DD:EE:FF",
);
let mut dhcp_config = setup_test_env(vec![initial_host]);
let ip = Ipv4Addr::new(192, 168, 1, 20);
let new_mac = "00:11:22:33:44:55";
let hostname = "existing-host";
dhcp_config
.add_static_mapping(new_mac, ip, hostname)
.unwrap();
let hosts = &dhcp_config.opnsense.dnsmasq.as_ref().unwrap().hosts;
assert_eq!(hosts.len(), 1);
let host = &hosts[0];
assert_eq!(
host.hwaddr.content_string(),
"AA:BB:CC:DD:EE:FF,00:11:22:33:44:55"
);
}
#[test]
fn test_add_mac_to_existing_host_by_ip_only() {
let initial_host = create_host(
"uuid-1",
"existing-host",
"192.168.1.20",
"AA:BB:CC:DD:EE:FF",
);
let mut dhcp_config = setup_test_env(vec![initial_host]);
let ip = Ipv4Addr::new(192, 168, 1, 20);
let new_mac = "00:11:22:33:44:55";
// Using a different hostname should still find the host by IP and log a warning.
dhcp_config
.add_static_mapping(new_mac, ip, "different-host-name")
.unwrap();
let hosts = &dhcp_config.opnsense.dnsmasq.as_ref().unwrap().hosts;
assert_eq!(hosts.len(), 1);
let host = &hosts[0];
assert_eq!(
host.hwaddr.content_string(),
"AA:BB:CC:DD:EE:FF,00:11:22:33:44:55"
);
assert_eq!(host.host, "existing-host"); // Original hostname should be preserved.
}
#[test]
fn test_add_mac_to_existing_host_by_hostname_only() {
let initial_host = create_host(
"uuid-1",
"existing-host",
"192.168.1.20",
"AA:BB:CC:DD:EE:FF",
);
let mut dhcp_config = setup_test_env(vec![initial_host]);
let new_mac = "00:11:22:33:44:55";
let hostname = "existing-host";
// Using a different IP should still find the host by hostname and log a warning.
dhcp_config
.add_static_mapping(new_mac, Ipv4Addr::new(192, 168, 1, 99), hostname)
.unwrap();
let hosts = &dhcp_config.opnsense.dnsmasq.as_ref().unwrap().hosts;
assert_eq!(hosts.len(), 1);
let host = &hosts[0];
assert_eq!(
host.hwaddr.content_string(),
"AA:BB:CC:DD:EE:FF,00:11:22:33:44:55"
);
assert_eq!(host.ip.content_string(), "192.168.1.20"); // Original IP should be preserved.
}
#[test]
fn test_add_duplicate_mac_to_host() {
let initial_mac = "AA:BB:CC:DD:EE:FF";
let initial_host = create_host("uuid-1", "host-1", "192.168.1.20", initial_mac);
let mut dhcp_config = setup_test_env(vec![initial_host]);
dhcp_config
.add_static_mapping(initial_mac, Ipv4Addr::new(192, 168, 1, 20), "host-1")
.unwrap();
let hosts = &dhcp_config.opnsense.dnsmasq.as_ref().unwrap().hosts;
assert_eq!(hosts.len(), 1);
assert_eq!(hosts[0].hwaddr.content_string(), initial_mac); // No change, no duplication.
}
#[test]
fn test_add_invalid_mac_address() {
let mut dhcp_config = setup_test_env(vec![]);
let result =
dhcp_config.add_static_mapping("invalid-mac", Ipv4Addr::new(10, 0, 0, 1), "host");
assert!(matches!(result, Err(DhcpError::InvalidMacAddress(_))));
}
#[test]
#[should_panic(
expected = "Configuration conflict: IP 192.168.1.10 and hostname 'host-b' exist, but in different static host entries."
)]
fn test_panic_on_conflicting_ip_and_hostname() {
let host_a = create_host("uuid-a", "host-a", "192.168.1.10", "AA:AA:AA:AA:AA:AA");
let host_b = create_host("uuid-b", "host-b", "192.168.1.20", "BB:BB:BB:BB:BB:BB");
let mut dhcp_config = setup_test_env(vec![host_a, host_b]);
// This IP belongs to host-a, but the hostname belongs to host-b.
dhcp_config
.add_static_mapping(
"CC:CC:CC:CC:CC:CC",
Ipv4Addr::new(192, 168, 1, 10),
"host-b",
)
.unwrap();
}
#[test]
#[should_panic(
expected = "Configuration conflict: Found multiple host entries matching IP 192.168.1.30 and/or hostname 'new-host'."
)]
fn test_panic_on_multiple_ip_matches() {
let host_a = create_host("uuid-a", "host-a", "192.168.1.30", "AA:AA:AA:AA:AA:AA");
let host_b = create_host("uuid-b", "host-b", "192.168.1.30", "BB:BB:BB:BB:BB:BB");
let mut dhcp_config = setup_test_env(vec![host_a, host_b]);
// This IP is ambiguous.
dhcp_config
.add_static_mapping(
"CC:CC:CC:CC:CC:CC",
Ipv4Addr::new(192, 168, 1, 30),
"new-host",
)
.unwrap();
}
#[test]
fn test_remove_mac_from_multi_mac_host() {
let host = create_host("uuid-1", "host-1", "192.168.1.50", "mac-1,mac-2,mac-3");
let mut dhcp_config = setup_test_env(vec![host]);
dhcp_config.remove_static_mapping("mac-2");
let hosts = &dhcp_config.opnsense.dnsmasq.as_ref().unwrap().hosts;
assert_eq!(hosts.len(), 1);
assert_eq!(hosts[0].hwaddr.content_string(), "mac-1,mac-3");
}
#[test]
fn test_remove_last_mac_from_host() {
let host = create_host("uuid-1", "host-1", "192.168.1.50", "mac-1");
let mut dhcp_config = setup_test_env(vec![host]);
dhcp_config.remove_static_mapping("mac-1");
let hosts = &dhcp_config.opnsense.dnsmasq.as_ref().unwrap().hosts;
assert!(hosts.is_empty());
}
#[test]
fn test_remove_non_existent_mac() {
let host = create_host("uuid-1", "host-1", "192.168.1.50", "mac-1,mac-2");
let mut dhcp_config = setup_test_env(vec![host.clone()]);
dhcp_config.remove_static_mapping("mac-nonexistent");
let hosts = &dhcp_config.opnsense.dnsmasq.as_ref().unwrap().hosts;
assert_eq!(hosts.len(), 1);
assert_eq!(hosts[0], host); // The host should be unchanged.
}
#[test]
fn test_remove_mac_case_insensitively() {
let host = create_host("uuid-1", "host-1", "192.168.1.50", "AA:BB:CC:DD:EE:FF");
let mut dhcp_config = setup_test_env(vec![host]);
dhcp_config.remove_static_mapping("aa:bb:cc:dd:ee:ff");
let hosts = &dhcp_config.opnsense.dnsmasq.as_ref().unwrap().hosts;
assert!(hosts.is_empty());
}
#[test]
fn test_remove_mac_from_correct_host_only() {
let host1 = create_host(
"uuid-1",
"host-1",
"192.168.1.50",
"AA:AA:AA:AA:AA:AA,BB:BB:BB:BB:BB:BB",
);
let host2 = create_host(
"uuid-2",
"host-2",
"192.168.1.51",
"CC:CC:CC:CC:CC:CC,DD:DD:DD:DD:DD:DD",
);
let mut dhcp_config = setup_test_env(vec![host1.clone(), host2.clone()]);
dhcp_config.remove_static_mapping("AA:AA:AA:AA:AA:AA");
let hosts = &dhcp_config.opnsense.dnsmasq.as_ref().unwrap().hosts;
assert_eq!(hosts.len(), 2);
let updated_host1 = hosts.iter().find(|h| h.uuid == "uuid-1").unwrap();
let unchanged_host2 = hosts.iter().find(|h| h.uuid == "uuid-2").unwrap();
assert_eq!(updated_host1.hwaddr.content_string(), "BB:BB:BB:BB:BB:BB");
assert_eq!(
unchanged_host2.hwaddr.content_string(),
"CC:CC:CC:CC:CC:CC,DD:DD:DD:DD:DD:DD"
);
}
}
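The module above covers adding and removing mappings; set_dhcp_range is not exercised yet. A test in the same style might look like this sketch, assuming a tokio test runtime since the method is async:

#[tokio::test]
async fn test_set_dhcp_range_creates_lan_range() {
    let mut dhcp_config = setup_test_env(vec![]);
    dhcp_config
        .set_dhcp_range("192.168.1.100", "192.168.1.150")
        .await
        .unwrap();
    let ranges = &dhcp_config.opnsense.dnsmasq.as_ref().unwrap().dhcp_ranges;
    assert_eq!(ranges.len(), 1);
    assert_eq!(ranges[0].interface, Some("lan".to_string()));
    assert_eq!(ranges[0].start_addr, Some("192.168.1.100".to_string()));
    assert_eq!(ranges[0].end_addr, Some("192.168.1.150".to_string()));
}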
File diff suppressed because it is too large