Compare commits
feat/multi ... 5142e2dd2d
12 Commits

| Author | SHA1 | Date |
|---|---|---|
| | 5142e2dd2d | |
| | ceea03d6ce | |
| | f1209b3823 | |
| | 6f746d4c88 | |
| | 75f27a2b85 | |
| | d24ea23413 | |
| | 0070373714 | |
| | f6e665f990 | |
| | 241980ebec | |
| | 35a459f63c | |
| | f076d36297 | |
| | 138e414727 | |
.gitattributes (vendored, 2 additions)

@@ -2,3 +2,5 @@ bootx64.efi filter=lfs diff=lfs merge=lfs -text
 grubx64.efi filter=lfs diff=lfs merge=lfs -text
 initrd filter=lfs diff=lfs merge=lfs -text
 linux filter=lfs diff=lfs merge=lfs -text
+data/okd/bin/* filter=lfs diff=lfs merge=lfs -text
+data/okd/installer_image/* filter=lfs diff=lfs merge=lfs -text
.sqlx/query-2ea29df2326f7c84bd4100ad510a3fd4878dc2e217dc83f9bf45a402dfd62a91.json (generated, new file, 20 lines)

@@ -0,0 +1,20 @@
+{
+  "db_name": "SQLite",
+  "query": "SELECT host_id FROM host_role_mapping WHERE role = ?",
+  "describe": {
+    "columns": [
+      {
+        "name": "host_id",
+        "ordinal": 0,
+        "type_info": "Text"
+      }
+    ],
+    "parameters": {
+      "Right": 1
+    },
+    "nullable": [
+      false
+    ]
+  },
+  "hash": "2ea29df2326f7c84bd4100ad510a3fd4878dc2e217dc83f9bf45a402dfd62a91"
+}
.sqlx/query-8d247918eca10a88b784ee353db090c94a222115c543231f2140cba27bd0f067.json (generated, new file, 32 lines)

@@ -0,0 +1,32 @@
+{
+  "db_name": "SQLite",
+  "query": "\n SELECT\n p1.id,\n p1.version_id,\n p1.data as \"data: Json<PhysicalHost>\"\n FROM\n physical_hosts p1\n INNER JOIN (\n SELECT\n id,\n MAX(version_id) AS max_version\n FROM\n physical_hosts\n GROUP BY\n id\n ) p2 ON p1.id = p2.id AND p1.version_id = p2.max_version\n ",
+  "describe": {
+    "columns": [
+      {
+        "name": "id",
+        "ordinal": 0,
+        "type_info": "Text"
+      },
+      {
+        "name": "version_id",
+        "ordinal": 1,
+        "type_info": "Text"
+      },
+      {
+        "name": "data: Json<PhysicalHost>",
+        "ordinal": 2,
+        "type_info": "Blob"
+      }
+    ],
+    "parameters": {
+      "Right": 0
+    },
+    "nullable": [
+      false,
+      false,
+      false
+    ]
+  },
+  "hash": "8d247918eca10a88b784ee353db090c94a222115c543231f2140cba27bd0f067"
+}
.sqlx/query-df7a7c9cfdd0972e2e0ce7ea444ba8bc9d708a4fb89d5593a0be2bbebde62aff.json (generated, new file, 12 lines)

@@ -0,0 +1,12 @@
+{
+  "db_name": "SQLite",
+  "query": "\n INSERT INTO host_role_mapping (host_id, role)\n VALUES (?, ?)\n ",
+  "describe": {
+    "columns": [],
+    "parameters": {
+      "Right": 2
+    },
+    "nullable": []
+  },
+  "hash": "df7a7c9cfdd0972e2e0ce7ea444ba8bc9d708a4fb89d5593a0be2bbebde62aff"
+}
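The three JSON files above are sqlx offline query metadata (the kind produced by `cargo sqlx prepare`): each records a query string, its column types, and its parameter count so the `query!`/`query_as!` macros can type-check without a live database. A minimal sketch of the call shape that matches the first file, assuming a `SqlitePool` is available; the table and column names come from the stored query string:

```rust
use sqlx::SqlitePool;

// Hedged sketch: fetch host ids mapped to a given role, matching the query recorded in the
// first metadata file above. `role_str` is whatever textual form the role is stored as.
async fn host_ids_for_role(pool: &SqlitePool, role_str: &str) -> Result<Vec<String>, sqlx::Error> {
    struct HostIdRow {
        host_id: String,
    }

    let rows = sqlx::query_as!(
        HostIdRow,
        "SELECT host_id FROM host_role_mapping WHERE role = ?",
        role_str
    )
    .fetch_all(pool)
    .await?;

    Ok(rows.into_iter().map(|r| r.host_id).collect())
}
```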
Cargo.lock (generated, 685 changes): file diff suppressed because it is too large.
@@ -1,4 +1,3 @@
-use log::debug;
 use mdns_sd::{ServiceDaemon, ServiceEvent};
 
 use crate::SERVICE_TYPE;
@@ -74,7 +73,7 @@ pub async fn discover() {
     // }
 }
 
-async fn discover_example() {
+async fn _discover_example() {
     use mdns_sd::{ServiceDaemon, ServiceEvent};
 
     // Create a daemon
BIN data/okd/bin/kubectl (stored with Git LFS, executable file): binary file not shown
BIN data/okd/bin/oc (stored with Git LFS, executable file): binary file not shown
BIN data/okd/bin/oc_README.md (stored with Git LFS, normal file): binary file not shown
BIN data/okd/bin/openshift-install (stored with Git LFS, executable file): binary file not shown
BIN data/okd/bin/openshift-install_README.md (stored with Git LFS, normal file): binary file not shown
BIN data/okd/installer_image/scos-9.0.20250510-0-live-initramfs.x86_64.img (stored with Git LFS, normal file): binary file not shown
BIN data/okd/installer_image/scos-9.0.20250510-0-live-kernel.x86_64 (stored with Git LFS, normal file): binary file not shown
BIN data/okd/installer_image/scos-9.0.20250510-0-live-rootfs.x86_64.img (stored with Git LFS, normal file): binary file not shown
data/okd/installer_image/scos-live-initramfs.x86_64.img (symbolic link, new file)
@@ -0,0 +1 @@
+scos-9.0.20250510-0-live-initramfs.x86_64.img

data/okd/installer_image/scos-live-kernel.x86_64 (symbolic link, new file)
@@ -0,0 +1 @@
+scos-9.0.20250510-0-live-kernel.x86_64

data/okd/installer_image/scos-live-rootfs.x86_64.img (symbolic link, new file)
@@ -0,0 +1 @@
+scos-9.0.20250510-0-live-rootfs.x86_64.img
@@ -2,7 +2,7 @@ use harmony::{
     inventory::Inventory,
     modules::{
         dummy::{ErrorScore, PanicScore, SuccessScore},
-        inventory::DiscoverInventoryAgentScore,
+        inventory::LaunchDiscoverInventoryAgentScore,
     },
     topology::LocalhostTopology,
 };
@@ -16,7 +16,7 @@ async fn main() {
         Box::new(SuccessScore {}),
         Box::new(ErrorScore {}),
         Box::new(PanicScore {}),
-        Box::new(DiscoverInventoryAgentScore {
+        Box::new(LaunchDiscoverInventoryAgentScore {
            discovery_timeout: Some(10),
        }),
     ],
@@ -5,16 +5,15 @@ use std::{
 
 use cidr::Ipv4Cidr;
 use harmony::{
-    hardware::{FirewallGroup, HostCategory, Location, PhysicalHost, SwitchGroup},
+    hardware::{HostCategory, Location, PhysicalHost, SwitchGroup},
     infra::opnsense::OPNSenseManagementInterface,
     inventory::Inventory,
     modules::{
         http::StaticFilesHttpScore,
-        ipxe::IpxeScore,
         okd::{
             bootstrap_dhcp::OKDBootstrapDhcpScore,
             bootstrap_load_balancer::OKDBootstrapLoadBalancerScore, dhcp::OKDDhcpScore,
-            dns::OKDDnsScore,
+            dns::OKDDnsScore, ipxe::OkdIpxeScore,
         },
         tftp::TftpScore,
     },
@@ -130,8 +129,18 @@ async fn main() {
             "./data/watchguard/pxe-http-files".to_string(),
         )),
         files: vec![],
+        remote_path: None,
+    };
+
+    let kickstart_filename = "inventory.kickstart".to_string();
+    let cluster_pubkey_filename = "cluster_ssh_key.pub".to_string();
+    let harmony_inventory_agent = "harmony_inventory_agent".to_string();
+
+    let ipxe_score = OkdIpxeScore {
+        kickstart_filename,
+        harmony_inventory_agent,
+        cluster_pubkey_filename,
     };
-    let ipxe_score = IpxeScore::new();
 
     harmony_tui::run(
         inventory,
examples/okd_installation/Cargo.toml (new file, 21 lines)

@@ -0,0 +1,21 @@
+[package]
+name = "example-okd-install"
+edition = "2024"
+version.workspace = true
+readme.workspace = true
+license.workspace = true
+publish = false
+
+[dependencies]
+harmony = { path = "../../harmony" }
+harmony_cli = { path = "../../harmony_cli" }
+harmony_types = { path = "../../harmony_types" }
+harmony_secret = { path = "../../harmony_secret" }
+harmony_secret_derive = { path = "../../harmony_secret_derive" }
+cidr = { workspace = true }
+tokio = { workspace = true }
+harmony_macros = { path = "../../harmony_macros" }
+log = { workspace = true }
+env_logger = { workspace = true }
+url = { workspace = true }
+serde.workspace = true
examples/okd_installation/env.sh (new file, 4 lines)

@@ -0,0 +1,4 @@
+export HARMONY_SECRET_NAMESPACE=example-vms
+export HARMONY_SECRET_STORE=file
+export HARMONY_DATABASE_URL=sqlite://harmony_vms.sqlite RUST_LOG=info
+export RUST_LOG=info
examples/okd_installation/src/main.rs (new file, 26 lines)

@@ -0,0 +1,26 @@
+mod topology;
+
+use crate::topology::{get_inventory, get_topology};
+use harmony::{
+    modules::okd::{installation::OKDInstallationScore, ipxe::OkdIpxeScore},
+    score::Score,
+    topology::HAClusterTopology,
+};
+
+#[tokio::main]
+async fn main() {
+    let inventory = get_inventory();
+    let topology = get_topology().await;
+
+    let scores: Vec<Box<dyn Score<HAClusterTopology>>> = vec![
+        Box::new(OkdIpxeScore {
+            kickstart_filename: "inventory.kickstart".to_string(),
+            harmony_inventory_agent: "cluster_ssh_key.pub".to_string(),
+            cluster_pubkey_filename: "harmony_inventory_agent".to_string(),
+        }),
+        Box::new(OKDInstallationScore {}),
+    ];
+    harmony_cli::run(inventory, topology, scores, None)
+        .await
+        .unwrap();
+}
examples/okd_installation/src/topology.rs (new file, 77 lines)

@@ -0,0 +1,77 @@
+use cidr::Ipv4Cidr;
+use harmony::{
+    hardware::{FirewallGroup, HostCategory, Location, PhysicalHost, SwitchGroup},
+    infra::opnsense::OPNSenseManagementInterface,
+    inventory::Inventory,
+    topology::{HAClusterTopology, LogicalHost, UnmanagedRouter},
+};
+use harmony_macros::{ip, ipv4};
+use harmony_secret::{Secret, SecretManager};
+use serde::{Deserialize, Serialize};
+use std::{net::IpAddr, sync::Arc};
+
+#[derive(Secret, Serialize, Deserialize, Debug, PartialEq)]
+struct OPNSenseFirewallConfig {
+    username: String,
+    password: String,
+}
+
+pub async fn get_topology() -> HAClusterTopology {
+    let firewall = harmony::topology::LogicalHost {
+        ip: ip!("192.168.1.1"),
+        name: String::from("opnsense-1"),
+    };
+
+    let config = SecretManager::get::<OPNSenseFirewallConfig>().await;
+    let config = config.unwrap();
+
+    let opnsense = Arc::new(
+        harmony::infra::opnsense::OPNSenseFirewall::new(
+            firewall,
+            None,
+            &config.username,
+            &config.password,
+        )
+        .await,
+    );
+    let lan_subnet = ipv4!("192.168.1.0");
+    let gateway_ipv4 = ipv4!("192.168.1.1");
+    let gateway_ip = IpAddr::V4(gateway_ipv4);
+    harmony::topology::HAClusterTopology {
+        domain_name: "demo.harmony.mcd".to_string(),
+        router: Arc::new(UnmanagedRouter::new(
+            gateway_ip,
+            Ipv4Cidr::new(lan_subnet, 24).unwrap(),
+        )),
+        load_balancer: opnsense.clone(),
+        firewall: opnsense.clone(),
+        tftp_server: opnsense.clone(),
+        http_server: opnsense.clone(),
+        dhcp_server: opnsense.clone(),
+        dns_server: opnsense.clone(),
+        control_plane: vec![LogicalHost {
+            ip: ip!("192.168.1.20"),
+            name: "master".to_string(),
+        }],
+        bootstrap_host: LogicalHost {
+            ip: ip!("192.168.1.10"),
+            name: "bootstrap".to_string(),
+        },
+        workers: vec![],
+        switch: vec![],
+    }
+}
+
+pub fn get_inventory() -> Inventory {
+    Inventory {
+        location: Location::new(
+            "Some virtual machine or maybe a physical machine if you're cool".to_string(),
+            "testopnsense".to_string(),
+        ),
+        switch: SwitchGroup::from([]),
+        firewall_mgmt: Box::new(OPNSenseManagementInterface::new()),
+        storage_host: vec![],
+        worker_host: vec![],
+        control_plane_host: vec![],
+    }
+}
examples/okd_installation/ssh_example_key (new file, 7 lines)

@@ -0,0 +1,7 @@
+-----BEGIN OPENSSH PRIVATE KEY-----
+b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAAAMwAAAAtzc2gtZW
+QyNTUxOQAAACAcemw8pbwuvHFaYynxBbS0Cf3ThYuj1Utr7CDqjwySHAAAAJikacCNpGnA
+jQAAAAtzc2gtZWQyNTUxOQAAACAcemw8pbwuvHFaYynxBbS0Cf3ThYuj1Utr7CDqjwySHA
+AAAECiiKk4V6Q5cVs6axDM4sjAzZn/QCZLQekmYQXS9XbEYxx6bDylvC68cVpjKfEFtLQJ
+/dOFi6PVS2vsIOqPDJIcAAAAEGplYW5nYWJAbGlsaWFuZTIBAgMEBQ==
+-----END OPENSSH PRIVATE KEY-----
examples/okd_installation/ssh_example_key.pub (new file, 1 line)

@@ -0,0 +1 @@
+ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIBx6bDylvC68cVpjKfEFtLQJ/dOFi6PVS2vsIOqPDJIc jeangab@liliane2
@@ -1,28 +1,22 @@
 use cidr::Ipv4Cidr;
 use harmony::{
-    hardware::{FirewallGroup, HostCategory, Location, PhysicalHost, SwitchGroup},
+    config::secret::OPNSenseFirewallCredentials,
+    hardware::{Location, SwitchGroup},
     infra::opnsense::OPNSenseManagementInterface,
     inventory::Inventory,
     topology::{HAClusterTopology, LogicalHost, UnmanagedRouter},
 };
 use harmony_macros::{ip, ipv4};
-use harmony_secret::{Secret, SecretManager};
-use serde::{Deserialize, Serialize};
+use harmony_secret::SecretManager;
 use std::{net::IpAddr, sync::Arc};
 
-#[derive(Secret, Serialize, Deserialize, Debug, PartialEq)]
-struct OPNSenseFirewallConfig {
-    username: String,
-    password: String,
-}
-
 pub async fn get_topology() -> HAClusterTopology {
     let firewall = harmony::topology::LogicalHost {
         ip: ip!("192.168.1.1"),
         name: String::from("opnsense-1"),
     };
 
-    let config = SecretManager::get::<OPNSenseFirewallConfig>().await;
+    let config = SecretManager::get::<OPNSenseFirewallCredentials>().await;
     let config = config.unwrap();
 
     let opnsense = Arc::new(
@@ -85,6 +85,7 @@ async fn main() {
             "./data/watchguard/pxe-http-files".to_string(),
         )),
         files: vec![],
+        remote_path: None,
     };
 
     harmony_tui::run(
@@ -67,9 +67,11 @@ base64.workspace = true
 thiserror.workspace = true
 once_cell = "1.21.3"
 harmony_inventory_agent = { path = "../harmony_inventory_agent" }
-harmony_secret_derive = { version = "0.1.0", path = "../harmony_secret_derive" }
+harmony_secret_derive = { path = "../harmony_secret_derive" }
+harmony_secret = { path = "../harmony_secret" }
 askama.workspace = true
 sqlx.workspace = true
+inquire.workspace = true
 
 [dev-dependencies]
 pretty_assertions.workspace = true
@@ -1,3 +1,5 @@
+pub mod secret;
+
 use lazy_static::lazy_static;
 use std::path::PathBuf;
 
harmony/src/domain/config/secret.rs (new file, 20 lines)

@@ -0,0 +1,20 @@
+use harmony_secret_derive::Secret;
+use serde::{Deserialize, Serialize};
+
+#[derive(Secret, Serialize, Deserialize, Debug, PartialEq)]
+pub struct OPNSenseFirewallCredentials {
+    pub username: String,
+    pub password: String,
+}
+
+// TODO we need a better way to handle multiple "instances" of the same secret structure.
+#[derive(Secret, Serialize, Deserialize, Debug, PartialEq)]
+pub struct SshKeyPair {
+    pub private: String,
+    pub public: String,
+}
+
+#[derive(Secret, Serialize, Deserialize, Debug, PartialEq)]
+pub struct RedhatSecret {
+    pub pull_secret: String,
+}
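These shared secret definitions replace the per-example `OPNSenseFirewallConfig` struct. A minimal sketch of how a caller retrieves them, mirroring the `SecretManager::get` call made in the reworked example topology earlier in this diff (import paths are taken from that example; which store backs the lookup is driven by the `HARMONY_SECRET_*` variables in env.sh):

```rust
use harmony::config::secret::OPNSenseFirewallCredentials;
use harmony_secret::SecretManager;

// Hedged sketch: load the firewall credentials from whichever secret store is configured.
async fn firewall_credentials() -> OPNSenseFirewallCredentials {
    SecretManager::get::<OPNSenseFirewallCredentials>()
        .await
        .expect("OPNSense credentials must exist in the secret store")
}
```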
@@ -10,7 +10,7 @@ pub type HostGroup = Vec<PhysicalHost>;
 pub type SwitchGroup = Vec<Switch>;
 pub type FirewallGroup = Vec<PhysicalHost>;
 
-#[derive(Debug, Clone, Serialize)]
+#[derive(Debug, Clone, Serialize, Deserialize)]
 pub struct PhysicalHost {
     pub id: Id,
     pub category: HostCategory,
@@ -173,6 +173,10 @@ impl PhysicalHost {
         self
     }
 
+    pub fn get_mac_address(&self) -> Vec<MacAddress> {
+        self.network.iter().map(|nic| nic.mac_address).collect()
+    }
+
     pub fn label(mut self, name: String, value: String) -> Self {
         self.labels.push(Label { name, value });
         self
@@ -221,15 +225,6 @@ impl PhysicalHost {
 // }
 // }
 
-impl<'de> Deserialize<'de> for PhysicalHost {
-    fn deserialize<D>(_deserializer: D) -> Result<Self, D::Error>
-    where
-        D: serde::Deserializer<'de>,
-    {
-        todo!()
-    }
-}
-
 #[derive(new, Serialize)]
 pub struct ManualManagementInterface;
 
@@ -273,7 +268,7 @@ where
     }
 }
 
-#[derive(Debug, Clone, Serialize)]
+#[derive(Debug, Clone, Serialize, Deserialize)]
 pub enum HostCategory {
     Server,
     Firewall,
@@ -291,7 +286,7 @@ pub struct Switch {
     _management_interface: NetworkInterface,
 }
 
-#[derive(Debug, new, Clone, Serialize)]
+#[derive(Debug, new, Clone, Serialize, Deserialize)]
 pub struct Label {
     pub name: String,
     pub value: String,
@@ -142,6 +142,12 @@ impl From<PreparationError> for InterpretError {
     }
 }
 
+impl From<harmony_secret::SecretStoreError> for InterpretError {
+    fn from(value: harmony_secret::SecretStoreError) -> Self {
+        InterpretError::new(format!("Interpret error : {value}"))
+    }
+}
+
 impl From<ExecutorError> for InterpretError {
     fn from(value: ExecutorError) -> Self {
         Self {
@@ -17,6 +17,7 @@ impl InventoryFilter {
 
 use derive_new::new;
 use log::info;
+use serde::{Deserialize, Serialize};
 
 use crate::hardware::{ManagementInterface, ManualManagementInterface};
 
@@ -61,3 +62,11 @@ impl Inventory {
         }
     }
 }
+
+#[derive(Debug, Serialize, Deserialize, sqlx::Type)]
+pub enum HostRole {
+    Bootstrap,
+    ControlPlane,
+    Worker,
+    Storage,
+}
@@ -1,6 +1,6 @@
 use async_trait::async_trait;
 
-use crate::hardware::PhysicalHost;
+use crate::{hardware::PhysicalHost, interpret::InterpretError, inventory::HostRole};
 
 /// Errors that can occur within the repository layer.
 #[derive(thiserror::Error, Debug)]
@@ -15,6 +15,12 @@ pub enum RepoError {
     ConnectionFailed(String),
 }
 
+impl From<RepoError> for InterpretError {
+    fn from(value: RepoError) -> Self {
+        InterpretError::new(format!("Interpret error : {value}"))
+    }
+}
+
 // --- Trait and Implementation ---
 
 /// Defines the contract for inventory persistence.
@@ -22,4 +28,11 @@ pub enum RepoError {
 pub trait InventoryRepository: Send + Sync + 'static {
     async fn save(&self, host: &PhysicalHost) -> Result<(), RepoError>;
     async fn get_latest_by_id(&self, host_id: &str) -> Result<Option<PhysicalHost>, RepoError>;
+    async fn get_all_hosts(&self) -> Result<Vec<PhysicalHost>, RepoError>;
+    async fn get_host_for_role(&self, role: HostRole) -> Result<Vec<PhysicalHost>, RepoError>;
+    async fn save_role_mapping(
+        &self,
+        role: &HostRole,
+        host: &PhysicalHost,
+    ) -> Result<(), RepoError>;
 }
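The repository contract now covers role mappings as well as raw host records. A hedged usage sketch built only from the trait signatures above (the `harmony::...` import paths are assumed from the examples elsewhere in this diff):

```rust
use harmony::hardware::PhysicalHost;
use harmony::inventory::{HostRole, InventoryRepository, RepoError};

// Hedged sketch: tag every known host as a Worker, then read the mapping back.
async fn assign_workers<R: InventoryRepository>(repo: &R) -> Result<Vec<PhysicalHost>, RepoError> {
    for host in repo.get_all_hosts().await? {
        repo.save_role_mapping(&HostRole::Worker, &host).await?;
    }
    repo.get_host_for_role(HostRole::Worker).await
}
```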
@@ -69,6 +69,26 @@ impl K8sclient for HAClusterTopology {
 }
 
 impl HAClusterTopology {
+    // TODO this is a hack to avoid refactoring
+    pub fn get_cluster_name(&self) -> String {
+        self.domain_name
+            .split(".")
+            .next()
+            .expect("Cluster domain name must not be empty")
+            .to_string()
+    }
+
+    pub fn get_cluster_base_domain(&self) -> String {
+        let base_domain = self
+            .domain_name
+            .strip_prefix(&self.get_cluster_name())
+            .expect("cluster domain must start with cluster name");
+        base_domain
+            .strip_prefix(".")
+            .unwrap_or(base_domain)
+            .to_string()
+    }
+
     pub fn autoload() -> Self {
         let dummy_infra = Arc::new(DummyInfra {});
         let dummy_host = LogicalHost {
@@ -161,6 +181,14 @@ impl DhcpServer for HAClusterTopology {
         self.dhcp_server.set_pxe_options(options).await
     }
 
+    async fn set_dhcp_range(
+        &self,
+        start: &IpAddress,
+        end: &IpAddress,
+    ) -> Result<(), ExecutorError> {
+        self.dhcp_server.set_dhcp_range(start, end).await
+    }
+
     fn get_ip(&self) -> IpAddress {
         self.dhcp_server.get_ip()
     }
@@ -209,8 +237,12 @@ impl Router for HAClusterTopology {
 
 #[async_trait]
 impl HttpServer for HAClusterTopology {
-    async fn serve_files(&self, url: &Url) -> Result<(), ExecutorError> {
-        self.http_server.serve_files(url).await
+    async fn serve_files(
+        &self,
+        url: &Url,
+        remote_path: &Option<String>,
+    ) -> Result<(), ExecutorError> {
+        self.http_server.serve_files(url, remote_path).await
     }
 
     async fn serve_file_content(&self, file: &FileContent) -> Result<(), ExecutorError> {
@@ -298,6 +330,13 @@ impl DhcpServer for DummyInfra {
     async fn set_pxe_options(&self, _options: PxeOptions) -> Result<(), ExecutorError> {
         unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA)
     }
+    async fn set_dhcp_range(
+        &self,
+        start: &IpAddress,
+        end: &IpAddress,
+    ) -> Result<(), ExecutorError> {
+        unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA)
+    }
     fn get_ip(&self) -> IpAddress {
         unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA)
     }
@@ -362,7 +401,11 @@ impl TftpServer for DummyInfra {
 
 #[async_trait]
 impl HttpServer for DummyInfra {
-    async fn serve_files(&self, _url: &Url) -> Result<(), ExecutorError> {
+    async fn serve_files(
+        &self,
+        _url: &Url,
+        _remote_path: &Option<String>,
+    ) -> Result<(), ExecutorError> {
         unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA)
     }
     async fn serve_file_content(&self, _file: &FileContent) -> Result<(), ExecutorError> {
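The two new helpers split `domain_name` into a cluster name and a base domain. A standalone illustration of the same string handling, applied to the `demo.harmony.mcd` domain used by the example topology (logic copied from the methods above, outside the struct):

```rust
fn cluster_name(domain_name: &str) -> String {
    // First label of the domain, e.g. "demo" for "demo.harmony.mcd".
    domain_name
        .split('.')
        .next()
        .expect("Cluster domain name must not be empty")
        .to_string()
}

fn cluster_base_domain(domain_name: &str) -> String {
    // Everything after the cluster name, without the leading dot.
    let base = domain_name
        .strip_prefix(&cluster_name(domain_name))
        .expect("cluster domain must start with cluster name");
    base.strip_prefix('.').unwrap_or(base).to_string()
}

fn main() {
    assert_eq!(cluster_name("demo.harmony.mcd"), "demo");
    assert_eq!(cluster_base_domain("demo.harmony.mcd"), "harmony.mcd");
}
```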
@@ -5,7 +5,11 @@ use harmony_types::net::IpAddress;
 use harmony_types::net::Url;
 #[async_trait]
 pub trait HttpServer: Send + Sync {
-    async fn serve_files(&self, url: &Url) -> Result<(), ExecutorError>;
+    async fn serve_files(
+        &self,
+        url: &Url,
+        remote_path: &Option<String>,
+    ) -> Result<(), ExecutorError>;
     async fn serve_file_content(&self, file: &FileContent) -> Result<(), ExecutorError>;
     fn get_ip(&self) -> IpAddress;
 
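With the extra `remote_path` argument, callers can land an uploaded folder under a sub-directory of the server's HTTP root instead of the root itself. A hedged sketch of a call site under that assumption (the trait path and the "okd" sub-directory are illustrative; `Url::LocalFolder` is the variant used elsewhere in this diff):

```rust
use harmony::topology::HttpServer;
use harmony_types::net::Url;

// Hedged sketch: publish a local folder under <http-root>/okd on whatever server the topology provides.
async fn publish_boot_files<S: HttpServer>(server: &S, local_folder: &str) {
    let url = Url::LocalFolder(local_folder.to_string());
    // Passing None instead keeps the old behaviour and uploads straight into the HTTP root.
    server
        .serve_files(&url, &Some("okd".to_string()))
        .await
        .expect("upload to the HTTP server failed");
}
```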
@@ -11,15 +11,21 @@ use super::{LogicalHost, k8s::K8sClient};
 #[derive(Debug)]
 pub struct DHCPStaticEntry {
     pub name: String,
-    pub mac: MacAddress,
+    pub mac: Vec<MacAddress>,
     pub ip: Ipv4Addr,
 }
 
 impl std::fmt::Display for DHCPStaticEntry {
     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        let mac = self
+            .mac
+            .iter()
+            .map(|m| m.to_string())
+            .collect::<Vec<String>>()
+            .join(",");
         f.write_fmt(format_args!(
             "DHCPStaticEntry : name {}, mac {}, ip {}",
-            self.name, self.mac, self.ip
+            self.name, mac, self.ip
         ))
     }
 }
@@ -41,6 +47,7 @@ impl std::fmt::Debug for dyn Firewall {
 pub struct NetworkDomain {
     pub name: String,
 }
+
 #[async_trait]
 pub trait K8sclient: Send + Sync {
     async fn k8s_client(&self) -> Result<Arc<K8sClient>, String>;
@@ -59,6 +66,8 @@ pub trait DhcpServer: Send + Sync + std::fmt::Debug {
     async fn remove_static_mapping(&self, mac: &MacAddress) -> Result<(), ExecutorError>;
     async fn list_static_mappings(&self) -> Vec<(MacAddress, IpAddress)>;
     async fn set_pxe_options(&self, pxe_options: PxeOptions) -> Result<(), ExecutorError>;
+    async fn set_dhcp_range(&self, start: &IpAddress, end: &IpAddress)
+    -> Result<(), ExecutorError>;
     fn get_ip(&self) -> IpAddress;
     fn get_host(&self) -> LogicalHost;
     async fn commit_config(&self) -> Result<(), ExecutorError>;
@@ -1,6 +1,6 @@
 use crate::{
     hardware::PhysicalHost,
-    inventory::{InventoryRepository, RepoError},
+    inventory::{HostRole, InventoryRepository, RepoError},
 };
 use async_trait::async_trait;
 use harmony_types::id::Id;
@@ -46,20 +46,103 @@ impl InventoryRepository for SqliteInventoryRepository {
     }
 
     async fn get_latest_by_id(&self, host_id: &str) -> Result<Option<PhysicalHost>, RepoError> {
-        let _row = sqlx::query_as!(
+        let row = sqlx::query_as!(
            DbHost,
            r#"SELECT id, version_id, data as "data: Json<PhysicalHost>" FROM physical_hosts WHERE id = ? ORDER BY version_id DESC LIMIT 1"#,
            host_id
        )
        .fetch_optional(&self.pool)
        .await?;
-        todo!()
+
+        Ok(row.map(|r| r.data.0))
+    }
+
+    async fn get_all_hosts(&self) -> Result<Vec<PhysicalHost>, RepoError> {
+        let db_hosts = sqlx::query_as!(
+            DbHost,
+            r#"
+            SELECT
+                p1.id,
+                p1.version_id,
+                p1.data as "data: Json<PhysicalHost>"
+            FROM
+                physical_hosts p1
+            INNER JOIN (
+                SELECT
+                    id,
+                    MAX(version_id) AS max_version
+                FROM
+                    physical_hosts
+                GROUP BY
+                    id
+            ) p2 ON p1.id = p2.id AND p1.version_id = p2.max_version
+            "#
+        )
+        .fetch_all(&self.pool)
+        .await?;
+
+        let hosts = db_hosts.into_iter().map(|row| row.data.0).collect();
+
+        Ok(hosts)
+    }
+
+    async fn save_role_mapping(
+        &self,
+        role: &HostRole,
+        host: &PhysicalHost,
+    ) -> Result<(), RepoError> {
+        let host_id = host.id.to_string();
+
+        sqlx::query!(
+            r#"
+            INSERT INTO host_role_mapping (host_id, role)
+            VALUES (?, ?)
+            "#,
+            host_id,
+            role
+        )
+        .execute(&self.pool)
+        .await?;
+
+        info!("Saved role mapping for host '{}' as '{:?}'", host.id, role);
+
+        Ok(())
+    }
+    async fn get_host_for_role(&self, role: HostRole) -> Result<Vec<PhysicalHost>, RepoError> {
+        struct HostIdRow {
+            host_id: String,
+        }
+
+        let role_str = format!("{:?}", role);
+
+        let host_id_rows = sqlx::query_as!(
+            HostIdRow,
+            "SELECT host_id FROM host_role_mapping WHERE role = ?",
+            role_str
+        )
+        .fetch_all(&self.pool)
+        .await?;
+
+        let mut hosts = Vec::with_capacity(host_id_rows.len());
+        for row in host_id_rows {
+            match self.get_latest_by_id(&row.host_id).await? {
+                Some(host) => hosts.push(host),
+                None => {
+                    log::warn!(
+                        "Found a role mapping for host_id '{}', but the host does not exist in the physical_hosts table. This may indicate a data integrity issue.",
+                        row.host_id
+                    );
+                }
+            }
+        }
+
+        Ok(hosts)
     }
 }
 
 use sqlx::types::Json;
 struct DbHost {
     data: Json<PhysicalHost>,
-    id: Id,
-    version_id: Id,
+    id: String,
+    version_id: String,
 }
@@ -17,13 +17,13 @@ impl DhcpServer for OPNSenseFirewall {
     }
 
     async fn add_static_mapping(&self, entry: &DHCPStaticEntry) -> Result<(), ExecutorError> {
-        let mac: String = String::from(&entry.mac);
+        let mac: Vec<String> = entry.mac.iter().map(MacAddress::to_string).collect();
 
         {
             let mut writable_opnsense = self.opnsense_config.write().await;
             writable_opnsense
                 .dhcp()
-                .add_static_mapping(&mac, entry.ip, &entry.name)
+                .add_static_mapping(&mac, &entry.ip, &entry.name)
                 .unwrap();
         }
 
@@ -68,4 +68,19 @@ impl DhcpServer for OPNSenseFirewall {
             ExecutorError::UnexpectedError(format!("Failed to set_pxe_options : {dhcp_error}"))
         })
     }
+
+    async fn set_dhcp_range(
+        &self,
+        start: &IpAddress,
+        end: &IpAddress,
+    ) -> Result<(), ExecutorError> {
+        let mut writable_opnsense = self.opnsense_config.write().await;
+        writable_opnsense
+            .dhcp()
+            .set_dhcp_range(&start.to_string(), &end.to_string())
+            .await
+            .map_err(|dhcp_error| {
+                ExecutorError::UnexpectedError(format!("Failed to set_dhcp_range : {dhcp_error}"))
+            })
+    }
 }
@@ -12,21 +12,22 @@ use super::OPNSenseFirewall;
 #[async_trait]
 impl DnsServer for OPNSenseFirewall {
     async fn register_hosts(&self, hosts: Vec<DnsRecord>) -> Result<(), ExecutorError> {
-        let mut writable_opnsense = self.opnsense_config.write().await;
-        let mut dns = writable_opnsense.dns();
-        let hosts = hosts
-            .iter()
-            .map(|h| {
-                Host::new(
-                    h.host.clone(),
-                    h.domain.clone(),
-                    h.record_type.to_string(),
-                    h.value.to_string(),
-                )
-            })
-            .collect();
-        dns.register_hosts(hosts);
-        Ok(())
+        todo!("Refactor this to use dnsmasq")
+        // let mut writable_opnsense = self.opnsense_config.write().await;
+        // let mut dns = writable_opnsense.dns();
+        // let hosts = hosts
+        //     .iter()
+        //     .map(|h| {
+        //         Host::new(
+        //             h.host.clone(),
+        //             h.domain.clone(),
+        //             h.record_type.to_string(),
+        //             h.value.to_string(),
+        //         )
+        //     })
+        //     .collect();
+        // dns.add_static_mapping(hosts);
+        // Ok(())
     }
 
     fn remove_record(
@@ -38,25 +39,26 @@ impl DnsServer for OPNSenseFirewall {
     }
 
     async fn list_records(&self) -> Vec<crate::topology::DnsRecord> {
-        self.opnsense_config
-            .write()
-            .await
-            .dns()
-            .get_hosts()
-            .iter()
-            .map(|h| DnsRecord {
-                host: h.hostname.clone(),
-                domain: h.domain.clone(),
-                record_type: h
-                    .rr
-                    .parse()
-                    .expect("received invalid record type {h.rr} from opnsense"),
-                value: h
-                    .server
-                    .parse()
-                    .expect("received invalid ipv4 record from opnsense {h.server}"),
-            })
-            .collect()
+        todo!("Refactor this to use dnsmasq")
+        // self.opnsense_config
+        //     .write()
+        //     .await
+        //     .dns()
+        //     .get_hosts()
+        //     .iter()
+        //     .map(|h| DnsRecord {
+        //         host: h.hostname.clone(),
+        //         domain: h.domain.clone(),
+        //         record_type: h
+        //             .rr
+        //             .parse()
+        //             .expect("received invalid record type {h.rr} from opnsense"),
+        //         value: h
+        //             .server
+        //             .parse()
+        //             .expect("received invalid ipv4 record from opnsense {h.server}"),
+        //     })
+        //     .collect()
     }
 
     fn get_ip(&self) -> IpAddress {
@@ -68,11 +70,12 @@ impl DnsServer for OPNSenseFirewall {
     }
 
     async fn register_dhcp_leases(&self, register: bool) -> Result<(), ExecutorError> {
-        let mut writable_opnsense = self.opnsense_config.write().await;
-        let mut dns = writable_opnsense.dns();
-        dns.register_dhcp_leases(register);
-
-        Ok(())
+        todo!("Refactor this to use dnsmasq")
+        // let mut writable_opnsense = self.opnsense_config.write().await;
+        // let mut dns = writable_opnsense.dns();
+        // dns.register_dhcp_leases(register);
+        //
+        // Ok(())
     }
 
     async fn commit_config(&self) -> Result<(), ExecutorError> {
@@ -10,13 +10,21 @@ const OPNSENSE_HTTP_ROOT_PATH: &str = "/usr/local/http";
 
 #[async_trait]
 impl HttpServer for OPNSenseFirewall {
-    async fn serve_files(&self, url: &Url) -> Result<(), ExecutorError> {
+    async fn serve_files(
+        &self,
+        url: &Url,
+        remote_path: &Option<String>,
+    ) -> Result<(), ExecutorError> {
         let config = self.opnsense_config.read().await;
         info!("Uploading files from url {url} to {OPNSENSE_HTTP_ROOT_PATH}");
+        let remote_upload_path = remote_path
+            .clone()
+            .map(|r| format!("{OPNSENSE_HTTP_ROOT_PATH}/{r}"))
+            .unwrap_or(OPNSENSE_HTTP_ROOT_PATH.to_string());
         match url {
             Url::LocalFolder(path) => {
                 config
-                    .upload_files(path, OPNSENSE_HTTP_ROOT_PATH)
+                    .upload_files(path, &remote_upload_path)
                     .await
                     .map_err(|e| ExecutorError::UnexpectedError(e.to_string()))?;
             }
@@ -1,7 +1,7 @@
 use async_trait::async_trait;
 use derive_new::new;
 use harmony_types::id::Id;
-use log::info;
+use log::{info, trace};
 use serde::Serialize;
 
 use crate::{
@@ -22,6 +22,8 @@ pub struct DhcpScore {
     pub filename: Option<String>,
     pub filename64: Option<String>,
     pub filenameipxe: Option<String>,
+    pub dhcp_range: (IpAddress, IpAddress),
+    pub domain: Option<String>,
 }
 
 impl<T: Topology + DhcpServer> Score<T> for DhcpScore {
@@ -52,48 +54,6 @@ impl DhcpInterpret {
             status: InterpretStatus::QUEUED,
         }
     }
-    async fn add_static_entries<D: DhcpServer>(
-        &self,
-        _inventory: &Inventory,
-        dhcp_server: &D,
-    ) -> Result<Outcome, InterpretError> {
-        let dhcp_entries: Vec<DHCPStaticEntry> = self
-            .score
-            .host_binding
-            .iter()
-            .map(|binding| {
-                let ip = match binding.logical_host.ip {
-                    std::net::IpAddr::V4(ipv4) => ipv4,
-                    std::net::IpAddr::V6(_) => {
-                        unimplemented!("DHCPStaticEntry only supports ipv4 at the moment")
-                    }
-                };
-
-                DHCPStaticEntry {
-                    name: binding.logical_host.name.clone(),
-                    mac: binding.physical_host.cluster_mac(),
-                    ip,
-                }
-            })
-            .collect();
-        info!("DHCPStaticEntry : {:?}", dhcp_entries);
-
-        info!("DHCP server : {:?}", dhcp_server);
-
-        let number_new_entries = dhcp_entries.len();
-
-        for entry in dhcp_entries.into_iter() {
-            match dhcp_server.add_static_mapping(&entry).await {
-                Ok(_) => info!("Successfully registered DHCPStaticEntry {}", entry),
-                Err(_) => todo!(),
-            }
-        }
-
-        Ok(Outcome::new(
-            InterpretStatus::SUCCESS,
-            format!("Dhcp Interpret registered {} entries", number_new_entries),
-        ))
-    }
 
     async fn set_pxe_options<D: DhcpServer>(
         &self,
@@ -124,7 +84,7 @@ impl DhcpInterpret {
 }
 
 #[async_trait]
-impl<T: DhcpServer> Interpret<T> for DhcpInterpret {
+impl<T: Topology + DhcpServer> Interpret<T> for DhcpInterpret {
     fn get_name(&self) -> InterpretName {
         InterpretName::OPNSenseDHCP
     }
@@ -149,8 +109,16 @@ impl<T: DhcpServer> Interpret<T> for DhcpInterpret {
         info!("Executing DhcpInterpret on inventory {inventory:?}");
 
         self.set_pxe_options(inventory, topology).await?;
+        topology
+            .set_dhcp_range(&self.score.dhcp_range.0, &self.score.dhcp_range.1)
+            .await?;
 
-        self.add_static_entries(inventory, topology).await?;
+        DhcpHostBindingScore {
+            host_binding: self.score.host_binding.clone(),
+            domain: self.score.domain.clone(),
+        }
+        .interpret(inventory, topology)
+        .await?;
 
         topology.commit_config().await?;
 
@@ -160,3 +128,120 @@ impl<T: DhcpServer> Interpret<T> for DhcpInterpret {
         ))
     }
 }
+
+#[derive(Debug, new, Clone, Serialize)]
+pub struct DhcpHostBindingScore {
+    pub host_binding: Vec<HostBinding>,
+    pub domain: Option<String>,
+}
+
+impl<T: Topology + DhcpServer> Score<T> for DhcpHostBindingScore {
+    fn create_interpret(&self) -> Box<dyn Interpret<T>> {
+        Box::new(DhcpHostBindingInterpret {
+            score: self.clone(),
+        })
+    }
+
+    fn name(&self) -> String {
+        "DhcpHostBindingScore".to_string()
+    }
+}
+
+// https://docs.opnsense.org/manual/dhcp.html#advanced-settings
+#[derive(Debug, Clone)]
+pub struct DhcpHostBindingInterpret {
+    score: DhcpHostBindingScore,
+}
+
+impl DhcpHostBindingInterpret {
+    async fn add_static_entries<D: DhcpServer>(
+        &self,
+        _inventory: &Inventory,
+        dhcp_server: &D,
+    ) -> Result<Outcome, InterpretError> {
+        let dhcp_entries: Vec<DHCPStaticEntry> = self
+            .score
+            .host_binding
+            .iter()
+            .map(|binding| {
+                let ip = match binding.logical_host.ip {
+                    std::net::IpAddr::V4(ipv4) => ipv4,
+                    std::net::IpAddr::V6(_) => {
+                        unimplemented!("DHCPStaticEntry only supports ipv4 at the moment")
+                    }
+                };
+
+                let name = if let Some(domain) = self.score.domain.as_ref() {
+                    format!("{}.{}", binding.logical_host.name, domain)
+                } else {
+                    binding.logical_host.name.clone()
+                };
+
+                DHCPStaticEntry {
+                    name,
+                    mac: binding.physical_host.get_mac_address(),
+                    ip,
+                }
+            })
+            .collect();
+        info!("DHCPStaticEntry : {:?}", dhcp_entries);
+
+        trace!("DHCP server : {:?}", dhcp_server);
+
+        let number_new_entries = dhcp_entries.len();
+
+        for entry in dhcp_entries.into_iter() {
+            match dhcp_server.add_static_mapping(&entry).await {
+                Ok(_) => info!("Successfully registered DHCPStaticEntry {}", entry),
+                Err(_) => todo!(),
+            }
+        }
+
+        Ok(Outcome::new(
+            InterpretStatus::SUCCESS,
+            format!("Dhcp Interpret registered {} entries", number_new_entries),
+        ))
+    }
+}
+
+#[async_trait]
+impl<T: DhcpServer> Interpret<T> for DhcpHostBindingInterpret {
+    fn get_name(&self) -> InterpretName {
+        InterpretName::Custom("DhcpHostBindingInterpret")
+    }
+
+    fn get_version(&self) -> crate::domain::data::Version {
+        Version::from("1.0.0").unwrap()
+    }
+
+    fn get_status(&self) -> InterpretStatus {
+        todo!()
+    }
+
+    fn get_children(&self) -> Vec<Id> {
+        todo!()
+    }
+
+    async fn execute(
+        &self,
+        inventory: &Inventory,
+        topology: &T,
+    ) -> Result<Outcome, InterpretError> {
+        info!(
+            "Executing DhcpHostBindingInterpret on {} bindings",
+            self.score.host_binding.len()
+        );
+
+        self.add_static_entries(inventory, topology).await?;
+
+        topology.commit_config().await?;
+
+        Ok(Outcome::new(
+            InterpretStatus::SUCCESS,
+            format!(
+                "Dhcp Host Binding Interpret execution successful on {} hosts",
+                self.score.host_binding.len()
+            ),
+        ))
+    }
+}
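The static-entry logic now lives in `DhcpHostBindingInterpret`, and when the score carries a domain the DHCP entry name becomes the host's fully qualified name. A standalone illustration of that naming rule (same `format!` as above; the values are illustrative):

```rust
fn dhcp_entry_name(host_name: &str, domain: Option<&str>) -> String {
    match domain {
        Some(domain) => format!("{}.{}", host_name, domain),
        None => host_name.to_string(),
    }
}

fn main() {
    assert_eq!(
        dhcp_entry_name("bootstrap", Some("demo.harmony.mcd")),
        "bootstrap.demo.harmony.mcd"
    );
    assert_eq!(dhcp_entry_name("bootstrap", None), "bootstrap");
}
```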
@@ -3,14 +3,14 @@ use derive_new::new;
 use serde::Serialize;
 
 use crate::{
-    data::{FileContent, Version},
+    data::{FileContent, FilePath, Version},
     interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
     inventory::Inventory,
     score::Score,
     topology::{HttpServer, Topology},
 };
-use harmony_types::id::Id;
 use harmony_types::net::Url;
+use harmony_types::{id::Id, net::MacAddress};
 
 /// Configure an HTTP server that is provided by the Topology
 ///
@@ -25,8 +25,11 @@ use harmony_types::net::Url;
 /// ```
 #[derive(Debug, new, Clone, Serialize)]
 pub struct StaticFilesHttpScore {
+    // TODO this should be split in two scores, one for folder and
+    // other for files
     pub folder_to_serve: Option<Url>,
     pub files: Vec<FileContent>,
+    pub remote_path: Option<String>,
 }
 
 impl<T: Topology + HttpServer> Score<T> for StaticFilesHttpScore {
@@ -54,7 +57,9 @@ impl<T: Topology + HttpServer> Interpret<T> for StaticFilesHttpInterpret {
         http_server.ensure_initialized().await?;
         // http_server.set_ip(topology.router.get_gateway()).await?;
         if let Some(folder) = self.score.folder_to_serve.as_ref() {
-            http_server.serve_files(folder).await?;
+            http_server
+                .serve_files(folder, &self.score.remote_path)
+                .await?;
         }
 
         for f in self.score.files.iter() {
@@ -91,3 +96,34 @@ impl<T: Topology + HttpServer> Interpret<T> for StaticFilesHttpInterpret {
         todo!()
     }
 }
+
+#[derive(Debug, new, Clone, Serialize)]
+pub struct IPxeMacBootFileScore {
+    pub content: String,
+    pub mac_address: Vec<MacAddress>,
+}
+
+impl<T: Topology + HttpServer> Score<T> for IPxeMacBootFileScore {
+    fn name(&self) -> String {
+        "IPxeMacBootFileScore".to_string()
+    }
+
+    fn create_interpret(&self) -> Box<dyn Interpret<T>> {
+        StaticFilesHttpScore {
+            remote_path: None,
+            folder_to_serve: None,
+            files: self
+                .mac_address
+                .iter()
+                .map(|mac| FileContent {
+                    path: FilePath::Relative(format!(
+                        "byMAC/01-{}.ipxe",
+                        mac.to_string().replace(":", "-")
+                    )),
+                    content: self.content.clone(),
+                })
+                .collect(),
+        }
+        .create_interpret()
+    }
+}
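`IPxeMacBootFileScore` turns each MAC address into a per-host iPXE boot file served under `byMAC/`. A standalone illustration of the path it generates (same format string as above; the MAC value is illustrative):

```rust
fn ipxe_boot_file_path(mac: &str) -> String {
    format!("byMAC/01-{}.ipxe", mac.replace(":", "-"))
}

fn main() {
    assert_eq!(
        ipxe_boot_file_path("aa:bb:cc:dd:ee:ff"),
        "byMAC/01-aa-bb-cc-dd-ee-ff.ipxe"
    );
}
```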
@@ -18,11 +18,11 @@ use harmony_types::id::Id;
 /// This will allow us to register/update hosts running harmony_inventory_agent
 /// from LAN in the Harmony inventory
 #[derive(Debug, Clone, Serialize, Deserialize)]
-pub struct DiscoverInventoryAgentScore {
+pub struct LaunchDiscoverInventoryAgentScore {
     pub discovery_timeout: Option<u64>,
 }
 
-impl<T: Topology> Score<T> for DiscoverInventoryAgentScore {
+impl<T: Topology> Score<T> for LaunchDiscoverInventoryAgentScore {
     fn name(&self) -> String {
         "DiscoverInventoryAgentScore".to_string()
     }
@@ -36,7 +36,7 @@ impl<T: Topology> Score<T> for DiscoverInventoryAgentScore {
 
 #[derive(Debug)]
 struct DiscoverInventoryAgentInterpret {
-    score: DiscoverInventoryAgentScore,
+    score: LaunchDiscoverInventoryAgentScore,
 }
 
 #[async_trait]
@@ -46,6 +46,13 @@ impl<T: Topology> Interpret<T> for DiscoverInventoryAgentInterpret {
         _inventory: &Inventory,
         _topology: &T,
     ) -> Result<Outcome, InterpretError> {
+        match self.score.discovery_timeout {
+            Some(timeout) => info!("Discovery agent will wait for {timeout} seconds"),
+            None => info!(
+                "Discovery agent will wait forever in the background, go on and enjoy this delicious inventory."
+            ),
+        };
+
         harmony_inventory_agent::local_presence::discover_agents(
             self.score.discovery_timeout,
             |event: DiscoveryEvent| -> Result<(), String> {
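The rename is purely at the type level; construction is unchanged. A hedged sketch matching the updated example earlier in this diff (import path taken from that example):

```rust
use harmony::modules::inventory::LaunchDiscoverInventoryAgentScore;

// Hedged sketch: wait up to ten seconds for inventory agents to announce themselves.
fn discovery_score() -> LaunchDiscoverInventoryAgentScore {
    LaunchDiscoverInventoryAgentScore {
        discovery_timeout: Some(10),
    }
}
```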
@@ -1,67 +0,0 @@
-use async_trait::async_trait;
-use derive_new::new;
-use serde::Serialize;
-
-use crate::{
-    data::Version,
-    interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
-    inventory::Inventory,
-    score::Score,
-    topology::Topology,
-};
-use harmony_types::id::Id;
-
-#[derive(Debug, new, Clone, Serialize)]
-pub struct IpxeScore {
-    //files_to_serve: Url,
-}
-
-impl<T: Topology> Score<T> for IpxeScore {
-    fn create_interpret(&self) -> Box<dyn Interpret<T>> {
-        Box::new(IpxeInterpret::new(self.clone()))
-    }
-
-    fn name(&self) -> String {
-        "IpxeScore".to_string()
-    }
-}
-
-#[derive(Debug, new, Clone)]
-pub struct IpxeInterpret {
-    _score: IpxeScore,
-}
-
-#[async_trait]
-impl<T: Topology> Interpret<T> for IpxeInterpret {
-    async fn execute(
-        &self,
-        _inventory: &Inventory,
-        _topology: &T,
-    ) -> Result<Outcome, InterpretError> {
-        /*
-        let http_server = &topology.http_server;
-        http_server.ensure_initialized().await?;
-        Ok(Outcome::success(format!(
-            "Http Server running and serving files from {}",
-            self.score.files_to_serve
-        )))
-        */
-        todo!();
-    }
-
-    fn get_name(&self) -> InterpretName {
-        InterpretName::Ipxe
-    }
-
-    fn get_version(&self) -> Version {
-        todo!()
-    }
-
-    fn get_status(&self) -> InterpretStatus {
-        todo!()
-    }
-
-    fn get_children(&self) -> Vec<Id> {
-        todo!()
-    }
-}
@@ -6,7 +6,6 @@ pub mod dummy;
 pub mod helm;
 pub mod http;
 pub mod inventory;
-pub mod ipxe;
 pub mod k3d;
 pub mod k8s;
 pub mod lamp;
@@ -37,21 +37,23 @@ impl OKDBootstrapDhcpScore {
                 .clone(),
         });
         // TODO refactor this so it is not copy pasted from dhcp.rs
-        Self {
-            dhcp_score: DhcpScore::new(
-                host_binding,
-                // TODO : we should add a tftp server to the topology instead of relying on the
-                // router address, this is leaking implementation details
-                Some(topology.router.get_gateway()),
-                None, // To allow UEFI boot we cannot provide a legacy file
-                Some("undionly.kpxe".to_string()),
-                Some("ipxe.efi".to_string()),
-                Some(format!(
-                    "http://{}:8080/boot.ipxe",
-                    topology.router.get_gateway()
-                )),
-            ),
-        }
+        todo!("Add dhcp range")
+        // Self {
+        //     dhcp_score: DhcpScore::new(
+        //         host_binding,
+        //         // TODO : we should add a tftp server to the topology instead of relying on the
+        //         // router address, this is leaking implementation details
+        //         Some(topology.router.get_gateway()),
+        //         None, // To allow UEFI boot we cannot provide a legacy file
+        //         Some("undionly.kpxe".to_string()),
+        //         Some("ipxe.efi".to_string()),
+        //         Some(format!(
+        //             "http://{}:8080/boot.ipxe",
+        //             topology.router.get_gateway()
+        //         )),
+        //         (self.),
+        //     ),
+        // }
     }
 }
 
@@ -54,6 +54,7 @@ impl OKDBootstrapLoadBalancerScore {
             },
         }
     }
 
 fn topology_to_backend_server(topology: &HAClusterTopology, port: u16) -> Vec<BackendServer> {
     let mut backend: Vec<_> = topology
         .control_plane
@@ -63,6 +64,14 @@ impl OKDBootstrapLoadBalancerScore {
             port,
         })
         .collect();
 
+    topology.workers.iter().for_each(|worker| {
+        backend.push(BackendServer {
+            address: worker.ip.to_string(),
+            port,
+        })
+    });
+
     backend.push(BackendServer {
         address: topology.bootstrap_host.ip.to_string(),
         port,
@@ -1,3 +1,6 @@
+use std::net::Ipv4Addr;
+
+use harmony_types::net::IpAddress;
 use serde::Serialize;
 
 use crate::{
@@ -44,6 +47,16 @@ impl OKDDhcpScore {
             })
         });
 
+        let dhcp_server_ip = match topology.dhcp_server.get_ip() {
+            std::net::IpAddr::V4(ipv4_addr) => ipv4_addr,
+            std::net::IpAddr::V6(_ipv6_addr) => todo!("Support ipv6 someday"),
+        };
+
+        // TODO this could overflow, we should use proper subnet maths here instead of an ip
+        // address and guessing the subnet size from there
+        let start = Ipv4Addr::from(u32::from(dhcp_server_ip) + 100);
+        let end = Ipv4Addr::from(u32::from(dhcp_server_ip) + 150);
+
         Self {
             // TODO : we should add a tftp server to the topology instead of relying on the
             // router address, this is leaking implementation details
@@ -57,6 +70,8 @@ impl OKDDhcpScore {
                 "http://{}:8080/boot.ipxe",
                 topology.router.get_gateway()
             )),
+            dhcp_range: (IpAddress::from(start), IpAddress::from(end)),
+            domain: Some(topology.domain_name.clone()),
         },
     }
 }
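The +100/+150 offsets above can walk past the end of a small subnet, as the TODO notes. A std-only sketch of the "proper subnet maths" fix; the prefix length is an assumption here, since the topology currently exposes only an IP address:

use std::net::Ipv4Addr;

fn dhcp_range(server: Ipv4Addr, prefix_len: u8) -> Option<(Ipv4Addr, Ipv4Addr)> {
    if prefix_len >= 31 {
        return None; // no usable host range in /31 or /32
    }
    let mask: u32 = if prefix_len == 0 {
        0
    } else {
        u32::MAX << (32 - prefix_len)
    };
    let network = u32::from(server) & mask;
    let last_host = (network | !mask) - 1; // broadcast - 1
    let start = u32::from(server).saturating_add(100);
    let end = u32::from(server).saturating_add(150).min(last_host);
    if start > last_host {
        return None; // subnet too small for a +100 offset
    }
    Some((Ipv4Addr::from(start), Ipv4Addr::from(end)))
}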
@@ -44,24 +44,43 @@
 //! which must be configured on host AND switch to connect properly.
 //!
 //! Configuration knobs
-//! - lan_cidr: CIDR to scan/allow for discovery endpoints.
 //! - public_domain: External wildcard/apps domain (e.g., apps.example.com).
 //! - internal_domain: Internal cluster domain (e.g., cluster.local or harmony.mcd).
 
+use std::{fmt::Write, path::PathBuf, process::ExitStatus};
+
 use async_trait::async_trait;
 use derive_new::new;
-use harmony_macros::ip;
-use harmony_types::id::Id;
-use log::info;
+use harmony_secret::SecretManager;
+use harmony_types::{id::Id, net::Url};
+use log::{debug, error, info, warn};
 use serde::{Deserialize, Serialize};
+use tokio::{
+    fs::File,
+    io::{AsyncReadExt, AsyncWriteExt},
+    process::Command,
+};
 
 use crate::{
-    data::Version,
+    config::secret::{RedhatSecret, SshKeyPair},
+    data::{FileContent, FilePath, Version},
+    hardware::PhysicalHost,
+    infra::inventory::InventoryRepositoryFactory,
     instrumentation::{HarmonyEvent, instrument},
     interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
-    inventory::Inventory,
+    inventory::{HostRole, Inventory},
+    modules::{
+        dhcp::DhcpHostBindingScore,
+        http::{IPxeMacBootFileScore, StaticFilesHttpScore},
+        inventory::LaunchDiscoverInventoryAgentScore,
+        okd::{
+            bootstrap_load_balancer::OKDBootstrapLoadBalancerScore,
+            dns::OKDDnsScore,
+            templates::{BootstrapIpxeTpl, InstallConfigYaml},
+        },
+    },
     score::Score,
-    topology::{DnsRecord, DnsRecordType, DnsServer, Topology},
+    topology::{HAClusterTopology, HostBinding},
 };
 
 // -------------------------------------------------------------------------------------------------
@@ -69,17 +88,10 @@ use crate::{
 // -------------------------------------------------------------------------------------------------
 
 #[derive(Debug, Clone, Serialize, Deserialize, new)]
-pub struct OKDInstallationScore {
-    /// The LAN CIDR where discovery endpoints live (e.g., 192.168.10.0/24)
-    pub lan_cidr: String,
-    /// Public external domain (e.g., example.com). Used for api/apps wildcard, etc.
-    pub public_domain: String,
-    /// Internal cluster domain (e.g., harmony.mcd). Used for internal svc/ingress and DNS.
-    pub internal_domain: String,
-}
+pub struct OKDInstallationScore {}
 
-impl<T: Topology + DnsServer + 'static> Score<T> for OKDInstallationScore {
-    fn create_interpret(&self) -> Box<dyn Interpret<T>> {
+impl Score<HAClusterTopology> for OKDInstallationScore {
+    fn create_interpret(&self) -> Box<dyn Interpret<HAClusterTopology>> {
         Box::new(OKDInstallationInterpret::new(self.clone()))
     }
 
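Since the score now carries no fields, a call site only needs the inventory and the HA cluster topology. A minimal sketch of such a call site; the crate and module paths are assumptions based on the imports in this file:

use harmony::interpret::InterpretError;
use harmony::inventory::Inventory;
use harmony::modules::okd::installation::OKDInstallationScore;
use harmony::score::Score;
use harmony::topology::HAClusterTopology;

async fn install_okd(
    inventory: &Inventory,
    topology: &HAClusterTopology,
) -> Result<(), InterpretError> {
    // derive_new's `new()` takes no arguments now that the struct is empty.
    let outcome = OKDInstallationScore::new()
        .interpret(inventory, topology)
        .await?;
    log::info!("OKD installation outcome: {outcome:?}");
    Ok(())
}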
@@ -109,86 +121,71 @@ impl OKDInstallationInterpret {
         }
     }
 
-    async fn run_inventory_phase<T: Topology + DnsServer>(
+    async fn run_inventory_phase(
         &self,
         inventory: &Inventory,
-        topology: &T,
+        topology: &HAClusterTopology,
     ) -> Result<(), InterpretError> {
-        // 1) Prepare DNS and DHCP lease registration (optional)
-        let dns_score = OKDSetup01InventoryDnsScore::new(
-            self.score.internal_domain.clone(),
-            self.score.public_domain.clone(),
-            Some(true), // register_dhcp_leases
-        );
-        dns_score.interpret(inventory, topology).await?;
-
-        // 2) Serve default iPXE + Kickstart and poll discovery
-        let discovery_score = OKDSetup01InventoryScore::new(self.score.lan_cidr.clone());
-        discovery_score.interpret(inventory, topology).await?;
-
+        OKDSetup01InventoryScore::new()
+            .interpret(inventory, topology)
+            .await?;
         Ok(())
     }
 
-    async fn run_bootstrap_phase<T: Topology + DnsServer>(
+    async fn run_bootstrap_phase(
         &self,
         inventory: &Inventory,
-        topology: &T,
+        topology: &HAClusterTopology,
    ) -> Result<(), InterpretError> {
-        // Select and provision bootstrap
-        let bootstrap_score = OKDSetup02BootstrapScore::new(
-            self.score.public_domain.clone(),
-            self.score.internal_domain.clone(),
-        );
-        bootstrap_score.interpret(inventory, topology).await?;
+        OKDSetup02BootstrapScore::new()
+            .interpret(inventory, topology)
+            .await?;
         Ok(())
     }
 
-    async fn run_control_plane_phase<T: Topology + DnsServer>(
+    async fn run_control_plane_phase(
         &self,
         inventory: &Inventory,
-        topology: &T,
+        topology: &HAClusterTopology,
     ) -> Result<(), InterpretError> {
         let control_plane_score = OKDSetup03ControlPlaneScore::new();
         control_plane_score.interpret(inventory, topology).await?;
         Ok(())
     }
 
-    async fn run_workers_phase<T: Topology + DnsServer>(
+    async fn run_workers_phase(
         &self,
         inventory: &Inventory,
-        topology: &T,
+        topology: &HAClusterTopology,
     ) -> Result<(), InterpretError> {
         let workers_score = OKDSetup04WorkersScore::new();
         workers_score.interpret(inventory, topology).await?;
         Ok(())
     }
 
-    async fn run_sanity_phase<T: Topology + DnsServer>(
+    async fn run_sanity_phase(
         &self,
         inventory: &Inventory,
-        topology: &T,
+        topology: &HAClusterTopology,
     ) -> Result<(), InterpretError> {
         let sanity_score = OKDSetup05SanityCheckScore::new();
         sanity_score.interpret(inventory, topology).await?;
         Ok(())
     }
 
-    async fn run_report_phase<T: Topology + DnsServer>(
+    async fn run_report_phase(
         &self,
         inventory: &Inventory,
-        topology: &T,
+        topology: &HAClusterTopology,
     ) -> Result<(), InterpretError> {
-        let report_score = OKDSetup06InstallationReportScore::new(
-            self.score.public_domain.clone(),
-            self.score.internal_domain.clone(),
-        );
+        let report_score = OKDSetup06InstallationReportScore::new();
         report_score.interpret(inventory, topology).await?;
         Ok(())
     }
 }
 
 #[async_trait]
-impl<T: Topology + DnsServer> Interpret<T> for OKDInstallationInterpret {
+impl Interpret<HAClusterTopology> for OKDInstallationInterpret {
     fn get_name(&self) -> InterpretName {
         InterpretName::Custom("OKDInstallationInterpret")
     }
@@ -208,14 +205,11 @@ impl<T: Topology + DnsServer> Interpret<T> for OKDInstallationInterpret {
     async fn execute(
         &self,
         inventory: &Inventory,
-        topology: &T,
+        topology: &HAClusterTopology,
     ) -> Result<Outcome, InterpretError> {
         instrument(HarmonyEvent::HarmonyStarted).ok();
 
-        info!(
-            "Starting OKD installation pipeline for public_domain={} internal_domain={} lan_cidr={}",
-            self.score.public_domain, self.score.internal_domain, self.score.lan_cidr
-        );
+        info!("Starting OKD installation pipeline",);
 
         self.run_inventory_phase(inventory, topology).await?;
 
@@ -238,111 +232,6 @@ impl<T: Topology + DnsServer> Interpret<T> for OKDInstallationInterpret {
     }
 }
 
-// -------------------------------------------------------------------------------------------------
-// Step 01: Inventory DNS setup
-// - Keep DHCP simple; optionally register dynamic leases into DNS.
-// - Ensure base records for internal/public domains (api/api-int/apps wildcard).
-// -------------------------------------------------------------------------------------------------
-
-#[derive(Debug, Clone, Serialize, new)]
-struct OKDSetup01InventoryDnsScore {
-    internal_domain: String,
-    public_domain: String,
-    register_dhcp_leases: Option<bool>,
-}
-
-impl<T: Topology + DnsServer> Score<T> for OKDSetup01InventoryDnsScore {
-    fn create_interpret(&self) -> Box<dyn Interpret<T>> {
-        Box::new(OKDSetup01InventoryDnsInterpret::new(self.clone()))
-    }
-
-    fn name(&self) -> String {
-        "OKDSetup01InventoryDnsScore".to_string()
-    }
-}
-
-#[derive(Debug, Clone)]
-struct OKDSetup01InventoryDnsInterpret {
-    score: OKDSetup01InventoryDnsScore,
-    version: Version,
-    status: InterpretStatus,
-}
-
-impl OKDSetup01InventoryDnsInterpret {
-    pub fn new(score: OKDSetup01InventoryDnsScore) -> Self {
-        let version = Version::from("1.0.0").unwrap();
-        Self {
-            version,
-            score,
-            status: InterpretStatus::QUEUED,
-        }
-    }
-
-    async fn ensure_dns<T: DnsServer>(&self, dns: &T) -> Result<(), InterpretError> {
-        // Minimal records placeholders; real IPs are set elsewhere in the flow.
-        // We register the names early to ensure resolvability for clients relying on DNS.
-        let mut records: Vec<DnsRecord> = vec![
-            DnsRecord {
-                value: ip!("0.0.0.0"),
-                host: "api".to_string(),
-                domain: self.score.internal_domain.clone(),
-                record_type: DnsRecordType::A,
-            },
-            DnsRecord {
-                value: ip!("0.0.0.0"),
-                host: "api-int".to_string(),
-                domain: self.score.internal_domain.clone(),
-                record_type: DnsRecordType::A,
-            },
-            DnsRecord {
-                value: ip!("0.0.0.0"),
-                host: "*.apps.".to_string(),
-                domain: self.score.internal_domain.clone(),
-                record_type: DnsRecordType::A,
-            },
-        ];
-        dns.ensure_hosts_registered(records.drain(..).collect())
-            .await?;
-        if let Some(register) = self.score.register_dhcp_leases {
-            dns.register_dhcp_leases(register).await?;
-        }
-        dns.commit_config().await?;
-        Ok(())
-    }
-}
-
-#[async_trait]
-impl<T: Topology + DnsServer> Interpret<T> for OKDSetup01InventoryDnsInterpret {
-    fn get_name(&self) -> InterpretName {
-        InterpretName::Custom("OKDSetup01InventoryDns")
-    }
-
-    fn get_version(&self) -> Version {
-        self.version.clone()
-    }
-
-    fn get_status(&self) -> InterpretStatus {
-        self.status.clone()
-    }
-
-    fn get_children(&self) -> Vec<Id> {
-        vec![]
-    }
-
-    async fn execute(
-        &self,
-        _inventory: &Inventory,
-        topology: &T,
-    ) -> Result<Outcome, InterpretError> {
-        info!("Ensuring base DNS and DHCP lease registration for discovery phase");
-        self.ensure_dns(topology).await?;
-        Ok(Outcome::new(
-            InterpretStatus::SUCCESS,
-            "Inventory DNS prepared".into(),
-        ))
-    }
-}
-
 // -------------------------------------------------------------------------------------------------
 // Step 01: Inventory (default PXE + Kickstart in RAM + Rust agent)
 // - This score exposes/ensures the default inventory assets and waits for discoveries.
@@ -350,12 +239,10 @@ impl<T: Topology + DnsServer> Interpret<T> for OKDSetup01InventoryDnsInterpret {
 // -------------------------------------------------------------------------------------------------
 
 #[derive(Debug, Clone, Serialize, new)]
-struct OKDSetup01InventoryScore {
-    lan_cidr: String,
-}
+struct OKDSetup01InventoryScore {}
 
-impl<T: Topology> Score<T> for OKDSetup01InventoryScore {
-    fn create_interpret(&self) -> Box<dyn Interpret<T>> {
+impl Score<HAClusterTopology> for OKDSetup01InventoryScore {
+    fn create_interpret(&self) -> Box<dyn Interpret<HAClusterTopology>> {
         Box::new(OKDSetup01InventoryInterpret::new(self.clone()))
     }
 
@@ -380,35 +267,10 @@ impl OKDSetup01InventoryInterpret {
             status: InterpretStatus::QUEUED,
         }
     }
 
-    async fn ensure_inventory_assets<T: Topology>(
-        &self,
-        topology: &T,
-    ) -> Result<(), InterpretError> {
-        // Placeholder: push or verify iPXE default, Kickstart, and Rust inventory agent are hosted.
-        // Real implementation: publish to the PXE/HTTP server via the topology.
-        info!(
-            "[Inventory] Ensuring default iPXE, Kickstart, and inventory agent are available for LAN {}",
-            self.score.lan_cidr
-        );
-        // topology.publish_http_asset(…) ?
-        Ok(())
-    }
-
-    async fn discover_nodes(&self) -> Result<usize, InterpretError> {
-        // Placeholder: implement Harmony discovery logic (scan/pull/push mode).
-        // Returns number of newly discovered nodes.
-        info!(
-            "[Inventory] Scanning for inventory agents in {}",
-            self.score.lan_cidr
-        );
-        // In practice, this would query harmony_composer or a local registry store.
-        Ok(3)
-    }
 }
 
 #[async_trait]
-impl<T: Topology> Interpret<T> for OKDSetup01InventoryInterpret {
+impl Interpret<HAClusterTopology> for OKDSetup01InventoryInterpret {
     fn get_name(&self) -> InterpretName {
         InterpretName::Custom("OKDSetup01Inventory")
     }
@@ -427,15 +289,86 @@ impl<T: Topology> Interpret<T> for OKDSetup01InventoryInterpret {
 
     async fn execute(
         &self,
-        _inventory: &Inventory,
-        topology: &T,
+        inventory: &Inventory,
+        topology: &HAClusterTopology,
     ) -> Result<Outcome, InterpretError> {
-        self.ensure_inventory_assets(topology).await?;
-        let count = self.discover_nodes().await?;
-        info!("[Inventory] Discovered {count} nodes");
+        info!("Setting up base DNS config for OKD");
+        let cluster_domain = &topology.domain_name;
+        let load_balancer_ip = &topology.load_balancer.get_ip();
+        inquire::Confirm::new(&format!(
+            "Set hostnames manually in your opnsense dnsmasq config :
+*.apps.{cluster_domain} -> {load_balancer_ip}
+api.{cluster_domain} -> {load_balancer_ip}
+api-int.{cluster_domain} -> {load_balancer_ip}
+
+When you can dig them, confirm to continue.
+"
+        ))
+        .prompt()
+        .expect("Prompt error");
+        // TODO reactivate automatic dns config when migration from unbound to dnsmasq is done
+        // OKDDnsScore::new(topology)
+        //     .interpret(inventory, topology)
+        //     .await?;
+
+        info!(
+            "Launching discovery agent, make sure that your nodes are successfully PXE booted and running inventory agent. They should answer on `http://<node_ip>:8080/inventory`"
+        );
+        LaunchDiscoverInventoryAgentScore {
+            discovery_timeout: None,
+        }
+        .interpret(inventory, topology)
+        .await?;
+
+        let bootstrap_host: PhysicalHost;
+        let host_repo = InventoryRepositoryFactory::build().await?;
+
+        loop {
+            let all_hosts = host_repo.get_all_hosts().await?;
+
+            if all_hosts.is_empty() {
+                warn!("No discovered hosts found yet. Waiting for hosts to appear...");
+                // Sleep to avoid spamming the user and logs while waiting for nodes.
+                tokio::time::sleep(std::time::Duration::from_secs(3)).await;
+                continue;
+            }
+
+            let ans = inquire::Select::new(
+                "Select the node to be used as the bootstrap node:",
+                all_hosts,
+            )
+            .with_help_message("Press Esc to refresh the list of discovered hosts")
+            .prompt();
+
+            match ans {
+                Ok(choice) => {
+                    info!("Selected {} as the bootstrap node.", choice.summary());
+                    host_repo
+                        .save_role_mapping(&HostRole::Bootstrap, &choice)
+                        .await?;
+                    bootstrap_host = choice;
+                    break;
+                }
+                Err(inquire::InquireError::OperationCanceled) => {
+                    info!("Refresh requested. Fetching list of discovered hosts again...");
+                    continue;
+                }
+                Err(e) => {
+                    error!("Failed to select bootstrap node: {}", e);
+                    return Err(InterpretError::new(format!(
+                        "Could not select host : {}",
+                        e.to_string()
+                    )));
+                }
+            }
+        }
+
         Ok(Outcome::new(
             InterpretStatus::SUCCESS,
-            format!("Inventory phase complete. Nodes discovered: {count}"),
+            format!(
+                "Found and assigned bootstrap node: {}",
+                bootstrap_host.summary()
+            ),
         ))
     }
 }
@@ -449,14 +382,11 @@ impl<T: Topology> Interpret<T> for OKDSetup01InventoryInterpret {
 // -------------------------------------------------------------------------------------------------
 
 #[derive(Debug, Clone, Serialize, new)]
-struct OKDSetup02BootstrapScore {
-    public_domain: String,
-    internal_domain: String,
-}
+struct OKDSetup02BootstrapScore {}
 
-impl<T: Topology> Score<T> for OKDSetup02BootstrapScore {
-    fn create_interpret(&self) -> Box<dyn Interpret<T>> {
-        Box::new(OKDSetup02BootstrapInterpret::new(self.clone()))
+impl Score<HAClusterTopology> for OKDSetup02BootstrapScore {
+    fn create_interpret(&self) -> Box<dyn Interpret<HAClusterTopology>> {
+        Box::new(OKDSetup02BootstrapInterpret::new())
     }
 
     fn name(&self) -> String {
@@ -466,42 +396,325 @@ impl<T: Topology> Score<T> for OKDSetup02BootstrapScore {
 
 #[derive(Debug, Clone)]
 struct OKDSetup02BootstrapInterpret {
-    score: OKDSetup02BootstrapScore,
     version: Version,
     status: InterpretStatus,
 }
 
 impl OKDSetup02BootstrapInterpret {
-    pub fn new(score: OKDSetup02BootstrapScore) -> Self {
+    pub fn new() -> Self {
         let version = Version::from("1.0.0").unwrap();
         Self {
             version,
-            score,
             status: InterpretStatus::QUEUED,
         }
     }
 
-    async fn render_per_mac_pxe(&self) -> Result<(), InterpretError> {
-        // Placeholder: use Harmony templates to emit {MAC}.ipxe selecting SCOS live + bootstrap ignition.
+    async fn get_bootstrap_node(&self) -> Result<PhysicalHost, InterpretError> {
+        let repo = InventoryRepositoryFactory::build().await?;
+        match repo
+            .get_host_for_role(HostRole::Bootstrap)
+            .await?
+            .into_iter()
+            .next()
+        {
+            Some(host) => Ok(host),
+            None => Err(InterpretError::new(
+                "No bootstrap node available".to_string(),
+            )),
+        }
+    }
+
+    async fn prepare_ignition_files(
+        &self,
+        inventory: &Inventory,
+        topology: &HAClusterTopology,
+    ) -> Result<(), InterpretError> {
+        let okd_bin_path = PathBuf::from("./data/okd/bin");
+        let okd_installation_path_str = "./data/okd/installation_files";
+        let okd_images_path = &PathBuf::from("./data/okd/installer_image/");
+        let okd_installation_path = &PathBuf::from(okd_installation_path_str);
+
+        let exit_status = Command::new("mkdir")
+            .arg("-p")
+            .arg(okd_installation_path)
+            .spawn()
+            .expect("Command failed to start")
+            .wait()
+            .await
+            .map_err(|e| {
+                InterpretError::new(format!("Failed to create okd installation directory : {e}"))
+            })?;
+        if !exit_status.success() {
+            return Err(InterpretError::new(format!(
+                "Failed to create okd installation directory"
+            )));
+        } else {
+            info!(
+                "Created OKD installation directory {}",
+                okd_installation_path.to_string_lossy()
+            );
+        }
+
+        let redhat_secret = SecretManager::get::<RedhatSecret>().await?;
+        let ssh_key = SecretManager::get::<SshKeyPair>().await?;
+
+        let install_config_yaml = InstallConfigYaml {
+            cluster_name: &topology.get_cluster_name(),
+            cluster_domain: &topology.get_cluster_base_domain(),
+            pull_secret: &redhat_secret.pull_secret,
+            ssh_public_key: &ssh_key.public,
+        }
+        .to_string();
+
+        let install_config_file_path = &okd_installation_path.join("install-config.yaml");
+
+        self.create_file(install_config_file_path, install_config_yaml.as_bytes())
+            .await?;
+
+        let install_config_backup_extension = install_config_file_path
+            .extension()
+            .map(|e| format!("{}.bak", e.to_string_lossy()))
+            .unwrap_or("bak".to_string());
+
+        let mut install_config_backup = install_config_file_path.clone();
+        install_config_backup.set_extension(install_config_backup_extension);
+
+        self.create_file(&install_config_backup, install_config_yaml.as_bytes())
+            .await?;
+
+        info!("Creating manifest files with openshift-install");
+        let output = Command::new(okd_bin_path.join("openshift-install"))
+            .args([
+                "create",
+                "manifests",
+                "--dir",
+                okd_installation_path.to_str().unwrap(),
+            ])
+            .output()
+            .await
+            .map_err(|e| InterpretError::new(format!("Failed to create okd manifest : {e}")))?;
+        let stdout = String::from_utf8(output.stdout).unwrap();
+        info!("openshift-install stdout :\n\n{}", stdout);
+        let stderr = String::from_utf8(output.stderr).unwrap();
+        info!("openshift-install stderr :\n\n{}", stderr);
+        info!("openshift-install exit status : {}", output.status);
+        if !output.status.success() {
+            return Err(InterpretError::new(format!(
+                "Failed to create okd manifest, exit code {} : {}",
+                output.status, stderr
+            )));
+        }
+
+        info!("Creating ignition files with openshift-install");
+        let output = Command::new(okd_bin_path.join("openshift-install"))
+            .args([
+                "create",
+                "ignition-configs",
+                "--dir",
+                okd_installation_path.to_str().unwrap(),
+            ])
+            .output()
+            .await
+            .map_err(|e| {
+                InterpretError::new(format!("Failed to create okd ignition config : {e}"))
+            })?;
+        let stdout = String::from_utf8(output.stdout).unwrap();
+        info!("openshift-install stdout :\n\n{}", stdout);
+        let stderr = String::from_utf8(output.stderr).unwrap();
+        info!("openshift-install stderr :\n\n{}", stderr);
+        info!("openshift-install exit status : {}", output.status);
+        if !output.status.success() {
+            return Err(InterpretError::new(format!(
+                "Failed to create okd manifest, exit code {} : {}",
+                output.status, stderr
+            )));
+        }
+
+        let ignition_files_http_path = PathBuf::from("okd_ignition_files");
+        let prepare_file_content = async |filename: &str| -> Result<FileContent, InterpretError> {
+            let local_path = okd_installation_path.join(filename);
+            let remote_path = ignition_files_http_path.join(filename);
+
+            info!(
+                "Preparing file content for local file : {} to remote : {}",
+                local_path.to_string_lossy(),
+                remote_path.to_string_lossy()
+            );
+
+            let content = tokio::fs::read_to_string(&local_path).await.map_err(|e| {
+                InterpretError::new(format!(
+                    "Could not read file content {} : {e}",
+                    local_path.to_string_lossy()
+                ))
+            })?;
+
+            Ok(FileContent {
+                path: FilePath::Relative(remote_path.to_string_lossy().to_string()),
+                content,
+            })
+        };
+
+        StaticFilesHttpScore {
+            remote_path: None,
+            folder_to_serve: None,
+            files: vec![
+                prepare_file_content("bootstrap.ign").await?,
+                prepare_file_content("master.ign").await?,
+                prepare_file_content("worker.ign").await?,
+                prepare_file_content("metadata.json").await?,
+            ],
+        }
+        .interpret(inventory, topology)
+        .await?;
+
+        let run_command =
+            async |cmd: &str, args: Vec<&str>| -> Result<std::process::Output, InterpretError> {
+                let output = Command::new(cmd).args(&args).output().await.map_err(|e| {
+                    InterpretError::new(format!("Failed to launch command {cmd} : {e}"))
+                })?;
+                let stdout = String::from_utf8(output.stdout.clone()).unwrap();
+                info!("{cmd} stdout :\n\n{}", stdout);
+                let stderr = String::from_utf8(output.stderr.clone()).unwrap();
+                info!("{cmd} stderr :\n\n{}", stderr);
+                info!("{cmd} exit status : {}", output.status);
+                if !output.status.success() {
+                    return Err(InterpretError::new(format!(
+                        "Command execution failed, exit code {} : {} {}",
+                        output.status,
+                        cmd,
+                        args.join(" ")
+                    )));
+                }
+                Ok(output)
+            };
+
+        info!("Successfully prepared ignition files for OKD installation");
+        // ignition_files_http_path // = PathBuf::from("okd_ignition_files");
+        info!(
+            r#"Uploading images, they can be refreshed with a command similar to this one: openshift-install coreos print-stream-json | grep -Eo '"https.*(kernel.|initramfs.|rootfs.)\w+(\.img)?"' | grep x86_64 | xargs -n 1 curl -LO"#
+        );
+
+        warn!(
+            "TODO push installer image files with `scp -r data/okd/installer_image/* root@192.168.1.1:/usr/local/http/scos/` until performance issue is resolved"
+        );
+        inquire::Confirm::new(
+            "push installer image files with `scp -r data/okd/installer_image/* root@192.168.1.1:/usr/local/http/scos/` until performance issue is resolved").prompt().expect("Prompt error");
+
+        // let scos_http_path = PathBuf::from("scos");
+        // StaticFilesHttpScore {
+        //     folder_to_serve: Some(Url::LocalFolder(
+        //         okd_images_path.to_string_lossy().to_string(),
+        //     )),
+        //     remote_path: Some(scos_http_path.to_string_lossy().to_string()),
+        //     files: vec![],
+        // }
+        // .interpret(inventory, topology)
+        // .await?;
+
+        Ok(())
+    }
+
+    async fn configure_host_binding(
+        &self,
+        inventory: &Inventory,
+        topology: &HAClusterTopology,
+    ) -> Result<(), InterpretError> {
+        let binding = HostBinding {
+            logical_host: topology.bootstrap_host.clone(),
+            physical_host: self.get_bootstrap_node().await?,
+        };
+        info!("Configuring host binding for bootstrap node {binding:?}");
+
+        DhcpHostBindingScore {
+            host_binding: vec![binding],
+            domain: Some(topology.domain_name.clone()),
+        }
+        .interpret(inventory, topology)
+        .await?;
+        Ok(())
+    }
+
+    async fn render_per_mac_pxe(
+        &self,
+        inventory: &Inventory,
+        topology: &HAClusterTopology,
+    ) -> Result<(), InterpretError> {
+        let content = BootstrapIpxeTpl {
+            http_ip: &topology.http_server.get_ip().to_string(),
+            scos_path: "scos", // TODO use some constant
+            installation_device: "/dev/sda", // TODO do something smart based on the host drives
+            // topology. Something like use the smallest device
+            // above 200G that is an ssd
+        }
+        .to_string();
+
+        let bootstrap_node = self.get_bootstrap_node().await?;
+        let mac_address = bootstrap_node.get_mac_address();
+
         info!("[Bootstrap] Rendering per-MAC PXE for bootstrap node");
+        debug!("bootstrap ipxe content : {content}");
+        debug!("bootstrap mac addresses : {mac_address:?}");
+
+        IPxeMacBootFileScore {
+            mac_address,
+            content,
+        }
+        .interpret(inventory, topology)
+        .await?;
+        Ok(())
+    }
+
+    async fn setup_bootstrap_load_balancer(
+        &self,
+        inventory: &Inventory,
+        topology: &HAClusterTopology,
+    ) -> Result<(), InterpretError> {
+        let outcome = OKDBootstrapLoadBalancerScore::new(topology)
+            .interpret(inventory, topology)
+            .await?;
+        info!("Successfully executed OKDBootstrapLoadBalancerScore : {outcome:?}");
         Ok(())
     }
 
     async fn reboot_target(&self) -> Result<(), InterpretError> {
         // Placeholder: ssh reboot using the inventory ephemeral key
         info!("[Bootstrap] Rebooting bootstrap node via SSH");
+        // TODO reboot programatically, there are some logical checks and refactoring to do such as
+        // accessing the bootstrap node config (ip address) from the inventory
+        let confirmation = inquire::Confirm::new(
+            "Now reboot the bootstrap node so it picks up its pxe boot file. Press enter when ready.",
+        )
+        .with_default(true)
+        .prompt()
+        .expect("Unexpected prompt error");
         Ok(())
     }
 
     async fn wait_for_bootstrap_complete(&self) -> Result<(), InterpretError> {
         // Placeholder: wait-for bootstrap-complete
         info!("[Bootstrap] Waiting for bootstrap-complete …");
+        todo!("[Bootstrap] Waiting for bootstrap-complete …")
+    }
+
+    async fn create_file(&self, path: &PathBuf, content: &[u8]) -> Result<(), InterpretError> {
+        let mut install_config_file = File::create(path).await.map_err(|e| {
+            InterpretError::new(format!(
+                "Could not create file {} : {e}",
+                path.to_string_lossy()
+            ))
+        })?;
+        install_config_file.write(content).await.map_err(|e| {
+            InterpretError::new(format!(
+                "Could not write file {} : {e}",
+                path.to_string_lossy()
+            ))
+        })?;
         Ok(())
     }
 }
 
 #[async_trait]
-impl<T: Topology> Interpret<T> for OKDSetup02BootstrapInterpret {
+impl Interpret<HAClusterTopology> for OKDSetup02BootstrapInterpret {
     fn get_name(&self) -> InterpretName {
         InterpretName::Custom("OKDSetup02Bootstrap")
     }
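The two openshift-install invocations above duplicate the stdout/stderr/status logging that the local `run_command` closure also implements. A hedged sketch of that wrapper as a free helper inside this module, using only calls already present in the diff (tokio's `Command::output` and the crate's `InterpretError::new`):

use tokio::process::Command;

use crate::interpret::InterpretError;

// Runs a command, logs its output and status, and fails on a non-zero exit code.
async fn run_logged(cmd: &str, args: &[&str]) -> Result<std::process::Output, InterpretError> {
    let output = Command::new(cmd)
        .args(args)
        .output()
        .await
        .map_err(|e| InterpretError::new(format!("Failed to launch command {cmd} : {e}")))?;
    log::info!("{cmd} stdout :\n\n{}", String::from_utf8_lossy(&output.stdout));
    log::info!("{cmd} stderr :\n\n{}", String::from_utf8_lossy(&output.stderr));
    log::info!("{cmd} exit status : {}", output.status);
    if !output.status.success() {
        return Err(InterpretError::new(format!(
            "Command execution failed, exit code {} : {} {}",
            output.status,
            cmd,
            args.join(" ")
        )));
    }
    Ok(output)
}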
@@ -520,10 +733,18 @@ impl<T: Topology> Interpret<T> for OKDSetup02BootstrapInterpret {
 
     async fn execute(
         &self,
-        _inventory: &Inventory,
-        _topology: &T,
+        inventory: &Inventory,
+        topology: &HAClusterTopology,
     ) -> Result<Outcome, InterpretError> {
-        self.render_per_mac_pxe().await?;
+        self.configure_host_binding(inventory, topology).await?;
+        self.prepare_ignition_files(inventory, topology).await?;
+        self.render_per_mac_pxe(inventory, topology).await?;
+        self.setup_bootstrap_load_balancer(inventory, topology)
+            .await?;
+
+        // TODO https://docs.okd.io/latest/installing/installing_bare_metal/upi/installing-bare-metal.html#installation-user-provisioned-validating-dns_installing-bare-metal
+        // self.validate_dns_config(inventory, topology).await?;
+
         self.reboot_target().await?;
         self.wait_for_bootstrap_complete().await?;
 
@@ -543,8 +764,8 @@ impl<T: Topology> Interpret<T> for OKDSetup02BootstrapInterpret {
 #[derive(Debug, Clone, Serialize, new)]
 struct OKDSetup03ControlPlaneScore {}
 
-impl<T: Topology> Score<T> for OKDSetup03ControlPlaneScore {
-    fn create_interpret(&self) -> Box<dyn Interpret<T>> {
+impl Score<HAClusterTopology> for OKDSetup03ControlPlaneScore {
+    fn create_interpret(&self) -> Box<dyn Interpret<HAClusterTopology>> {
         Box::new(OKDSetup03ControlPlaneInterpret::new(self.clone()))
     }
 
@@ -583,7 +804,7 @@ impl OKDSetup03ControlPlaneInterpret {
 }
 
 #[async_trait]
-impl<T: Topology> Interpret<T> for OKDSetup03ControlPlaneInterpret {
+impl Interpret<HAClusterTopology> for OKDSetup03ControlPlaneInterpret {
     fn get_name(&self) -> InterpretName {
         InterpretName::Custom("OKDSetup03ControlPlane")
     }
@@ -603,7 +824,7 @@ impl<T: Topology> Interpret<T> for OKDSetup03ControlPlaneInterpret {
     async fn execute(
         &self,
         _inventory: &Inventory,
-        _topology: &T,
+        _topology: &HAClusterTopology,
     ) -> Result<Outcome, InterpretError> {
         self.render_and_reboot().await?;
         self.persist_network_bond().await?;
@@ -623,8 +844,8 @@ impl<T: Topology> Interpret<T> for OKDSetup03ControlPlaneInterpret {
 #[derive(Debug, Clone, Serialize, new)]
 struct OKDSetup04WorkersScore {}
 
-impl<T: Topology> Score<T> for OKDSetup04WorkersScore {
-    fn create_interpret(&self) -> Box<dyn Interpret<T>> {
+impl Score<HAClusterTopology> for OKDSetup04WorkersScore {
+    fn create_interpret(&self) -> Box<dyn Interpret<HAClusterTopology>> {
         Box::new(OKDSetup04WorkersInterpret::new(self.clone()))
     }
 
@@ -657,7 +878,7 @@ impl OKDSetup04WorkersInterpret {
 }
 
 #[async_trait]
-impl<T: Topology> Interpret<T> for OKDSetup04WorkersInterpret {
+impl Interpret<HAClusterTopology> for OKDSetup04WorkersInterpret {
     fn get_name(&self) -> InterpretName {
         InterpretName::Custom("OKDSetup04Workers")
     }
@@ -677,7 +898,7 @@ impl<T: Topology> Interpret<T> for OKDSetup04WorkersInterpret {
     async fn execute(
         &self,
         _inventory: &Inventory,
-        _topology: &T,
+        _topology: &HAClusterTopology,
     ) -> Result<Outcome, InterpretError> {
         self.render_and_reboot().await?;
         Ok(Outcome::new(
@@ -695,8 +916,8 @@ impl<T: Topology> Interpret<T> for OKDSetup04WorkersInterpret {
 #[derive(Debug, Clone, Serialize, new)]
 struct OKDSetup05SanityCheckScore {}
 
-impl<T: Topology> Score<T> for OKDSetup05SanityCheckScore {
-    fn create_interpret(&self) -> Box<dyn Interpret<T>> {
+impl Score<HAClusterTopology> for OKDSetup05SanityCheckScore {
+    fn create_interpret(&self) -> Box<dyn Interpret<HAClusterTopology>> {
         Box::new(OKDSetup05SanityCheckInterpret::new(self.clone()))
     }
 
@@ -729,7 +950,7 @@ impl OKDSetup05SanityCheckInterpret {
 }
 
 #[async_trait]
-impl<T: Topology> Interpret<T> for OKDSetup05SanityCheckInterpret {
+impl Interpret<HAClusterTopology> for OKDSetup05SanityCheckInterpret {
     fn get_name(&self) -> InterpretName {
         InterpretName::Custom("OKDSetup05SanityCheck")
     }
@@ -749,7 +970,7 @@ impl<T: Topology> Interpret<T> for OKDSetup05SanityCheckInterpret {
     async fn execute(
         &self,
         _inventory: &Inventory,
-        _topology: &T,
+        _topology: &HAClusterTopology,
     ) -> Result<Outcome, InterpretError> {
         self.run_checks().await?;
         Ok(Outcome::new(
@@ -765,13 +986,10 @@ impl<T: Topology> Interpret<T> for OKDSetup05SanityCheckInterpret {
 // -------------------------------------------------------------------------------------------------
 
 #[derive(Debug, Clone, Serialize, new)]
-struct OKDSetup06InstallationReportScore {
-    public_domain: String,
-    internal_domain: String,
-}
+struct OKDSetup06InstallationReportScore {}
 
-impl<T: Topology> Score<T> for OKDSetup06InstallationReportScore {
-    fn create_interpret(&self) -> Box<dyn Interpret<T>> {
+impl Score<HAClusterTopology> for OKDSetup06InstallationReportScore {
+    fn create_interpret(&self) -> Box<dyn Interpret<HAClusterTopology>> {
         Box::new(OKDSetup06InstallationReportInterpret::new(self.clone()))
     }
 
@@ -798,16 +1016,13 @@ impl OKDSetup06InstallationReportInterpret {
     }
 
     async fn generate(&self) -> Result<(), InterpretError> {
-        info!(
-            "[Report] Generating installation report for {} / {}",
-            self.score.public_domain, self.score.internal_domain
-        );
+        info!("[Report] Generating OKD installation report",);
         Ok(())
     }
 }
 
 #[async_trait]
-impl<T: Topology> Interpret<T> for OKDSetup06InstallationReportInterpret {
+impl Interpret<HAClusterTopology> for OKDSetup06InstallationReportInterpret {
     fn get_name(&self) -> InterpretName {
         InterpretName::Custom("OKDSetup06InstallationReport")
     }
@@ -827,7 +1042,7 @@ impl<T: Topology> Interpret<T> for OKDSetup06InstallationReportInterpret {
     async fn execute(
         &self,
         _inventory: &Inventory,
-        _topology: &T,
+        _topology: &HAClusterTopology,
     ) -> Result<Outcome, InterpretError> {
         self.generate().await?;
         Ok(Outcome::new(
@@ -1,9 +1,9 @@
 use askama::Template;
 use async_trait::async_trait;
 use derive_new::new;
-use harmony_types::net::Url;
+use harmony_types::net::{IpAddress, Url};
 use serde::Serialize;
-use std::net::IpAddr;
+use std::net::{IpAddr, Ipv4Addr};
 
 use crate::{
     data::{FileContent, FilePath, Version},
@@ -46,19 +46,32 @@ impl<T: Topology + DhcpServer + TftpServer + HttpServer + Router> Interpret<T> f
     ) -> Result<Outcome, InterpretError> {
         let gateway_ip = topology.get_gateway();
 
+        let dhcp_server_ip = match DhcpServer::get_ip(topology) {
+            std::net::IpAddr::V4(ipv4_addr) => ipv4_addr,
+            std::net::IpAddr::V6(_ipv6_addr) => todo!("Support ipv6 someday"),
+        };
+
+        // TODO this could overflow, we should use proper subnet maths here instead of an ip
+        // address and guessing the subnet size from there
+        let start = Ipv4Addr::from(u32::from(dhcp_server_ip) + 100);
+        let end = Ipv4Addr::from(u32::from(dhcp_server_ip) + 150);
+
         let scores: Vec<Box<dyn Score<T>>> = vec![
             Box::new(DhcpScore {
                 host_binding: vec![],
+                domain: None,
                 next_server: Some(topology.get_gateway()),
                 boot_filename: None,
                 filename: Some("undionly.kpxe".to_string()),
                 filename64: Some("ipxe.efi".to_string()),
                 filenameipxe: Some(format!("http://{gateway_ip}:8080/boot.ipxe").to_string()),
+                dhcp_range: (IpAddress::from(start), IpAddress::from(end)),
             }),
             Box::new(TftpScore {
                 files_to_serve: Url::LocalFolder("./data/pxe/okd/tftpboot/".to_string()),
             }),
             Box::new(StaticFilesHttpScore {
+                remote_path: None,
                 // TODO The current russh based copy is way too slow, check for a lib update or use scp
                 // when available
                 //
@@ -107,6 +120,7 @@ impl<T: Topology + DhcpServer + TftpServer + HttpServer + Router> Interpret<T> f
                 Err(e) => return Err(e),
             };
         }
+        inquire::Confirm::new("Execute the copy : `scp -r data/pxe/okd/http_files/* root@192.168.1.1:/usr/local/http/` and confirm when done to continue").prompt().expect("Prompt error");
 
         Ok(Outcome::success("Ipxe installed".to_string()))
     }
@@ -5,4 +5,5 @@ pub mod dns;
 pub mod installation;
 pub mod ipxe;
 pub mod load_balancer;
+pub mod templates;
 pub mod upgrade;
18 harmony/src/modules/okd/templates.rs Normal file
@@ -0,0 +1,18 @@
+use askama::Template;
+
+#[derive(Template)]
+#[template(path = "okd/install-config.yaml.j2")]
+pub struct InstallConfigYaml<'a> {
+    pub cluster_domain: &'a str,
+    pub pull_secret: &'a str,
+    pub ssh_public_key: &'a str,
+    pub cluster_name: &'a str,
+}
+
+#[derive(Template)]
+#[template(path = "okd/bootstrap.ipxe.j2")]
+pub struct BootstrapIpxeTpl<'a> {
+    pub http_ip: &'a str,
+    pub scos_path: &'a str,
+    pub installation_device: &'a str,
+}
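Both structs render through askama's Template derive; the installation code above calls `.to_string()` via the generated Display impl. An equivalent explicit render, with illustrative field values only:

use askama::Template;

fn render_bootstrap_ipxe() -> askama::Result<String> {
    // Field values here are placeholders; real values come from the topology.
    BootstrapIpxeTpl {
        http_ip: "192.168.1.1",
        scos_path: "scos",
        installation_device: "/dev/sda",
    }
    .render()
}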
7 harmony/templates/okd/bootstrap.ipxe.j2 Normal file
@@ -0,0 +1,7 @@
+set base-url http://{{ http_ip }}:8080
+set scos-base-url = ${base-url}/{{ scos_path }}
+set installation-device = {{ installation_device }}
+
+kernel ${scos-base-url}/scos-live-kernel.x86_64 initrd=main coreos.live.rootfs_url=${scos-base-url}/scos-live-rootfs.x86_64.img coreos.inst.install_dev=${installation-device} coreos.inst.ignition_url=${base-url}/bootstrap.ign
+initrd --name main ${scos-base-url}/scos-live-initramfs.x86_64.img
+boot
24 harmony/templates/okd/install-config.yaml.j2 Normal file
@@ -0,0 +1,24 @@
+# Built from https://docs.okd.io/latest/installing/installing_bare_metal/upi/installing-bare-metal.html#installation-bare-metal-config-yaml_installing-bare-metal
+apiVersion: v1
+baseDomain: {{ cluster_domain }}
+compute:
+- hyperthreading: Enabled
+  name: worker
+  replicas: 0
+controlPlane:
+  hyperthreading: Enabled
+  name: master
+  replicas: 3
+metadata:
+  name: {{ cluster_name }}
+networking:
+  clusterNetwork:
+  - cidr: 10.128.0.0/14
+    hostPrefix: 23
+  networkType: OVNKubernetes
+  serviceNetwork:
+  - 172.30.0.0/16
+platform:
+  none: {}
+pullSecret: '{{ pull_secret|safe }}'
+sshKey: '{{ ssh_public_key }}'
@@ -9,6 +9,7 @@ use config::INFISICAL_ENVIRONMENT;
 use config::INFISICAL_PROJECT_ID;
 use config::INFISICAL_URL;
 use config::SECRET_STORE;
+use log::debug;
 use serde::{Serialize, de::DeserializeOwned};
 use std::fmt;
 use store::InfisicalSecretStore;
@@ -101,6 +102,7 @@ impl SecretManager {
     /// Retrieves and deserializes a secret.
     pub async fn get<T: Secret>() -> Result<T, SecretStoreError> {
         let manager = get_secret_manager().await;
+        debug!("Getting secret ns {} key {}", &manager.namespace, T::KEY);
         let raw_value = manager.store.get_raw(&manager.namespace, T::KEY).await?;
         serde_json::from_slice(&raw_value).map_err(|e| SecretStoreError::Deserialization {
             key: T::KEY.to_string(),
@@ -1,5 +1,5 @@
 use async_trait::async_trait;
-use log::info;
+use log::{debug, info};
 use std::path::{Path, PathBuf};
 
 use crate::{SecretStore, SecretStoreError};
@@ -24,17 +24,25 @@ impl SecretStore for LocalFileSecretStore {
             .join("secrets");
 
         let file_path = Self::get_file_path(&data_dir, ns, key);
-        info!(
+        debug!(
             "LOCAL_STORE: Getting key '{key}' from namespace '{ns}' at {}",
             file_path.display()
         );
 
-        tokio::fs::read(&file_path)
-            .await
-            .map_err(|_| SecretStoreError::NotFound {
-                namespace: ns.to_string(),
-                key: key.to_string(),
-            })
+        let content =
+            tokio::fs::read(&file_path)
+                .await
+                .map_err(|_| SecretStoreError::NotFound {
+                    namespace: ns.to_string(),
+                    key: key.to_string(),
+                })?;
+        info!(
+            "Sum of all vec get {ns} {key} {:?}",
+            content
+                .iter()
+                .fold(0, |acc: u64, val: &u8| { acc + *val as u64 })
+        );
+        Ok(content)
     }
 
     async fn set_raw(&self, ns: &str, key: &str, val: &[u8]) -> Result<(), SecretStoreError> {
@@ -56,6 +64,12 @@ impl SecretStore for LocalFileSecretStore {
             .map_err(|e| SecretStoreError::Store(Box::new(e)))?;
         }
 
+        info!(
+            "Sum of all vec set {ns} {key} {:?}",
+            val.iter()
+                .fold(0, |acc: u64, val: &u8| { acc + *val as u64 })
+        );
+
         tokio::fs::write(&file_path, val)
             .await
             .map_err(|e| SecretStoreError::Store(Box::new(e)))
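Note (editor's illustration, not part of the diff): the new "Sum of all vec" log lines appear to record a simple byte-sum of the secret payload on both the read and write paths, presumably so the two can be compared without printing the secret itself. The fold reduces to:

    // Equivalent of the fold used in the new log statements.
    let content: Vec<u8> = vec![1, 2, 3];
    let sum = content.iter().fold(0, |acc: u64, val: &u8| acc + *val as u64);
    assert_eq!(sum, 6);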
@@ -48,6 +48,12 @@ impl From<String> for Id {
         }
     }
 }
 
+impl From<Id> for String {
+    fn from(value: Id) -> Self {
+        value.to_string()
+    }
+}
+
 impl std::fmt::Display for Id {
     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
         f.write_str(&self.value)
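Note (editor's illustration, not part of the diff): the new `impl From<Id> for String` simply delegates to the existing Display implementation, so converting an Id into a String yields the same text as formatting it. A minimal sketch, assuming the `From<String> for Id` impl shown in the hunk header:

    let id = Id::from("host-01".to_string());
    let display = id.to_string();      // via the existing Display impl
    let converted: String = id.into(); // via the new From<Id> for String
    assert_eq!(converted, display);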
@@ -21,7 +21,7 @@ impl From<&MacAddress> for String {
 
 impl std::fmt::Display for MacAddress {
     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
-        f.write_fmt(format_args!("MacAddress {}", String::from(self)))
+        f.write_str(&String::from(self))
     }
 }
 
migrations/20250902035357_Host_role_mapping.sql (new file, 5 lines added)
@@ -0,0 +1,5 @@
+CREATE TABLE IF NOT EXISTS host_role_mapping (
+    id INTEGER PRIMARY KEY AUTOINCREMENT,
+    host_id TEXT NOT NULL,
+    role TEXT NOT NULL
+);
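Note (editor's illustration, not part of the diff): a minimal sketch of how the new table could be written to with sqlx, assuming the `sqlite` feature is enabled and a pool is created elsewhere; the helper name and pool are hypothetical, only the query text follows the table definition above.

    // Hypothetical helper, not taken from the repository.
    async fn assign_role(
        pool: &sqlx::SqlitePool,
        host_id: &str,
        role: &str,
    ) -> Result<(), sqlx::Error> {
        sqlx::query("INSERT INTO host_role_mapping (host_id, role) VALUES (?, ?)")
            .bind(host_id)
            .bind(role)
            .execute(pool)
            .await?;
        Ok(())
    }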
@@ -36,6 +36,27 @@ pub struct DnsMasq {
     pub dhcp_options: Vec<DhcpOptions>,
     pub dhcp_boot: Vec<DhcpBoot>,
     pub dhcp_tags: Vec<RawXml>,
+    pub hosts: Vec<DnsmasqHost>,
+}
+
+#[derive(Default, PartialEq, Debug, YaSerialize, YaDeserialize, Clone)]
+#[yaserde(rename = "hosts")]
+pub struct DnsmasqHost {
+    #[yaserde(attribute = true)]
+    pub uuid: String,
+    pub host: String,
+    pub domain: MaybeString,
+    pub local: MaybeString,
+    pub ip: MaybeString,
+    pub cnames: MaybeString,
+    pub client_id: MaybeString,
+    pub hwaddr: MaybeString,
+    pub lease_time: MaybeString,
+    pub ignore: Option<u8>,
+    pub set_tag: MaybeString,
+    pub descr: MaybeString,
+    pub comments: MaybeString,
+    pub aliases: MaybeString,
 }
 
 // Represents the <dhcp> element and its nested fields.
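Note (editor's illustration, not part of the diff): elsewhere in this change the new DnsmasqHost entries are built roughly like the sketch below; all field values here are placeholders, and multiple MAC addresses share one entry as a comma-separated hwaddr list.

    let host = DnsmasqHost {
        uuid: Uuid::new_v4().to_string(),
        host: "worker-1".to_string(),
        ip: "192.168.1.10".into(),
        hwaddr: "aa:bb:cc:dd:ee:01,aa:bb:cc:dd:ee:02".into(),
        local: MaybeString::from("1"),
        ignore: Some(0),
        ..Default::default()
    };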
@@ -4,7 +4,7 @@ use crate::{
     config::{SshConfigManager, SshCredentials, SshOPNSenseShell},
     error::Error,
     modules::{
-        caddy::CaddyConfig, dhcp_legacy::DhcpConfigLegacyISC, dns::DnsConfig,
+        caddy::CaddyConfig, dhcp_legacy::DhcpConfigLegacyISC, dns::UnboundDnsConfig,
         dnsmasq::DhcpConfigDnsMasq, load_balancer::LoadBalancerConfig, tftp::TftpConfig,
     },
 };
@@ -51,8 +51,8 @@ impl Config {
         DhcpConfigDnsMasq::new(&mut self.opnsense, self.shell.clone())
     }
 
-    pub fn dns(&mut self) -> DnsConfig<'_> {
-        DnsConfig::new(&mut self.opnsense)
+    pub fn dns(&mut self) -> DhcpConfigDnsMasq<'_> {
+        DhcpConfigDnsMasq::new(&mut self.opnsense, self.shell.clone())
     }
 
     pub fn tftp(&mut self) -> TftpConfig<'_> {
@@ -226,6 +226,7 @@ mod tests {
             "src/tests/data/config-full-ncd0.xml",
             "src/tests/data/config-full-25.7.xml",
             "src/tests/data/config-full-25.7-dummy-dnsmasq-options.xml",
+            "src/tests/data/config-25.7-dnsmasq-static-host.xml",
         ] {
             let mut test_file_path = PathBuf::from(env!("CARGO_MANIFEST_DIR"));
             test_file_path.push(path);
@@ -1,4 +1,4 @@
-#[derive(Debug)]
+#[derive(Debug, PartialEq)]
 pub enum DhcpError {
     InvalidMacAddress(String),
     InvalidIpAddress(String),
@@ -1,10 +1,10 @@
 use opnsense_config_xml::{Host, OPNsense};
 
-pub struct DnsConfig<'a> {
+pub struct UnboundDnsConfig<'a> {
     opnsense: &'a mut OPNsense,
 }
 
-impl<'a> DnsConfig<'a> {
+impl<'a> UnboundDnsConfig<'a> {
     pub fn new(opnsense: &'a mut OPNsense) -> Self {
         Self { opnsense }
     }
@@ -1,9 +1,12 @@
 // dnsmasq.rs
 use crate::modules::dhcp::DhcpError;
-use log::{debug, info};
+use log::{debug, info, warn};
+use opnsense_config_xml::dnsmasq::{DhcpRange, DnsMasq, DnsmasqHost}; // Assuming DhcpRange is defined in opnsense_config_xml::dnsmasq
 use opnsense_config_xml::{MaybeString, StaticMap};
+use std::collections::HashSet;
 use std::net::Ipv4Addr;
 use std::sync::Arc;
+use uuid::Uuid;
 
 use opnsense_config_xml::OPNsense;
 
@@ -25,74 +28,167 @@ impl<'a> DhcpConfigDnsMasq<'a> {
         }
     }
 
-    /// Removes a static mapping by its MAC address.
-    /// Static mappings are stored in the <dhcpd> section of the config, shared with the ISC module.
-    pub fn remove_static_mapping(&mut self, mac: &str) {
-        let lan_dhcpd = self.get_lan_dhcpd();
-        lan_dhcpd
-            .staticmaps
-            .retain(|static_entry| static_entry.mac != mac);
+    /// Removes a MAC address from a static mapping.
+    /// If the mapping has no other MAC addresses associated with it, the entire host entry is removed.
+    pub fn remove_static_mapping(&mut self, mac_to_remove: &str) {
+        let dnsmasq = self.get_dnsmasq();
+
+        // Update hwaddr fields for hosts that contain the MAC, removing it from the comma-separated list.
+        for host in dnsmasq.hosts.iter_mut() {
+            let mac = host.hwaddr.content_string();
+            let original_macs: Vec<&str> = mac.split(',').collect();
+            if original_macs
+                .iter()
+                .any(|m| m.eq_ignore_ascii_case(mac_to_remove))
+            {
+                let updated_macs: Vec<&str> = original_macs
+                    .into_iter()
+                    .filter(|m| !m.eq_ignore_ascii_case(mac_to_remove))
+                    .collect();
+                host.hwaddr = updated_macs.join(",").into();
+            }
+        }
+
+        // Remove any host entries that no longer have any MAC addresses.
+        dnsmasq
+            .hosts
+            .retain(|host_entry| !host_entry.hwaddr.content_string().is_empty());
     }
 
-    /// Retrieves a mutable reference to the LAN interface's DHCP configuration.
-    /// This is located in the shared <dhcpd> section of the config.
-    fn get_lan_dhcpd(&mut self) -> &mut opnsense_config_xml::DhcpInterface {
-        &mut self
-            .opnsense
-            .dhcpd
-            .elements
-            .iter_mut()
-            .find(|(name, _config)| name == "lan")
-            .expect("Interface lan should have dhcpd activated")
-            .1
+    /// Retrieves a mutable reference to the DnsMasq configuration.
+    /// This is located in the <dnsmasq> section of the OPNsense config.
+    fn get_dnsmasq(&mut self) -> &mut DnsMasq {
+        self.opnsense
+            .dnsmasq
+            .as_mut()
+            .expect("Dnsmasq config must be initialized")
     }
 
-    /// Adds a new static DHCP mapping.
-    /// Validates the MAC address and checks for existing mappings to prevent conflicts.
+    /// Adds or updates a static DHCP mapping.
+    ///
+    /// This function implements specific logic to handle existing entries:
+    /// - If no host exists for the given IP or hostname, a new entry is created.
+    /// - If exactly one host exists for the IP and/or hostname, the new MAC is appended to it.
+    /// - It will error if the IP and hostname exist but point to two different host entries,
+    ///   as this represents an unresolvable conflict.
+    /// - It will also error if multiple entries are found for the IP or hostname, indicating an
+    ///   ambiguous state.
     pub fn add_static_mapping(
         &mut self,
-        mac: &str,
-        ipaddr: Ipv4Addr,
+        mac: &Vec<String>,
+        ipaddr: &Ipv4Addr,
         hostname: &str,
     ) -> Result<(), DhcpError> {
-        let mac = mac.to_string();
-        let hostname = hostname.to_string();
-        let lan_dhcpd = self.get_lan_dhcpd();
-        let existing_mappings: &mut Vec<StaticMap> = &mut lan_dhcpd.staticmaps;
+        let mut hostname_split = hostname.split(".");
+        let hostname = hostname_split.next().expect("hostname cannot be empty");
+        let domain_name = hostname_split.collect::<Vec<&str>>().join(".");
 
-        if !Self::is_valid_mac(&mac) {
-            return Err(DhcpError::InvalidMacAddress(mac));
+        if let Some(m) = mac.iter().find(|m| !Self::is_valid_mac(m)) {
+            return Err(DhcpError::InvalidMacAddress(m.to_string()));
         }
 
-        // TODO: Validate that the IP address is within a configured DHCP range.
-        if existing_mappings
-            .iter()
-            .any(|m| m.ipaddr == ipaddr.to_string() && m.mac == mac)
-        {
-            info!("Mapping already exists for {} [{}], skipping", ipaddr, mac);
-            return Ok(());
-        }
+        let ip_str = ipaddr.to_string();
+        let hosts = &mut self.get_dnsmasq().hosts;
 
-        if existing_mappings
-            .iter()
-            .any(|m| m.ipaddr == ipaddr.to_string())
+        let ip_indices: Vec<usize> = hosts
+            .iter()
+            .enumerate()
+            .filter(|(_, h)| h.ip.content_string() == ip_str)
+            .map(|(i, _)| i)
+            .collect();
+
+        let hostname_indices: Vec<usize> = hosts
+            .iter()
+            .enumerate()
+            .filter(|(_, h)| h.host == hostname)
+            .map(|(i, _)| i)
+            .collect();
+
+        let ip_set: HashSet<usize> = ip_indices.iter().cloned().collect();
+        let hostname_set: HashSet<usize> = hostname_indices.iter().cloned().collect();
+
+        if !ip_indices.is_empty()
+            && !hostname_indices.is_empty()
+            && ip_set.intersection(&hostname_set).count() == 0
         {
-            return Err(DhcpError::IpAddressAlreadyMapped(ipaddr.to_string()));
+            return Err(DhcpError::Configuration(format!(
+                "Configuration conflict: IP {} and hostname '{}' exist, but in different static host entries.",
+                ipaddr, hostname
+            )));
         }
 
-        if existing_mappings.iter().any(|m| m.mac == mac) {
-            return Err(DhcpError::MacAddressAlreadyMapped(mac));
+        let mut all_indices: Vec<&usize> = ip_set.union(&hostname_set).collect();
+        all_indices.sort();
+
+        let mac_list = mac.join(",");
+
+        match all_indices.len() {
+            0 => {
+                info!(
+                    "Creating new static host for {} ({}) with MAC {}",
+                    hostname, ipaddr, mac_list
+                );
+                let new_host = DnsmasqHost {
+                    uuid: Uuid::new_v4().to_string(),
+                    host: hostname.to_string(),
+                    ip: ip_str.into(),
+                    hwaddr: mac_list.into(),
+                    local: MaybeString::from("1"),
+                    ignore: Some(0),
+                    domain: domain_name.into(),
+                    ..Default::default()
+                };
+                hosts.push(new_host);
+            }
+            1 => {
+                let host_index = *all_indices[0];
+                let host_to_modify = &mut hosts[host_index];
+                let host_to_modify_ip = host_to_modify.ip.content_string();
+                if host_to_modify_ip != ip_str {
+                    warn!(
+                        "Hostname '{}' already exists with a different IP ({}). Setting new IP {ip_str}. Appending MAC {}.",
+                        hostname, host_to_modify_ip, mac_list
+                    );
+                    host_to_modify.ip.content = Some(ip_str);
+                } else if host_to_modify.host != hostname {
+                    warn!(
+                        "IP {} already exists with a different hostname ('{}'). Setting hostname to {hostname}. Appending MAC {}.",
+                        ipaddr, host_to_modify.host, mac_list
+                    );
+                    host_to_modify.host = hostname.to_string();
+                }
+
+                for single_mac in mac.iter() {
+                    if !host_to_modify
+                        .hwaddr
+                        .content_string()
+                        .split(',')
+                        .any(|m| m.eq_ignore_ascii_case(single_mac))
+                    {
+                        info!(
+                            "Appending MAC {} to existing static host for {} ({})",
+                            single_mac, host_to_modify.host, host_to_modify_ip
+                        );
+                        let mut updated_macs = host_to_modify.hwaddr.content_string().to_string();
+                        updated_macs.push(',');
+                        updated_macs.push_str(single_mac);
+                        host_to_modify.hwaddr.content = updated_macs.into();
+                    } else {
+                        debug!(
+                            "MAC {} already present in static host entry for {} ({}). No changes made.",
+                            single_mac, host_to_modify.host, host_to_modify_ip
+                        );
+                    }
+                }
+            }
+            _ => {
+                return Err(DhcpError::Configuration(format!(
+                    "Configuration conflict: Found multiple host entries matching IP {} and/or hostname '{}'. Cannot resolve automatically.",
+                    ipaddr, hostname
+                )));
+            }
         }
 
-        let static_map = StaticMap {
-            mac,
-            ipaddr: ipaddr.to_string(),
-            hostname: hostname,
-            ..Default::default()
-        };
-
-        existing_mappings.push(static_map);
         Ok(())
     }
 
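Note (editor's illustration, not part of the diff): with the new signature shown in this hunk (a list of MAC addresses plus a borrowed IP), a call looks roughly like the sketch below. The variable name, addresses, and hostname are placeholders, and the `?` assumes a caller that returns Result<_, DhcpError>.

    let macs = vec![
        "aa:bb:cc:dd:ee:01".to_string(),
        "aa:bb:cc:dd:ee:02".to_string(),
    ];
    dnsmasq_config.add_static_mapping(
        &macs,
        &Ipv4Addr::new(192, 168, 1, 10),
        "worker-1.example.internal", // hostname part becomes `host`, the rest `domain`
    )?;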
@@ -110,13 +206,20 @@ impl<'a> DhcpConfigDnsMasq<'a> {
     /// Retrieves the list of current static mappings by shelling out to `configctl`.
     /// This provides the real-time state from the running system.
     pub async fn get_static_mappings(&self) -> Result<Vec<StaticMap>, Error> {
+        // Note: This command is for the 'dhcpd' service. If dnsmasq uses a different command
+        // or key, this will need to be adjusted.
         let list_static_output = self
             .opnsense_shell
             .exec("configctl dhcpd list static")
             .await?;
 
-        let value: serde_json::Value = serde_json::from_str(&list_static_output)
-            .unwrap_or_else(|_| panic!("Got invalid json from configctl {list_static_output}"));
+        let value: serde_json::Value = serde_json::from_str(&list_static_output).map_err(|e| {
+            Error::Command(format!(
+                "Got invalid json from configctl {list_static_output} : {e}"
+            ))
+        })?;
 
+        // The JSON output key might be 'dhcpd' even when dnsmasq is the backend.
         let static_maps = value["dhcpd"]
             .as_array()
             .ok_or(Error::Command(format!(
@@ -135,6 +238,36 @@ impl<'a> DhcpConfigDnsMasq<'a> {
         Ok(static_maps)
     }
 
+    pub async fn set_dhcp_range(&mut self, start: &str, end: &str) -> Result<(), DhcpError> {
+        let dnsmasq = self.get_dnsmasq();
+        let ranges = &mut dnsmasq.dhcp_ranges;
+
+        // Assuming DnsMasq has dhcp_ranges: Vec<DhcpRange>
+        // Find existing range for "lan" interface
+        if let Some(range) = ranges
+            .iter_mut()
+            .find(|r| r.interface == Some("lan".to_string()))
+        {
+            // Update existing range
+            range.start_addr = Some(start.to_string());
+            range.end_addr = Some(end.to_string());
+        } else {
+            // Create new range
+            let new_range = DhcpRange {
+                uuid: Some(Uuid::new_v4().to_string()),
+                interface: Some("lan".to_string()),
+                start_addr: Some(start.to_string()),
+                end_addr: Some(end.to_string()),
+                domain_type: Some("range".to_string()),
+                nosync: Some(0),
+                ..Default::default()
+            };
+            ranges.push(new_range);
+        }
+
+        Ok(())
+    }
+
     pub async fn set_pxe_options(
         &self,
         tftp_ip: Option<String>,
@@ -142,9 +275,9 @@ impl<'a> DhcpConfigDnsMasq<'a> {
         efi_filename: String,
         ipxe_filename: String,
     ) -> Result<(), DhcpError> {
-        // As of writing this opnsense does not support negative tags, and the dnsmasq config is a
-        // bit complicated anyways. So we are writing directly a dnsmasq config file to
-        // /usr/local/etc/dnsmasq.conf.d
+        // OPNsense does not support negative tags via its API for dnsmasq, and the required
+        // logic is complex. Therefore, we write a configuration file directly to the
+        // dnsmasq.conf.d directory to achieve the desired PXE boot behavior.
         let tftp_str = tftp_ip.map_or(String::new(), |i| format!(",{i},{i}"));
 
         let config = format!(
@@ -185,3 +318,288 @@ dhcp-boot=tag:bios,{bios_filename}{tftp_str}
         Ok(())
     }
 }
+
+#[cfg(test)]
+mod test {
+    use crate::config::DummyOPNSenseShell;
+
+    use super::*;
+    use opnsense_config_xml::OPNsense;
+    use std::net::Ipv4Addr;
+    use std::sync::Arc;
+
+    /// Helper function to create a DnsmasqHost with minimal boilerplate.
+    fn create_host(uuid: &str, host: &str, ip: &str, hwaddr: &str) -> DnsmasqHost {
+        DnsmasqHost {
+            uuid: uuid.to_string(),
+            host: host.to_string(),
+            ip: ip.into(),
+            hwaddr: hwaddr.into(),
+            local: MaybeString::from("1"),
+            ignore: Some(0),
+            ..Default::default()
+        }
+    }
+
+    /// Helper to set up the test environment with an initial OPNsense configuration.
+    fn setup_test_env(initial_hosts: Vec<DnsmasqHost>) -> DhcpConfigDnsMasq<'static> {
+        let opnsense_config = Box::leak(Box::new(OPNsense {
+            dnsmasq: Some(DnsMasq {
+                hosts: initial_hosts,
+                ..Default::default()
+            }),
+            ..Default::default()
+        }));
+
+        DhcpConfigDnsMasq::new(opnsense_config, Arc::new(DummyOPNSenseShell {}))
+    }
+
+    #[test]
+    fn test_add_first_static_mapping() {
+        let mut dhcp_config = setup_test_env(vec![]);
+        let ip = Ipv4Addr::new(192, 168, 1, 10);
+        let mac = "00:11:22:33:44:55";
+        let hostname = "new-host";
+
+        dhcp_config.add_static_mapping(mac, ip, hostname).unwrap();
+
+        let hosts = &dhcp_config.opnsense.dnsmasq.as_ref().unwrap().hosts;
+        assert_eq!(hosts.len(), 1);
+        let host = &hosts[0];
+        assert_eq!(host.host, hostname);
+        assert_eq!(host.ip, ip.to_string().into());
+        assert_eq!(host.hwaddr.content_string(), mac);
+        assert!(Uuid::parse_str(&host.uuid).is_ok());
+    }
+
+    #[test]
+    fn test_hostname_split_into_host_domain() {
+        let mut dhcp_config = setup_test_env(vec![]);
+        let ip = Ipv4Addr::new(192, 168, 1, 10);
+        let mac = "00:11:22:33:44:55";
+        let hostname = "new-host";
+        let domain = "some.domain";
+
+        dhcp_config
+            .add_static_mapping(mac, ip, &format!("{hostname}.{domain}"))
+            .unwrap();
+
+        let hosts = &dhcp_config.opnsense.dnsmasq.as_ref().unwrap().hosts;
+        assert_eq!(hosts.len(), 1);
+        let host = &hosts[0];
+        assert_eq!(host.host, hostname);
+        assert_eq!(host.domain.content_string(), domain);
+        assert_eq!(host.ip, ip.to_string().into());
+        assert_eq!(host.hwaddr.content_string(), mac);
+        assert!(Uuid::parse_str(&host.uuid).is_ok());
+    }
+
+    #[test]
+    fn test_add_mac_to_existing_host_by_ip_and_hostname() {
+        let initial_host = create_host(
+            "uuid-1",
+            "existing-host",
+            "192.168.1.20",
+            "AA:BB:CC:DD:EE:FF",
+        );
+        let mut dhcp_config = setup_test_env(vec![initial_host]);
+        let ip = Ipv4Addr::new(192, 168, 1, 20);
+        let new_mac = "00:11:22:33:44:55";
+        let hostname = "existing-host";
+
+        dhcp_config
+            .add_static_mapping(new_mac, ip, hostname)
+            .unwrap();
+
+        let hosts = &dhcp_config.opnsense.dnsmasq.as_ref().unwrap().hosts;
+        assert_eq!(hosts.len(), 1);
+        let host = &hosts[0];
+        assert_eq!(
+            host.hwaddr.content_string(),
+            "AA:BB:CC:DD:EE:FF,00:11:22:33:44:55"
+        );
+    }
+
+    #[test]
+    fn test_add_mac_to_existing_host_by_ip_only() {
+        let initial_host = create_host(
+            "uuid-1",
+            "existing-host",
+            "192.168.1.20",
+            "AA:BB:CC:DD:EE:FF",
+        );
+        let mut dhcp_config = setup_test_env(vec![initial_host]);
+        let ip = Ipv4Addr::new(192, 168, 1, 20);
+        let new_mac = "00:11:22:33:44:55";
+
+        // Using a different hostname should still find the host by IP and log a warning.
+        dhcp_config
+            .add_static_mapping(new_mac, ip, "different-host-name")
+            .unwrap();
+
+        let hosts = &dhcp_config.opnsense.dnsmasq.as_ref().unwrap().hosts;
+        assert_eq!(hosts.len(), 1);
+        let host = &hosts[0];
+        assert_eq!(
+            host.hwaddr.content_string(),
+            "AA:BB:CC:DD:EE:FF,00:11:22:33:44:55"
+        );
+        assert_eq!(host.host, "existing-host"); // Original hostname should be preserved.
+    }
+
+    #[test]
+    fn test_add_mac_to_existing_host_by_hostname_only() {
+        let initial_host = create_host(
+            "uuid-1",
+            "existing-host",
+            "192.168.1.20",
+            "AA:BB:CC:DD:EE:FF",
+        );
+        let mut dhcp_config = setup_test_env(vec![initial_host]);
+        let new_mac = "00:11:22:33:44:55";
+        let hostname = "existing-host";
+
+        // Using a different IP should still find the host by hostname and log a warning.
+        dhcp_config
+            .add_static_mapping(new_mac, Ipv4Addr::new(192, 168, 1, 99), hostname)
+            .unwrap();
+
+        let hosts = &dhcp_config.opnsense.dnsmasq.as_ref().unwrap().hosts;
+        assert_eq!(hosts.len(), 1);
+        let host = &hosts[0];
+        assert_eq!(
+            host.hwaddr.content_string(),
+            "AA:BB:CC:DD:EE:FF,00:11:22:33:44:55"
+        );
+        assert_eq!(host.ip.content_string(), "192.168.1.20"); // Original IP should be preserved.
+    }
+
+    #[test]
+    fn test_add_duplicate_mac_to_host() {
+        let initial_mac = "AA:BB:CC:DD:EE:FF";
+        let initial_host = create_host("uuid-1", "host-1", "192.168.1.20", initial_mac);
+        let mut dhcp_config = setup_test_env(vec![initial_host]);
+
+        dhcp_config
+            .add_static_mapping(initial_mac, Ipv4Addr::new(192, 168, 1, 20), "host-1")
+            .unwrap();
+
+        let hosts = &dhcp_config.opnsense.dnsmasq.as_ref().unwrap().hosts;
+        assert_eq!(hosts.len(), 1);
+        assert_eq!(hosts[0].hwaddr.content_string(), initial_mac); // No change, no duplication.
+    }
+
+    #[test]
+    fn test_add_invalid_mac_address() {
+        let mut dhcp_config = setup_test_env(vec![]);
+        let result =
+            dhcp_config.add_static_mapping("invalid-mac", Ipv4Addr::new(10, 0, 0, 1), "host");
+        assert!(matches!(result, Err(DhcpError::InvalidMacAddress(_))));
+    }
+
+    #[test]
+    fn test_error_on_conflicting_ip_and_hostname() {
+        let host_a = create_host("uuid-a", "host-a", "192.168.1.10", "AA:AA:AA:AA:AA:AA");
+        let host_b = create_host("uuid-b", "host-b", "192.168.1.20", "BB:BB:BB:BB:BB:BB");
+        let mut dhcp_config = setup_test_env(vec![host_a, host_b]);
+
+        let result = dhcp_config.add_static_mapping(
+            "CC:CC:CC:CC:CC:CC",
+            Ipv4Addr::new(192, 168, 1, 10),
+            "host-b",
+        );
+        // This IP belongs to host-a, but the hostname belongs to host-b.
+        assert_eq!(result, Err(DhcpError::Configuration("Configuration conflict: IP 192.168.1.10 and hostname 'host-b' exist, but in different static host entries.".to_string())));
+    }
+
+    #[test]
+    fn test_error_on_multiple_ip_matches() {
+        let host_a = create_host("uuid-a", "host-a", "192.168.1.30", "AA:AA:AA:AA:AA:AA");
+        let host_b = create_host("uuid-b", "host-b", "192.168.1.30", "BB:BB:BB:BB:BB:BB");
+        let mut dhcp_config = setup_test_env(vec![host_a, host_b]);
+
+        // This IP is ambiguous.
+        let result = dhcp_config.add_static_mapping(
+            "CC:CC:CC:CC:CC:CC",
+            Ipv4Addr::new(192, 168, 1, 30),
+            "new-host",
+        );
+        assert_eq!(result, Err(DhcpError::Configuration("Configuration conflict: Found multiple host entries matching IP 192.168.1.30 and/or hostname 'new-host'. Cannot resolve automatically.".to_string())));
+    }
+
+    #[test]
+    fn test_remove_mac_from_multi_mac_host() {
+        let host = create_host("uuid-1", "host-1", "192.168.1.50", "mac-1,mac-2,mac-3");
+        let mut dhcp_config = setup_test_env(vec![host]);
+
+        dhcp_config.remove_static_mapping("mac-2");
+
+        let hosts = &dhcp_config.opnsense.dnsmasq.as_ref().unwrap().hosts;
+        assert_eq!(hosts.len(), 1);
+        assert_eq!(hosts[0].hwaddr.content_string(), "mac-1,mac-3");
+    }
+
+    #[test]
+    fn test_remove_last_mac_from_host() {
+        let host = create_host("uuid-1", "host-1", "192.168.1.50", "mac-1");
+        let mut dhcp_config = setup_test_env(vec![host]);
+
+        dhcp_config.remove_static_mapping("mac-1");
+
+        let hosts = &dhcp_config.opnsense.dnsmasq.as_ref().unwrap().hosts;
+        assert!(hosts.is_empty());
+    }
+
+    #[test]
+    fn test_remove_non_existent_mac() {
+        let host = create_host("uuid-1", "host-1", "192.168.1.50", "mac-1,mac-2");
+        let mut dhcp_config = setup_test_env(vec![host.clone()]);
+
+        dhcp_config.remove_static_mapping("mac-nonexistent");
+
+        let hosts = &dhcp_config.opnsense.dnsmasq.as_ref().unwrap().hosts;
+        assert_eq!(hosts.len(), 1);
+        assert_eq!(hosts[0], host); // The host should be unchanged.
+    }
+
+    #[test]
+    fn test_remove_mac_case_insensitively() {
+        let host = create_host("uuid-1", "host-1", "192.168.1.50", "AA:BB:CC:DD:EE:FF");
+        let mut dhcp_config = setup_test_env(vec![host]);
+
+        dhcp_config.remove_static_mapping("aa:bb:cc:dd:ee:ff");
+
+        let hosts = &dhcp_config.opnsense.dnsmasq.as_ref().unwrap().hosts;
+        assert!(hosts.is_empty());
+    }
+
+    #[test]
+    fn test_remove_mac_from_correct_host_only() {
+        let host1 = create_host(
+            "uuid-1",
+            "host-1",
+            "192.168.1.50",
+            "AA:AA:AA:AA:AA:AA,BB:BB:BB:BB:BB:BB",
+        );
+        let host2 = create_host(
+            "uuid-2",
+            "host-2",
+            "192.168.1.51",
+            "CC:CC:CC:CC:CC:CC,DD:DD:DD:DD:DD:DD",
+        );
+        let mut dhcp_config = setup_test_env(vec![host1.clone(), host2.clone()]);
+
+        dhcp_config.remove_static_mapping("AA:AA:AA:AA:AA:AA");
+
+        let hosts = &dhcp_config.opnsense.dnsmasq.as_ref().unwrap().hosts;
+        assert_eq!(hosts.len(), 2);
+        let updated_host1 = hosts.iter().find(|h| h.uuid == "uuid-1").unwrap();
+        let unchanged_host2 = hosts.iter().find(|h| h.uuid == "uuid-2").unwrap();
+
+        assert_eq!(updated_host1.hwaddr.content_string(), "BB:BB:BB:BB:BB:BB");
+        assert_eq!(
+            unchanged_host2.hwaddr.content_string(),
+            "CC:CC:CC:CC:CC:CC,DD:DD:DD:DD:DD:DD"
+        );
+    }
+}
opnsense-config/src/tests/data/config-25.7-dnsmasq-static-host.xml (new file, 1674 lines added; diff suppressed because it is too large)