Compare commits

...

8 Commits

Author SHA1 Message Date
22875fe8f3 fix: updated test xml structures to match new fields added to opnsense
All checks were successful
Run Check Script / check (pull_request) Successful in 1m32s
2025-12-17 15:00:48 -05:00
c6f859f973 fix(OPNSense): update fields for haproxy and opnsense following the most recent update and upgrade to opnsense
Some checks failed
Run Check Script / check (pull_request) Failing after 1m25s
2025-12-16 15:31:35 -05:00
bbf28a1a28 Merge branch 'master' into fix/opnsense_update
Some checks failed
Run Check Script / check (pull_request) Failing after 1m21s
2025-12-16 20:00:54 +00:00
bfdb11b217 Merge pull request 'feat(OKDInstallation): Implemented bootstrap of okd worker node, added features to allow both control plane and worker node to use the same bootstrap_okd_node score' (#198) from feat/okd-nodes into master
Some checks failed
Run Check Script / check (push) Successful in 1m57s
Compile and package harmony_composer / package_harmony_composer (push) Failing after 2m59s
Reviewed-on: #198
Reviewed-by: johnride <jg@nationtech.io>
2025-12-10 19:27:51 +00:00
d5fadf4f44 fix: deleted storage node role, fixed erroneous comment, modified score name to be in line with clean code naming conventions, fixed how the OKDNodeInstallationScore is called via OKDSetup03ControlPlaneScore and OKDSetup04WorkersScore
All checks were successful
Run Check Script / check (pull_request) Successful in 1m45s
2025-12-10 14:20:24 -05:00
50bd5c5bba feat(OKDInstallation): Implemented bootstrap of okd worker node, added features to allow both control plane and worker node to use the same bootstrap_okd_node score
All checks were successful
Run Check Script / check (pull_request) Successful in 1m46s
2025-12-10 12:15:07 -05:00
43a17811cc fix formatting
Some checks failed
Run Check Script / check (pull_request) Failing after 1m49s
2025-11-14 12:53:43 -05:00
29c82db70d fix: added missing fields for haproxy after the most recent update
Some checks failed
Run Check Script / check (pull_request) Failing after 49s
2025-11-12 13:21:55 -05:00
21 changed files with 460 additions and 302 deletions

Cargo.lock generated
View File

@@ -6049,6 +6049,21 @@ version = "0.5.1"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8f50febec83f5ee1df3015341d8bd429f2d1cc62bcba7ea2076759d315084683" checksum = "8f50febec83f5ee1df3015341d8bd429f2d1cc62bcba7ea2076759d315084683"
[[package]]
name = "test-score"
version = "0.1.0"
dependencies = [
"base64 0.22.1",
"env_logger",
"harmony",
"harmony_cli",
"harmony_macros",
"harmony_types",
"log",
"tokio",
"url",
]
[[package]] [[package]]
name = "thiserror" name = "thiserror"
version = "1.0.69" version = "1.0.69"

View File

@@ -1,4 +1,6 @@
 mod repository;
+use std::fmt;
+
 pub use repository::*;
 
 #[derive(Debug, new, Clone)]
@@ -69,5 +71,14 @@ pub enum HostRole {
     Bootstrap,
     ControlPlane,
     Worker,
-    Storage,
+}
+
+impl fmt::Display for HostRole {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        match self {
+            HostRole::Bootstrap => write!(f, "Bootstrap"),
+            HostRole::ControlPlane => write!(f, "ControlPlane"),
+            HostRole::Worker => write!(f, "Worker"),
+        }
+    }
 }
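
The manual Display impl above makes a role format as its variant name, which the generic OKD node interpret added later in this diff relies on for its log prefixes; a one-line illustration (not part of the diff):

    assert_eq!(HostRole::Worker.to_string(), "Worker");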

View File

@@ -1,20 +1,8 @@
 use crate::{
-    data::Version,
-    hardware::PhysicalHost,
-    infra::inventory::InventoryRepositoryFactory,
-    interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
-    inventory::{HostRole, Inventory},
-    modules::{
-        dhcp::DhcpHostBindingScore, http::IPxeMacBootFileScore,
-        inventory::DiscoverHostForRoleScore, okd::templates::BootstrapIpxeTpl,
-    },
-    score::Score,
-    topology::{HAClusterTopology, HostBinding},
+    interpret::Interpret, inventory::HostRole, modules::okd::bootstrap_okd_node::OKDNodeInterpret,
+    score::Score, topology::HAClusterTopology,
 };
-use async_trait::async_trait;
 use derive_new::new;
-use harmony_types::id::Id;
-use log::{debug, info};
 use serde::Serialize;
 
 // -------------------------------------------------------------------------------------------------
@@ -28,226 +16,13 @@ pub struct OKDSetup03ControlPlaneScore {}
 impl Score<HAClusterTopology> for OKDSetup03ControlPlaneScore {
     fn create_interpret(&self) -> Box<dyn Interpret<HAClusterTopology>> {
-        Box::new(OKDSetup03ControlPlaneInterpret::new())
+        // TODO: Implement a step to wait for the control plane nodes to join the cluster
+        // and for the cluster operators to become available. This would be similar to
+        // the `wait-for bootstrap-complete` command.
+        Box::new(OKDNodeInterpret::new(HostRole::ControlPlane))
     }
 
     fn name(&self) -> String {
         "OKDSetup03ControlPlaneScore".to_string()
     }
 }
-
-#[derive(Debug, Clone)]
-pub struct OKDSetup03ControlPlaneInterpret {
-    version: Version,
-    status: InterpretStatus,
-}
-
-impl OKDSetup03ControlPlaneInterpret {
-    pub fn new() -> Self {
-        let version = Version::from("1.0.0").unwrap();
-        Self {
-            version,
-            status: InterpretStatus::QUEUED,
-        }
-    }
-
-    /// Ensures that three physical hosts are discovered and available for the ControlPlane role.
-    /// It will trigger discovery if not enough hosts are found.
-    async fn get_nodes(
-        &self,
-        inventory: &Inventory,
-        topology: &HAClusterTopology,
-    ) -> Result<Vec<PhysicalHost>, InterpretError> {
-        const REQUIRED_HOSTS: usize = 3;
-        let repo = InventoryRepositoryFactory::build().await?;
-        let mut control_plane_hosts = repo.get_host_for_role(&HostRole::ControlPlane).await?;
-
-        while control_plane_hosts.len() < REQUIRED_HOSTS {
-            info!(
-                "Discovery of {} control plane hosts in progress, current number {}",
-                REQUIRED_HOSTS,
-                control_plane_hosts.len()
-            );
-            // This score triggers the discovery agent for a specific role.
-            DiscoverHostForRoleScore {
-                role: HostRole::ControlPlane,
-            }
-            .interpret(inventory, topology)
-            .await?;
-            control_plane_hosts = repo.get_host_for_role(&HostRole::ControlPlane).await?;
-        }
-
-        if control_plane_hosts.len() < REQUIRED_HOSTS {
-            Err(InterpretError::new(format!(
-                "OKD Requires at least {} control plane hosts, but only found {}. Cannot proceed.",
-                REQUIRED_HOSTS,
-                control_plane_hosts.len()
-            )))
-        } else {
-            // Take exactly the number of required hosts to ensure consistency.
-            Ok(control_plane_hosts
-                .into_iter()
-                .take(REQUIRED_HOSTS)
-                .collect())
-        }
-    }
-
-    /// Configures DHCP host bindings for all control plane nodes.
-    async fn configure_host_binding(
-        &self,
-        inventory: &Inventory,
-        topology: &HAClusterTopology,
-        nodes: &Vec<PhysicalHost>,
-    ) -> Result<(), InterpretError> {
-        info!("[ControlPlane] Configuring host bindings for control plane nodes.");
-
-        // Ensure the topology definition matches the number of physical nodes found.
-        if topology.control_plane.len() != nodes.len() {
-            return Err(InterpretError::new(format!(
-                "Mismatch between logical control plane hosts defined in topology ({}) and physical nodes found ({}).",
-                topology.control_plane.len(),
-                nodes.len()
-            )));
-        }
-
-        // Create a binding for each physical host to its corresponding logical host.
-        let bindings: Vec<HostBinding> = topology
-            .control_plane
-            .iter()
-            .zip(nodes.iter())
-            .map(|(logical_host, physical_host)| {
-                info!(
-                    "Creating binding: Logical Host '{}' -> Physical Host ID '{}'",
-                    logical_host.name, physical_host.id
-                );
-                HostBinding {
-                    logical_host: logical_host.clone(),
-                    physical_host: physical_host.clone(),
-                }
-            })
-            .collect();
-
-        DhcpHostBindingScore {
-            host_binding: bindings,
-            domain: Some(topology.domain_name.clone()),
-        }
-        .interpret(inventory, topology)
-        .await?;
-
-        Ok(())
-    }
-
-    /// Renders and deploys a per-MAC iPXE boot file for each control plane node.
-    async fn configure_ipxe(
-        &self,
-        inventory: &Inventory,
-        topology: &HAClusterTopology,
-        nodes: &Vec<PhysicalHost>,
-    ) -> Result<(), InterpretError> {
-        info!("[ControlPlane] Rendering per-MAC iPXE configurations.");
-
-        // The iPXE script content is the same for all control plane nodes,
-        // pointing to the 'master.ign' ignition file.
-        let content = BootstrapIpxeTpl {
-            http_ip: &topology.http_server.get_ip().to_string(),
-            scos_path: "scos",
-            ignition_http_path: "okd_ignition_files",
-            installation_device: "/dev/sda", // This might need to be configurable per-host in the future
-            ignition_file_name: "master.ign", // Control plane nodes use the master ignition file
-        }
-        .to_string();
-
-        debug!("[ControlPlane] iPXE content template:\n{content}");
-
-        // Create and apply an iPXE boot file for each node.
-        for node in nodes {
-            let mac_address = node.get_mac_address();
-            if mac_address.is_empty() {
-                return Err(InterpretError::new(format!(
-                    "Physical host with ID '{}' has no MAC addresses defined.",
-                    node.id
-                )));
-            }
-            info!(
-                "[ControlPlane] Applying iPXE config for node ID '{}' with MACs: {:?}",
-                node.id, mac_address
-            );
-            IPxeMacBootFileScore {
-                mac_address,
-                content: content.clone(),
-            }
-            .interpret(inventory, topology)
-            .await?;
-        }
-
-        Ok(())
-    }
-
-    /// Prompts the user to reboot the target control plane nodes.
-    async fn reboot_targets(&self, nodes: &Vec<PhysicalHost>) -> Result<(), InterpretError> {
-        let node_ids: Vec<String> = nodes.iter().map(|n| n.id.to_string()).collect();
-        info!("[ControlPlane] Requesting reboot for control plane nodes: {node_ids:?}",);
-
-        let confirmation = inquire::Confirm::new(
-            &format!("Please reboot the {} control plane nodes ({}) to apply their PXE configuration. Press enter when ready.", nodes.len(), node_ids.join(", ")),
-        )
-        .prompt()
-        .map_err(|e| InterpretError::new(format!("User prompt failed: {e}")))?;
-
-        if !confirmation {
-            return Err(InterpretError::new(
-                "User aborted the operation.".to_string(),
-            ));
-        }
-        Ok(())
-    }
-}
-
-#[async_trait]
-impl Interpret<HAClusterTopology> for OKDSetup03ControlPlaneInterpret {
-    fn get_name(&self) -> InterpretName {
-        InterpretName::Custom("OKDSetup03ControlPlane")
-    }
-
-    fn get_version(&self) -> Version {
-        self.version.clone()
-    }
-
-    fn get_status(&self) -> InterpretStatus {
-        self.status.clone()
-    }
-
-    fn get_children(&self) -> Vec<Id> {
-        vec![]
-    }
-
-    async fn execute(
-        &self,
-        inventory: &Inventory,
-        topology: &HAClusterTopology,
-    ) -> Result<Outcome, InterpretError> {
-        // 1. Ensure we have 3 physical hosts for the control plane.
-        let nodes = self.get_nodes(inventory, topology).await?;
-
-        // 2. Create DHCP reservations for the control plane nodes.
-        self.configure_host_binding(inventory, topology, &nodes)
-            .await?;
-
-        // 3. Create iPXE files for each control plane node to boot from the master ignition.
-        self.configure_ipxe(inventory, topology, &nodes).await?;
-
-        // 4. Reboot the nodes to start the OS installation.
-        self.reboot_targets(&nodes).await?;
-
-        // TODO: Implement a step to wait for the control plane nodes to join the cluster
-        // and for the cluster operators to become available. This would be similar to
-        // the `wait-for bootstrap-complete` command.
-        info!("[ControlPlane] Provisioning initiated. Monitor the cluster convergence manually.");
-
-        Ok(Outcome::success(
-            "Control plane provisioning has been successfully initiated.".into(),
-        ))
-    }
-}

View File

@@ -1,15 +1,9 @@
-use async_trait::async_trait;
 use derive_new::new;
-use harmony_types::id::Id;
-use log::info;
 use serde::Serialize;
 
 use crate::{
-    data::Version,
-    interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
-    inventory::Inventory,
-    score::Score,
-    topology::HAClusterTopology,
+    interpret::Interpret, inventory::HostRole, modules::okd::bootstrap_okd_node::OKDNodeInterpret,
+    score::Score, topology::HAClusterTopology,
 };
 
 // -------------------------------------------------------------------------------------------------
@@ -23,61 +17,10 @@ pub struct OKDSetup04WorkersScore {}
 impl Score<HAClusterTopology> for OKDSetup04WorkersScore {
     fn create_interpret(&self) -> Box<dyn Interpret<HAClusterTopology>> {
-        Box::new(OKDSetup04WorkersInterpret::new(self.clone()))
+        Box::new(OKDNodeInterpret::new(HostRole::Worker))
     }
 
     fn name(&self) -> String {
         "OKDSetup04WorkersScore".to_string()
     }
 }
-
-#[derive(Debug, Clone)]
-pub struct OKDSetup04WorkersInterpret {
-    score: OKDSetup04WorkersScore,
-    version: Version,
-    status: InterpretStatus,
-}
-
-impl OKDSetup04WorkersInterpret {
-    pub fn new(score: OKDSetup04WorkersScore) -> Self {
-        let version = Version::from("1.0.0").unwrap();
-        Self {
-            version,
-            score,
-            status: InterpretStatus::QUEUED,
-        }
-    }
-
-    async fn render_and_reboot(&self) -> Result<(), InterpretError> {
-        info!("[Workers] Rendering per-MAC PXE for workers and rebooting");
-        Ok(())
-    }
-}
-
-#[async_trait]
-impl Interpret<HAClusterTopology> for OKDSetup04WorkersInterpret {
-    fn get_name(&self) -> InterpretName {
-        InterpretName::Custom("OKDSetup04Workers")
-    }
-
-    fn get_version(&self) -> Version {
-        self.version.clone()
-    }
-
-    fn get_status(&self) -> InterpretStatus {
-        self.status.clone()
-    }
-
-    fn get_children(&self) -> Vec<Id> {
-        vec![]
-    }
-
-    async fn execute(
-        &self,
-        _inventory: &Inventory,
-        _topology: &HAClusterTopology,
-    ) -> Result<Outcome, InterpretError> {
-        self.render_and_reboot().await?;
-        Ok(Outcome::success("Workers provisioned".into()))
-    }
-}

View File

@@ -0,0 +1,303 @@
use async_trait::async_trait;
use derive_new::new;
use harmony_types::id::Id;
use log::{debug, info};
use serde::Serialize;

use crate::{
    data::Version,
    hardware::PhysicalHost,
    infra::inventory::InventoryRepositoryFactory,
    interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
    inventory::{HostRole, Inventory},
    modules::{
        dhcp::DhcpHostBindingScore,
        http::IPxeMacBootFileScore,
        inventory::DiscoverHostForRoleScore,
        okd::{
            okd_node::{
                BootstrapRole, ControlPlaneRole, OKDRoleProperties, StorageRole, WorkerRole,
            },
            templates::BootstrapIpxeTpl,
        },
    },
    score::Score,
    topology::{HAClusterTopology, HostBinding, LogicalHost},
};

#[derive(Debug, Clone, Serialize, new)]
pub struct OKDNodeInstallationScore {
    host_role: HostRole,
}

impl Score<HAClusterTopology> for OKDNodeInstallationScore {
    fn name(&self) -> String {
        "OKDNodeScore".to_string()
    }

    fn create_interpret(&self) -> Box<dyn Interpret<HAClusterTopology>> {
        Box::new(OKDNodeInterpret::new(self.host_role.clone()))
    }
}
#[derive(Debug, Clone)]
pub struct OKDNodeInterpret {
    host_role: HostRole,
}

impl OKDNodeInterpret {
    pub fn new(host_role: HostRole) -> Self {
        Self { host_role }
    }

    fn okd_role_properties(&self, role: &HostRole) -> &'static dyn OKDRoleProperties {
        match role {
            HostRole::Bootstrap => &BootstrapRole,
            HostRole::ControlPlane => &ControlPlaneRole,
            HostRole::Worker => &WorkerRole,
        }
    }

    async fn get_nodes(
        &self,
        inventory: &Inventory,
        topology: &HAClusterTopology,
    ) -> Result<Vec<PhysicalHost>, InterpretError> {
        let repo = InventoryRepositoryFactory::build().await?;
        let mut hosts = repo.get_host_for_role(&self.host_role).await?;
        let okd_host_properties = self.okd_role_properties(&self.host_role);
        let required_hosts: usize = okd_host_properties.required_hosts();

        while hosts.len() < required_hosts {
            info!(
                "Discovery of {} {} hosts in progress, current number {}",
                required_hosts,
                self.host_role,
                hosts.len()
            );
            // This score triggers the discovery agent for a specific role.
            DiscoverHostForRoleScore {
                role: self.host_role.clone(),
            }
            .interpret(inventory, topology)
            .await?;
            hosts = repo.get_host_for_role(&self.host_role).await?;
        }

        if hosts.len() < required_hosts {
            Err(InterpretError::new(format!(
                "OKD requires at least {} {} hosts, but only found {}. Cannot proceed.",
                required_hosts,
                self.host_role,
                hosts.len()
            )))
        } else {
            // Take exactly the number of required hosts to ensure consistency.
            Ok(hosts.into_iter().take(required_hosts).collect())
        }
    }
    /// Configures DHCP host bindings for all nodes.
    async fn configure_host_binding(
        &self,
        inventory: &Inventory,
        topology: &HAClusterTopology,
        nodes: &Vec<PhysicalHost>,
    ) -> Result<(), InterpretError> {
        info!("[{}] Configuring host bindings for {} nodes.", self.host_role, self.host_role);

        let host_properties = self.okd_role_properties(&self.host_role);
        self.validate_host_node_match(nodes, host_properties.logical_hosts(topology))?;
        let bindings: Vec<HostBinding> =
            self.host_bindings(nodes, host_properties.logical_hosts(topology));

        DhcpHostBindingScore {
            host_binding: bindings,
            domain: Some(topology.domain_name.clone()),
        }
        .interpret(inventory, topology)
        .await?;

        Ok(())
    }

    // Ensure the topology definition matches the number of physical nodes found.
    fn validate_host_node_match(
        &self,
        nodes: &Vec<PhysicalHost>,
        hosts: &Vec<LogicalHost>,
    ) -> Result<(), InterpretError> {
        if hosts.len() != nodes.len() {
            return Err(InterpretError::new(format!(
                "Mismatch between logical hosts defined in topology ({}) and physical nodes found ({}).",
                hosts.len(),
                nodes.len()
            )));
        }
        Ok(())
    }

    // Create a binding for each physical host to its corresponding logical host.
    fn host_bindings(
        &self,
        nodes: &Vec<PhysicalHost>,
        hosts: &Vec<LogicalHost>,
    ) -> Vec<HostBinding> {
        hosts
            .iter()
            .zip(nodes.iter())
            .map(|(logical_host, physical_host)| {
                info!(
                    "Creating binding: Logical Host '{}' -> Physical Host ID '{}'",
                    logical_host.name, physical_host.id
                );
                HostBinding {
                    logical_host: logical_host.clone(),
                    physical_host: physical_host.clone(),
                }
            })
            .collect()
    }
    /// Renders and deploys a per-MAC iPXE boot file for each node.
    async fn configure_ipxe(
        &self,
        inventory: &Inventory,
        topology: &HAClusterTopology,
        nodes: &Vec<PhysicalHost>,
    ) -> Result<(), InterpretError> {
        info!("[{}] Rendering per-MAC iPXE configurations.", self.host_role);

        let okd_role_properties = self.okd_role_properties(&self.host_role);

        // The iPXE script content is the same for every node of a given role,
        // pointing to that role's ignition file.
        let content = BootstrapIpxeTpl {
            http_ip: &topology.http_server.get_ip().to_string(),
            scos_path: "scos",
            ignition_http_path: "okd_ignition_files",
            // TODO: must be refactored to not only use /dev/sda
            installation_device: "/dev/sda", // This might need to be configurable per-host in the future
            ignition_file_name: okd_role_properties.ignition_file(),
        }
        .to_string();

        debug!("[{}] iPXE content template:\n{content}", self.host_role);

        // Create and apply an iPXE boot file for each node.
        for node in nodes {
            let mac_address = node.get_mac_address();
            if mac_address.is_empty() {
                return Err(InterpretError::new(format!(
                    "Physical host with ID '{}' has no MAC addresses defined.",
                    node.id
                )));
            }
            info!(
                "[{}] Applying iPXE config for node ID '{}' with MACs: {:?}",
                self.host_role, node.id, mac_address
            );
            IPxeMacBootFileScore {
                mac_address,
                content: content.clone(),
            }
            .interpret(inventory, topology)
            .await?;
        }

        Ok(())
    }
    /// Prompts the user to reboot the target nodes.
    async fn reboot_targets(&self, nodes: &Vec<PhysicalHost>) -> Result<(), InterpretError> {
        let node_ids: Vec<String> = nodes.iter().map(|n| n.id.to_string()).collect();
        info!("[{}] Requesting reboot for nodes: {node_ids:?}", self.host_role);

        let confirmation = inquire::Confirm::new(
            &format!("Please reboot the {} {} nodes ({}) to apply their PXE configuration. Press enter when ready.", nodes.len(), self.host_role, node_ids.join(", ")),
        )
        .prompt()
        .map_err(|e| InterpretError::new(format!("User prompt failed: {e}")))?;

        if !confirmation {
            return Err(InterpretError::new(
                "User aborted the operation.".to_string(),
            ));
        }
        Ok(())
    }
}
#[async_trait]
impl Interpret<HAClusterTopology> for OKDNodeInterpret {
    async fn execute(
        &self,
        inventory: &Inventory,
        topology: &HAClusterTopology,
    ) -> Result<Outcome, InterpretError> {
        // 1. Ensure we have the specified number of physical hosts.
        let nodes = self.get_nodes(inventory, topology).await?;

        // 2. Create DHCP reservations for the nodes.
        self.configure_host_binding(inventory, topology, &nodes)
            .await?;

        // 3. Create iPXE files for each node to boot from the ignition.
        self.configure_ipxe(inventory, topology, &nodes).await?;

        // 4. Reboot the nodes to start the OS installation.
        self.reboot_targets(&nodes).await?;

        // TODO: Implement a step to validate that the installation of the nodes is
        // complete and for the cluster operators to become available.
        //
        // The OpenShift installer only provides two wait commands which currently need to be
        // run manually:
        // - `openshift-install wait-for bootstrap-complete`
        // - `openshift-install wait-for install-complete`
        //
        // There is no installer command that waits specifically for worker node
        // provisioning. Worker nodes join asynchronously (via ignition + CSR approval),
        // and the cluster becomes fully functional only once all nodes are Ready and the
        // cluster operators report Available=True.
        info!(
            "[{}] Provisioning initiated. Monitor the cluster convergence manually.",
            self.host_role
        );

        Ok(Outcome::success(format!(
            "{} provisioning has been successfully initiated.",
            self.host_role
        )))
    }

    fn get_name(&self) -> InterpretName {
        InterpretName::Custom("OKDNodeSetup")
    }

    fn get_version(&self) -> Version {
        todo!()
    }

    fn get_status(&self) -> InterpretStatus {
        todo!()
    }

    fn get_children(&self) -> Vec<Id> {
        todo!()
    }
}
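
Both OKD setup scores shown earlier now delegate to this single interpret; a minimal sketch of that call pattern (types and constructors as in this diff, surrounding runtime wiring assumed):

    // Hypothetical call sites, mirroring OKDSetup03ControlPlaneScore and OKDSetup04WorkersScore:
    let control_plane: Box<dyn Interpret<HAClusterTopology>> =
        Box::new(OKDNodeInterpret::new(HostRole::ControlPlane));
    let workers: Box<dyn Interpret<HAClusterTopology>> =
        Box::new(OKDNodeInterpret::new(HostRole::Worker));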

View File

@@ -6,12 +6,14 @@ mod bootstrap_05_sanity_check;
 mod bootstrap_06_installation_report;
 pub mod bootstrap_dhcp;
 pub mod bootstrap_load_balancer;
+pub mod bootstrap_okd_node;
 mod bootstrap_persist_network_bond;
 pub mod dhcp;
 pub mod dns;
 pub mod installation;
 pub mod ipxe;
 pub mod load_balancer;
+pub mod okd_node;
 pub mod templates;
 pub mod upgrade;
 
 pub use bootstrap_01_prepare::*;

View File

@@ -0,0 +1,54 @@
use crate::topology::{HAClusterTopology, LogicalHost};

pub trait OKDRoleProperties {
    fn ignition_file(&self) -> &'static str;
    fn required_hosts(&self) -> usize;
    fn logical_hosts<'a>(&self, t: &'a HAClusterTopology) -> &'a Vec<LogicalHost>;
}

pub struct BootstrapRole;
pub struct ControlPlaneRole;
pub struct WorkerRole;
pub struct StorageRole;

impl OKDRoleProperties for BootstrapRole {
    fn ignition_file(&self) -> &'static str {
        "bootstrap.ign"
    }

    fn required_hosts(&self) -> usize {
        1
    }

    fn logical_hosts<'a>(&self, t: &'a HAClusterTopology) -> &'a Vec<LogicalHost> {
        todo!()
    }
}

impl OKDRoleProperties for ControlPlaneRole {
    fn ignition_file(&self) -> &'static str {
        "master.ign"
    }

    fn required_hosts(&self) -> usize {
        3
    }

    fn logical_hosts<'a>(&self, t: &'a HAClusterTopology) -> &'a Vec<LogicalHost> {
        &t.control_plane
    }
}

impl OKDRoleProperties for WorkerRole {
    fn ignition_file(&self) -> &'static str {
        "worker.ign"
    }

    fn required_hosts(&self) -> usize {
        2
    }

    fn logical_hosts<'a>(&self, t: &'a HAClusterTopology) -> &'a Vec<LogicalHost> {
        &t.workers
    }
}
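
Because each role struct is zero-sized, `okd_role_properties` in bootstrap_okd_node.rs can hand out `&'static` references to them; a short illustration of the trait in use (not part of the diff):

    let props: &'static dyn OKDRoleProperties = &ControlPlaneRole;
    assert_eq!(props.ignition_file(), "master.ign");
    assert_eq!(props.required_hosts(), 3);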

View File

@@ -106,11 +106,37 @@ pub struct HAProxy {
     pub groups: MaybeString,
     pub users: MaybeString,
     pub cpus: MaybeString,
-    pub resolvers: MaybeString,
+    pub resolvers: HAProxyResolvers,
     pub mailers: MaybeString,
     pub maintenance: Maintenance,
 }
 
+#[derive(Default, PartialEq, Debug, YaSerialize, YaDeserialize)]
+pub struct HAProxyResolvers {
+    #[yaserde(rename = "resolver")]
+    pub resolver: Option<Resolver>,
+}
+
+#[derive(Default, PartialEq, Debug, YaSerialize, YaDeserialize)]
+pub struct Resolver {
+    pub id: String,
+    pub enabled: i32,
+    pub name: String,
+    pub description: MaybeString,
+    pub nameservers: String,
+    pub parse_resolv_conf: String,
+    pub resolve_retries: i32,
+    pub timeout_resolve: String,
+    pub timeout_retry: String,
+    pub accepted_payload_size: MaybeString,
+    pub hold_valid: MaybeString,
+    pub hold_obsolete: MaybeString,
+    pub hold_refused: MaybeString,
+    pub hold_nx: MaybeString,
+    pub hold_timeout: MaybeString,
+    pub hold_other: MaybeString,
+}
+
 #[derive(Default, PartialEq, Debug, YaSerialize, YaDeserialize)]
 pub struct Maintenance {
     #[yaserde(rename = "cronjobs")]
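
For orientation, the `<resolvers>` element that the new structs deserialize would look roughly like this in an OPNsense config export (values illustrative, not taken from the fixtures in this PR):

    <resolvers>
      <resolver>
        <id>resolver-1</id>
        <enabled>1</enabled>
        <name>default</name>
        <description/>
        <nameservers>192.168.1.1</nameservers>
        <parse_resolv_conf>0</parse_resolv_conf>
        <resolve_retries>3</resolve_retries>
        <timeout_resolve>1s</timeout_resolve>
        <timeout_retry>1s</timeout_retry>
        <accepted_payload_size/>
        <hold_valid/>
        <hold_obsolete/>
        <hold_refused/>
        <hold_nx/>
        <hold_timeout/>
        <hold_other/>
      </resolver>
    </resolvers>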

View File

@@ -136,6 +136,7 @@ pub struct Rule {
     pub updated: Option<Updated>,
     pub created: Option<Created>,
     pub disabled: Option<MaybeString>,
+    pub log: Option<u32>,
 }
 
 #[derive(Default, PartialEq, Debug, YaSerialize, YaDeserialize)]
@@ -216,7 +217,7 @@ pub struct System {
     pub maximumfrags: Option<MaybeString>,
     pub aliasesresolveinterval: Option<MaybeString>,
     pub maximumtableentries: Option<MaybeString>,
-    pub language: String,
+    pub language: Option<String>,
     pub dnsserver: Option<MaybeString>,
     pub dns1gw: Option<String>,
     pub dns2gw: Option<String>,
@@ -1140,6 +1141,7 @@ pub struct UnboundGeneral {
     pub local_zone_type: String,
     pub outgoing_interface: MaybeString,
     pub enable_wpad: MaybeString,
+    pub safesearch: MaybeString,
 }
 
 #[derive(Default, PartialEq, Debug, YaSerialize, YaDeserialize)]
@@ -1193,15 +1195,15 @@ pub struct Acls {
 #[derive(Default, PartialEq, Debug, YaSerialize, YaDeserialize)]
 pub struct Dnsbl {
-    pub enabled: i32,
-    pub safesearch: MaybeString,
+    pub enabled: Option<i32>,
+    pub safesearch: Option<MaybeString>,
     #[yaserde(rename = "type")]
-    pub r#type: MaybeString,
-    pub lists: MaybeString,
-    pub whitelists: MaybeString,
-    pub blocklists: MaybeString,
-    pub wildcards: MaybeString,
-    pub address: MaybeString,
+    pub r#type: Option<MaybeString>,
+    pub lists: Option<MaybeString>,
+    pub whitelists: Option<MaybeString>,
+    pub blocklists: Option<MaybeString>,
+    pub wildcards: Option<MaybeString>,
+    pub address: Option<MaybeString>,
     pub nxdomain: Option<i32>,
 }
@@ -1229,6 +1231,7 @@ pub struct Host {
     pub ttl: Option<MaybeString>,
     pub server: String,
     pub description: Option<String>,
+    pub txtdata: MaybeString,
 }
 
 impl Host {
@@ -1244,6 +1247,7 @@ impl Host {
             ttl: Some(MaybeString::default()),
             mx: MaybeString::default(),
             description: None,
+            txtdata: MaybeString::default(),
         }
     }
 }
@@ -1291,6 +1295,7 @@ pub struct WireguardServerItem {
     pub gateway: MaybeString,
     pub carp_depend_on: MaybeString,
     pub peers: String,
+    pub debug: MaybeString,
     pub endpoint: MaybeString,
     pub peer_dns: MaybeString,
 }
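
The Option-wrapping of `language` and the `Dnsbl` fields makes deserialization tolerant of exports that omit those elements (as the pre-update fixtures in this PR do); assuming yaserde's usual Option handling, an element like `<dnsbl/>` now maps to a `Dnsbl` with every wrapped field set to `None` rather than requiring each child element to be present.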

View File

@@ -612,6 +612,7 @@
 <local_zone_type>transparent</local_zone_type>
 <outgoing_interface/>
 <enable_wpad>0</enable_wpad>
+<safesearch/>
 </general>
 <advanced>
 <hideidentity>0</hideidentity>

View File

@@ -2003,6 +2003,7 @@
 <cacheflush/>
 <local_zone_type>transparent</local_zone_type>
 <outgoing_interface/>
+<safesearch/>
 <enable_wpad/>
 </general>
 <advanced>
@@ -2071,6 +2072,7 @@
 <mx/>
 <server>192.168.20.161</server>
 <description>Some app local</description>
+<txtdata/>
 </host>
 <host uuid="dd593e95-02bc-476f-8610-fa1ee454e950">
 <enabled>1</enabled>
@@ -2081,6 +2083,7 @@
 <mx/>
 <server>192.168.20.161</server>
 <description>Some app local</description>
+<txtdata/>
 </host>
 <host uuid="e1606f96-dd38-471f-a3d7-ad25e41e810d">
 <enabled>1</enabled>
@@ -2091,6 +2094,7 @@
 <mx/>
 <server>192.168.20.161</server>
 <description>Some app local</description>
+<txtdata/>
 </host>
 </hosts>
 <aliases/>
@@ -2117,6 +2121,7 @@
 <endpoint/>
 <peer_dns/>
 <carp_depend_on/>
+<debug/>
 <peers>03031aec-2e84-462e-9eab-57762dde667a,98e6ca3d-1de9-449b-be80-77022221b509,67c0ace5-e802-4d2b-a536-f8b7a2db6f99,74b60fff-7844-4097-9966-f1c2b1ad29ff,3de82ad5-bc1b-4b91-9598-f906e58ac937,a95e6b5e-24a4-40b5-bb41-b79e784f6f1c,6c9a12c6-c1ca-4c14-866b-975406a30590,c33b308b-7125-4688-9561-989ace8787b5,e43f004a-23bf-4027-8fb0-953fbb40479f</peers>
 </server>
 </servers>
</server> </server>
</servers> </servers>

View File

@@ -614,6 +614,7 @@
 <local_zone_type>transparent</local_zone_type>
 <outgoing_interface/>
 <enable_wpad>0</enable_wpad>
+<safesearch/>
 </general>
 <advanced>
 <hideidentity>0</hideidentity>

View File

@@ -750,6 +750,7 @@
 <local_zone_type>transparent</local_zone_type>
 <outgoing_interface/>
 <enable_wpad>0</enable_wpad>
+<safesearch/>
 </general>
 <advanced>
 <hideidentity>0</hideidentity>

View File

@@ -709,6 +709,7 @@
 <local_zone_type>transparent</local_zone_type>
 <outgoing_interface/>
 <enable_wpad>0</enable_wpad>
+<safesearch/>
 </general>
 <advanced>
 <hideidentity>0</hideidentity>

View File

@@ -951,6 +951,7 @@
 <local_zone_type>transparent</local_zone_type>
 <outgoing_interface/>
 <enable_wpad/>
+<safesearch/>
 </general>
 <advanced>
 <hideidentity>0</hideidentity>

View File

@@ -808,6 +808,7 @@
 <local_zone_type>transparent</local_zone_type>
 <outgoing_interface/>
 <enable_wpad/>
+<safesearch/>
 </general>
 <advanced>
 <hideidentity/>

View File

@@ -726,6 +726,7 @@
 <local_zone_type>transparent</local_zone_type>
 <outgoing_interface/>
 <enable_wpad/>
+<safesearch/>
 </general>
 <advanced>
 <hideidentity>0</hideidentity>
@@ -793,6 +794,7 @@
 <mx/>
 <server>192.168.20.161</server>
 <description>Some app local</description>
+<txtdata/>
 </host>
 <host uuid="dd593e95-02bc-476f-8610-fa1ee454e950">
 <enabled>1</enabled>
@@ -803,6 +805,7 @@
 <mx/>
 <server>192.168.20.161</server>
 <description>Some app local</description>
+<txtdata/>
 </host>
 <host uuid="e1606f96-dd38-471f-a3d7-ad25e41e810d">
 <enabled>1</enabled>
@@ -813,6 +816,7 @@
 <mx/>
 <server>192.168.20.161</server>
 <description>Some app local</description>
+<txtdata/>
 </host>
 </hosts>
 <aliases/>
@@ -838,6 +842,7 @@
 <gateway/>
 <carp_depend_on/>
 <peers>03031aec-2e84-462e-9eab-57762dde667a,98e6ca3d-1de9-449b-be80-77022221b509,67c0ace5-e802-4d2b-a536-f8b7a2db6f99,74b60fff-7844-4097-9966-f1c2b1ad29ff,3de82ad5-bc1b-4b91-9598-f906e58ac937,a95e6b5e-24a4-40b5-bb41-b79e784f6f1c,6c9a12c6-c1ca-4c14-866b-975406a30590,c33b308b-7125-4688-9561-989ace8787b5,e43f004a-23bf-4027-8fb0-953fbb40479f</peers>
+<debug/>
 <endpoint/>
 <peer_dns/>
 </server>

View File

@@ -718,6 +718,7 @@
 <local_zone_type>transparent</local_zone_type>
 <outgoing_interface/>
 <enable_wpad/>
+<safesearch/>
 </general>
 <advanced>
 <hideidentity>0</hideidentity>
@@ -785,6 +786,7 @@
 <mx/>
 <server>192.168.20.161</server>
 <description>Some app local</description>
+<txtdata/>
 </host>
 <host uuid="dd593e95-02bc-476f-8610-fa1ee454e950">
 <enabled>1</enabled>
@@ -795,6 +797,7 @@
 <mx/>
 <server>192.168.20.161</server>
 <description>Some app local</description>
+<txtdata/>
 </host>
 <host uuid="e1606f96-dd38-471f-a3d7-ad25e41e810d">
 <enabled>1</enabled>
@@ -805,6 +808,7 @@
 <mx/>
 <server>192.168.20.161</server>
 <description>Some app local</description>
+<txtdata/>
 </host>
 </hosts>
 <aliases/>
@@ -832,6 +836,7 @@
 <gateway/>
 <carp_depend_on/>
 <peers>03031aec-2e84-462e-9eab-57762dde667a,98e6ca3d-1de9-449b-be80-77022221b509,67c0ace5-e802-4d2b-a536-f8b7a2db6f99,74b60fff-7844-4097-9966-f1c2b1ad29ff,3de82ad5-bc1b-4b91-9598-f906e58ac937,a95e6b5e-24a4-40b5-bb41-b79e784f6f1c,6c9a12c6-c1ca-4c14-866b-975406a30590,c33b308b-7125-4688-9561-989ace8787b5,e43f004a-23bf-4027-8fb0-953fbb40479f</peers>
+<debug/>
 </server>
 </servers>
 </server>

View File

@@ -869,6 +869,7 @@
 <local_zone_type>transparent</local_zone_type>
 <outgoing_interface/>
 <enable_wpad/>
+<safesearch/>
 </general>
 <advanced>
 <hideidentity/>

View File

@@ -862,6 +862,7 @@
 <local_zone_type>transparent</local_zone_type>
 <outgoing_interface/>
 <enable_wpad/>
+<safesearch/>
 </general>
 <advanced>
 <hideidentity/>

View File

@@ -869,6 +869,7 @@
 <local_zone_type>transparent</local_zone_type>
 <outgoing_interface/>
 <enable_wpad/>
+<safesearch/>
 </general>
 <advanced>
 <hideidentity/>