chore: Reorganize file tree for easier onboarding. Rust project now at the root for simple git clone && cargo run

This commit is contained in:
2025-02-12 15:32:59 -05:00
parent 83b4efd625
commit 96bbef8195
144 changed files with 0 additions and 32 deletions

31
harmony/Cargo.toml Normal file
View File

@@ -0,0 +1,31 @@
[package]
name = "harmony"
edition = "2024"
version.workspace = true
readme.workspace = true
license.workspace = true
[dependencies]
libredfish = "0.1.1"
reqwest = {version = "0.11", features = ["blocking", "json"] }
russh = "0.45.0"
rust-ipmi = "0.1.1"
semver = "1.0.23"
serde = { version = "1.0.209", features = ["derive"] }
serde_json = "1.0.127"
tokio = { workspace = true }
derive-new = { workspace = true }
log = { workspace = true }
env_logger = { workspace = true }
async-trait = { workspace = true }
cidr = { workspace = true }
opnsense-config = { path = "../opnsense-config" }
opnsense-config-xml = { path = "../opnsense-config-xml" }
harmony_macros = { path = "../harmony_macros" }
harmony_types = { path = "../harmony_types" }
uuid = { workspace = true }
url = { workspace = true }
kube = { workspace = true }
k8s-openapi = { workspace = true }
serde_yaml = { workspace = true }
http = { workspace = true }

View File

@@ -0,0 +1,12 @@
use serde::{Deserialize, Serialize};
/// Opaque identifier for harmony entities (e.g. interpret children).
///
/// The inner value is intentionally private; build one with [`Id::from_string`].
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Id {
    value: String,
}
impl Id {
    /// Wraps an already-formatted identifier string without validation.
    pub fn from_string(value: String) -> Self {
        Self { value }
    }
}

View File

@@ -0,0 +1,4 @@
mod id;
mod version;
pub use id::*;
pub use version::*;

View File

@@ -0,0 +1,76 @@
/// Semantic version wrapper that serializes to/from a plain semver string.
#[derive(Debug, Clone)]
pub struct Version {
    value: semver::Version,
}
/// Error produced when a string cannot be parsed as a semantic version.
#[derive(Debug, Clone)]
pub struct VersionError {
    // Kept for debugging only; not read anywhere yet (hence the underscore).
    _msg: String,
}
impl From<semver::Error> for VersionError {
    fn from(value: semver::Error) -> Self {
        Self {
            _msg: value.to_string(),
        }
    }
}
impl Version {
    /// Parses a semver string such as "1.2.3-beta+build5".
    ///
    /// NOTE(review): an inherent `from` shadows the `From` trait convention;
    /// consider implementing `std::str::FromStr` instead.
    pub fn from(val: &str) -> Result<Self, VersionError> {
        Ok(Self {
            value: semver::Version::parse(val)?,
        })
    }
}
// Deserializes from a plain string, rejecting invalid semver with a custom error.
impl<'de> serde::Deserialize<'de> for Version {
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: serde::Deserializer<'de>,
    {
        let s = String::deserialize(deserializer)?;
        semver::Version::parse(&s)
            .map(|value| Version { value })
            .map_err(serde::de::Error::custom)
    }
}
// Serializes as the canonical semver string.
impl serde::Serialize for Version {
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: serde::Serializer,
    {
        self.value.to_string().serialize(serializer)
    }
}
impl std::fmt::Display for Version {
    /// Formats exactly as the underlying semver version does.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        self.value.fmt(f)
    }
}
#[cfg(test)]
mod test {
    use super::*;
    // Round-trips a full semver string (major.minor.patch-pre+build) through
    // serde JSON and checks every component survives unchanged.
    #[test]
    fn version_serialize_deserialize() {
        let v = "10.0.1331-ababa+b123";
        let version = Version {
            value: semver::Version::parse(v).unwrap(),
        };
        let s = serde_json::to_string(&version).unwrap();
        let version2: Version = serde_json::from_str(&s).unwrap();
        assert_eq!(version2.value.major, 10);
        assert_eq!(version2.value.minor, 0);
        assert_eq!(version2.value.patch, 1331);
        assert_eq!(version2.value.build.to_string(), "b123");
        assert_eq!(version2.value.pre.to_string(), "ababa");
        assert_eq!(version2.value.to_string(), v);
    }
}

View File

@@ -0,0 +1,36 @@
use std::fmt;
use async_trait::async_trait;
use super::topology::IpAddress;
/// Errors produced while executing commands against remote infrastructure
/// (SSH, IPMI, HTTP APIs, ...).
#[derive(Debug)]
pub enum ExecutorError {
    /// Transport-level failure (connection refused, timeout, DNS, ...).
    NetworkError(String),
    /// Credentials rejected or insufficient permissions.
    AuthenticationError(String),
    /// The target rejected or could not apply the requested configuration.
    ConfigurationError(String),
    /// Catch-all for failures that fit none of the categories above.
    UnexpectedError(String),
}
impl fmt::Display for ExecutorError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Every variant renders as "<Category> error: <msg>"; pick the
        // category once, then format through a single write.
        let (category, msg) = match self {
            ExecutorError::NetworkError(msg) => ("Network", msg),
            ExecutorError::AuthenticationError(msg) => ("Authentication", msg),
            ExecutorError::ConfigurationError(msg) => ("Configuration", msg),
            ExecutorError::UnexpectedError(msg) => ("Unexpected", msg),
        };
        write!(f, "{} error: {}", category, msg)
    }
}
impl std::error::Error for ExecutorError {}
/// Minimal SSH abstraction used to validate reachability and credentials of
/// a host before running real provisioning steps.
#[async_trait]
pub trait SshClient {
    /// Attempts to connect and authenticate to `address`; `Ok(())` means both
    /// the network path and the credentials are valid.
    async fn test_connection(
        &self,
        address: IpAddress,
        username: &str,
        password: &str,
    ) -> Result<(), ExecutorError>;
}

View File

@@ -0,0 +1,15 @@
use derive_new::new;
/// The dimension an inventory filter matches against.
#[derive(Debug, Clone)]
pub enum FilterKind {
    Label,
    Kind,
}
pub type FilterValue = String;
/// A single inventory selection criterion (kind + value).
///
/// NOTE(review): fields are underscore-prefixed because filtering is not
/// implemented yet (see `InventoryFilter::apply`); they are only stored.
#[derive(Debug, new, Clone)]
pub struct Filter {
    _kind: FilterKind,
    _value: FilterValue,
}

View File

View File

View File

@@ -0,0 +1,180 @@
use std::sync::Arc;
use derive_new::new;
use harmony_types::net::MacAddress;
/// A set of physical machines playing the same role.
pub type HostGroup = Vec<PhysicalHost>;
pub type SwitchGroup = Vec<Switch>;
// Firewalls are modeled as plain physical hosts with a specialized duty.
pub type FirewallGroup = Vec<PhysicalHost>;
/// A physical machine in the inventory: hardware, network identity and
/// out-of-band management access.
#[derive(Debug, Clone)]
pub struct PhysicalHost {
    pub category: HostCategory,
    /// Network interfaces; the first one identifies the host in a cluster
    /// (see `cluster_mac`).
    pub network: Vec<NetworkInterface>,
    /// Out-of-band management endpoint (defaults to manual operation).
    pub management: Arc<dyn ManagementInterface>,
    pub storage: Vec<Storage>,
    pub labels: Vec<Label>,
    // Unit not established here — assumed bytes; TODO confirm at call sites.
    pub memory_size: Option<u64>,
    pub cpu_count: Option<u64>,
}
impl PhysicalHost {
    /// Creates a host of the given category with no interfaces, storage or
    /// labels, managed through the manual (human-operated) interface.
    pub fn empty(category: HostCategory) -> Self {
        Self {
            category,
            network: vec![],
            storage: vec![],
            labels: vec![],
            management: Arc::new(ManualManagementInterface {}),
            memory_size: None,
            cpu_count: None,
        }
    }

    /// Returns the MAC address identifying this host within a cluster: the
    /// address of its first network interface.
    ///
    /// # Panics
    ///
    /// Panics if no network interface has been configured.
    pub fn cluster_mac(&self) -> MacAddress {
        self.network
            // `.first()` is the idiomatic (clippy-clean) form of `.get(0)`.
            .first()
            .expect("Cluster physical host should have a network interface")
            .mac_address
            .clone()
    }

    /// Builder: sets the CPU count.
    pub fn cpu(mut self, cpu_count: Option<u64>) -> Self {
        self.cpu_count = cpu_count;
        self
    }

    /// Builder: sets the memory size.
    pub fn memory_size(mut self, memory_size: Option<u64>) -> Self {
        self.memory_size = memory_size;
        self
    }

    /// Builder: appends a storage device.
    pub fn storage(
        mut self,
        connection: StorageConnectionType,
        kind: StorageKind,
        size: u64,
        serial: String,
    ) -> Self {
        self.storage.push(Storage {
            connection,
            kind,
            size,
            serial,
        });
        self
    }

    /// Builder: appends a network interface known only by its MAC address.
    pub fn mac_address(mut self, mac_address: MacAddress) -> Self {
        self.network.push(NetworkInterface {
            name: None,
            mac_address,
            speed: None,
        });
        self
    }

    /// Builder: attaches a key/value label.
    pub fn label(mut self, name: String, value: String) -> Self {
        self.labels.push(Label {
            _name: name,
            _value: value,
        });
        self
    }

    /// Builder: replaces the out-of-band management interface.
    pub fn management(mut self, management: Arc<dyn ManagementInterface>) -> Self {
        self.management = management;
        self
    }
}
/// Placeholder management interface for hosts that must be operated by a
/// human (e.g. physically selecting PXE boot).
#[derive(new)]
pub struct ManualManagementInterface;
impl ManagementInterface for ManualManagementInterface {
    // Intentionally unimplemented: manual hosts cannot be driven remotely.
    fn boot_to_pxe(&self) {
        todo!()
    }
    fn get_supported_protocol_names(&self) -> String {
        todo!()
    }
}
/// Out-of-band control of a physical host.
pub trait ManagementInterface: Send + Sync {
    /// Forces the host to boot from PXE on next startup.
    fn boot_to_pxe(&self);
    /// Human-readable list of supported management protocols (for logs).
    fn get_supported_protocol_names(&self) -> String;
}
impl std::fmt::Debug for dyn ManagementInterface {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.write_fmt(format_args!(
            "ManagementInterface protocols : {}",
            self.get_supported_protocol_names(),
        ))
    }
}
#[derive(Debug, Clone)]
pub enum HostCategory {
    Server,
    Firewall,
    Switch,
}
/// A single NIC; `name` and `speed` are optional because they may be unknown
/// until discovery runs.
#[derive(Debug, new, Clone)]
pub struct NetworkInterface {
    pub name: Option<String>,
    pub mac_address: MacAddress,
    // Link speed unit not established here — TODO confirm (Mb/s vs Gb/s).
    pub speed: Option<u64>,
}
/// Physical bus used to attach a storage device.
#[derive(Debug, new, Clone)]
pub enum StorageConnectionType {
    Sata3g,
    Sata6g,
    Sas6g,
    Sas12g,
    PCIE,
}
#[derive(Debug, Clone)]
pub enum StorageKind {
    SSD,
    NVME,
    HDD,
}
/// A storage device attached to a physical host.
#[derive(Debug, new, Clone)]
pub struct Storage {
    pub connection: StorageConnectionType,
    pub kind: StorageKind,
    // Capacity unit not established here — assumed bytes; TODO confirm.
    pub size: u64,
    pub serial: String,
}
/// A network switch with its data-plane and management interfaces.
#[derive(Debug, Clone)]
pub struct Switch {
    _interface: Vec<NetworkInterface>,
    _management_interface: NetworkInterface,
}
/// Free-form key/value tag attached to a host (unused for now, hence `_`).
#[derive(Debug, new, Clone)]
pub struct Label {
    _name: String,
    _value: String,
}
/// Postal/street address of a physical site, kept as free-form text.
pub type Address = String;
/// A physical site (datacenter, office, lab) hosting inventory.
#[derive(new, Debug)]
pub struct Location {
    pub address: Address,
    pub name: String,
}
impl Location {
    /// Placeholder location with empty fields, used by tests and autoload.
    pub fn test_building() -> Location {
        Self {
            address: String::new(),
            name: String::new(),
        }
    }
}

View File

View File

View File

View File

@@ -0,0 +1,124 @@
use std::error::Error;
use async_trait::async_trait;
use derive_new::new;
use super::{
data::{Id, Version},
executors::ExecutorError,
inventory::Inventory,
topology::HAClusterTopology,
};
/// Identifies the concrete interpret implementation, mainly for logging.
pub enum InterpretName {
    OPNSenseDHCP,
    OPNSenseDns,
    LoadBalancer,
    Tftp,
    Http,
    Dummy,
    Panic,
    OPNSense,
}
impl std::fmt::Display for InterpretName {
    /// Renders the variant name verbatim.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let name = match self {
            InterpretName::OPNSenseDHCP => "OPNSenseDHCP",
            InterpretName::OPNSenseDns => "OPNSenseDns",
            InterpretName::LoadBalancer => "LoadBalancer",
            InterpretName::Tftp => "Tftp",
            InterpretName::Http => "Http",
            InterpretName::Dummy => "Dummy",
            InterpretName::Panic => "Panic",
            InterpretName::OPNSense => "OPNSense",
        };
        f.write_str(name)
    }
}
/// A unit of work that applies a desired state (a `Score`) to a topology.
#[async_trait]
pub trait Interpret: std::fmt::Debug + Send {
    /// Runs against the given inventory and topology, returning the outcome
    /// or an error describing why the interpretation failed.
    async fn execute(
        &self,
        inventory: &Inventory,
        topology: &HAClusterTopology,
    ) -> Result<Outcome, InterpretError>;
    fn get_name(&self) -> InterpretName;
    fn get_version(&self) -> Version;
    fn get_status(&self) -> InterpretStatus;
    /// Ids of child interprets spawned by this one, if any.
    fn get_children(&self) -> Vec<Id>;
}
/// Result of a completed interpret execution.
#[derive(Debug, new, Clone)]
pub struct Outcome {
    pub status: InterpretStatus,
    pub message: String,
}
impl Outcome {
    /// Outcome for an interpret that had nothing to do.
    pub fn noop() -> Self {
        Self {
            status: InterpretStatus::NOOP,
            message: String::new(),
        }
    }
    /// Successful outcome carrying a human-readable summary.
    pub fn success(message: String) -> Self {
        Self {
            status: InterpretStatus::SUCCESS,
            message,
        }
    }
}
/// Lifecycle states of an interpret execution.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum InterpretStatus {
    SUCCESS,
    FAILURE,
    RUNNING,
    QUEUED,
    BLOCKED,
    NOOP,
}
impl std::fmt::Display for InterpretStatus {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let msg = match self {
            InterpretStatus::SUCCESS => "SUCCESS",
            InterpretStatus::FAILURE => "FAILURE",
            InterpretStatus::RUNNING => "RUNNING",
            InterpretStatus::QUEUED => "QUEUED",
            InterpretStatus::BLOCKED => "BLOCKED",
            // Note: NOOP deliberately renders with an underscore.
            InterpretStatus::NOOP => "NO_OP",
        };
        f.write_str(msg)
    }
}
/// Error raised when an interpret fails; wraps lower-level error messages.
#[derive(Debug, Clone, new)]
pub struct InterpretError {
    msg: String,
}
impl std::fmt::Display for InterpretError {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.write_str(&self.msg)
    }
}
impl Error for InterpretError {}
// Executor failures bubble up as interpret failures with a prefixed message.
impl From<ExecutorError> for InterpretError {
    fn from(value: ExecutorError) -> Self {
        Self {
            msg: format!("InterpretError : {value}"),
        }
    }
}
// Kubernetes API failures bubble up the same way.
impl From<kube::Error> for InterpretError {
    fn from(value: kube::Error) -> Self {
        Self {
            msg: format!("InterpretError : {value}"),
        }
    }
}

View File

@@ -0,0 +1,47 @@
/// A set of criteria used to select a subset of the inventory.
#[derive(Debug, new, Clone)]
pub struct InventoryFilter {
    target: Vec<Filter>,
}
/// A view over a filtered subset of the inventory (not implemented yet).
pub struct InventorySlice;
impl InventoryFilter {
    /// Applies the filter to an inventory.
    ///
    /// NOTE(review): currently unimplemented — always panics via `todo!`
    /// after logging the filter.
    pub fn apply(&self, _inventory: &Inventory) -> InventorySlice {
        info!("Applying inventory filter {:?}", self.target);
        todo!("TODO apply inventory filter, refactor as a slice")
    }
}
// NOTE(review): imports placed mid-file; consider moving them to the top.
use derive_new::new;
use log::info;
use super::{
    filter::Filter,
    hardware::{FirewallGroup, HostGroup, Location, SwitchGroup},
};
/// Everything harmony knows about the physical estate at one location.
#[derive(Debug)]
pub struct Inventory {
    pub location: Location,
    pub switch: SwitchGroup,
    // Firewall is really just a host but with somewhat specialized hardware
    // I'm not entirely sure it belongs to its own category but it helps make things easier and
    // clearer for now so let's try it this way.
    pub firewall: FirewallGroup,
    pub worker_host: HostGroup,
    pub storage_host: HostGroup,
    pub control_plane_host: HostGroup,
}
impl Inventory {
    /// Builds an empty inventory with a placeholder location.
    ///
    /// NOTE(review): despite the name, nothing is loaded from disk or the
    /// network yet — every group starts empty.
    pub fn autoload() -> Self {
        Self {
            location: Location::test_building(),
            switch: SwitchGroup::new(),
            firewall: FirewallGroup::new(),
            worker_host: HostGroup::new(),
            storage_host: HostGroup::new(),
            control_plane_host: HostGroup::new(),
        }
    }
}

View File

@@ -0,0 +1,50 @@
use std::sync::{Arc, RwLock};
use log::info;
use super::{
interpret::{Interpret, InterpretError, Outcome},
inventory::Inventory,
score::Score,
topology::HAClusterTopology,
};
type ScoreVec = Vec<Box<dyn Score>>;
/// Orchestrates the application of scores onto an inventory and topology.
pub struct Maestro {
    inventory: Inventory,
    topology: HAClusterTopology,
    // Registered scores, shared behind a lock so callers of `scores()` can
    // inspect the queue concurrently.
    scores: Arc<RwLock<ScoreVec>>,
}
impl Maestro {
pub fn new(inventory: Inventory, topology: HAClusterTopology) -> Self {
Self {
inventory,
topology,
scores: Arc::new(RwLock::new(Vec::new())),
}
}
pub fn start(&mut self) {
info!("Starting Maestro");
}
pub fn register_all(&mut self, mut scores: ScoreVec) {
let mut score_mut = self.scores.write().expect("Should acquire lock");
score_mut.append(&mut scores);
}
pub async fn interpret(&self, score: Box<dyn Score>) -> Result<Outcome, InterpretError> {
info!("Running score {score:?}");
let interpret = score.create_interpret();
info!("Launching interpret {interpret:?}");
let result = interpret.execute(&self.inventory, &self.topology).await;
info!("Got result {result:?}");
result
}
pub fn scores(&self) -> Arc<RwLock<ScoreVec>> {
self.scores.clone()
}
}

View File

@@ -0,0 +1,9 @@
pub mod data;
pub mod executors;
pub mod filter;
pub mod hardware;
pub mod interpret;
pub mod inventory;
pub mod maestro;
pub mod score;
pub mod topology;

View File

@@ -0,0 +1,7 @@
use super::interpret::Interpret;
pub trait Score: std::fmt::Debug + Send + Sync {
fn create_interpret(&self) -> Box<dyn Interpret>;
fn name(&self) -> String;
fn clone_box(&self) -> Box<dyn Score>;
}

View File

@@ -0,0 +1,232 @@
use async_trait::async_trait;
use harmony_macros::ip;
use harmony_types::net::MacAddress;
use crate::executors::ExecutorError;
use super::DHCPStaticEntry;
use super::DhcpServer;
use super::DnsRecord;
use super::DnsRecordType;
use super::DnsServer;
use super::Firewall;
use super::HttpServer;
use super::IpAddress;
use super::LoadBalancer;
use super::LoadBalancerService;
use super::LogicalHost;
use super::Router;
use super::TftpServer;
use super::Url;
use super::openshift::OpenshiftClient;
use std::sync::Arc;
/// The complete logical description of a highly-available cluster: its
/// network services, cluster members, and the domain they live under.
#[derive(Debug, Clone)]
pub struct HAClusterTopology {
    pub domain_name: String,
    pub router: Arc<dyn Router>,
    pub load_balancer: Arc<dyn LoadBalancer>,
    pub firewall: Arc<dyn Firewall>,
    pub dhcp_server: Arc<dyn DhcpServer>,
    pub tftp_server: Arc<dyn TftpServer>,
    pub http_server: Arc<dyn HttpServer>,
    pub dns_server: Arc<dyn DnsServer>,
    pub bootstrap_host: LogicalHost,
    pub control_plane: Vec<LogicalHost>,
    pub workers: Vec<LogicalHost>,
    pub switch: Vec<LogicalHost>,
}
impl HAClusterTopology {
    /// Builds an OpenShift API client from the default environment config.
    pub async fn oc_client(&self) -> Result<Arc<OpenshiftClient>, kube::Error> {
        Ok(Arc::new(OpenshiftClient::try_default().await?))
    }
    /// Builds a placeholder topology in which every service is a `DummyInfra`
    /// that panics when used; handy as a default for tests and bootstrap.
    pub fn autoload() -> Self {
        let dummy_infra = Arc::new(DummyInfra {});
        let dummy_host = LogicalHost {
            ip: ip!("0.0.0.0"),
            name: "dummyhost".to_string(),
        };
        Self {
            domain_name: "DummyTopology".to_string(),
            router: dummy_infra.clone(),
            load_balancer: dummy_infra.clone(),
            firewall: dummy_infra.clone(),
            dhcp_server: dummy_infra.clone(),
            tftp_server: dummy_infra.clone(),
            http_server: dummy_infra.clone(),
            dns_server: dummy_infra.clone(),
            bootstrap_host: dummy_host,
            control_plane: vec![],
            workers: vec![],
            switch: vec![],
        }
    }
}
/// Stand-in implementation of every infrastructure trait. Any attempt to use
/// it aborts via `unimplemented!`, which makes accidental use of an
/// unconfigured topology loud and obvious.
struct DummyInfra;
const UNIMPLEMENTED_DUMMY_INFRA: &str = "This is a dummy infrastructure, no operation is supported";
// All impls below are intentionally unimplemented; they exist only so
// `DummyInfra` can fill every `Arc<dyn ...>` slot of `HAClusterTopology`.
impl Router for DummyInfra {
    fn get_gateway(&self) -> super::IpAddress {
        unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA)
    }
    fn get_cidr(&self) -> cidr::Ipv4Cidr {
        unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA)
    }
    fn get_host(&self) -> LogicalHost {
        unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA)
    }
}
impl Firewall for DummyInfra {
    fn add_rule(
        &mut self,
        _rule: super::FirewallRule,
    ) -> Result<(), crate::executors::ExecutorError> {
        unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA)
    }
    fn remove_rule(&mut self, _rule_id: &str) -> Result<(), crate::executors::ExecutorError> {
        unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA)
    }
    fn list_rules(&self) -> Vec<super::FirewallRule> {
        unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA)
    }
    fn get_ip(&self) -> super::IpAddress {
        unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA)
    }
    fn get_host(&self) -> LogicalHost {
        unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA)
    }
}
#[async_trait]
impl DhcpServer for DummyInfra {
    async fn add_static_mapping(&self, _entry: &DHCPStaticEntry) -> Result<(), ExecutorError> {
        unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA)
    }
    async fn remove_static_mapping(&self, _mac: &MacAddress) -> Result<(), ExecutorError> {
        unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA)
    }
    async fn list_static_mappings(&self) -> Vec<(MacAddress, IpAddress)> {
        unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA)
    }
    async fn set_next_server(&self, _ip: IpAddress) -> Result<(), ExecutorError> {
        unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA)
    }
    async fn set_boot_filename(&self, _boot_filename: &str) -> Result<(), ExecutorError> {
        unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA)
    }
    fn get_ip(&self) -> IpAddress {
        unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA)
    }
    fn get_host(&self) -> LogicalHost {
        unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA)
    }
    async fn commit_config(&self) -> Result<(), ExecutorError> {
        unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA)
    }
}
#[async_trait]
impl LoadBalancer for DummyInfra {
    fn get_ip(&self) -> IpAddress {
        unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA)
    }
    fn get_host(&self) -> LogicalHost {
        unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA)
    }
    async fn add_service(&self, _service: &LoadBalancerService) -> Result<(), ExecutorError> {
        unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA)
    }
    async fn remove_service(&self, _service: &LoadBalancerService) -> Result<(), ExecutorError> {
        unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA)
    }
    async fn list_services(&self) -> Vec<LoadBalancerService> {
        unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA)
    }
    async fn ensure_initialized(&self) -> Result<(), ExecutorError> {
        unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA)
    }
    async fn commit_config(&self) -> Result<(), ExecutorError> {
        unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA)
    }
    async fn reload_restart(&self) -> Result<(), ExecutorError> {
        unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA)
    }
}
#[async_trait]
impl TftpServer for DummyInfra {
    async fn serve_files(&self, _url: &Url) -> Result<(), ExecutorError> {
        unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA)
    }
    fn get_ip(&self) -> IpAddress {
        unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA)
    }
    async fn set_ip(&self, _ip: IpAddress) -> Result<(), ExecutorError> {
        unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA)
    }
    async fn ensure_initialized(&self) -> Result<(), ExecutorError> {
        unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA)
    }
    async fn commit_config(&self) -> Result<(), ExecutorError> {
        unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA)
    }
    async fn reload_restart(&self) -> Result<(), ExecutorError> {
        unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA)
    }
}
#[async_trait]
impl HttpServer for DummyInfra {
    async fn serve_files(&self, _url: &Url) -> Result<(), ExecutorError> {
        unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA)
    }
    fn get_ip(&self) -> IpAddress {
        unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA)
    }
    async fn ensure_initialized(&self) -> Result<(), ExecutorError> {
        unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA)
    }
    async fn commit_config(&self) -> Result<(), ExecutorError> {
        unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA)
    }
    async fn reload_restart(&self) -> Result<(), ExecutorError> {
        unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA)
    }
}
#[async_trait]
impl DnsServer for DummyInfra {
    async fn register_dhcp_leases(&self, _register: bool) -> Result<(), ExecutorError> {
        unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA)
    }
    async fn register_hosts(&self, _hosts: Vec<DnsRecord>) -> Result<(), ExecutorError> {
        unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA)
    }
    fn remove_record(
        &mut self,
        _name: &str,
        _record_type: DnsRecordType,
    ) -> Result<(), ExecutorError> {
        unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA)
    }
    async fn list_records(&self) -> Vec<DnsRecord> {
        unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA)
    }
    fn get_ip(&self) -> IpAddress {
        unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA)
    }
    fn get_host(&self) -> LogicalHost {
        unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA)
    }
    async fn commit_config(&self) -> Result<(), ExecutorError> {
        unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA)
    }
}

View File

@@ -0,0 +1,17 @@
use derive_new::new;
use crate::hardware::PhysicalHost;
use super::LogicalHost;
/// Represents the binding between a LogicalHost and a PhysicalHost.
///
/// This is the only construct that directly maps a logical host to a physical host.
/// It serves as a bridge between the logical cluster structure and the physical infrastructure.
// The `new` derive generates `HostBinding::new(logical_host, physical_host)`.
#[derive(Debug, new, Clone)]
pub struct HostBinding {
    /// Reference to the LogicalHost
    pub logical_host: LogicalHost,
    /// Reference to the PhysicalHost
    pub physical_host: PhysicalHost,
}

View File

@@ -0,0 +1,24 @@
use crate::executors::ExecutorError;
use async_trait::async_trait;
use super::{IpAddress, Url};
/// An HTTP file server used to expose installation artifacts to hosts.
#[async_trait]
pub trait HttpServer: Send + Sync {
    /// Serves the files found at `url` (local folder or remote source).
    async fn serve_files(&self, url: &Url) -> Result<(), ExecutorError>;
    fn get_ip(&self) -> IpAddress;
    // async fn set_ip(&self, ip: IpAddress) -> Result<(), ExecutorError>;
    async fn ensure_initialized(&self) -> Result<(), ExecutorError>;
    /// Persists pending configuration changes.
    async fn commit_config(&self) -> Result<(), ExecutorError>;
    /// Reloads or restarts the underlying service so changes take effect.
    async fn reload_restart(&self) -> Result<(), ExecutorError>;
}
impl std::fmt::Debug for dyn HttpServer {
    /// Trait objects cannot derive `Debug`; identify the server by its IP.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "HttpServer serving files at {}", self.get_ip())
    }
}

View File

@@ -0,0 +1,105 @@
use std::{net::SocketAddr, str::FromStr};
use async_trait::async_trait;
use log::debug;
use super::{IpAddress, LogicalHost};
use crate::executors::ExecutorError;
/// Management interface of an L4 load balancer.
#[async_trait]
pub trait LoadBalancer: Send + Sync {
    fn get_ip(&self) -> IpAddress;
    fn get_host(&self) -> LogicalHost;
    async fn add_service(&self, service: &LoadBalancerService) -> Result<(), ExecutorError>;
    async fn remove_service(&self, service: &LoadBalancerService) -> Result<(), ExecutorError>;
    async fn list_services(&self) -> Vec<LoadBalancerService>;
    async fn ensure_initialized(&self) -> Result<(), ExecutorError>;
    /// Persists pending configuration changes.
    async fn commit_config(&self) -> Result<(), ExecutorError>;
    async fn reload_restart(&self) -> Result<(), ExecutorError>;
    /// Adds `service` only if it is not already configured (idempotent).
    ///
    /// # Errors
    ///
    /// Propagates any `ExecutorError` returned by `add_service`.
    async fn ensure_service_exists(
        &self,
        service: &LoadBalancerService,
    ) -> Result<(), ExecutorError> {
        // Fetch the service list once. The previous implementation queried
        // the backend twice (once for the debug log, once for the membership
        // test), which doubled remote calls and was racy between the two.
        let services = self.list_services().await;
        debug!("Listing LoadBalancer services {:?}", services);
        if !services.contains(service) {
            self.add_service(service).await?;
        }
        Ok(())
    }
}
impl std::fmt::Debug for dyn LoadBalancer {
    /// Trait objects cannot derive `Debug`; identify the balancer by its IP.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "LoadBalancer {}", self.get_ip())
    }
}
/// One load-balanced service: a listening endpoint, its backend servers and
/// an optional health check.
#[derive(Debug, PartialEq, Clone)]
pub struct LoadBalancerService {
    pub backend_servers: Vec<BackendServer>,
    pub listening_port: SocketAddr,
    pub health_check: Option<HealthCheck>,
}
/// A backend target (host + port) behind the load balancer.
#[derive(Debug, PartialEq, Clone)]
pub struct BackendServer {
    pub address: String,
    pub port: u16,
}
/// HTTP request methods supported by load-balancer health checks.
#[derive(Debug, Clone, PartialEq)]
pub enum HttpMethod {
    GET,
    POST,
    PUT,
    PATCH,
    DELETE,
}

impl From<String> for HttpMethod {
    /// Converts a case-insensitive method name.
    ///
    /// # Panics
    ///
    /// Panics with the parse error ("Invalid HTTP method: …") when `value`
    /// is not a valid method. Prefer `HttpMethod::from_str` when the input is
    /// not trusted, so the error can be handled instead.
    fn from(value: String) -> Self {
        // Surface the descriptive parse error instead of the opaque panic a
        // bare `unwrap()` produced.
        Self::from_str(&value).unwrap_or_else(|e| panic!("{e}"))
    }
}

impl FromStr for HttpMethod {
    type Err = String;

    /// Parses a method name, ignoring case ("get" == "GET").
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        match s.to_uppercase().as_str() {
            "GET" => Ok(HttpMethod::GET),
            "POST" => Ok(HttpMethod::POST),
            "PUT" => Ok(HttpMethod::PUT),
            "PATCH" => Ok(HttpMethod::PATCH),
            "DELETE" => Ok(HttpMethod::DELETE),
            _ => Err(format!("Invalid HTTP method: {}", s)),
        }
    }
}

impl std::fmt::Display for HttpMethod {
    /// Renders the canonical upper-case method name.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let name = match self {
            HttpMethod::GET => "GET",
            HttpMethod::POST => "POST",
            HttpMethod::PUT => "PUT",
            HttpMethod::PATCH => "PATCH",
            HttpMethod::DELETE => "DELETE",
        };
        f.write_str(name)
    }
}
/// Coarse HTTP status classes used when evaluating health-check responses.
#[derive(Debug, Clone, PartialEq)]
pub enum HttpStatusCode {
    Success2xx,
    UserError4xx,
    ServerError5xx,
}
/// How to probe a backend: an HTTP request (path, method, expected status
/// class) or a plain TCP connect, optionally on an alternate port.
#[derive(Debug, Clone, PartialEq)]
pub enum HealthCheck {
    HTTP(String, HttpMethod, HttpStatusCode),
    TCP(Option<u16>),
}

View File

@@ -0,0 +1,126 @@
mod ha_cluster;
mod host_binding;
mod http;
mod load_balancer;
pub mod openshift;
mod router;
mod tftp;
pub use ha_cluster::*;
pub use load_balancer::*;
pub use router::*;
mod network;
pub use host_binding::*;
pub use http::*;
pub use network::*;
pub use tftp::*;
use std::{net::IpAddr, sync::Arc};
pub type IpAddress = IpAddr;
/// A source of files: either a folder on the local machine or a remote URL.
#[derive(Debug, Clone)]
pub enum Url {
    LocalFolder(String),
    Remote(url::Url),
}
impl std::fmt::Display for Url {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            Url::LocalFolder(path) => write!(f, "{}", path),
            Url::Remote(url) => write!(f, "{}", url),
        }
    }
}
/// Represents a logical member of a cluster that provides one or more services.
///
/// A LogicalHost can represent various roles within the infrastructure, such as:
/// - A firewall appliance hosting DHCP, DNS, PXE, and load balancer services
/// - A Kubernetes worker node
/// - A combined Kubernetes worker and Ceph storage node
/// - A control plane node
///
/// This abstraction focuses on the logical role and services, independent of the physical hardware.
#[derive(Debug, Clone)]
pub struct LogicalHost {
    /// The IP address of this logical host.
    pub ip: IpAddress,
    /// The name of this logical host.
    pub name: String,
}
impl LogicalHost {
    /// Creates a list of `LogicalHost` instances.
    ///
    /// # Arguments
    ///
    /// * `number_hosts` - The number of logical hosts to create.
    /// * `start_ip` - The starting IP address. Each subsequent host's IP will be incremented.
    /// * `hostname_prefix` - The prefix for the host names. Host names will be in the form `prefix<index>`.
    ///
    /// # Returns
    ///
    /// A `Vec<LogicalHost>` containing the specified number of logical hosts, each with a unique IP and name.
    ///
    /// # Panics
    ///
    /// This function will panic if adding `number_hosts` to `start_ip` exceeds the valid range of IP addresses.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::str::FromStr;
    /// use harmony::topology::{IpAddress, LogicalHost};
    ///
    /// let start_ip = IpAddress::from_str("192.168.0.20").unwrap();
    /// let hosts = LogicalHost::create_hosts(3, start_ip, "worker");
    ///
    /// assert_eq!(hosts.len(), 3);
    /// assert_eq!(hosts[0].ip, IpAddress::from_str("192.168.0.20").unwrap());
    /// assert_eq!(hosts[0].name, "worker0");
    /// assert_eq!(hosts[1].ip, IpAddress::from_str("192.168.0.21").unwrap());
    /// assert_eq!(hosts[1].name, "worker1");
    /// assert_eq!(hosts[2].ip, IpAddress::from_str("192.168.0.22").unwrap());
    /// assert_eq!(hosts[2].name, "worker2");
    /// ```
    pub fn create_hosts(
        number_hosts: u32,
        start_ip: IpAddress,
        hostname_prefix: &str,
    ) -> Vec<LogicalHost> {
        // Capacity is exact: one entry per requested host.
        let mut hosts = Vec::with_capacity(number_hosts.try_into().unwrap());
        for i in 0..number_hosts {
            // `increment_ip` only supports IPv4 today and panics on overflow
            // via the expect below.
            let new_ip = increment_ip(start_ip, i).expect("IP address overflow");
            let name = format!("{}{}", hostname_prefix, i);
            hosts.push(LogicalHost { ip: new_ip, name });
        }
        hosts
    }
}
/// Increments an IP address by a given value.
///
/// # Arguments
///
/// * `ip` - The starting IP address.
/// * `increment` - The amount to add to the IP address.
///
/// # Returns
///
/// `Some(ip + increment)`, or `None` when the addition would overflow the
/// IPv4 address space (e.g. 255.255.255.255 + 1).
fn increment_ip(ip: IpAddress, increment: u32) -> Option<IpAddress> {
    match ip {
        IpAddress::V4(ipv4) => {
            // `checked_add` makes overflow explicit: the previous unchecked
            // `+` panicked only in debug builds and silently wrapped around
            // in release builds, yielding a wrong low address instead of the
            // documented overflow failure.
            u32::from(ipv4)
                .checked_add(increment)
                .map(|ip| IpAddress::V4(ip.into()))
        }
        IpAddress::V6(_) => {
            todo!("Ipv6 not supported yet")
        }
    }
}

View File

@@ -0,0 +1,296 @@
use std::{net::Ipv4Addr, str::FromStr};
use async_trait::async_trait;
use harmony_types::net::MacAddress;
use crate::executors::ExecutorError;
use super::{IpAddress, LogicalHost};
/// A fixed DHCP lease: `mac` is always handed the address `ip`.
#[derive(Debug)]
pub struct DHCPStaticEntry {
    pub name: String,
    pub mac: MacAddress,
    pub ip: Ipv4Addr,
}
impl std::fmt::Display for DHCPStaticEntry {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.write_fmt(format_args!(
            "DHCPStaticEntry : name {}, mac {}, ip {}",
            self.name, self.mac, self.ip
        ))
    }
}
/// Firewall rule management (synchronous, unlike the other server traits).
pub trait Firewall: Send + Sync {
    fn add_rule(&mut self, rule: FirewallRule) -> Result<(), ExecutorError>;
    fn remove_rule(&mut self, rule_id: &str) -> Result<(), ExecutorError>;
    fn list_rules(&self) -> Vec<FirewallRule>;
    fn get_ip(&self) -> IpAddress;
    fn get_host(&self) -> LogicalHost;
}
impl std::fmt::Debug for dyn Firewall {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.write_fmt(format_args!("Firewall {}", self.get_ip()))
    }
}
// NOTE(review): currently unused in this file — confirm intended purpose.
pub struct NetworkDomain {
    pub name: String,
}
/// DHCP server management, including the PXE-related options
/// (`next-server` address and boot filename).
#[async_trait]
pub trait DhcpServer: Send + Sync {
    async fn add_static_mapping(&self, entry: &DHCPStaticEntry) -> Result<(), ExecutorError>;
    async fn remove_static_mapping(&self, mac: &MacAddress) -> Result<(), ExecutorError>;
    async fn list_static_mappings(&self) -> Vec<(MacAddress, IpAddress)>;
    /// Sets the PXE `next-server` address handed out in DHCP offers.
    async fn set_next_server(&self, ip: IpAddress) -> Result<(), ExecutorError>;
    /// Sets the PXE boot filename handed out in DHCP offers.
    async fn set_boot_filename(&self, boot_filename: &str) -> Result<(), ExecutorError>;
    fn get_ip(&self) -> IpAddress;
    fn get_host(&self) -> LogicalHost;
    async fn commit_config(&self) -> Result<(), ExecutorError>;
}
impl std::fmt::Debug for dyn DhcpServer {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.write_fmt(format_args!("DhcpServer {}", self.get_ip()))
    }
}
/// DNS server management: host records and DHCP-lease registration.
#[async_trait]
pub trait DnsServer: Send + Sync {
    async fn register_dhcp_leases(&self, register: bool) -> Result<(), ExecutorError>;
    async fn register_hosts(&self, hosts: Vec<DnsRecord>) -> Result<(), ExecutorError>;
    fn remove_record(
        &mut self,
        name: &str,
        record_type: DnsRecordType,
    ) -> Result<(), ExecutorError>;
    async fn list_records(&self) -> Vec<DnsRecord>;
    fn get_ip(&self) -> IpAddress;
    fn get_host(&self) -> LogicalHost;
    async fn commit_config(&self) -> Result<(), ExecutorError>;
    /// Registers only the records that are not already present (idempotent).
    async fn ensure_hosts_registered(&self, hosts: Vec<DnsRecord>) -> Result<(), ExecutorError> {
        let current_hosts = self.list_records().await;
        // Keep only records the server does not know about yet.
        let hosts_to_register: Vec<DnsRecord> = hosts
            .into_iter()
            .filter(|host| !current_hosts.contains(host))
            .collect();
        if !hosts_to_register.is_empty() {
            self.register_hosts(hosts_to_register).await?;
        }
        Ok(())
    }
}
impl std::fmt::Debug for dyn DnsServer {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.write_fmt(format_args!("DnsServer {}", self.get_ip()))
    }
}
/// A single firewall rule; `id` is the handle used by `remove_rule`.
#[derive(Clone, Debug)]
pub struct FirewallRule {
    pub id: String,
    pub source: IpAddress,
    pub destination: IpAddress,
    pub port: u16,
    pub protocol: Protocol,
    pub action: Action,
}
#[derive(Clone, Debug)]
pub enum Protocol {
    TCP,
    UDP,
    ICMP,
}
/// Whether traffic matching a rule is allowed through or denied.
#[derive(Clone, Debug)]
pub enum Action {
    Allow,
    Deny,
}
/// The subset of DNS record types harmony can manage.
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum DnsRecordType {
    A,
    AAAA,
    CNAME,
    MX,
    TXT,
}

impl std::fmt::Display for DnsRecordType {
    /// Renders the canonical record-type mnemonic.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let name = match self {
            DnsRecordType::A => "A",
            DnsRecordType::AAAA => "AAAA",
            DnsRecordType::CNAME => "CNAME",
            DnsRecordType::MX => "MX",
            DnsRecordType::TXT => "TXT",
        };
        f.write_str(name)
    }
}

/// One DNS record: `host.domain` of `record_type` pointing at `value`.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct DnsRecord {
    pub host: String,
    pub domain: String,
    pub record_type: DnsRecordType,
    pub value: IpAddress,
}

impl FromStr for DnsRecordType {
    type Err = String;

    /// Parses the case-sensitive mnemonic ("A", "AAAA", ...).
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        match s {
            "A" => Ok(DnsRecordType::A),
            "AAAA" => Ok(DnsRecordType::AAAA),
            "CNAME" => Ok(DnsRecordType::CNAME),
            "MX" => Ok(DnsRecordType::MX),
            "TXT" => Ok(DnsRecordType::TXT),
            _ => Err(format!("Unknown DNSRecordType {s}")),
        }
    }
}
#[cfg(test)]
mod test {
    use std::sync::Arc;
    use tokio::sync::RwLock;
    use super::*;
    // Registering an already-known record must not create a duplicate.
    #[tokio::test]
    async fn test_ensure_hosts_registered_no_new_hosts() {
        let server = DummyDnsServer::default();
        let existing_host = DnsRecord {
            host: "existing".to_string(),
            domain: "example.com".to_string(),
            record_type: DnsRecordType::A,
            value: IpAddress::V4(Ipv4Addr::new(192, 168, 1, 2)),
        };
        server
            .register_hosts(vec![existing_host.clone()])
            .await
            .unwrap();
        let new_hosts = vec![
            existing_host, // already exists
        ];
        server.ensure_hosts_registered(new_hosts).await.unwrap();
        assert_eq!(server.list_records().await.len(), 1);
    }
    // A mix of known and unknown records registers only the unknown one.
    #[tokio::test]
    async fn test_ensure_hosts_registered_with_new_hosts() {
        let server = DummyDnsServer::default();
        let existing_host = DnsRecord {
            host: "existing".to_string(),
            domain: "example.com".to_string(),
            record_type: DnsRecordType::A,
            value: IpAddress::V4(Ipv4Addr::new(192, 168, 1, 2)),
        };
        server
            .register_hosts(vec![existing_host.clone()])
            .await
            .unwrap();
        let new_hosts = vec![
            existing_host.clone(), // already exists
            DnsRecord {
                host: "new".to_string(),
                domain: "example.com".to_string(),
                record_type: DnsRecordType::A,
                value: IpAddress::V4(Ipv4Addr::new(192, 168, 1, 3)),
            },
        ];
        server.ensure_hosts_registered(new_hosts).await.unwrap();
        assert_eq!(server.list_records().await.len(), 2);
    }
    // An empty registration request is a no-op.
    #[tokio::test]
    async fn test_ensure_hosts_registered_no_hosts() {
        let server = DummyDnsServer::default();
        let new_hosts = vec![];
        server.ensure_hosts_registered(new_hosts).await.unwrap();
        assert_eq!(server.list_records().await.len(), 0);
    }
    #[tokio::test]
    async fn test_ensure_existing_host_kept_no_new_host() {
        let server = DummyDnsServer::default();
        let new_hosts = vec![];
        server.ensure_hosts_registered(new_hosts).await.unwrap();
        assert_eq!(server.list_records().await.len(), 0);
    }
    // In-memory DnsServer test double backed by an async RwLock'd Vec.
    #[async_trait::async_trait]
    impl DnsServer for DummyDnsServer {
        async fn register_dhcp_leases(&self, _register: bool) -> Result<(), ExecutorError> {
            Ok(())
        }
        async fn register_hosts(&self, hosts: Vec<DnsRecord>) -> Result<(), ExecutorError> {
            self.hosts.write().await.extend(hosts);
            Ok(())
        }
        fn remove_record(
            &mut self,
            _name: &str,
            _record_type: DnsRecordType,
        ) -> Result<(), ExecutorError> {
            Ok(())
        }
        async fn list_records(&self) -> Vec<DnsRecord> {
            self.hosts.read().await.clone()
        }
        fn get_ip(&self) -> IpAddress {
            IpAddress::V4(Ipv4Addr::new(192, 168, 0, 1))
        }
        fn get_host(&self) -> LogicalHost {
            LogicalHost {
                ip: self.get_ip(),
                name: "dummy-host".to_string(),
            }
        }
        async fn commit_config(&self) -> Result<(), ExecutorError> {
            Ok(())
        }
    }
    struct DummyDnsServer {
        hosts: Arc<RwLock<Vec<DnsRecord>>>,
    }
    impl Default for DummyDnsServer {
        fn default() -> Self {
            DummyDnsServer {
                hosts: Arc::new(RwLock::new(vec![])),
            }
        }
    }
}

View File

@@ -0,0 +1,55 @@
use k8s_openapi::NamespaceResourceScope;
use kube::{Api, Client, Error, Resource, api::PostParams};
use serde::de::DeserializeOwned;
/// Thin wrapper around a [`kube::Client`] used to apply resources to an
/// OpenShift/Kubernetes cluster.
pub struct OpenshiftClient {
    client: Client,
}
impl OpenshiftClient {
    /// Builds a client from the default environment (kubeconfig or
    /// in-cluster configuration), mirroring [`Client::try_default`].
    pub async fn try_default() -> Result<Self, Error> {
        Ok(Self {
            client: Client::try_default().await?,
        })
    }
    /// Creates every resource in `resource` through the cluster-scoped API
    /// (`Api::all`) and returns the server-side representation of each
    /// created object. Stops at the first error; resources created before
    /// the failure are not rolled back.
    pub async fn apply_all<
        K: Resource<Scope = NamespaceResourceScope>
            + std::fmt::Debug
            + Sync
            + DeserializeOwned
            + Default
            + serde::Serialize
            + Clone,
    >(
        &self,
        resource: &Vec<K>,
    ) -> Result<Vec<K>, kube::Error>
    where
        <K as kube::Resource>::DynamicType: Default,
    {
        let mut result = vec![];
        for r in resource.iter() {
            let api: Api<K> = Api::all(self.client.clone());
            result.push(api.create(&PostParams::default(), &r).await?);
        }
        Ok(result)
    }
    /// Creates every resource in the client's default namespace.
    /// NOTE(review): after all creations succeed this hits `todo!("")` and
    /// panics — the intended return value (created resource(s)?) still needs
    /// to be implemented before this method is usable.
    pub async fn apply_namespaced<K>(&self, resource: &Vec<K>) -> Result<K, Error>
    where
        K: Resource<Scope = NamespaceResourceScope>
            + Clone
            + std::fmt::Debug
            + DeserializeOwned
            + serde::Serialize
            + Default,
        <K as kube::Resource>::DynamicType: Default,
    {
        for r in resource.iter() {
            let api: Api<K> = Api::default_namespaced(self.client.clone());
            api.create(&PostParams::default(), &r).await?;
        }
        todo!("")
    }
}

View File

@@ -0,0 +1,40 @@
use cidr::Ipv4Cidr;
use derive_new::new;
use super::{IpAddress, LogicalHost};
/// A network router known to the topology: exposes the gateway address, the
/// CIDR of the routed network, and its logical host identity.
pub trait Router: Send + Sync {
    fn get_gateway(&self) -> IpAddress;
    fn get_cidr(&self) -> Ipv4Cidr;
    fn get_host(&self) -> LogicalHost;
}
impl std::fmt::Debug for dyn Router {
    /// Debug output shows the routing facts (gateway + CIDR) rather than any
    /// implementation-specific fields.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(
            f,
            "Router Gateway : {}, CIDR : {}",
            self.get_gateway(),
            self.get_cidr()
        )
    }
}
/// A router Harmony does not manage: only its addressing facts are known.
#[derive(new)]
pub struct UnmanagedRouter {
    gateway: IpAddress,
    cidr: Ipv4Cidr,
}
impl Router for UnmanagedRouter {
    fn get_gateway(&self) -> IpAddress {
        self.gateway.clone()
    }
    fn get_cidr(&self) -> Ipv4Cidr {
        self.cidr.clone()
    }
    /// Not implemented: an unmanaged router has no logical host to expose yet.
    fn get_host(&self) -> LogicalHost {
        todo!()
    }
}

View File

@@ -0,0 +1,24 @@
use crate::executors::ExecutorError;
use async_trait::async_trait;
use super::{IpAddress, Url};
/// A TFTP server, typically used to serve PXE boot artifacts.
#[async_trait]
pub trait TftpServer: Send + Sync {
    /// Copies the files referenced by `url` into the TFTP root.
    async fn serve_files(&self, url: &Url) -> Result<(), ExecutorError>;
    fn get_ip(&self) -> IpAddress;
    /// Sets the address the TFTP service listens on.
    async fn set_ip(&self, ip: IpAddress) -> Result<(), ExecutorError>;
    /// Installs/enables the service if needed; expected to be idempotent.
    async fn ensure_initialized(&self) -> Result<(), ExecutorError>;
    /// Persists pending configuration changes.
    async fn commit_config(&self) -> Result<(), ExecutorError>;
    /// Reloads or restarts the service so committed changes take effect.
    async fn reload_restart(&self) -> Result<(), ExecutorError>;
}
impl std::fmt::Debug for dyn TftpServer {
    /// Debug output identifies the server by the IP it serves from.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "TftpServer serving files at {}", self.get_ip())
    }
}

View File

@@ -0,0 +1 @@
pub mod russh;

View File

View File

@@ -0,0 +1,58 @@
use async_trait::async_trait;
use std::sync::Arc;
use russh::{client, keys::key};
use crate::{
domain::executors::{ExecutorError, SshClient},
topology::IpAddress,
};
/// SSH client implementation backed by the `russh` crate.
pub struct RusshClient;

#[async_trait]
impl SshClient for RusshClient {
    /// Opens an SSH connection to `address:22` and verifies that the given
    /// credentials authenticate successfully.
    ///
    /// Returns `Ok(())` on success, `ExecutorError::AuthenticationError` when
    /// the server rejects the password, or a network error (via the
    /// `From<russh::Error>` impl below) when the connection itself fails.
    async fn test_connection(
        &self,
        address: IpAddress,
        username: &str,
        password: &str,
    ) -> Result<(), crate::domain::executors::ExecutorError> {
        let config = client::Config::default();
        let handler = Client {};
        let mut session = client::connect(Arc::new(config), (address, 22), handler).await?;
        // Bug fix: the caller-supplied credentials were previously ignored
        // (parameters were `_username`/`_password`) in favor of hardcoded
        // "nationtech"/"opnsense" values.
        match session.authenticate_password(username, password).await? {
            true => Ok(()),
            false => Err(ExecutorError::AuthenticationError(
                "ssh authentication failed".to_string(),
            )),
        }
    }
}
// Stateless russh connection handler.
struct Client {}
// More SSH event handlers
// can be defined in this trait
// In this example, we're only using Channel, so these aren't needed.
#[async_trait]
impl client::Handler for Client {
    type Error = ExecutorError;
    /// Accepts any server host key without verification.
    /// NOTE(review): this makes connections vulnerable to MITM — consider
    /// checking against a known-hosts store.
    async fn check_server_key(
        &mut self,
        _server_public_key: &key::PublicKey,
    ) -> Result<bool, Self::Error> {
        Ok(true)
    }
}
impl From<russh::Error> for ExecutorError {
    // Collapses every russh error into a generic network error for now.
    fn from(_value: russh::Error) -> Self {
        // TODO handle various russh errors properly
        ExecutorError::NetworkError("Russh client error".to_string())
    }
}

View File

@@ -0,0 +1,26 @@
use crate::hardware::ManagementInterface;
use crate::topology::IpAddress;
use derive_new::new;
use harmony_types::net::MacAddress;
use log::info;
/// Management interface for HP iLO BMCs. Both identifiers are optional since
/// discovery may have found only one of them.
#[derive(new)]
pub struct HPIlo {
    ip_address: Option<IpAddress>,
    mac_address: Option<MacAddress>,
}
impl ManagementInterface for HPIlo {
    /// Triggers a one-time PXE boot through iLO.
    /// Currently unimplemented: it only logs the known target identifiers
    /// (empty string when an identifier is absent) and then panics.
    fn boot_to_pxe(&self) {
        let ip = self
            .ip_address
            .as_ref()
            .map(|i| i.to_string())
            .unwrap_or_default();
        let mac = self
            .mac_address
            .as_ref()
            .map(|m| m.to_string())
            .unwrap_or_default();
        info!("Launching boot to pxe for ip {} mac address {}", ip, mac);
        todo!()
    }
    /// Protocols this interface can speak.
    fn get_supported_protocol_names(&self) -> String {
        "ipmi,redfish".to_string()
    }
}

View File

@@ -0,0 +1,20 @@
use crate::hardware::ManagementInterface;
use derive_new::new;
use harmony_types::net::MacAddress;
use log::info;
/// Management interface for machines exposing Intel AMT.
#[derive(new)]
pub struct IntelAmtManagement {
    mac_address: MacAddress,
}
impl ManagementInterface for IntelAmtManagement {
    /// Not implemented yet: only logs the target MAC address, then panics.
    fn boot_to_pxe(&self) {
        info!("Launching boot to pxe for mac address {}", self.mac_address);
        todo!()
    }
    fn get_supported_protocol_names(&self) -> String {
        "IntelAMT".to_string()
    }
}

4
harmony/src/infra/mod.rs Normal file
View File

@@ -0,0 +1,4 @@
pub mod executors;
pub mod hp_ilo;
pub mod intel_amt;
pub mod opnsense;

View File

@@ -0,0 +1,72 @@
use async_trait::async_trait;
use harmony_types::net::MacAddress;
use log::debug;
use crate::{
executors::ExecutorError,
topology::{DHCPStaticEntry, DhcpServer, IpAddress, LogicalHost},
};
use super::OPNSenseFirewall;
#[async_trait]
impl DhcpServer for OPNSenseFirewall {
    /// Persists and applies the pending OPNsense configuration.
    async fn commit_config(&self) -> Result<(), ExecutorError> {
        OPNSenseFirewall::commit_config(self).await
    }
    /// Adds a MAC -> IP static lease to the OPNsense DHCP service.
    /// Takes effect only after [`commit_config`].
    async fn add_static_mapping(&self, entry: &DHCPStaticEntry) -> Result<(), ExecutorError> {
        let mac: String = String::from(&entry.mac);
        {
            // Scope the write lock so it is released before logging.
            let mut writable_opnsense = self.opnsense_config.write().await;
            writable_opnsense
                .dhcp()
                // NOTE(review): `unwrap` panics on a config error — consider
                // mapping the error into an ExecutorError instead.
                .add_static_mapping(&mac, entry.ip, &entry.name)
                .unwrap();
        }
        debug!("Registered {:?}", entry);
        Ok(())
    }
    async fn remove_static_mapping(&self, _mac: &MacAddress) -> Result<(), ExecutorError> {
        todo!()
    }
    async fn list_static_mappings(&self) -> Vec<(MacAddress, IpAddress)> {
        todo!()
    }
    fn get_ip(&self) -> IpAddress {
        OPNSenseFirewall::get_ip(self)
    }
    fn get_host(&self) -> LogicalHost {
        self.host.clone()
    }
    /// Sets the PXE "next-server" (TFTP server) address. IPv4 only for now.
    async fn set_next_server(&self, ip: IpAddress) -> Result<(), ExecutorError> {
        let ipv4 = match ip {
            std::net::IpAddr::V4(ipv4_addr) => ipv4_addr,
            std::net::IpAddr::V6(_) => todo!("ipv6 not supported yet"),
        };
        {
            let mut writable_opnsense = self.opnsense_config.write().await;
            writable_opnsense.dhcp().set_next_server(ipv4);
            debug!("OPNsense dhcp server set next server {ipv4}");
        }
        Ok(())
    }
    /// Sets the PXE boot filename handed out with DHCP leases.
    async fn set_boot_filename(&self, boot_filename: &str) -> Result<(), ExecutorError> {
        {
            let mut writable_opnsense = self.opnsense_config.write().await;
            writable_opnsense.dhcp().set_boot_filename(boot_filename);
            debug!("OPNsense dhcp server set boot filename {boot_filename}");
        }
        Ok(())
    }
}

View File

@@ -0,0 +1,91 @@
use crate::infra::opnsense::Host;
use crate::infra::opnsense::IpAddress;
use crate::infra::opnsense::LogicalHost;
use crate::{
executors::ExecutorError,
topology::{DnsRecord, DnsServer},
};
use async_trait::async_trait;
use super::OPNSenseFirewall;
#[async_trait]
impl DnsServer for OPNSenseFirewall {
    /// Registers the given DNS records as host overrides in the OPNsense
    /// configuration. Takes effect only after [`commit_config`].
    async fn register_hosts(&self, hosts: Vec<DnsRecord>) -> Result<(), ExecutorError> {
        let mut writable_opnsense = self.opnsense_config.write().await;
        let mut dns = writable_opnsense.dns();
        let hosts = hosts
            .iter()
            .map(|h| {
                Host::new(
                    h.host.clone(),
                    h.domain.clone(),
                    h.record_type.to_string(),
                    h.value.to_string(),
                )
            })
            .collect();
        dns.register_hosts(hosts);
        Ok(())
    }
    fn remove_record(
        &mut self,
        _name: &str,
        _record_type: crate::topology::DnsRecordType,
    ) -> Result<(), ExecutorError> {
        todo!()
    }
    /// Lists all host overrides currently present in the OPNsense config.
    ///
    /// # Panics
    /// Panics when OPNsense returns a record type or address that does not
    /// parse; such data would indicate a corrupted remote configuration.
    async fn list_records(&self) -> Vec<crate::topology::DnsRecord> {
        self.opnsense_config
            .write()
            .await
            .dns()
            .get_hosts()
            .iter()
            .map(|h| DnsRecord {
                host: h.hostname.clone(),
                domain: h.domain.clone(),
                // Bug fix: the previous `expect("... {h.rr} ...")` message was
                // a plain string literal, so the offending value was never
                // interpolated into the panic message.
                record_type: h.rr.parse().unwrap_or_else(|_| {
                    panic!("received invalid record type {} from opnsense", h.rr)
                }),
                value: h.server.parse().unwrap_or_else(|_| {
                    panic!("received invalid ipv4 record {} from opnsense", h.server)
                }),
            })
            .collect()
    }
    fn get_ip(&self) -> IpAddress {
        // clippy `needless_borrow` fix: `self` is already a reference.
        OPNSenseFirewall::get_ip(self)
    }
    fn get_host(&self) -> LogicalHost {
        self.host.clone()
    }
    /// Enables or disables registration of DHCP leases in DNS.
    async fn register_dhcp_leases(&self, register: bool) -> Result<(), ExecutorError> {
        let mut writable_opnsense = self.opnsense_config.write().await;
        let mut dns = writable_opnsense.dns();
        dns.register_dhcp_leases(register);
        Ok(())
    }
    /// Saves the pending configuration, then restarts the DNS service so the
    /// new records are served.
    async fn commit_config(&self) -> Result<(), ExecutorError> {
        let opnsense = self.opnsense_config.read().await;
        opnsense
            .save()
            .await
            .map_err(|e| ExecutorError::UnexpectedError(e.to_string()))?;
        opnsense
            .restart_dns()
            .await
            .map_err(|e| ExecutorError::UnexpectedError(e.to_string()))
    }
}

View File

@@ -0,0 +1,27 @@
use crate::{
executors::ExecutorError,
topology::{Firewall, FirewallRule, IpAddress, LogicalHost},
};
use super::OPNSenseFirewall;
impl Firewall for OPNSenseFirewall {
    /// Rule management is not implemented yet; only host/IP identity works.
    fn add_rule(&mut self, _rule: FirewallRule) -> Result<(), ExecutorError> {
        todo!()
    }
    fn remove_rule(&mut self, _rule_id: &str) -> Result<(), ExecutorError> {
        todo!()
    }
    fn list_rules(&self) -> Vec<FirewallRule> {
        todo!()
    }
    fn get_ip(&self) -> IpAddress {
        OPNSenseFirewall::get_ip(self)
    }
    fn get_host(&self) -> LogicalHost {
        self.host.clone()
    }
}

View File

@@ -0,0 +1,75 @@
use async_trait::async_trait;
use log::info;
use crate::{
executors::ExecutorError,
topology::{HttpServer, IpAddress, Url},
};
use super::OPNSenseFirewall;
#[async_trait]
impl HttpServer for OPNSenseFirewall {
    /// Uploads the files referenced by `url` into the Caddy web root.
    async fn serve_files(&self, url: &Url) -> Result<(), ExecutorError> {
        let http_root_path = "/usr/local/http";
        let config = self.opnsense_config.read().await;
        info!("Uploading files from url {url} to {http_root_path}");
        match url {
            Url::LocalFolder(path) => {
                config
                    .upload_files(path, http_root_path)
                    .await
                    .map_err(|e| ExecutorError::UnexpectedError(e.to_string()))?;
            }
            Url::Remote(_url) => todo!(),
        }
        Ok(())
    }
    fn get_ip(&self) -> IpAddress {
        todo!();
    }
    async fn commit_config(&self) -> Result<(), ExecutorError> {
        OPNSenseFirewall::commit_config(self).await
    }
    /// Reloads/restarts the Caddy service so committed changes take effect.
    async fn reload_restart(&self) -> Result<(), ExecutorError> {
        self.opnsense_config
            .write()
            .await
            .caddy()
            .reload_restart()
            .await
            .map_err(|e| ExecutorError::UnexpectedError(e.to_string()))
    }
    /// Installs the os-caddy plugin when its config section is absent, then
    /// uploads Harmony's custom Caddy config files and enables the server.
    async fn ensure_initialized(&self) -> Result<(), ExecutorError> {
        let mut config = self.opnsense_config.write().await;
        // Idiom fix (clippy `redundant_pattern_matching`): use `is_none()`
        // instead of `if let None = …`.
        let caddy_config_missing = config.caddy().get_full_config().is_none();
        if caddy_config_missing {
            info!("Http config not available in opnsense config, installing package");
            config.install_package("os-caddy").await.map_err(|e| {
                ExecutorError::UnexpectedError(format!(
                    "Executor failed when trying to install os-caddy package with error {e:?}"
                ))
            })?;
        } else {
            info!("Http config available in opnsense config, assuming it is already installed");
        }
        info!("Adding custom caddy config files");
        // NOTE(review): this relative path depends on the process working
        // directory — consider making it configurable.
        config
            .upload_files(
                "../../../watchguard/caddy_config",
                "/usr/local/etc/caddy/caddy.d/",
            )
            .await
            .map_err(|e| ExecutorError::UnexpectedError(e.to_string()))?;
        info!("Enabling http server");
        config.caddy().enable(true);
        Ok(())
    }
}

View File

@@ -0,0 +1,444 @@
use async_trait::async_trait;
use log::{debug, info, warn};
use opnsense_config_xml::{Frontend, HAProxy, HAProxyBackend, HAProxyHealthCheck, HAProxyServer};
use uuid::Uuid;
use crate::{
executors::ExecutorError,
topology::{
BackendServer, HealthCheck, HttpMethod, HttpStatusCode, IpAddress, LoadBalancer,
LoadBalancerService, LogicalHost,
},
};
use super::OPNSenseFirewall;
#[async_trait]
impl LoadBalancer for OPNSenseFirewall {
    fn get_ip(&self) -> IpAddress {
        OPNSenseFirewall::get_ip(self)
    }
    fn get_host(&self) -> LogicalHost {
        self.host.clone()
    }
    /// Adds the frontend/backend/servers/health-check group for `service` to
    /// the HAProxy configuration. Changes are only staged; call
    /// [`commit_config`] and [`reload_restart`] to apply them.
    async fn add_service(&self, service: &LoadBalancerService) -> Result<(), ExecutorError> {
        warn!(
            "TODO : the current implementation does not check / cleanup / merge with existing haproxy services properly. Make sure to manually verify that the configuration is correct after executing any operation here"
        );
        let mut config = self.opnsense_config.write().await;
        let (frontend, backend, servers, healthcheck) =
            harmony_load_balancer_service_to_haproxy_xml(service);
        let mut load_balancer = config.load_balancer();
        load_balancer.add_backend(backend);
        load_balancer.add_frontend(frontend);
        load_balancer.add_servers(servers);
        if let Some(healthcheck) = healthcheck {
            load_balancer.add_healthcheck(healthcheck);
        }
        Ok(())
    }
    async fn remove_service(&self, service: &LoadBalancerService) -> Result<(), ExecutorError> {
        todo!("Remove service not implemented yet {service:?}")
    }
    async fn commit_config(&self) -> Result<(), ExecutorError> {
        OPNSenseFirewall::commit_config(self).await
    }
    /// Reloads/restarts the HAProxy service so committed changes take effect.
    async fn reload_restart(&self) -> Result<(), ExecutorError> {
        self.opnsense_config
            .write()
            .await
            .load_balancer()
            .reload_restart()
            .await
            .map_err(|e| ExecutorError::UnexpectedError(e.to_string()))
    }
    /// Installs the os-haproxy plugin when its config section is absent, then
    /// enables the load balancer.
    async fn ensure_initialized(&self) -> Result<(), ExecutorError> {
        let mut config = self.opnsense_config.write().await;
        let load_balancer = config.load_balancer();
        if let Some(config) = load_balancer.get_full_config() {
            debug!(
                "HAProxy config available in opnsense config, assuming it is already installed, {config:?}"
            );
        } else {
            config.install_package("os-haproxy").await.map_err(|e| {
                ExecutorError::UnexpectedError(format!(
                    "Executor failed when trying to install os-haproxy package with error {e:?}"
                ))
            })?;
        }
        config.load_balancer().enable(true);
        Ok(())
    }
    /// Reads back all configured services from the HAProxy XML config.
    async fn list_services(&self) -> Vec<LoadBalancerService> {
        let mut config = self.opnsense_config.write().await;
        let load_balancer = config.load_balancer();
        let haproxy_xml_config = load_balancer.get_full_config();
        haproxy_xml_config_to_harmony_loadbalancer(haproxy_xml_config)
    }
}
/// Converts the raw HAProxy XML configuration into Harmony's
/// [`LoadBalancerService`] model.
///
/// Returns an empty list when HAProxy is not configured. A frontend whose
/// `default_backend` cannot be resolved is kept, but with no servers and no
/// health check (a warning is logged).
///
/// # Panics
/// Panics when a frontend `bind` value is not a valid `SocketAddr`.
pub(crate) fn haproxy_xml_config_to_harmony_loadbalancer(
    haproxy: &Option<HAProxy>,
) -> Vec<LoadBalancerService> {
    let haproxy = match haproxy {
        Some(haproxy) => haproxy,
        None => return vec![],
    };
    haproxy
        .frontends
        .frontend
        .iter()
        .map(|frontend| {
            let mut backend_servers = vec![];
            let matching_backend = haproxy
                .backends
                .backends
                .iter()
                .find(|b| b.uuid == frontend.default_backend);
            let mut health_check = None;
            match matching_backend {
                Some(backend) => {
                    backend_servers.append(&mut get_servers_for_backend(backend, haproxy));
                    health_check = get_health_check_for_backend(backend, haproxy);
                }
                None => {
                    warn!(
                        "HAProxy config could not find a matching backend for frontend {:?}",
                        frontend
                    );
                }
            }
            LoadBalancerService {
                backend_servers,
                // clippy `expect_fun_call` fix: build the panic message lazily
                // instead of allocating it with `format!` on every record.
                listening_port: frontend.bind.parse().unwrap_or_else(|_| {
                    panic!(
                        "HAProxy frontend address should be a valid SocketAddr, got {}",
                        frontend.bind
                    )
                }),
                health_check,
            }
        })
        .collect()
}
/// Resolves the servers a backend references through its comma-separated
/// list of linked server UUIDs. Returns an empty list (and logs) when the
/// backend links no servers.
pub(crate) fn get_servers_for_backend(
    backend: &HAProxyBackend,
    haproxy: &HAProxy,
) -> Vec<BackendServer> {
    let linked = match &backend.linked_servers.content {
        Some(linked_servers) => linked_servers,
        None => {
            info!("No server defined for HAProxy backend {:?}", backend);
            return vec![];
        }
    };
    let wanted_uuids: Vec<&str> = linked.split(',').collect();
    haproxy
        .servers
        .servers
        .iter()
        .filter(|server| wanted_uuids.contains(&server.uuid.as_str()))
        .map(|server| BackendServer {
            address: server.address.clone(),
            port: server.port,
        })
        .collect()
}
/// Resolves the [`HealthCheck`] referenced by `backend`, if any.
///
/// Returns `None` when the backend has no health check configured, or when
/// the referenced check cannot be found in the HAProxy config.
///
/// # Panics
/// Panics on an unsupported health check type (only TCP and HTTP are
/// handled) or an unparsable TCP check port.
pub(crate) fn get_health_check_for_backend(
    backend: &HAProxyBackend,
    haproxy: &HAProxy,
) -> Option<HealthCheck> {
    // Idiom fix: `?` replaces the two `match … return None` blocks.
    let health_check_uuid = backend.health_check.content.as_ref()?;
    let haproxy_health_check = haproxy
        .healthchecks
        .healthchecks
        .iter()
        .find(|h| &h.uuid == health_check_uuid)?;
    let binding = haproxy_health_check.health_check_type.to_uppercase();
    match binding.as_str() {
        "TCP" => {
            // An absent or empty checkport means "use the server's port".
            let port = haproxy_health_check
                .checkport
                .content
                .as_ref()
                .filter(|checkport| !checkport.is_empty())
                .map(|checkport| {
                    // clippy `expect_fun_call` fix: lazy panic message.
                    checkport.parse().unwrap_or_else(|_| {
                        panic!(
                            "HAProxy check port should be a valid port number, got {checkport}"
                        )
                    })
                });
            Some(HealthCheck::TCP(port))
        }
        "HTTP" => {
            let path: String = haproxy_health_check
                .http_uri
                .content
                .clone()
                .unwrap_or_default();
            let method: HttpMethod = haproxy_health_check
                .http_method
                .content
                .clone()
                .unwrap_or_default()
                .into();
            // The expected status code is not modeled in the XML; assume 2xx.
            let status_code: HttpStatusCode = HttpStatusCode::Success2xx;
            Some(HealthCheck::HTTP(path, method, status_code))
        }
        other => panic!("Received unsupported health check type {}", other),
    }
}
/// Translates a Harmony [`LoadBalancerService`] into the four HAProxy XML
/// pieces OPNsense needs: a frontend, a backend, the backend's servers, and
/// an optional health check. Fresh UUIDs are generated for every element;
/// the frontend is wired to the backend, and the backend to its servers and
/// health check, through those UUIDs.
pub(crate) fn harmony_load_balancer_service_to_haproxy_xml(
    service: &LoadBalancerService,
) -> (
    Frontend,
    HAProxyBackend,
    Vec<HAProxyServer>,
    Option<HAProxyHealthCheck>,
) {
    // Here we have to build :
    // One frontend
    // One backend
    // One Option<healthcheck>
    // Vec of servers
    //
    // Then merge then with haproxy config individually
    //
    // We also have to take into account that it is entirely possible that a backe uses a server
    // with the same definition as in another backend. So when creating a new backend, we must not
    // blindly create new servers because the backend does not exist yet. Even if it is a new
    // backend, it may very well reuse existing servers
    //
    // Also we need to support router integration for port forwarding on WAN as a strategy to
    // handle dyndns
    // server is standalone
    // backend points on server
    // backend points to health check
    // frontend points to backend
    let healthcheck = if let Some(health_check) = &service.health_check {
        match health_check {
            HealthCheck::HTTP(path, http_method, _http_status_code) => {
                let haproxy_check = HAProxyHealthCheck {
                    name: format!("HTTP_{http_method}_{path}"),
                    uuid: Uuid::new_v4().to_string(),
                    http_method: http_method.to_string().into(),
                    health_check_type: "http".to_string(),
                    http_uri: path.clone().into(),
                    interval: "2s".to_string(),
                    ..Default::default()
                };
                Some(haproxy_check)
            }
            HealthCheck::TCP(port) => {
                // No explicit port means "check the server's own port".
                let (port, port_name) = match port {
                    Some(port) => (Some(port.to_string()), port.to_string()),
                    None => (None, "serverport".to_string()),
                };
                let haproxy_check = HAProxyHealthCheck {
                    name: format!("TCP_{port_name}"),
                    uuid: Uuid::new_v4().to_string(),
                    health_check_type: "tcp".to_string(),
                    checkport: port.into(),
                    interval: "2s".to_string(),
                    ..Default::default()
                };
                Some(haproxy_check)
            }
        }
    } else {
        None
    };
    debug!("Built healthcheck {healthcheck:?}");
    let servers: Vec<HAProxyServer> = service
        .backend_servers
        .iter()
        .map(server_to_haproxy_server)
        .collect();
    debug!("Built servers {servers:?}");
    let mut backend = HAProxyBackend {
        uuid: Uuid::new_v4().to_string(),
        enabled: 1,
        name: format!("backend_{}", service.listening_port),
        algorithm: "roundrobin".to_string(),
        random_draws: Some(2),
        stickiness_expire: "30m".to_string(),
        stickiness_size: "50k".to_string(),
        stickiness_conn_rate_period: "10s".to_string(),
        stickiness_sess_rate_period: "10s".to_string(),
        stickiness_http_req_rate_period: "10s".to_string(),
        stickiness_http_err_rate_period: "10s".to_string(),
        stickiness_bytes_in_rate_period: "1m".to_string(),
        stickiness_bytes_out_rate_period: "1m".to_string(),
        mode: "tcp".to_string(), // TODO do not depend on health check here
        ..Default::default()
    };
    info!("HAPRoxy backend algorithm is currently hardcoded to roundrobin");
    // Wire the backend to its health check and servers via their UUIDs.
    if let Some(hcheck) = &healthcheck {
        backend.health_check_enabled = 1;
        backend.health_check = hcheck.uuid.clone().into();
    }
    backend.linked_servers = servers
        .iter()
        .map(|s| s.uuid.as_str())
        .collect::<Vec<&str>>()
        .join(",")
        .into();
    debug!("Built backend {backend:?}");
    let frontend = Frontend {
        uuid: uuid::Uuid::new_v4().to_string(),
        enabled: 1,
        name: format!("frontend_{}", service.listening_port),
        bind: service.listening_port.to_string(),
        mode: "tcp".to_string(), // TODO do not depend on health check here
        default_backend: backend.uuid.clone(),
        ..Default::default()
    };
    info!("HAPRoxy frontend and backend mode currently hardcoded to tcp");
    debug!("Built frontend {frontend:?}");
    (frontend, backend, servers, healthcheck)
}
/// Maps a Harmony [`BackendServer`] onto a fresh HAProxy server entry.
/// A new UUID is generated on every call, so repeated conversions of the
/// same server produce distinct HAProxy entries.
fn server_to_haproxy_server(server: &BackendServer) -> HAProxyServer {
    HAProxyServer {
        uuid: Uuid::new_v4().to_string(),
        // Name encodes address and port for readability in the OPNsense UI.
        name: format!("{}_{}", &server.address, &server.port),
        enabled: 1,
        address: server.address.clone(),
        port: server.port,
        mode: "active".to_string(),
        server_type: "static".to_string(),
        ..Default::default()
    }
}
#[cfg(test)]
mod tests {
    use opnsense_config_xml::HAProxyServer;
    use super::*;
    #[test]
    fn test_get_servers_for_backend_with_linked_servers() {
        // Create a backend with linked servers
        let mut backend = HAProxyBackend::default();
        backend.linked_servers.content = Some("server1,server2".to_string());
        // Create an HAProxy instance with servers
        let mut haproxy = HAProxy::default();
        let mut server = HAProxyServer::default();
        server.uuid = "server1".to_string();
        server.address = "192.168.1.1".to_string();
        server.port = 80;
        haproxy.servers.servers.push(server);
        let mut server = HAProxyServer::default();
        server.uuid = "server3".to_string();
        server.address = "192.168.1.3".to_string();
        server.port = 8080;
        // Bug fix: "server3" was constructed but never added to the config,
        // so the "unlinked server is filtered out" path was not exercised
        // and the binding was dead code.
        haproxy.servers.servers.push(server);
        // Call the function
        let result = get_servers_for_backend(&backend, &haproxy);
        // Check the result: only the linked "server1" is returned.
        assert_eq!(result, vec![BackendServer {
            address: "192.168.1.1".to_string(),
            port: 80,
        },]);
    }
    #[test]
    fn test_get_servers_for_backend_no_linked_servers() {
        // Create a backend with no linked servers
        let backend = HAProxyBackend::default();
        // Create an HAProxy instance with servers
        let mut haproxy = HAProxy::default();
        let mut server = HAProxyServer::default();
        server.uuid = "server1".to_string();
        server.address = "192.168.1.1".to_string();
        server.port = 80;
        haproxy.servers.servers.push(server);
        // Call the function
        let result = get_servers_for_backend(&backend, &haproxy);
        // Check the result
        assert_eq!(result, vec![]);
    }
    #[test]
    fn test_get_servers_for_backend_no_matching_servers() {
        // Create a backend with linked servers that do not match any in HAProxy
        let mut backend = HAProxyBackend::default();
        backend.linked_servers.content = Some("server4,server5".to_string());
        // Create an HAProxy instance with servers
        let mut haproxy = HAProxy::default();
        let mut server = HAProxyServer::default();
        server.uuid = "server1".to_string();
        server.address = "192.168.1.1".to_string();
        server.port = 80;
        haproxy.servers.servers.push(server);
        // Call the function
        let result = get_servers_for_backend(&backend, &haproxy);
        // Check the result
        assert_eq!(result, vec![]);
    }
    #[test]
    fn test_get_servers_for_backend_multiple_linked_servers() {
        // Create a backend with multiple linked servers
        let mut backend = HAProxyBackend::default();
        backend.linked_servers.content = Some("server1,server2".to_string());
        // Create an HAProxy instance with matching servers
        let mut haproxy = HAProxy::default();
        let mut server = HAProxyServer::default();
        server.uuid = "server1".to_string();
        server.address = "some-hostname.test.mcd".to_string();
        server.port = 80;
        haproxy.servers.servers.push(server);
        let mut server = HAProxyServer::default();
        server.uuid = "server2".to_string();
        server.address = "192.168.1.2".to_string();
        server.port = 8080;
        haproxy.servers.servers.push(server);
        // Call the function
        let result = get_servers_for_backend(&backend, &haproxy);
        // Check the result: order follows the server list, not the link list.
        assert_eq!(result, vec![
            BackendServer {
                address: "some-hostname.test.mcd".to_string(),
                port: 80,
            },
            BackendServer {
                address: "192.168.1.2".to_string(),
                port: 8080,
            },
        ]);
    }
}

View File

@@ -0,0 +1,15 @@
use crate::hardware::ManagementInterface;
use derive_new::new;
/// Management interface for hosts controlled through the OPNsense appliance's
/// SSH access rather than a dedicated BMC.
#[derive(new)]
pub struct OPNSenseManagementInterface {}
impl ManagementInterface for OPNSenseManagementInterface {
    /// Not implemented yet.
    fn boot_to_pxe(&self) {
        todo!()
    }
    fn get_supported_protocol_names(&self) -> String {
        "OPNSenseSSH".to_string()
    }
}

View File

@@ -0,0 +1,51 @@
mod dhcp;
mod dns;
mod firewall;
mod http;
mod load_balancer;
mod management;
mod tftp;
use std::sync::Arc;
pub use management::*;
use opnsense_config_xml::Host;
use tokio::sync::RwLock;
use crate::{
executors::ExecutorError,
topology::{IpAddress, LogicalHost},
};
/// Harmony's handle on an OPNsense firewall appliance. The same appliance
/// backs several topology roles (DHCP, DNS, HTTP, TFTP, load balancer,
/// firewall), each implemented in a sibling module against this shared
/// configuration handle.
#[derive(Debug, Clone)]
pub struct OPNSenseFirewall {
    // Shared, lock-protected view of the remote OPNsense configuration.
    opnsense_config: Arc<RwLock<opnsense_config::Config>>,
    // Logical identity (name + IP) of the appliance.
    host: LogicalHost,
}
impl OPNSenseFirewall {
    /// Address of the appliance itself.
    pub fn get_ip(&self) -> IpAddress {
        self.host.ip
    }
    /// Connects to the appliance with the given credentials and loads its
    /// current configuration into memory.
    pub async fn new(host: LogicalHost, port: Option<u16>, username: &str, password: &str) -> Self {
        Self {
            opnsense_config: Arc::new(RwLock::new(
                opnsense_config::Config::from_credentials(host.ip, port, username, password).await,
            )),
            host,
        }
    }
    /// Shared handle to the underlying configuration, for advanced callers.
    pub fn get_opnsense_config(&self) -> Arc<RwLock<opnsense_config::Config>> {
        self.opnsense_config.clone()
    }
    /// Applies the in-memory configuration to the live appliance.
    async fn commit_config(&self) -> Result<(), ExecutorError> {
        self.opnsense_config
            .read()
            .await
            .apply()
            .await
            .map_err(|e| ExecutorError::UnexpectedError(e.to_string()))
    }
}

View File

@@ -0,0 +1,77 @@
use async_trait::async_trait;
use log::info;
use crate::{
executors::ExecutorError,
topology::{IpAddress, TftpServer, Url},
};
use super::OPNSenseFirewall;
#[async_trait]
impl TftpServer for OPNSenseFirewall {
    /// Uploads the files referenced by `url` into the OPNsense TFTP root.
    async fn serve_files(&self, url: &Url) -> Result<(), ExecutorError> {
        let tftp_root_path = "/usr/local/tftp";
        let config = self.opnsense_config.read().await;
        info!("Uploading files from url {url} to {tftp_root_path}");
        match url {
            Url::LocalFolder(path) => {
                config
                    .upload_files(path, tftp_root_path)
                    .await
                    .map_err(|e| ExecutorError::UnexpectedError(e.to_string()))?;
            }
            Url::Remote(url) => todo!("This url is not supported yet {url}"),
        }
        Ok(())
    }
    fn get_ip(&self) -> IpAddress {
        todo!()
    }
    /// Sets the address the TFTP service listens on (staged; not committed).
    async fn set_ip(&self, ip: IpAddress) -> Result<(), ExecutorError> {
        info!("Setting listen_ip to {}", &ip);
        self.opnsense_config
            .write()
            .await
            .tftp()
            .listen_ip(&ip.to_string());
        Ok(())
    }
    async fn commit_config(&self) -> Result<(), ExecutorError> {
        OPNSenseFirewall::commit_config(self).await
    }
    /// Reloads/restarts the TFTP service so committed changes take effect.
    async fn reload_restart(&self) -> Result<(), ExecutorError> {
        self.opnsense_config
            .write()
            .await
            .tftp()
            .reload_restart()
            .await
            .map_err(|e| ExecutorError::UnexpectedError(e.to_string()))
    }
    /// Installs the os-tftp plugin when its config section is absent, then
    /// enables the TFTP service.
    async fn ensure_initialized(&self) -> Result<(), ExecutorError> {
        let mut config = self.opnsense_config.write().await;
        // Idiom fix (clippy `redundant_pattern_matching`): use `is_none()`
        // instead of `if let None = …`.
        let tftp_config_missing = config.tftp().get_full_config().is_none();
        if tftp_config_missing {
            info!("Tftp config not available in opnsense config, installing package");
            config.install_package("os-tftp").await.map_err(|e| {
                ExecutorError::UnexpectedError(format!(
                    "Executor failed when trying to install os-tftp package with error {e:?}"
                ))
            })?;
        } else {
            info!("Tftp config available in opnsense config, assuming it is already installed");
        }
        info!("Enabling tftp server");
        config.tftp().enable(true);
        Ok(())
    }
}

7
harmony/src/lib.rs Normal file
View File

@@ -0,0 +1,7 @@
mod domain;
pub use domain::*;
pub mod infra;
pub mod modules;
#[cfg(test)]
mod test {}

16
harmony/src/main_ipmi.rs Normal file
View File

@@ -0,0 +1,16 @@
use rust_ipmi::{IPMIClient, NetFn};
/// Minimal IPMI smoke test: connects to a BMC and issues one raw request.
fn main() {
    let connection_string = "192.168.11.132:443";
    println!("Hello, world! {}", connection_string);
    let mut client: IPMIClient = IPMIClient::new(connection_string).expect("Failed to create ipmi client");
    // NOTE(review): hardcoded placeholder credentials — move to config/env.
    client.establish_connection("root", "YOUR_PASSWORD")
        .expect("Failed to establish connection with the BMC");
    // Raw App-netfn command 0x3b with data [0x04] — presumably a
    // vendor-specific request; TODO confirm against the BMC's IPMI spec.
    let response = client.send_raw_request(NetFn::App, 0x3b, Some(vec![0x04]));
    match response {
        Ok(response) => println!("{}", response),
        Err(err) => println!("Got error {:?}", err),
    }
}

View File

@@ -0,0 +1,20 @@
use libredfish::{Config, Redfish};
use reqwest::blocking::Client;
/// Minimal Redfish smoke test: queries the power status of a BMC endpoint,
/// accepting self-signed TLS certificates.
pub fn main() {
    // Self-signed BMC certs are the norm; certificate validation is disabled.
    let client = Client::builder().danger_accept_invalid_certs(true).build().expect("Failed to build reqwest client");
    let redfish = Redfish::new(
        client,
        Config {
            user: Some(String::from("Administrator")),
            endpoint: String::from("10.10.8.104/redfish/v1"),
            // password: Some(String::from("YOUR_PASSWORD")),
            // NOTE(review): deliberately wrong password (auth-failure test?) —
            // confirm intent before reusing this binary.
            password: Some(String::from("wrongpass")),
            port: None,
        },
    );
    let response = redfish.get_power_status().expect("Failed redfish request");
    println!("Got power {:?}", response);
}

180
harmony/src/modules/dhcp.rs Normal file
View File

@@ -0,0 +1,180 @@
use std::sync::Arc;
use async_trait::async_trait;
use derive_new::new;
use log::info;
use crate::{
domain::{data::Version, interpret::InterpretStatus},
interpret::{Interpret, InterpretError, InterpretName, Outcome},
inventory::Inventory,
topology::{DHCPStaticEntry, HAClusterTopology, HostBinding, IpAddress},
};
use crate::domain::score::Score;
/// Declarative description of the desired DHCP state: static host bindings
/// plus optional PXE options (next-server address and boot filename).
#[derive(Debug, new, Clone)]
pub struct DhcpScore {
    // Logical-to-physical host pairs that receive static DHCP leases.
    pub host_binding: Vec<HostBinding>,
    // PXE "next-server" (TFTP) address, when PXE boot is desired.
    pub next_server: Option<IpAddress>,
    // PXE boot filename handed out with leases, if any.
    pub boot_filename: Option<String>,
}
impl Score for DhcpScore {
    /// Materializes this score into an executable [`DhcpInterpret`].
    fn create_interpret(&self) -> Box<dyn Interpret> {
        Box::new(DhcpInterpret::new(self.clone()))
    }
    fn name(&self) -> String {
        "DhcpScore".to_string()
    }
    fn clone_box(&self) -> Box<dyn Score> {
        Box::new(self.clone())
    }
}
// https://docs.opnsense.org/manual/dhcp.html#advanced-settings
/// Interpret that applies a [`DhcpScore`] to a topology's DHCP server.
#[derive(Debug, Clone)]
pub struct DhcpInterpret {
    score: DhcpScore,
    version: Version,
    status: InterpretStatus,
}
impl DhcpInterpret {
    /// Creates a queued interpret at version 1.0.0 for the given score.
    pub fn new(score: DhcpScore) -> Self {
        let version = Version::from("1.0.0").expect("Version should be valid");
        Self {
            version,
            score,
            status: InterpretStatus::QUEUED,
        }
    }
    /// Registers one DHCP static mapping per host binding on the topology's
    /// DHCP server. Only IPv4 bindings are supported.
    async fn add_static_entries(
        &self,
        _inventory: &Inventory,
        topology: &HAClusterTopology,
    ) -> Result<Outcome, InterpretError> {
        let dhcp_entries: Vec<DHCPStaticEntry> = self
            .score
            .host_binding
            .iter()
            .map(|binding| {
                let ip = match binding.logical_host.ip {
                    std::net::IpAddr::V4(ipv4) => ipv4,
                    std::net::IpAddr::V6(_) => {
                        unimplemented!("DHCPStaticEntry only supports ipv4 at the moment")
                    }
                };
                DHCPStaticEntry {
                    name: binding.logical_host.name.clone(),
                    mac: binding.physical_host.cluster_mac(),
                    ip,
                }
            })
            .collect();
        info!("DHCPStaticEntry : {:?}", dhcp_entries);
        let dhcp_server = Arc::new(topology.dhcp_server.clone());
        info!("DHCP server : {:?}", dhcp_server);
        let number_new_entries = dhcp_entries.len();
        for entry in dhcp_entries.into_iter() {
            // Bug fix: a mapping failure previously hit `todo!()` and
            // panicked; propagate the error to the caller instead.
            dhcp_server.add_static_mapping(&entry).await?;
            info!("Successfully registered DHCPStaticEntry {}", entry);
        }
        Ok(Outcome::new(
            InterpretStatus::SUCCESS,
            format!("Dhcp Interpret registered {} entries", number_new_entries),
        ))
    }
    /// Applies the PXE options (next-server and boot filename) when present
    /// in the score; returns a NOOP outcome when neither is set.
    async fn set_pxe_options(
        &self,
        _inventory: &Inventory,
        topology: &HAClusterTopology,
    ) -> Result<Outcome, InterpretError> {
        let next_server_outcome = match self.score.next_server {
            Some(next_server) => {
                let dhcp_server = Arc::new(topology.dhcp_server.clone());
                dhcp_server.set_next_server(next_server).await?;
                Outcome::new(
                    InterpretStatus::SUCCESS,
                    format!("Dhcp Interpret Set next boot to {next_server}"),
                )
            }
            None => Outcome::noop(),
        };
        let boot_filename_outcome = match &self.score.boot_filename {
            Some(boot_filename) => {
                let dhcp_server = Arc::new(topology.dhcp_server.clone());
                // clippy `needless_borrow` fix: `boot_filename` is already &String.
                dhcp_server.set_boot_filename(boot_filename).await?;
                Outcome::new(
                    InterpretStatus::SUCCESS,
                    format!("Dhcp Interpret Set boot filename to {boot_filename}"),
                )
            }
            None => Outcome::noop(),
        };
        if next_server_outcome.status == InterpretStatus::NOOP
            && boot_filename_outcome.status == InterpretStatus::NOOP
        {
            return Ok(Outcome::noop());
        }
        Ok(Outcome::new(
            InterpretStatus::SUCCESS,
            format!(
                // Bug fix: the message previously printed `boot_filename`
                // twice instead of the next-server address.
                "Dhcp Interpret Set next boot to {:?} and boot_filename to {:?}",
                self.score.next_server, self.score.boot_filename
            ),
        ))
    }
}
#[async_trait]
impl Interpret for DhcpInterpret {
    fn get_name(&self) -> InterpretName {
        InterpretName::OPNSenseDHCP
    }

    fn get_version(&self) -> crate::domain::data::Version {
        self.version.clone()
    }

    fn get_status(&self) -> InterpretStatus {
        self.status.clone()
    }

    fn get_children(&self) -> Vec<crate::domain::data::Id> {
        todo!()
    }

    /// Apply PXE options and static host entries, then commit the DHCP
    /// server configuration so both changes take effect together.
    async fn execute(
        &self,
        inventory: &Inventory,
        topology: &HAClusterTopology,
    ) -> Result<Outcome, InterpretError> {
        info!("Executing {} on inventory {inventory:?}", self.get_name());
        self.set_pxe_options(inventory, topology).await?;
        self.add_static_entries(inventory, topology).await?;
        topology.dhcp_server.commit_config().await?;
        Ok(Outcome::new(
            InterpretStatus::SUCCESS,
            // `format!` with no interpolation replaced by `to_string`
            // (clippy::useless_format).
            "Dhcp Interpret execution successful".to_string(),
        ))
    }
}

122
harmony/src/modules/dns.rs Normal file
View File

@@ -0,0 +1,122 @@
use async_trait::async_trait;
use derive_new::new;
use log::info;
use crate::{
data::Version,
interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
inventory::Inventory,
score::Score,
topology::{DnsRecord, HAClusterTopology},
};
/// Desired DNS state: records to ensure plus an optional toggle controlling
/// whether DHCP leases are registered in DNS.
#[derive(Debug, new, Clone)]
pub struct DnsScore {
    // Records that must exist on the DNS server.
    dns_entries: Vec<DnsRecord>,
    // Some(flag) sets DHCP-lease registration explicitly; None leaves it as-is.
    register_dhcp_leases: Option<bool>,
}
impl Score for DnsScore {
    fn name(&self) -> String {
        String::from("DnsScore")
    }

    /// Wrap a clone of this score in a queued [`DnsInterpret`].
    fn create_interpret(&self) -> Box<dyn Interpret> {
        let interpret = DnsInterpret::new(self.clone());
        Box::new(interpret)
    }

    fn clone_box(&self) -> Box<dyn Score> {
        Box::new(self.clone())
    }
}
// NOTE(review): the link below was copy-pasted from the DHCP module and does
// not document DNS — presumably the Unbound manual was intended; confirm.
// https://docs.opnsense.org/manual/dhcp.html#advanced-settings
/// Interpret that applies a [`DnsScore`] to the topology's DNS server.
#[derive(Debug, Clone)]
pub struct DnsInterpret {
    score: DnsScore,         // desired DNS state to apply
    version: Version,        // interpret implementation version
    status: InterpretStatus, // lifecycle status; starts QUEUED
}
impl DnsInterpret {
    /// Build a queued interpret for the given DNS score.
    pub fn new(score: DnsScore) -> Self {
        let version = Version::from("1.0.0").expect("Version should be valid");
        Self {
            version,
            score,
            status: InterpretStatus::QUEUED,
        }
    }

    /// Apply the `register_dhcp_leases` toggle when the score specifies one;
    /// a `None` value leaves the server configuration untouched.
    async fn serve_dhcp_entries(
        &self,
        _inventory: &Inventory,
        topology: &HAClusterTopology,
    ) -> Result<Outcome, InterpretError> {
        let dns = topology.dns_server.clone();
        if let Some(register) = self.score.register_dhcp_leases {
            dns.register_dhcp_leases(register).await?;
        }
        Ok(Outcome::new(
            InterpretStatus::SUCCESS,
            // Typo fix: message previously read "successfull".
            "DNS Interpret execution successful".to_string(),
        ))
    }

    /// Ensure every record from the score exists on the DNS server.
    async fn ensure_hosts_registered(
        &self,
        topology: &HAClusterTopology,
    ) -> Result<Outcome, InterpretError> {
        let entries = &self.score.dns_entries;
        topology
            .dns_server
            .ensure_hosts_registered(entries.clone())
            .await?;
        Ok(Outcome::new(
            InterpretStatus::SUCCESS,
            format!(
                "DnsInterpret registered {} hosts successfully",
                entries.len()
            ),
        ))
    }
}
#[async_trait]
impl Interpret for DnsInterpret {
    fn get_name(&self) -> InterpretName {
        InterpretName::OPNSenseDns
    }

    fn get_version(&self) -> crate::domain::data::Version {
        self.version.clone()
    }

    fn get_status(&self) -> InterpretStatus {
        self.status.clone()
    }

    fn get_children(&self) -> Vec<crate::domain::data::Id> {
        todo!()
    }

    /// Register DHCP leases (when requested), ensure all score records
    /// exist, then commit the DNS server configuration.
    async fn execute(
        &self,
        inventory: &Inventory,
        topology: &HAClusterTopology,
    ) -> Result<Outcome, InterpretError> {
        info!("Executing {} on inventory {inventory:?}", self.get_name());
        self.serve_dhcp_entries(inventory, topology).await?;
        // `topology` is already a reference; the previous `&topology` was a
        // needless borrow (clippy::needless_borrow).
        self.ensure_hosts_registered(topology).await?;
        topology.dns_server.commit_config().await?;
        Ok(Outcome::new(
            InterpretStatus::SUCCESS,
            // `format!` with no interpolation replaced (clippy::useless_format).
            "Dns Interpret execution successful".to_string(),
        ))
    }
}

View File

@@ -0,0 +1,138 @@
use async_trait::async_trait;
use crate::{
data::Version,
interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
inventory::Inventory,
score::Score,
topology::HAClusterTopology,
};
/// Score that always errors. This is only useful for development/testing purposes. It does nothing
/// except returning Err(InterpretError) when interpreted.
#[derive(Debug, Clone)]
pub struct ErrorScore;
impl Score for ErrorScore {
fn create_interpret(&self) -> Box<dyn crate::interpret::Interpret> {
Box::new(DummyInterpret {
result: Err(InterpretError::new("Error Score default error".to_string())),
status: InterpretStatus::QUEUED,
})
}
fn name(&self) -> String {
"ErrorScore".to_string()
}
fn clone_box(&self) -> Box<dyn Score> {
Box::new(self.clone())
}
}
/// Score that always succeeds. Development/testing helper: interpreting it
/// yields an interpret whose `execute` returns `Ok(Outcome::success)`.
#[derive(Debug, Clone)]
pub struct SuccessScore;

impl Score for SuccessScore {
    fn create_interpret(&self) -> Box<dyn crate::interpret::Interpret> {
        let outcome = Outcome::success("SuccessScore default success".to_string());
        Box::new(DummyInterpret {
            status: InterpretStatus::QUEUED,
            result: Ok(outcome),
        })
    }

    fn name(&self) -> String {
        String::from("SuccessScore")
    }

    fn clone_box(&self) -> Box<dyn Score> {
        Box::new(self.clone())
    }
}
/// An interpret that only returns the result it is given when built. It does nothing else. Only
/// useful for development/testing purposes.
#[derive(Debug)]
struct DummyInterpret {
    // Status reported verbatim by `get_status`.
    status: InterpretStatus,
    // Pre-baked result; `execute` returns a clone of it on every call.
    result: Result<Outcome, InterpretError>,
}
#[async_trait]
impl Interpret for DummyInterpret {
    /// Return a clone of the pre-baked result; no real work is performed.
    async fn execute(
        &self,
        _inventory: &Inventory,
        _topology: &HAClusterTopology,
    ) -> Result<Outcome, InterpretError> {
        self.result.clone()
    }

    fn get_name(&self) -> InterpretName {
        InterpretName::Dummy
    }

    fn get_version(&self) -> Version {
        Version::from("1.0.0").unwrap()
    }

    fn get_status(&self) -> InterpretStatus {
        self.status.clone()
    }

    fn get_children(&self) -> Vec<crate::domain::data::Id> {
        todo!()
    }
}
/// Score that always panics. Development/testing helper: interpreting it
/// yields an interpret that calls `panic!` when executed.
#[derive(Debug, Clone)]
pub struct PanicScore;

impl Score for PanicScore {
    fn create_interpret(&self) -> Box<dyn crate::interpret::Interpret> {
        Box::new(PanicInterpret {})
    }

    fn name(&self) -> String {
        String::from("PanicScore")
    }

    fn clone_box(&self) -> Box<dyn Score> {
        Box::new(self.clone())
    }
}
/// An interpret that always panics when executed. Useful for
/// development/testing purposes only.
#[derive(Debug)]
struct PanicInterpret;

#[async_trait]
impl Interpret for PanicInterpret {
    /// Unconditionally panics; never returns a result.
    async fn execute(
        &self,
        _inventory: &Inventory,
        _topology: &HAClusterTopology,
    ) -> Result<Outcome, InterpretError> {
        panic!("Panic interpret always panics when executed")
    }

    fn get_name(&self) -> InterpretName {
        InterpretName::Panic
    }

    fn get_version(&self) -> Version {
        Version::from("1.0.0").unwrap()
    }

    fn get_status(&self) -> InterpretStatus {
        InterpretStatus::QUEUED
    }

    fn get_children(&self) -> Vec<crate::domain::data::Id> {
        todo!()
    }
}

View File

@@ -0,0 +1,70 @@
use async_trait::async_trait;
use derive_new::new;
use crate::{
data::{Id, Version},
interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
inventory::Inventory,
score::Score,
topology::{HAClusterTopology, Url},
};
/// Score describing a set of files the topology's HTTP server must serve.
#[derive(Debug, new, Clone)]
pub struct HttpScore {
    files_to_serve: Url,
}

impl Score for HttpScore {
    fn name(&self) -> String {
        String::from("HttpScore")
    }

    fn create_interpret(&self) -> Box<dyn Interpret> {
        let interpret = HttpInterpret::new(self.clone());
        Box::new(interpret)
    }

    fn clone_box(&self) -> Box<dyn Score> {
        Box::new(self.clone())
    }
}
/// Interpret that configures the topology's HTTP file server.
#[derive(Debug, new, Clone)]
pub struct HttpInterpret {
    score: HttpScore,
}

#[async_trait]
impl Interpret for HttpInterpret {
    fn get_name(&self) -> InterpretName {
        InterpretName::Http
    }

    fn get_version(&self) -> Version {
        todo!()
    }

    fn get_status(&self) -> InterpretStatus {
        todo!()
    }

    fn get_children(&self) -> Vec<Id> {
        todo!()
    }

    /// Initialize the HTTP server, point it at the score's file source, then
    /// commit and restart so the change takes effect.
    async fn execute(
        &self,
        _inventory: &Inventory,
        topology: &HAClusterTopology,
    ) -> Result<Outcome, InterpretError> {
        let http_server = &topology.http_server;
        http_server.ensure_initialized().await?;
        // NOTE(review): unlike TftpInterpret, the server IP is not set here
        // (a `set_ip(topology.router.get_gateway())` call was commented out)
        // — confirm that is intentional.
        http_server.serve_files(&self.score.files_to_serve).await?;
        http_server.commit_config().await?;
        http_server.reload_restart().await?;
        Ok(Outcome::success(format!(
            "Http Server running and serving files from {}",
            self.score.files_to_serve
        )))
    }
}

View File

@@ -0,0 +1,58 @@
use k8s_openapi::api::apps::v1::Deployment;
use serde_json::json;
use crate::{interpret::Interpret, score::Score};
use super::resource::{K8sResourceInterpret, K8sResourceScore};
/// Minimal deployment description: a name (used for metadata and the `app`
/// label/selector) and a container image.
#[derive(Debug, Clone)]
pub struct K8sDeploymentScore {
    pub name: String,
    pub image: String,
}
impl Score for K8sDeploymentScore {
    /// Render this score as a minimal Kubernetes `Deployment` (single
    /// container, `app` label = `name`) and wrap it in a
    /// [`K8sResourceInterpret`] for application to the cluster.
    fn create_interpret(&self) -> Box<dyn Interpret> {
        let deployment: Deployment = serde_json::from_value(json!(
            {
                "metadata": {
                    "name": self.name
                },
                "spec": {
                    "selector": {
                        "matchLabels": {
                            "app": self.name
                        },
                    },
                    "template": {
                        "metadata": {
                            "labels": {
                                "app": self.name
                            },
                        },
                        "spec": {
                            "containers": [
                                {
                                    "image": self.image,
                                    // NOTE(review): container *name* is set to the
                                    // image string; image refs containing '/' or ':'
                                    // are not valid container names — confirm.
                                    "name": self.image
                                }
                            ]
                        }
                    }
                }
            }
        ))
        .expect("Deployment literal should deserialize into k8s-openapi Deployment");
        Box::new(K8sResourceInterpret {
            // `deployment` is moved in; the previous `.clone()` here was
            // redundant (clippy::redundant_clone).
            score: K8sResourceScore::single(deployment),
        })
    }

    fn name(&self) -> String {
        "K8sDeploymentScore".to_string()
    }

    fn clone_box(&self) -> Box<dyn Score> {
        Box::new(self.clone())
    }
}

View File

@@ -0,0 +1,2 @@
/// High-level `Deployment` score built from a name and an image.
pub mod deployment;
/// Generic typed Kubernetes resource score and interpret.
pub mod resource;

View File

@@ -0,0 +1,101 @@
use async_trait::async_trait;
use k8s_openapi::NamespaceResourceScope;
use kube::Resource;
use serde::de::DeserializeOwned;
use crate::{
data::{Id, Version},
interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
inventory::Inventory,
score::Score,
topology::HAClusterTopology,
};
/// Score holding one or more typed Kubernetes resources to apply.
#[derive(Debug, Clone)]
pub struct K8sResourceScore<K: Resource + std::fmt::Debug> {
    pub resource: Vec<K>,
}

impl<K: Resource + std::fmt::Debug> K8sResourceScore<K> {
    /// Convenience constructor wrapping a single resource.
    pub fn single(resource: K) -> Self {
        Self {
            resource: vec![resource],
        }
    }
}
impl<
K: Resource<Scope = NamespaceResourceScope>
+ std::fmt::Debug
+ Sync
+ DeserializeOwned
+ Default
+ serde::Serialize
+ 'static
+ Send
+ Clone,
> Score for K8sResourceScore<K>
where
<K as kube::Resource>::DynamicType: Default,
{
fn create_interpret(&self) -> Box<dyn Interpret> {
todo!()
}
fn name(&self) -> String {
"K8sResourceScore".to_string()
}
fn clone_box(&self) -> Box<dyn Score> {
Box::new(self.clone())
}
}
/// Interpret that applies a [`K8sResourceScore`]'s resources to the cluster.
#[derive(Debug)]
pub struct K8sResourceInterpret<K: Resource + std::fmt::Debug + Sync + Send> {
    pub score: K8sResourceScore<K>,
}
#[async_trait]
impl<
        K: Resource<Scope = NamespaceResourceScope>
            + Clone
            + std::fmt::Debug
            + DeserializeOwned
            + serde::Serialize
            + Default
            + Send
            + Sync,
    > Interpret for K8sResourceInterpret<K>
where
    <K as kube::Resource>::DynamicType: Default,
{
    /// Apply every resource in the score through the topology's OpenShift
    /// client via `apply_namespaced`.
    ///
    /// # Panics
    /// Panics when the environment cannot provide a client (`expect`).
    async fn execute(
        &self,
        _inventory: &Inventory,
        topology: &HAClusterTopology,
    ) -> Result<Outcome, InterpretError> {
        topology
            .oc_client()
            .await
            // Typo fix: was "instanciate".
            .expect("Environment should provide enough information to instantiate a client")
            .apply_namespaced(&self.score.resource)
            .await?;
        Ok(Outcome::success(
            "Successfully applied resource".to_string(),
        ))
    }

    fn get_name(&self) -> InterpretName {
        todo!()
    }

    fn get_version(&self) -> Version {
        todo!()
    }

    fn get_status(&self) -> InterpretStatus {
        todo!()
    }

    fn get_children(&self) -> Vec<Id> {
        todo!()
    }
}

View File

@@ -0,0 +1,103 @@
use async_trait::async_trait;
use log::info;
use crate::{
data::{Id, Version},
interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
inventory::Inventory,
score::Score,
topology::{HAClusterTopology, LoadBalancerService},
};
/// Desired load-balancer state, split into public and private service lists.
#[derive(Debug, Clone)]
pub struct LoadBalancerScore {
    pub public_services: Vec<LoadBalancerService>,
    pub private_services: Vec<LoadBalancerService>,
    // TODO public and private services are likely wrong, should be a single list of
    // (listen_interface, LoadBalancerService) tuples or something like that
    // I am not sure what to use as listen_interface, should it be interface name, ip address,
    // uuid?
}
impl Score for LoadBalancerScore {
    fn name(&self) -> String {
        String::from("LoadBalancerScore")
    }

    /// Wrap a clone of this score in a queued [`LoadBalancerInterpret`].
    fn create_interpret(&self) -> Box<dyn Interpret> {
        let interpret = LoadBalancerInterpret::new(self.clone());
        Box::new(interpret)
    }

    fn clone_box(&self) -> Box<dyn Score> {
        Box::new(self.clone())
    }
}
/// Interpret that applies a [`LoadBalancerScore`] to the topology's
/// load balancer.
#[derive(Debug)]
pub struct LoadBalancerInterpret {
    version: Version,
    status: InterpretStatus,
    score: LoadBalancerScore,
}

impl LoadBalancerInterpret {
    /// Build a queued interpret for the given load-balancer score.
    pub fn new(score: LoadBalancerScore) -> Self {
        let version = Version::from("1.0.0").expect("Version should be valid");
        Self {
            version,
            status: InterpretStatus::QUEUED,
            score,
        }
    }
}
#[async_trait]
impl Interpret for LoadBalancerInterpret {
    /// Initialize the load balancer, ensure every configured service exists,
    /// then commit and restart haproxy so the configuration is live.
    async fn execute(
        &self,
        _inventory: &Inventory,
        topology: &HAClusterTopology,
    ) -> Result<Outcome, InterpretError> {
        let lb = &topology.load_balancer;
        info!(
            "Making sure Load Balancer is initialized: {:?}",
            lb.ensure_initialized().await?
        );
        for service in &self.score.public_services {
            info!("Ensuring service exists {service:?}");
            lb.ensure_service_exists(service).await?;
        }
        for service in &self.score.private_services {
            info!("Ensuring private service exists {service:?}");
            lb.ensure_service_exists(service).await?;
        }
        info!("Applying load balancer configuration");
        lb.commit_config().await?;
        info!("Making a full reload and restart of haproxy");
        lb.reload_restart().await?;
        let total = self.score.public_services.len() + self.score.private_services.len();
        Ok(Outcome::success(format!(
            "Load balancer successfully configured {} services",
            total
        )))
    }

    fn get_name(&self) -> InterpretName {
        InterpretName::LoadBalancer
    }

    fn get_version(&self) -> Version {
        self.version.clone()
    }

    fn get_status(&self) -> InterpretStatus {
        self.status.clone()
    }

    fn get_children(&self) -> Vec<Id> {
        todo!()
    }
}

View File

@@ -0,0 +1,9 @@
// Feature modules: each exposes Scores (declarative desired state) and
// Interprets (the appliers) for one infrastructure concern.
pub mod dhcp;
pub mod dns;
pub mod dummy;
pub mod http;
pub mod k8s;
pub mod load_balancer;
pub mod okd;
pub mod opnsense;
pub mod tftp;

View File

@@ -0,0 +1,61 @@
use crate::{
interpret::Interpret,
inventory::Inventory,
modules::dhcp::DhcpScore,
score::Score,
topology::{HAClusterTopology, HostBinding},
};
/// DHCP score specialized for the OKD bootstrap phase; delegates to an inner
/// [`DhcpScore`] built from the topology and inventory.
#[derive(Debug, Clone)]
pub struct OKDBootstrapDhcpScore {
    dhcp_score: DhcpScore,
}
impl OKDBootstrapDhcpScore {
    /// Derive DHCP bindings for the OKD bootstrap phase: each control-plane
    /// logical host is paired by index with a physical inventory host, and
    /// the bootstrap logical host is bound to the first worker machine.
    ///
    /// # Panics
    /// Panics when the inventory has fewer control-plane hosts than the
    /// topology, or no worker host to act as the bootstrap node.
    pub fn new(topology: &HAClusterTopology, inventory: &Inventory) -> Self {
        let mut host_binding: Vec<_> = topology
            .control_plane
            .iter()
            .enumerate()
            .map(|(index, topology_entry)| HostBinding {
                logical_host: topology_entry.clone(),
                physical_host: inventory
                    .control_plane_host
                    .get(index)
                    // Typo fix: was "Iventory".
                    .expect("Inventory should contain at least as many physical hosts as topology")
                    .clone(),
            })
            .collect();
        host_binding.push(HostBinding {
            logical_host: topology.bootstrap_host.clone(),
            physical_host: inventory
                .worker_host
                // `.first()` over `.get(0)` (clippy::get_first).
                .first()
                .expect("Should have at least one worker to be used as bootstrap node")
                .clone(),
        });
        Self {
            dhcp_score: DhcpScore::new(
                host_binding,
                // TODO : we should add a tftp server to the topology instead of relying on the
                // router address, this is leaking implementation details
                Some(topology.router.get_gateway()),
                Some("bootx64.efi".to_string()),
            ),
        }
    }
}
impl Score for OKDBootstrapDhcpScore {
    fn name(&self) -> String {
        String::from("OKDBootstrapDhcpScore")
    }

    /// Delegate interpretation to the wrapped DHCP score.
    fn create_interpret(&self) -> Box<dyn Interpret> {
        self.dhcp_score.create_interpret()
    }

    fn clone_box(&self) -> Box<dyn Score> {
        Box::new(self.clone())
    }
}

View File

@@ -0,0 +1,84 @@
use std::net::SocketAddr;
use crate::{
interpret::Interpret,
modules::load_balancer::LoadBalancerScore,
score::Score,
topology::{
BackendServer, HAClusterTopology, HealthCheck, HttpMethod, HttpStatusCode,
LoadBalancerService,
},
};
/// Load-balancer score for the OKD bootstrap phase; delegates to an inner
/// [`LoadBalancerScore`] whose backends include the bootstrap host.
#[derive(Debug, Clone)]
pub struct OKDBootstrapLoadBalancerScore {
    load_balancer_score: LoadBalancerScore,
}
impl OKDBootstrapLoadBalancerScore {
pub fn new(topology: &HAClusterTopology) -> Self {
let private_ip = topology.router.get_gateway();
let private_services = vec![
LoadBalancerService {
backend_servers: Self::topology_to_backend_server(topology, 80),
listening_port: SocketAddr::new(private_ip, 80),
health_check: Some(HealthCheck::TCP(None)),
},
LoadBalancerService {
backend_servers: Self::topology_to_backend_server(topology, 443),
listening_port: SocketAddr::new(private_ip, 443),
health_check: Some(HealthCheck::TCP(None)),
},
LoadBalancerService {
backend_servers: Self::topology_to_backend_server(topology, 22623),
listening_port: SocketAddr::new(private_ip, 22623),
health_check: Some(HealthCheck::TCP(None)),
},
LoadBalancerService {
backend_servers: Self::topology_to_backend_server(topology, 6443),
listening_port: SocketAddr::new(private_ip, 6443),
health_check: Some(HealthCheck::HTTP(
"/readyz".to_string(),
HttpMethod::GET,
HttpStatusCode::Success2xx,
)),
},
];
Self {
load_balancer_score: LoadBalancerScore {
public_services: vec![],
private_services,
},
}
}
fn topology_to_backend_server(topology: &HAClusterTopology, port: u16) -> Vec<BackendServer> {
let mut backend: Vec<_> = topology
.control_plane
.iter()
.map(|cp| BackendServer {
address: cp.ip.to_string(),
port,
})
.collect();
backend.push(BackendServer {
address: topology.bootstrap_host.ip.to_string(),
port,
});
backend
}
}
impl Score for OKDBootstrapLoadBalancerScore {
    fn name(&self) -> String {
        String::from("OKDBootstrapLoadBalancerScore")
    }

    /// Delegate interpretation to the wrapped load-balancer score.
    fn create_interpret(&self) -> Box<dyn Interpret> {
        self.load_balancer_score.create_interpret()
    }

    fn clone_box(&self) -> Box<dyn Score> {
        Box::new(self.clone())
    }
}

View File

@@ -0,0 +1,53 @@
use crate::{
interpret::Interpret,
inventory::Inventory,
modules::dhcp::DhcpScore,
score::Score,
topology::{HAClusterTopology, HostBinding},
};
/// Steady-state OKD DHCP score; delegates to an inner [`DhcpScore`] binding
/// control-plane hosts only (no bootstrap node).
#[derive(Debug, Clone)]
pub struct OKDDhcpScore {
    dhcp_score: DhcpScore,
}
impl OKDDhcpScore {
    /// Pair each control-plane logical host with a physical inventory host
    /// by index and configure PXE boot via the router gateway.
    ///
    /// # Panics
    /// Panics when the inventory has fewer control-plane hosts than the
    /// topology.
    pub fn new(topology: &HAClusterTopology, inventory: &Inventory) -> Self {
        let host_binding = topology
            .control_plane
            .iter()
            .enumerate()
            .map(|(index, topology_entry)| HostBinding {
                logical_host: topology_entry.clone(),
                physical_host: inventory
                    .control_plane_host
                    .get(index)
                    // Typo fix: was "Iventory".
                    .expect("Inventory should contain at least as many physical hosts as topology")
                    .clone(),
            })
            .collect();
        Self {
            // TODO : we should add a tftp server to the topology instead of relying on the
            // router address, this is leaking implementation details
            dhcp_score: DhcpScore {
                host_binding,
                next_server: Some(topology.router.get_gateway()),
                boot_filename: Some("bootx64.efi".to_string()),
            },
        }
    }
}
impl Score for OKDDhcpScore {
    fn name(&self) -> String {
        String::from("OKDDhcpScore")
    }

    /// Delegate interpretation to the wrapped DHCP score.
    fn create_interpret(&self) -> Box<dyn Interpret> {
        self.dhcp_score.create_interpret()
    }

    fn clone_box(&self) -> Box<dyn Score> {
        Box::new(self.clone())
    }
}

View File

@@ -0,0 +1,55 @@
use crate::{
interpret::Interpret,
modules::dns::DnsScore,
score::Score,
topology::{DnsRecord, DnsRecordType, HAClusterTopology},
};
/// OKD DNS score; delegates to an inner [`DnsScore`] carrying the standard
/// cluster records (`api`, `api-int`, wildcard apps).
#[derive(Debug, Clone)]
pub struct OKDDnsScore {
    dns_score: DnsScore,
}
impl OKDDnsScore {
    /// Build the standard OKD A records — `api`, `api-int`, and a wildcard
    /// under `apps.<domain>` — all pointing at the DNS server's IP, and
    /// enable DHCP-lease registration.
    // NOTE(review): all three records resolve to `dns_server.get_ip()` —
    // confirm that address is where API/ingress traffic should land.
    pub fn new(topology: &HAClusterTopology) -> Self {
        let cluster_domain_name = &topology.domain_name;
        let mut dns_entries = Vec::with_capacity(3);
        dns_entries.push(DnsRecord {
            host: "api".to_string(),
            domain: cluster_domain_name.clone(),
            record_type: DnsRecordType::A,
            value: topology.dns_server.get_ip(),
        });
        dns_entries.push(DnsRecord {
            host: "api-int".to_string(),
            domain: cluster_domain_name.clone(),
            record_type: DnsRecordType::A,
            value: topology.dns_server.get_ip(),
        });
        dns_entries.push(DnsRecord {
            host: "*".to_string(),
            domain: format!("apps.{}", cluster_domain_name),
            record_type: DnsRecordType::A,
            value: topology.dns_server.get_ip(),
        });
        Self {
            dns_score: DnsScore::new(dns_entries, Some(true)),
        }
    }
}
impl Score for OKDDnsScore {
    fn name(&self) -> String {
        String::from("OKDDnsScore")
    }

    /// Delegate interpretation to the wrapped DNS score.
    fn create_interpret(&self) -> Box<dyn Interpret> {
        self.dns_score.create_interpret()
    }

    fn clone_box(&self) -> Box<dyn Score> {
        Box::new(self.clone())
    }
}

View File

@@ -0,0 +1,95 @@
use std::net::SocketAddr;
use crate::{
interpret::Interpret,
modules::load_balancer::LoadBalancerScore,
score::Score,
topology::{
BackendServer, HAClusterTopology, HealthCheck, HttpMethod, HttpStatusCode,
LoadBalancerService,
},
};
/// Steady-state OKD load-balancer score; delegates to an inner
/// [`LoadBalancerScore`] backed by the control-plane hosts.
#[derive(Debug, Clone)]
pub struct OKDLoadBalancerScore {
    load_balancer_score: LoadBalancerScore,
}
impl OKDLoadBalancerScore {
    /// Build the steady-state OKD layout: public TCP services on 80/443 and
    /// private services on 80/443/22623 plus an HTTP-health-checked 6443
    /// (`/readyz`).
    // NOTE(review): public AND private listeners bind the same router
    // gateway address — confirm this is intentional.
    pub fn new(topology: &HAClusterTopology) -> Self {
        let gateway_ip = topology.router.get_gateway();

        // Plain TCP-health-checked service on the given port.
        let tcp_service = |port: u16| LoadBalancerService {
            backend_servers: Self::control_plane_to_backend_server(topology, port),
            listening_port: SocketAddr::new(gateway_ip, port),
            health_check: Some(HealthCheck::TCP(None)),
        };

        let public_services = vec![tcp_service(80), tcp_service(443)];

        let mut private_services = vec![tcp_service(80), tcp_service(443), tcp_service(22623)];
        private_services.push(LoadBalancerService {
            backend_servers: Self::control_plane_to_backend_server(topology, 6443),
            listening_port: SocketAddr::new(gateway_ip, 6443),
            health_check: Some(HealthCheck::HTTP(
                "/readyz".to_string(),
                HttpMethod::GET,
                HttpStatusCode::Success2xx,
            )),
        });

        Self {
            load_balancer_score: LoadBalancerScore {
                public_services,
                private_services,
            },
        }
    }

    /// One backend entry per control-plane host for the given port.
    fn control_plane_to_backend_server(
        topology: &HAClusterTopology,
        port: u16,
    ) -> Vec<BackendServer> {
        topology
            .control_plane
            .iter()
            .map(|cp| BackendServer {
                address: cp.ip.to_string(),
                port,
            })
            .collect()
    }
}
impl Score for OKDLoadBalancerScore {
    fn name(&self) -> String {
        String::from("OKDLoadBalancerScore")
    }

    /// Delegate interpretation to the wrapped load-balancer score.
    fn create_interpret(&self) -> Box<dyn Interpret> {
        self.load_balancer_score.create_interpret()
    }

    fn clone_box(&self) -> Box<dyn Score> {
        Box::new(self.clone())
    }
}

View File

@@ -0,0 +1,6 @@
// OKD-specific scores: pre-wired DHCP/DNS/load-balancer configurations for
// cluster bootstrap and steady-state operation, plus upgrade scaffolding.
pub mod bootstrap_dhcp;
pub mod bootstrap_load_balancer;
pub mod dhcp;
pub mod dns;
pub mod load_balancer;
pub mod upgrade;

View File

@@ -0,0 +1,35 @@
use crate::{data::Version, score::Score};
/// Score describing an OKD cluster upgrade from one version to another.
// NOTE(review): the `Score` impl for this type is still commented out in
// this file, so the type is not yet wired into execution.
#[derive(Debug, Clone)]
pub struct OKDUpgradeScore {
    current_version: Version,
    target_version: Version,
}
impl OKDUpgradeScore {
    /// Placeholder constructor for upgrade experiments.
    ///
    /// Bug fix: `target_version` was `Version::from("").unwrap()`, which
    /// always panicked because the empty string is not valid semver. Until a
    /// real target is wired in, default the target to the current version
    /// (a no-op upgrade) instead of panicking on construction.
    pub fn new() -> Self {
        let current_version =
            Version::from("4.17.0-okd-scos.0").expect("hard-coded version is valid semver");
        // TODO: accept the target version as a parameter once upgrades run.
        let target_version = current_version.clone();
        Self {
            current_version,
            target_version,
        }
    }
}
// impl Score for OKDUpgradeScore {
// fn create_interpret(self) -> Box<dyn Interpret> {
// // Should this be a specialized interpret for OKD upgrades or rather a set of interprets
// // such as :
// //
// // MultiStageInterpret :
// // stages : vec![
// // vec![CheckOperatorsUpgraded, CheckClusterHealthy, CheckOtherPrecondition],
// // vec![PerformUpgrade],
// // vec![VerifyUpgrade, CheckClusterHealth],
// // ]
// todo!()
// }
//
// fn name(&self) -> String {
// "OKDUpgradeScore".to_string()
// }
// }

View File

@@ -0,0 +1,85 @@
use std::sync::Arc;
use async_trait::async_trait;
use tokio::sync::RwLock;
use crate::{
data::{Id, Version},
interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
inventory::Inventory,
score::Score,
topology::HAClusterTopology,
};
/// Score that runs an arbitrary shell command on the OPNsense host.
#[derive(Debug, Clone)]
pub struct OPNSenseShellCommandScore {
    // Shared handle to the OPNsense configuration/connection.
    pub opnsense: Arc<RwLock<opnsense_config::Config>>,
    // Shell command line to execute.
    pub command: String,
}
impl Score for OPNSenseShellCommandScore {
    fn name(&self) -> String {
        String::from("OPNSenseShellCommandScore")
    }

    /// Build a queued interpret carrying a clone of this score.
    fn create_interpret(&self) -> Box<dyn Interpret> {
        let interpret = OPNsenseInterpret {
            status: InterpretStatus::QUEUED,
            score: self.clone(),
        };
        Box::new(interpret)
    }

    fn clone_box(&self) -> Box<dyn Score> {
        Box::new(self.clone())
    }
}
/// Interpret that executes the score's shell command on the OPNsense host.
#[derive(Debug)]
pub struct OPNsenseInterpret {
    status: InterpretStatus,
    score: OPNSenseShellCommandScore,
}
#[async_trait]
impl Interpret for OPNsenseInterpret {
    /// Run the score's command under a read lock on the shared config and
    /// report its output in the outcome message.
    async fn execute(
        &self,
        _inventory: &Inventory,
        _topology: &HAClusterTopology,
    ) -> Result<Outcome, InterpretError> {
        let output = {
            let config = self.score.opnsense.read().await;
            config.run_command(&self.score.command).await?
        };
        Ok(Outcome::success(format!(
            "Command execution successful : {}\n\n{output}",
            self.score.command
        )))
    }

    fn get_name(&self) -> InterpretName {
        InterpretName::OPNSense
    }

    fn get_version(&self) -> Version {
        Version::from("1.0.0").unwrap()
    }

    fn get_status(&self) -> InterpretStatus {
        self.status.clone()
    }

    fn get_children(&self) -> Vec<Id> {
        todo!()
    }
}
impl From<opnsense_config::Error> for InterpretError {
    /// Wrap any opnsense-config error so `?` can lift it into interpret results.
    fn from(value: opnsense_config::Error) -> Self {
        let message = format!("opnsense_config::Error {value:?}");
        Self::new(message)
    }
}

View File

@@ -0,0 +1,70 @@
use async_trait::async_trait;
use derive_new::new;
use crate::{
data::{Id, Version},
interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
inventory::Inventory,
score::Score,
topology::{HAClusterTopology, Url},
};
/// Score describing a set of files the topology's TFTP server must serve.
#[derive(Debug, new, Clone)]
pub struct TftpScore {
    files_to_serve: Url,
}
impl Score for TftpScore {
    fn name(&self) -> String {
        String::from("TftpScore")
    }

    /// Wrap a clone of this score in a [`TftpInterpret`].
    fn create_interpret(&self) -> Box<dyn Interpret> {
        let interpret = TftpInterpret::new(self.clone());
        Box::new(interpret)
    }

    fn clone_box(&self) -> Box<dyn Score> {
        Box::new(self.clone())
    }
}
/// Interpret that configures the topology's TFTP server from a [`TftpScore`].
#[derive(Debug, new, Clone)]
pub struct TftpInterpret {
    score: TftpScore,
}
#[async_trait]
impl Interpret for TftpInterpret {
    fn get_name(&self) -> InterpretName {
        InterpretName::Tftp
    }

    fn get_version(&self) -> Version {
        todo!()
    }

    fn get_status(&self) -> InterpretStatus {
        todo!()
    }

    fn get_children(&self) -> Vec<Id> {
        todo!()
    }

    /// Initialize the TFTP server, bind it to the router gateway address,
    /// serve the score's files, then commit and restart.
    async fn execute(
        &self,
        _inventory: &Inventory,
        topology: &HAClusterTopology,
    ) -> Result<Outcome, InterpretError> {
        let tftp_server = &topology.tftp_server;
        tftp_server.ensure_initialized().await?;
        tftp_server.set_ip(topology.router.get_gateway()).await?;
        tftp_server.serve_files(&self.score.files_to_serve).await?;
        tftp_server.commit_config().await?;
        tftp_server.reload_restart().await?;
        Ok(Outcome::success(format!(
            "TFTP Server running and serving files from {}",
            self.score.files_to_serve
        )))
    }
}