Merge branch 'master' into feat/settingUpNDC

This commit is contained in:
2025-05-06 11:58:12 -04:00
103 changed files with 8763 additions and 946 deletions

View File

@@ -7,25 +7,35 @@ license.workspace = true
[dependencies]
libredfish = "0.1.1"
reqwest = {version = "0.11", features = ["blocking", "json"] }
reqwest = { version = "0.11", features = ["blocking", "json"] }
russh = "0.45.0"
rust-ipmi = "0.1.1"
semver = "1.0.23"
serde = { version = "1.0.209", features = ["derive"] }
serde_json = "1.0.127"
tokio = { workspace = true }
derive-new = { workspace = true }
log = { workspace = true }
env_logger = { workspace = true }
async-trait = { workspace = true }
cidr = { workspace = true }
tokio.workspace = true
derive-new.workspace = true
log.workspace = true
env_logger.workspace = true
async-trait.workspace = true
cidr.workspace = true
opnsense-config = { path = "../opnsense-config" }
opnsense-config-xml = { path = "../opnsense-config-xml" }
harmony_macros = { path = "../harmony_macros" }
harmony_types = { path = "../harmony_types" }
uuid = { workspace = true }
url = { workspace = true }
kube = { workspace = true }
k8s-openapi = { workspace = true }
serde_yaml = { workspace = true }
http = { workspace = true }
uuid.workspace = true
url.workspace = true
kube.workspace = true
k8s-openapi.workspace = true
serde_yaml.workspace = true
http.workspace = true
serde-value.workspace = true
inquire.workspace = true
helm-wrapper-rs = "0.4.0"
non-blank-string-rs = "1.0.4"
k3d-rs = { path = "../k3d" }
directories = "6.0.0"
lazy_static = "1.5.0"
dockerfile_builder = "0.1.5"
temp-file = "0.1.9"
convert_case.workspace = true

View File

@@ -0,0 +1,13 @@
use lazy_static::lazy_static;
use std::path::PathBuf;

lazy_static! {
    /// Directory where Harmony stores local data: `<platform data dir>/harmony`.
    // NOTE(review): `BaseDirs::new().unwrap()` panics when no home directory can
    // be determined (e.g. minimal containers) — confirm this is acceptable at startup.
    pub static ref HARMONY_CONFIG_DIR: PathBuf = directories::BaseDirs::new()
        .unwrap()
        .data_dir()
        .join("harmony");
    /// Container registry host; overridable via `HARMONY_REGISTRY_URL`.
    pub static ref REGISTRY_URL: String = std::env::var("HARMONY_REGISTRY_URL")
        .unwrap_or_else(|_| "hub.nationtech.io".to_string());
    /// Registry project (namespace); overridable via `HARMONY_REGISTRY_PROJECT`.
    pub static ref REGISTRY_PROJECT: String =
        std::env::var("HARMONY_REGISTRY_PROJECT").unwrap_or_else(|_| "harmony".to_string());
}

View File

@@ -2,6 +2,8 @@ use std::sync::Arc;
use derive_new::new;
use harmony_types::net::MacAddress;
use serde::{Serialize, Serializer, ser::SerializeStruct};
use serde_value::Value;
pub type HostGroup = Vec<PhysicalHost>;
pub type SwitchGroup = Vec<Switch>;
@@ -75,10 +77,7 @@ impl PhysicalHost {
}
pub fn label(mut self, name: String, value: String) -> Self {
self.labels.push(Label {
_name: name,
_value: value,
});
self.labels.push(Label { name, value });
self
}
@@ -88,7 +87,49 @@ impl PhysicalHost {
}
}
#[derive(new)]
// Hand-rolled Serialize for PhysicalHost: the trait-object `management` field
// cannot derive Serialize, so it is routed through `SerializableManagement`.
impl Serialize for PhysicalHost {
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        // Base fields always emitted: category, network, storage, labels, management.
        let field_count = 5
            + usize::from(self.memory_size.is_some())
            + usize::from(self.cpu_count.is_some());

        let mut state = serializer.serialize_struct("PhysicalHost", field_count)?;

        state.serialize_field("category", &self.category)?;
        state.serialize_field("network", &self.network)?;
        state.serialize_field("storage", &self.storage)?;
        state.serialize_field("labels", &self.labels)?;

        // Optional fields are only present when set.
        if let Some(memory) = self.memory_size {
            state.serialize_field("memory_size", &memory)?;
        }
        if let Some(cpu) = self.cpu_count {
            state.serialize_field("cpu_count", &cpu)?;
        }

        // The management interface serializes itself into an intermediate
        // `serde_value::Value`, emitted here as a regular field.
        let mgmt_data = self.management.serialize_management();
        state.serialize_field("management", &mgmt_data)?;

        state.end()
    }
}
#[derive(new, Serialize)]
pub struct ManualManagementInterface;
impl ManagementInterface for ManualManagementInterface {
@@ -102,7 +143,7 @@ impl ManagementInterface for ManualManagementInterface {
}
}
pub trait ManagementInterface: Send + Sync {
pub trait ManagementInterface: Send + Sync + SerializableManagement {
fn boot_to_pxe(&self);
fn get_supported_protocol_names(&self) -> String;
}
@@ -116,21 +157,49 @@ impl std::fmt::Debug for dyn ManagementInterface {
}
}
#[derive(Debug, Clone)]
// Define a trait for serializing management interfaces
/// Bridges trait objects to serde: converts a management interface into a
/// dynamically-typed `serde_value::Value`.
pub trait SerializableManagement {
    fn serialize_management(&self) -> Value;
}

// Provide a blanket implementation for all types that implement both ManagementInterface and Serialize
impl<T> SerializableManagement for T
where
    T: ManagementInterface + Serialize,
{
    fn serialize_management(&self) -> Value {
        // Panics on serialization failure; implementors are expected to be
        // plain data structs for which `to_value` cannot fail.
        serde_value::to_value(self).expect("ManagementInterface should serialize successfully")
    }
}
/// Coarse role classification of a physical machine in the inventory.
#[derive(Debug, Clone, Serialize)]
pub enum HostCategory {
    Server,
    Firewall,
    Switch,
}
#[derive(Debug, new, Clone)]
/// A single NIC on a physical host.
#[derive(Debug, new, Clone, Serialize)]
pub struct NetworkInterface {
    // Interface name (e.g. "eth0") when known.
    pub name: Option<String>,
    pub mac_address: MacAddress,
    // Link speed; unit not stated in this file — presumably Mbit/s, TODO confirm.
    pub speed: Option<u64>,
}
#[derive(Debug, new, Clone)]
#[cfg(test)]
use harmony_macros::mac_address;
#[cfg(test)]
impl NetworkInterface {
    /// Test-only placeholder: empty name, all-zero MAC address, zero speed.
    pub fn dummy() -> Self {
        NetworkInterface {
            name: Some("".to_string()),
            mac_address: mac_address!("00:00:00:00:00:00"),
            speed: Some(0),
        }
    }
}
#[derive(Debug, new, Clone, Serialize)]
pub enum StorageConnectionType {
Sata3g,
Sata6g,
@@ -138,13 +207,13 @@ pub enum StorageConnectionType {
Sas12g,
PCIE,
}
#[derive(Debug, Clone)]
/// Kind of storage medium installed in a host.
#[derive(Debug, Clone, Serialize)]
pub enum StorageKind {
    SSD,
    NVME,
    HDD,
}
#[derive(Debug, new, Clone)]
#[derive(Debug, new, Clone, Serialize)]
pub struct Storage {
pub connection: StorageConnectionType,
pub kind: StorageKind,
@@ -152,20 +221,33 @@ pub struct Storage {
pub serial: String,
}
#[derive(Debug, Clone)]
#[cfg(test)]
impl Storage {
    /// Test-only placeholder: a zero-sized SATA-3G SSD with an empty serial.
    pub fn dummy() -> Self {
        Storage {
            connection: StorageConnectionType::Sata3g,
            kind: StorageKind::SSD,
            size: 0,
            serial: "".to_string(),
        }
    }
}
/// A network switch and its interfaces.
// NOTE(review): fields are underscore-prefixed and never read in this file.
#[derive(Debug, Clone, Serialize)]
pub struct Switch {
    _interface: Vec<NetworkInterface>,
    _management_interface: NetworkInterface,
}
#[derive(Debug, new, Clone)]
#[derive(Debug, new, Clone, Serialize)]
pub struct Label {
_name: String,
_value: String,
pub name: String,
pub value: String,
}
pub type Address = String;
#[derive(new, Debug)]
#[derive(new, Debug, Serialize)]
pub struct Location {
pub address: Address,
pub name: String,
@@ -179,3 +261,158 @@ impl Location {
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use serde::{Deserialize, Serialize};
use std::sync::Arc;
    // Mock implementation of ManagementInterface simulating an HP iLO BMC;
    // fields stand in for typical controller settings.
    #[derive(Debug, Clone, Serialize, Deserialize)]
    struct MockHPIlo {
        ip: String,
        username: String,
        password: String,
        firmware_version: String,
    }

    impl ManagementInterface for MockHPIlo {
        // No-op: tests only exercise serialization, not PXE behavior.
        fn boot_to_pxe(&self) {}

        fn get_supported_protocol_names(&self) -> String {
            String::new()
        }
    }
    // Another mock implementation, simulating a Dell iDRAC controller.
    #[derive(Debug, Clone, Serialize, Deserialize)]
    struct MockDellIdrac {
        hostname: String,
        port: u16,
        api_token: String,
    }

    impl ManagementInterface for MockDellIdrac {
        // No-op: tests only exercise serialization, not PXE behavior.
        fn boot_to_pxe(&self) {}

        fn get_supported_protocol_names(&self) -> String {
            String::new()
        }
    }
#[test]
fn test_serialize_physical_host_with_hp_ilo() {
// Create a PhysicalHost with HP iLO management
let host = PhysicalHost {
category: HostCategory::Server,
network: vec![NetworkInterface::dummy()],
management: Arc::new(MockHPIlo {
ip: "192.168.1.100".to_string(),
username: "admin".to_string(),
password: "password123".to_string(),
firmware_version: "2.5.0".to_string(),
}),
storage: vec![Storage::dummy()],
labels: vec![Label::new("datacenter".to_string(), "us-east".to_string())],
memory_size: Some(64_000_000),
cpu_count: Some(16),
};
// Serialize to JSON
let json = serde_json::to_string(&host).expect("Failed to serialize host");
// Check that the serialized JSON contains the HP iLO details
assert!(json.contains("192.168.1.100"));
assert!(json.contains("admin"));
assert!(json.contains("password123"));
assert!(json.contains("firmware_version"));
assert!(json.contains("2.5.0"));
// Parse back to verify structure (not the exact management interface)
let parsed: serde_json::Value = serde_json::from_str(&json).expect("Failed to parse JSON");
// Verify basic structure
assert_eq!(parsed["cpu_count"], 16);
assert_eq!(parsed["memory_size"], 64_000_000);
assert_eq!(parsed["network"][0]["name"], "");
}
#[test]
fn test_serialize_physical_host_with_dell_idrac() {
// Create a PhysicalHost with Dell iDRAC management
let host = PhysicalHost {
category: HostCategory::Server,
network: vec![NetworkInterface::dummy()],
management: Arc::new(MockDellIdrac {
hostname: "idrac-server01".to_string(),
port: 443,
api_token: "abcdef123456".to_string(),
}),
storage: vec![Storage::dummy()],
labels: vec![Label::new("env".to_string(), "production".to_string())],
memory_size: Some(128_000_000),
cpu_count: Some(32),
};
// Serialize to JSON
let json = serde_json::to_string(&host).expect("Failed to serialize host");
// Check that the serialized JSON contains the Dell iDRAC details
assert!(json.contains("idrac-server01"));
assert!(json.contains("443"));
assert!(json.contains("abcdef123456"));
// Parse back to verify structure
let parsed: serde_json::Value = serde_json::from_str(&json).expect("Failed to parse JSON");
// Verify basic structure
assert_eq!(parsed["cpu_count"], 32);
assert_eq!(parsed["memory_size"], 128_000_000);
assert_eq!(parsed["storage"][0]["path"], serde_json::Value::Null);
}
#[test]
fn test_different_management_implementations_produce_valid_json() {
// Create hosts with different management implementations
let host1 = PhysicalHost {
category: HostCategory::Server,
network: vec![],
management: Arc::new(MockHPIlo {
ip: "10.0.0.1".to_string(),
username: "root".to_string(),
password: "secret".to_string(),
firmware_version: "3.0.0".to_string(),
}),
storage: vec![],
labels: vec![],
memory_size: None,
cpu_count: None,
};
let host2 = PhysicalHost {
category: HostCategory::Server,
network: vec![],
management: Arc::new(MockDellIdrac {
hostname: "server02-idrac".to_string(),
port: 8443,
api_token: "token123".to_string(),
}),
storage: vec![],
labels: vec![],
memory_size: None,
cpu_count: None,
};
// Both should serialize successfully
let json1 = serde_json::to_string(&host1).expect("Failed to serialize host1");
let json2 = serde_json::to_string(&host2).expect("Failed to serialize host2");
// Both JSONs should be valid and parseable
let _: serde_json::Value = serde_json::from_str(&json1).expect("Invalid JSON for host1");
let _: serde_json::Value = serde_json::from_str(&json2).expect("Invalid JSON for host2");
// The JSONs should be different because they contain different management interfaces
assert_ne!(json1, json2);
}
}

View File

@@ -7,7 +7,6 @@ use super::{
data::{Id, Version},
executors::ExecutorError,
inventory::Inventory,
topology::HAClusterTopology,
};
pub enum InterpretName {
@@ -20,6 +19,7 @@ pub enum InterpretName {
Dummy,
Panic,
OPNSense,
K3dInstallation,
}
impl std::fmt::Display for InterpretName {
@@ -34,17 +34,15 @@ impl std::fmt::Display for InterpretName {
InterpretName::Dummy => f.write_str("Dummy"),
InterpretName::Panic => f.write_str("Panic"),
InterpretName::OPNSense => f.write_str("OPNSense"),
InterpretName::K3dInstallation => f.write_str("K3dInstallation"),
}
}
}
#[async_trait]
pub trait Interpret: std::fmt::Debug + Send {
async fn execute(
&self,
inventory: &Inventory,
topology: &HAClusterTopology,
) -> Result<Outcome, InterpretError>;
pub trait Interpret<T>: std::fmt::Debug + Send {
async fn execute(&self, inventory: &Inventory, topology: &T)
-> Result<Outcome, InterpretError>;
fn get_name(&self) -> InterpretName;
fn get_version(&self) -> Version;
fn get_status(&self) -> InterpretStatus;

View File

@@ -1,41 +1,82 @@
use std::sync::{Arc, RwLock};
use std::sync::{Arc, Mutex, RwLock};
use log::info;
use log::{info, warn};
use super::{
interpret::{Interpret, InterpretError, Outcome},
interpret::{InterpretError, InterpretStatus, Outcome},
inventory::Inventory,
score::Score,
topology::HAClusterTopology,
topology::Topology,
};
type ScoreVec = Vec<Box<dyn Score>>;
type ScoreVec<T> = Vec<Box<dyn Score<T>>>;
pub struct Maestro {
pub struct Maestro<T: Topology> {
inventory: Inventory,
topology: HAClusterTopology,
scores: Arc<RwLock<ScoreVec>>,
topology: T,
scores: Arc<RwLock<ScoreVec<T>>>,
topology_preparation_result: Mutex<Option<Outcome>>,
}
impl Maestro {
pub fn new(inventory: Inventory, topology: HAClusterTopology) -> Self {
impl<T: Topology> Maestro<T> {
pub fn new(inventory: Inventory, topology: T) -> Self {
Self {
inventory,
topology,
scores: Arc::new(RwLock::new(Vec::new())),
topology_preparation_result: None.into(),
}
}
pub fn start(&mut self) {
info!("Starting Maestro");
pub async fn initialize(inventory: Inventory, topology: T) -> Result<Self, InterpretError> {
let instance = Self::new(inventory, topology);
instance.prepare_topology().await?;
Ok(instance)
}
pub fn register_all(&mut self, mut scores: ScoreVec) {
    /// Ensures the associated Topology is ready for operations.
    /// Delegates the readiness check and potential setup actions to the Topology.
    ///
    /// The resulting outcome is cached so later calls (e.g. `interpret`) can
    /// tell whether the topology was successfully prepared.
    pub async fn prepare_topology(&self) -> Result<Outcome, InterpretError> {
        info!("Ensuring topology '{}' is ready...", self.topology.name());
        let outcome = self.topology.ensure_ready().await?;
        info!(
            "Topology '{}' readiness check complete: {}",
            self.topology.name(),
            outcome.status
        );
        // Record the outcome for `is_topology_initialized`.
        self.topology_preparation_result
            .lock()
            .unwrap()
            .replace(outcome.clone());
        Ok(outcome)
    }
pub fn register_all(&mut self, mut scores: ScoreVec<T>) {
let mut score_mut = self.scores.write().expect("Should acquire lock");
score_mut.append(&mut scores);
}
pub async fn interpret(&self, score: Box<dyn Score>) -> Result<Outcome, InterpretError> {
fn is_topology_initialized(&self) -> bool {
let result = self.topology_preparation_result.lock().unwrap();
if let Some(outcome) = result.as_ref() {
match outcome.status {
InterpretStatus::SUCCESS => return true,
_ => return false,
}
} else {
false
}
}
pub async fn interpret(&self, score: Box<dyn Score<T>>) -> Result<Outcome, InterpretError> {
if !self.is_topology_initialized() {
warn!(
"Launching interpret for score {} but Topology {} is not fully initialized!",
score.name(),
self.topology.name(),
);
}
info!("Running score {score:?}");
let interpret = score.create_interpret();
info!("Launching interpret {interpret:?}");
@@ -44,7 +85,7 @@ impl Maestro {
result
}
pub fn scores(&self) -> Arc<RwLock<ScoreVec>> {
    /// Shared handle to the registered scores.
    pub fn scores(&self) -> Arc<RwLock<ScoreVec<T>>> {
        self.scores.clone()
    }
}

View File

@@ -1,3 +1,4 @@
pub mod config;
pub mod data;
pub mod executors;
pub mod filter;

View File

@@ -1,7 +1,231 @@
use super::interpret::Interpret;
use std::collections::BTreeMap;
pub trait Score: std::fmt::Debug + Send + Sync {
fn create_interpret(&self) -> Box<dyn Interpret>;
use serde::Serialize;
use serde_value::Value;
use super::{interpret::Interpret, topology::Topology};
pub trait Score<T: Topology>:
std::fmt::Debug + ScoreToString<T> + Send + Sync + CloneBoxScore<T> + SerializeScore<T>
{
fn create_interpret(&self) -> Box<dyn Interpret<T>>;
fn name(&self) -> String;
fn clone_box(&self) -> Box<dyn Score>;
}
/// Converts a score into a dynamically-typed `serde_value::Value`, so that
/// trait objects (`dyn Score<T>`) can still be serialized.
pub trait SerializeScore<T: Topology> {
    fn serialize(&self) -> Value;
}

// Blanket impl: any serde-serializable Score gets SerializeScore for free.
// Bug fix: the impl declared a lifetime parameter `'de` that was never used;
// an unconstrained lifetime on an impl is rejected by rustc (E0207). Removed.
impl<S, T> SerializeScore<T> for S
where
    T: Topology,
    S: Score<T> + Serialize,
{
    fn serialize(&self) -> Value {
        // TODO not sure if this is the right place to handle the error or it should bubble
        // up?
        serde_value::to_value(&self).expect("Score should serialize successfully")
    }
}
/// Object-safe cloning for boxed scores: `dyn Score<T>` cannot require `Clone`
/// directly, so cloning goes through this helper trait.
pub trait CloneBoxScore<T: Topology> {
    fn clone_box(&self) -> Box<dyn Score<T>>;
}

// Blanket impl: every `Clone` score can be cloned behind a box.
impl<S, T> CloneBoxScore<T> for S
where
    T: Topology,
    S: Score<T> + Clone + 'static,
{
    fn clone_box(&self) -> Box<dyn Score<T>> {
        Box::new(self.clone())
    }
}
/// Human-readable rendering of a score: an ASCII table built from its serde
/// representation. Implemented blanket-style below; the formatting helpers are
/// part of the trait so the blanket impl can recurse through them.
pub trait ScoreToString<T: Topology> {
    fn print_score_details(&self) -> String;
    fn format_value_as_string(&self, val: &Value, indent: usize) -> String;
    fn format_map(&self, map: &BTreeMap<Value, Value>, indent: usize) -> String;
    fn wrap_or_truncate(&self, s: &str, width: usize) -> Vec<String>;
}
impl<S, T> ScoreToString<T> for S
where
    T: Topology,
    S: Score<T> + 'static,
{
    /// Renders the score as a table, using its serde representation as data.
    fn print_score_details(&self) -> String {
        let mut output = String::new();
        output += "\n";
        output += &self.format_value_as_string(&self.serialize(), 0);
        output += "\n";
        output
    }

    /// Formats a top-level map as an ASCII table: a `score_name` header row,
    /// then one row per key with values wrapped to the 48-column value cell.
    fn format_map(&self, map: &BTreeMap<Value, Value>, indent: usize) -> String {
        let pad = " ".repeat(indent * 2);
        let mut output = String::new();
        output += &format!(
            "{}+--------------------------+--------------------------------------------------+\n",
            pad
        );
        output += &format!("{}| {:<24} | {:<48} |\n", pad, "score_name", self.name());
        output += &format!(
            "{}+--------------------------+--------------------------------------------------+\n",
            pad
        );
        for (k, v) in map {
            // Non-string keys fall back to their Debug representation.
            let key_str = match k {
                Value::String(s) => s.clone(),
                other => format!("{:?}", other),
            };
            let formatted_val = self.format_value_as_string(v, indent + 1);
            let lines = formatted_val.lines().map(|line| line.trim_start());
            let wrapped_lines: Vec<_> = lines
                .flat_map(|line| self.wrap_or_truncate(line.trim_start(), 48))
                .collect();
            // First wrapped line carries the key; continuation lines leave the
            // key column blank.
            if let Some(first) = wrapped_lines.first() {
                output += &format!("{}| {:<24} | {:<48} |\n", pad, key_str, first);
                for line in &wrapped_lines[1..] {
                    output += &format!("{}| {:<24} | {:<48} |\n", pad, "", line);
                }
            }
        }
        output += &format!(
            "{}+--------------------------+--------------------------------------------------+\n\n",
            pad
        );
        output
    }

    /// Splits `s` into chunks of at most `width`, preferring to break at a
    /// space when one exists inside the window.
    // NOTE(review): slicing here is byte-indexed; a multi-byte UTF-8 character
    // straddling `width` would panic — confirm rendered values are ASCII-safe.
    fn wrap_or_truncate(&self, s: &str, width: usize) -> Vec<String> {
        let mut lines = Vec::new();
        let mut current = s;
        while !current.is_empty() {
            if current.len() <= width {
                lines.push(current.to_string());
                break;
            }
            // Try to wrap at whitespace if possible
            let mut split_index = current[..width].rfind(' ').unwrap_or(width);
            if split_index == 0 {
                split_index = width;
            }
            lines.push(current[..split_index].trim_end().to_string());
            current = current[split_index..].trim_start();
        }
        lines
    }

    /// Recursively pretty-prints a `serde_value::Value`. A map at indent 0 is
    /// rendered as the table from `format_map`; nested maps use `key: value`
    /// lines; sequences use bracketed blocks.
    fn format_value_as_string(&self, val: &Value, indent: usize) -> String {
        let pad = " ".repeat(indent * 2);
        let mut output = String::new();
        match val {
            Value::Bool(b) => output += &format!("{}{}\n", pad, b),
            Value::U8(u) => output += &format!("{}{}\n", pad, u),
            Value::U16(u) => output += &format!("{}{}\n", pad, u),
            Value::U32(u) => output += &format!("{}{}\n", pad, u),
            Value::U64(u) => output += &format!("{}{}\n", pad, u),
            Value::I8(i) => output += &format!("{}{}\n", pad, i),
            Value::I16(i) => output += &format!("{}{}\n", pad, i),
            Value::I32(i) => output += &format!("{}{}\n", pad, i),
            Value::I64(i) => output += &format!("{}{}\n", pad, i),
            Value::F32(f) => output += &format!("{}{}\n", pad, f),
            Value::F64(f) => output += &format!("{}{}\n", pad, f),
            Value::Char(c) => output += &format!("{}{}\n", pad, c),
            Value::String(s) => output += &format!("{}{:<48}\n", pad, s),
            Value::Unit => output += &format!("{}<unit>\n", pad),
            Value::Bytes(bytes) => output += &format!("{}{:?}\n", pad, bytes),
            Value::Option(opt) => match opt {
                Some(inner) => {
                    output += &format!("{}Option:\n", pad);
                    output += &self.format_value_as_string(inner, indent + 1);
                }
                None => output += &format!("{}None\n", pad),
            },
            Value::Newtype(inner) => {
                output += &format!("{}Newtype:\n", pad);
                output += &self.format_value_as_string(inner, indent + 1);
            }
            Value::Seq(seq) => {
                if seq.is_empty() {
                    output += &format!("{}[]\n", pad);
                } else {
                    output += &format!("{}[\n", pad);
                    for item in seq {
                        output += &self.format_value_as_string(item, indent + 1);
                    }
                    output += &format!("{}]\n", pad);
                }
            }
            Value::Map(map) => {
                if map.is_empty() {
                    output += &format!("{}<empty map>\n", pad);
                } else if indent == 0 {
                    // Top-level maps become the full table.
                    output += &self.format_map(map, indent);
                } else {
                    // Nested maps: `key: value`, with multi-line values hanging
                    // under the key.
                    for (k, v) in map {
                        let key_str = match k {
                            Value::String(s) => s.clone(),
                            other => format!("{:?}", other),
                        };
                        let val_str = self
                            .format_value_as_string(v, indent + 1)
                            .trim()
                            .to_string();
                        let val_lines: Vec<_> = val_str.lines().collect();
                        output +=
                            &format!("{}{}: {}\n", pad, key_str, val_lines.first().unwrap_or(&""));
                        for line in val_lines.iter().skip(1) {
                            output += &format!("{}  {}\n", pad, line);
                        }
                    }
                }
            }
        }
        output
    }
}
//TODO write test to check that the output is what it should be
//
#[cfg(test)]
mod tests {
    use super::*;
    use crate::modules::dns::DnsScore;
    use crate::topology::HAClusterTopology;

    /// Pins the exact table output for an empty DnsScore; any formatting
    /// change to `print_score_details` must update this fixture.
    #[test]
    fn test_format_values_as_string() {
        let dns_score = Box::new(DnsScore::new(vec![], None));
        let print_score_output =
            <DnsScore as ScoreToString<HAClusterTopology>>::print_score_details(&dns_score);
        let expected_empty_dns_score_table = "\n+--------------------------+--------------------------------------------------+\n| score_name               | DnsScore                                         |\n+--------------------------+--------------------------------------------------+\n| dns_entries              | []                                               |\n| register_dhcp_leases     | None                                             |\n+--------------------------+--------------------------------------------------+\n\n\n";
        assert_eq!(print_score_output, expected_empty_dns_score_table);
    }
}

View File

@@ -1,8 +1,11 @@
use async_trait::async_trait;
use harmony_macros::ip;
use harmony_types::net::MacAddress;
use log::info;
use crate::executors::ExecutorError;
use crate::interpret::InterpretError;
use crate::interpret::Outcome;
use super::DHCPStaticEntry;
use super::DhcpServer;
@@ -12,14 +15,16 @@ use super::DnsServer;
use super::Firewall;
use super::HttpServer;
use super::IpAddress;
use super::K8sclient;
use super::LoadBalancer;
use super::LoadBalancerService;
use super::LogicalHost;
use super::Router;
use super::TftpServer;
use super::Topology;
use super::Url;
use super::openshift::OpenshiftClient;
use super::k8s::K8sClient;
use std::sync::Arc;
#[derive(Debug, Clone)]
@@ -38,11 +43,28 @@ pub struct HAClusterTopology {
pub switch: Vec<LogicalHost>,
}
impl HAClusterTopology {
pub async fn oc_client(&self) -> Result<Arc<OpenshiftClient>, kube::Error> {
Ok(Arc::new(OpenshiftClient::try_default().await?))
// Topology contract for the HA cluster; both methods are still stubs.
#[async_trait]
impl Topology for HAClusterTopology {
    // TODO: return a stable identifier for this topology.
    fn name(&self) -> &str {
        todo!()
    }

    async fn ensure_ready(&self) -> Result<Outcome, InterpretError> {
        todo!(
            "ensure_ready, not entirely sure what it should do here, probably something like verify that the hosts are reachable and all services are up and ready."
        )
    }
}
#[async_trait]
impl K8sclient for HAClusterTopology {
    /// Builds a fresh Kubernetes client from the default environment config,
    /// stringifying any kube error for the caller.
    async fn k8s_client(&self) -> Result<Arc<K8sClient>, String> {
        let client = K8sClient::try_default().await.map_err(|e| e.to_string())?;
        Ok(Arc::new(client))
    }
}
impl HAClusterTopology {
pub fn autoload() -> Self {
let dummy_infra = Arc::new(DummyInfra {});
let dummy_host = LogicalHost {
@@ -67,7 +89,167 @@ impl HAClusterTopology {
}
}
struct DummyInfra;
// DNS operations delegate verbatim to the topology's `dns_server` member.
#[async_trait]
impl DnsServer for HAClusterTopology {
    async fn register_dhcp_leases(&self, register: bool) -> Result<(), ExecutorError> {
        self.dns_server.register_dhcp_leases(register).await
    }

    async fn register_hosts(&self, hosts: Vec<DnsRecord>) -> Result<(), ExecutorError> {
        self.dns_server.register_hosts(hosts).await
    }

    fn remove_record(&self, name: &str, record_type: DnsRecordType) -> Result<(), ExecutorError> {
        self.dns_server.remove_record(name, record_type)
    }

    async fn list_records(&self) -> Vec<DnsRecord> {
        self.dns_server.list_records().await
    }

    fn get_ip(&self) -> IpAddress {
        self.dns_server.get_ip()
    }

    fn get_host(&self) -> LogicalHost {
        self.dns_server.get_host()
    }

    async fn commit_config(&self) -> Result<(), ExecutorError> {
        self.dns_server.commit_config().await
    }
}
// Load-balancer operations delegate verbatim to the `load_balancer` member.
#[async_trait]
impl LoadBalancer for HAClusterTopology {
    fn get_ip(&self) -> IpAddress {
        self.load_balancer.get_ip()
    }

    fn get_host(&self) -> LogicalHost {
        self.load_balancer.get_host()
    }

    async fn add_service(&self, service: &LoadBalancerService) -> Result<(), ExecutorError> {
        self.load_balancer.add_service(service).await
    }

    async fn remove_service(&self, service: &LoadBalancerService) -> Result<(), ExecutorError> {
        self.load_balancer.remove_service(service).await
    }

    async fn list_services(&self) -> Vec<LoadBalancerService> {
        self.load_balancer.list_services().await
    }

    async fn ensure_initialized(&self) -> Result<(), ExecutorError> {
        self.load_balancer.ensure_initialized().await
    }

    async fn commit_config(&self) -> Result<(), ExecutorError> {
        self.load_balancer.commit_config().await
    }

    async fn reload_restart(&self) -> Result<(), ExecutorError> {
        self.load_balancer.reload_restart().await
    }
}
// DHCP operations delegate verbatim to the `dhcp_server` member.
#[async_trait]
impl DhcpServer for HAClusterTopology {
    async fn add_static_mapping(&self, entry: &DHCPStaticEntry) -> Result<(), ExecutorError> {
        self.dhcp_server.add_static_mapping(entry).await
    }

    async fn remove_static_mapping(&self, mac: &MacAddress) -> Result<(), ExecutorError> {
        self.dhcp_server.remove_static_mapping(mac).await
    }

    async fn list_static_mappings(&self) -> Vec<(MacAddress, IpAddress)> {
        self.dhcp_server.list_static_mappings().await
    }

    async fn set_next_server(&self, ip: IpAddress) -> Result<(), ExecutorError> {
        self.dhcp_server.set_next_server(ip).await
    }

    async fn set_boot_filename(&self, boot_filename: &str) -> Result<(), ExecutorError> {
        self.dhcp_server.set_boot_filename(boot_filename).await
    }

    fn get_ip(&self) -> IpAddress {
        self.dhcp_server.get_ip()
    }

    fn get_host(&self) -> LogicalHost {
        self.dhcp_server.get_host()
    }

    async fn commit_config(&self) -> Result<(), ExecutorError> {
        self.dhcp_server.commit_config().await
    }

    async fn set_filename(&self, filename: &str) -> Result<(), ExecutorError> {
        self.dhcp_server.set_filename(filename).await
    }

    async fn set_filename64(&self, filename64: &str) -> Result<(), ExecutorError> {
        self.dhcp_server.set_filename64(filename64).await
    }

    async fn set_filenameipxe(&self, filenameipxe: &str) -> Result<(), ExecutorError> {
        self.dhcp_server.set_filenameipxe(filenameipxe).await
    }
}
// TFTP operations delegate verbatim to the `tftp_server` member.
#[async_trait]
impl TftpServer for HAClusterTopology {
    async fn serve_files(&self, url: &Url) -> Result<(), ExecutorError> {
        self.tftp_server.serve_files(url).await
    }

    fn get_ip(&self) -> IpAddress {
        self.tftp_server.get_ip()
    }

    async fn set_ip(&self, ip: IpAddress) -> Result<(), ExecutorError> {
        self.tftp_server.set_ip(ip).await
    }

    async fn ensure_initialized(&self) -> Result<(), ExecutorError> {
        self.tftp_server.ensure_initialized().await
    }

    async fn commit_config(&self) -> Result<(), ExecutorError> {
        self.tftp_server.commit_config().await
    }

    async fn reload_restart(&self) -> Result<(), ExecutorError> {
        self.tftp_server.reload_restart().await
    }
}
// Routing queries delegate verbatim to the `router` member.
impl Router for HAClusterTopology {
    fn get_gateway(&self) -> super::IpAddress {
        self.router.get_gateway()
    }

    fn get_cidr(&self) -> cidr::Ipv4Cidr {
        self.router.get_cidr()
    }

    fn get_host(&self) -> LogicalHost {
        self.router.get_host()
    }
}
#[async_trait]
impl HttpServer for HAClusterTopology {
    async fn serve_files(&self, url: &Url) -> Result<(), ExecutorError> {
        self.http_server.serve_files(url).await
    }

    // NOTE(review): unlike the other service impls on HAClusterTopology, the
    // remaining methods panic with the DummyInfra message instead of
    // delegating to `self.http_server`. This looks copy-pasted from the
    // DummyInfra impl — confirm whether these should delegate like
    // `serve_files` does.
    fn get_ip(&self) -> IpAddress {
        unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA)
    }

    async fn ensure_initialized(&self) -> Result<(), ExecutorError> {
        unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA)
    }

    async fn commit_config(&self) -> Result<(), ExecutorError> {
        unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA)
    }

    async fn reload_restart(&self) -> Result<(), ExecutorError> {
        unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA)
    }
}
/// Placeholder infrastructure used when no real services are wired in; every
/// service operation (elsewhere in this file) panics with
/// `UNIMPLEMENTED_DUMMY_INFRA`, but readiness always succeeds.
#[derive(Debug)]
pub struct DummyInfra;

#[async_trait]
impl Topology for DummyInfra {
    // TODO: return a stable identifier for this topology.
    fn name(&self) -> &str {
        todo!()
    }

    async fn ensure_ready(&self) -> Result<Outcome, InterpretError> {
        let dummy_msg = "This is a dummy infrastructure that does nothing";
        info!("{dummy_msg}");
        Ok(Outcome::success(dummy_msg.to_string()))
    }
}
const UNIMPLEMENTED_DUMMY_INFRA: &str = "This is a dummy infrastructure, no operation is supported";
@@ -219,11 +401,7 @@ impl DnsServer for DummyInfra {
async fn register_hosts(&self, _hosts: Vec<DnsRecord>) -> Result<(), ExecutorError> {
unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA)
}
fn remove_record(
&mut self,
_name: &str,
_record_type: DnsRecordType,
) -> Result<(), ExecutorError> {
fn remove_record(&self, _name: &str, _record_type: DnsRecordType) -> Result<(), ExecutorError> {
unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA)
}
async fn list_records(&self) -> Vec<DnsRecord> {

View File

@@ -0,0 +1 @@
pub trait HelmCommand {}

View File

@@ -1,4 +1,5 @@
use derive_new::new;
use serde::Serialize;
use crate::hardware::PhysicalHost;
@@ -8,7 +9,7 @@ use super::LogicalHost;
///
/// This is the only construct that directly maps a logical host to a physical host.
/// It serves as a bridge between the logical cluster structure and the physical infrastructure.
#[derive(Debug, new, Clone)]
#[derive(Debug, new, Clone, Serialize)]
pub struct HostBinding {
/// Reference to the LogicalHost
pub logical_host: LogicalHost,

View File

@@ -1,12 +1,14 @@
use derive_new::new;
use k8s_openapi::NamespaceResourceScope;
use kube::{Api, Client, Error, Resource, api::PostParams};
use serde::de::DeserializeOwned;
pub struct OpenshiftClient {
#[derive(new)]
pub struct K8sClient {
client: Client,
}
impl OpenshiftClient {
impl K8sClient {
pub async fn try_default() -> Result<Self, Error> {
Ok(Self {
client: Client::try_default().await?,
@@ -36,7 +38,7 @@ impl OpenshiftClient {
Ok(result)
}
pub async fn apply_namespaced<K>(&self, resource: &Vec<K>) -> Result<K, Error>
pub async fn apply_namespaced<K>(&self, resource: &Vec<K>, ns: Option<&str>) -> Result<K, Error>
where
K: Resource<Scope = NamespaceResourceScope>
+ Clone
@@ -47,7 +49,10 @@ impl OpenshiftClient {
<K as kube::Resource>::DynamicType: Default,
{
for r in resource.iter() {
let api: Api<K> = Api::default_namespaced(self.client.clone());
let api: Api<K> = match ns {
Some(ns) => Api::namespaced(self.client.clone(), ns),
None => Api::default_namespaced(self.client.clone()),
};
api.create(&PostParams::default(), &r).await?;
}
todo!("")

View File

@@ -0,0 +1,202 @@
use std::{process::Command, sync::Arc};
use async_trait::async_trait;
use inquire::Confirm;
use log::{info, warn};
use tokio::sync::OnceCell;
use crate::{
interpret::{InterpretError, Outcome},
inventory::Inventory,
maestro::Maestro,
modules::k3d::K3DInstallationScore,
topology::LocalhostTopology,
};
use super::{HelmCommand, K8sclient, Topology, k8s::K8sClient};
/// Resolved Kubernetes access: the client plus where it came from.
struct K8sState {
    client: Arc<K8sClient>,
    _source: K8sSource,
    // Human-readable description of how the state was obtained.
    message: String,
}

/// Origin of the Kubernetes cluster backing the client.
enum K8sSource {
    LocalK3d,
}

/// Topology that obtains a Kubernetes cluster "anywhere": from an explicit
/// kubeconfig, the system kubeconfig, or by auto-installing a local k3d.
pub struct K8sAnywhereTopology {
    // Lazily initialized; `None` inside the cell means initialization ran but
    // produced no usable client.
    k8s_state: OnceCell<Option<K8sState>>,
}
#[async_trait]
impl K8sclient for K8sAnywhereTopology {
    /// Returns the cached client; fails when initialization has not run yet or
    /// completed without producing a client.
    async fn k8s_client(&self) -> Result<Arc<K8sClient>, String> {
        self.k8s_state
            .get()
            .ok_or_else(|| "K8s state not initialized yet".to_string())?
            .as_ref()
            .map(|state| state.client.clone())
            .ok_or_else(|| "K8s client initialized but empty".to_string())
    }
}
impl K8sAnywhereTopology {
    pub fn new() -> Self {
        Self {
            k8s_state: OnceCell::new(),
        }
    }

    /// Verifies that a `helm` binary is available by running `helm version`.
    fn is_helm_available(&self) -> Result<(), String> {
        let version_result = Command::new("helm")
            .arg("version")
            .output()
            // Bug fix: the error messages previously referenced 'helm -version',
            // which is not the command actually executed.
            .map_err(|e| format!("Failed to execute 'helm version': {}", e))?;

        if !version_result.status.success() {
            return Err("Failed to run 'helm version'".to_string());
        }

        // Log the detected version (was `println!`; this crate logs via `log`).
        let version_output = String::from_utf8_lossy(&version_result.stdout);
        info!("Helm version: {}", version_output.trim());
        Ok(())
    }

    async fn try_load_system_kubeconfig(&self) -> Option<K8sClient> {
        todo!("Use kube-rs default behavior to load system kubeconfig");
    }

    async fn try_load_kubeconfig(&self, path: &str) -> Option<K8sClient> {
        todo!("Use kube-rs to load kubeconfig at path {path}");
    }

    fn get_k3d_installation_score(&self) -> K3DInstallationScore {
        K3DInstallationScore::default()
    }

    /// Installs a local k3d cluster through a dedicated localhost Maestro run.
    async fn try_install_k3d(&self) -> Result<(), InterpretError> {
        let maestro = Maestro::initialize(Inventory::autoload(), LocalhostTopology::new()).await?;
        let k3d_score = self.get_k3d_installation_score();
        maestro.interpret(Box::new(k3d_score)).await?;
        Ok(())
    }

    /// Resolves a Kubernetes client: explicit kubeconfig, system kubeconfig,
    /// or (after interactive confirmation unless autoinstall is set) a freshly
    /// installed local k3d. Returns `Ok(None)` when the user declines.
    async fn try_get_or_install_k8s_client(&self) -> Result<Option<K8sState>, InterpretError> {
        let k8s_anywhere_config = K8sAnywhereConfig {
            // `env::var` already yields an owned String; the previous
            // `.map(|v| v.to_string())` was redundant.
            kubeconfig: std::env::var("HARMONY_KUBECONFIG").ok(),
            use_system_kubeconfig: std::env::var("HARMONY_USE_SYSTEM_KUBECONFIG")
                .map_or_else(|_| false, |v| v.parse().ok().unwrap_or(false)),
            autoinstall: std::env::var("HARMONY_AUTOINSTALL")
                .map_or_else(|_| false, |v| v.parse().ok().unwrap_or(false)),
        };

        if k8s_anywhere_config.use_system_kubeconfig {
            match self.try_load_system_kubeconfig().await {
                Some(_client) => todo!(),
                None => todo!(),
            }
        }

        if let Some(kubeconfig) = k8s_anywhere_config.kubeconfig {
            match self.try_load_kubeconfig(&kubeconfig).await {
                Some(_client) => todo!(),
                None => todo!(),
            }
        }

        info!("No kubernetes configuration found");
        if !k8s_anywhere_config.autoinstall {
            let confirmation = Confirm::new( "Harmony autoinstallation is not activated, do you wish to launch autoinstallation? : ")
                .with_default(false)
                .prompt()
                .expect("Unexpected prompt error");

            if !confirmation {
                warn!(
                    "Installation cancelled, K8sAnywhere could not initialize a valid Kubernetes client"
                );
                return Ok(None);
            }
        }

        info!("Starting K8sAnywhere installation");
        self.try_install_k3d().await?;

        let k3d_score = self.get_k3d_installation_score();
        // I feel like having to rely on the k3d_rs crate here is a smell
        // I think we should have a way to interact more deeply with scores/interpret. Maybe the
        // K3DInstallationScore should expose a method to get_client ? Not too sure what would be a
        // good implementation due to the stateful nature of the k3d thing. Which is why I went
        // with this solution for now
        let k3d = k3d_rs::K3d::new(k3d_score.installation_path, Some(k3d_score.cluster_name));
        let state = match k3d.get_client().await {
            Ok(client) => K8sState {
                client: Arc::new(K8sClient::new(client)),
                _source: K8sSource::LocalK3d,
                message: "Successfully installed K3D cluster and acquired client".to_string(),
            },
            Err(_) => todo!(),
        };

        Ok(Some(state))
    }
}
/// Configuration for `K8sAnywhereTopology`, populated from `HARMONY_*`
/// environment variables in `try_get_or_install_k8s_client`.
struct K8sAnywhereConfig {
    /// The path of the KUBECONFIG file that Harmony should use to interact with the Kubernetes
    /// cluster
    ///
    /// Default : None
    kubeconfig: Option<String>,
    /// Whether to use the system KUBECONFIG, either the environment variable or the file in the
    /// default or configured location
    ///
    /// Default : false
    use_system_kubeconfig: bool,
    /// Whether to install automatically a kubernetes cluster
    ///
    /// When enabled, autoinstall will setup a K3D cluster on the localhost. https://k3d.io/stable/
    ///
    /// Default: true
    // NOTE(review): the doc above claims the default is `true`, but the env
    // parsing in `try_get_or_install_k8s_client` falls back to `false` when
    // HARMONY_AUTOINSTALL is unset — confirm which default is intended.
    autoinstall: bool,
}
#[async_trait]
impl Topology for K8sAnywhereTopology {
    fn name(&self) -> &str {
        "K8sAnywhereTopology"
    }

    /// Lazily resolves (or installs) a Kubernetes cluster, then verifies
    /// that the `helm` binary is available before reporting readiness.
    async fn ensure_ready(&self) -> Result<Outcome, InterpretError> {
        let maybe_state = self
            .k8s_state
            .get_or_try_init(|| self.try_get_or_install_k8s_client())
            .await?;

        let state: &K8sState = maybe_state.as_ref().ok_or(InterpretError::new(
            "No K8s client could be found or installed".to_string(),
        ))?;

        // Helm is a hard requirement for this topology's capabilities.
        self.is_helm_available()
            .map_err(|e| InterpretError::new(format!("helm unavailable: {}", e)))?;

        Ok(Outcome::success(format!(
            "{} + helm available",
            state.message
        )))
    }
}
impl HelmCommand for K8sAnywhereTopology {}

View File

@@ -2,6 +2,7 @@ use std::{net::SocketAddr, str::FromStr};
use async_trait::async_trait;
use log::debug;
use serde::Serialize;
use super::{IpAddress, LogicalHost};
use crate::executors::ExecutorError;
@@ -36,20 +37,21 @@ impl std::fmt::Debug for dyn LoadBalancer {
f.write_fmt(format_args!("LoadBalancer {}", self.get_ip()))
}
}
#[derive(Debug, PartialEq, Clone)]
#[derive(Debug, PartialEq, Clone, Serialize)]
pub struct LoadBalancerService {
pub backend_servers: Vec<BackendServer>,
pub listening_port: SocketAddr,
pub health_check: Option<HealthCheck>,
}
#[derive(Debug, PartialEq, Clone)]
#[derive(Debug, PartialEq, Clone, Serialize)]
pub struct BackendServer {
// TODO should not be a string, probably IPAddress
pub address: String,
pub port: u16,
}
#[derive(Debug, Clone, PartialEq)]
#[derive(Debug, Clone, PartialEq, Serialize)]
pub enum HttpMethod {
GET,
POST,
@@ -91,14 +93,14 @@ impl std::fmt::Display for HttpMethod {
}
}
#[derive(Debug, Clone, PartialEq)]
#[derive(Debug, Clone, PartialEq, Serialize)]
pub enum HttpStatusCode {
Success2xx,
UserError4xx,
ServerError5xx,
}
#[derive(Debug, Clone, PartialEq)]
#[derive(Debug, Clone, PartialEq, Serialize)]
pub enum HealthCheck {
HTTP(String, HttpMethod, HttpStatusCode),
TCP(Option<u16>),

View File

@@ -0,0 +1,25 @@
use async_trait::async_trait;
use derive_new::new;
use crate::interpret::{InterpretError, Outcome};
use super::{HelmCommand, Topology};
/// Trivial topology representing the machine Harmony itself runs on;
/// it has no state and requires no provisioning.
#[derive(new)]
pub struct LocalhostTopology;
#[async_trait]
impl Topology for LocalhostTopology {
    fn name(&self) -> &str {
        "LocalHostTopology"
    }

    /// The local machine needs no setup, so readiness always succeeds.
    async fn ensure_ready(&self) -> Result<Outcome, InterpretError> {
        let message = "Localhost is Chuck Norris, always ready.".to_string();
        Ok(Outcome::success(message))
    }
}
// TODO: Delete this, temp for test
// Marker impl granting helm capability to the localhost topology so that
// helm-based scores can be interpreted against it during development.
impl HelmCommand for LocalhostTopology {}

View File

@@ -1,10 +1,15 @@
mod ha_cluster;
mod host_binding;
mod http;
mod k8s_anywhere;
mod localhost;
pub use k8s_anywhere::*;
pub use localhost::*;
pub mod k8s;
mod load_balancer;
pub mod openshift;
mod router;
mod tftp;
use async_trait::async_trait;
pub use ha_cluster::*;
pub use load_balancer::*;
pub use router::*;
@@ -12,23 +17,73 @@ mod network;
pub use host_binding::*;
pub use http::*;
pub use network::*;
use serde::Serialize;
pub use tftp::*;
use std::{net::IpAddr, sync::Arc};
mod helm_command;
pub use helm_command::*;
use std::net::IpAddr;
use super::interpret::{InterpretError, Outcome};
/// Represents a logical view of an infrastructure environment providing specific capabilities.
///
/// A Topology acts as a self-contained "package" responsible for managing access
/// to its underlying resources and ensuring they are in a ready state before use.
/// It defines the contract for the capabilities it provides through implemented
/// capability traits (e.g., `HasK8sCapability`, `HasDnsServer`).
#[async_trait]
pub trait Topology: Send + Sync {
/// Returns a unique identifier or name for this specific topology instance.
/// This helps differentiate between multiple instances of potentially the same type.
fn name(&self) -> &str;
/// Ensures that the topology and its required underlying components or services
/// are ready to provide their declared capabilities.
///
/// Implementations of this method MUST be idempotent. Subsequent calls after a
/// successful readiness check should ideally be cheap NO-OPs.
///
/// This method encapsulates the logic for:
/// 1. **Checking Current State:** Assessing if the required resources/services are already running and configured.
/// 2. **Discovery:** Identifying the runtime environment (e.g., local Docker, AWS, existing cluster).
/// 3. **Initialization/Bootstrapping:** Performing necessary setup actions if not already ready. This might involve:
/// * Making API calls.
/// * Running external commands (e.g., `k3d`, `docker`).
/// * **Internal Orchestration:** For complex topologies, this method might manage dependencies on other sub-topologies, ensuring *their* `ensure_ready` is called first. Using nested `Maestros` to run setup `Scores` against these sub-topologies is the recommended pattern for non-trivial bootstrapping, allowing reuse of Harmony's core orchestration logic.
///
/// # Returns
/// - `Ok(Outcome)`: Indicates the topology is now ready. The `Outcome` status might be `SUCCESS` if actions were taken, or `NOOP` if it was already ready. The message should provide context.
/// - `Err(TopologyError)`: Indicates the topology could not reach a ready state due to configuration issues, discovery failures, bootstrap errors, or unsupported environments.
async fn ensure_ready(&self) -> Result<Outcome, InterpretError>;
}
pub type IpAddress = IpAddr;
#[derive(Debug, Clone)]
pub enum Url {
LocalFolder(String),
Remote(url::Url),
Url(url::Url),
}
impl Serialize for Url {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: serde::Serializer,
{
match self {
Url::LocalFolder(path) => serializer.serialize_str(path),
Url::Url(url) => serializer.serialize_str(&url.as_str()),
}
}
}
impl std::fmt::Display for Url {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
Url::LocalFolder(path) => write!(f, "{}", path),
Url::Remote(url) => write!(f, "{}", url),
Url::Url(url) => write!(f, "{}", url),
}
}
}
@@ -42,7 +97,7 @@ impl std::fmt::Display for Url {
/// - A control plane node
///
/// This abstraction focuses on the logical role and services, independent of the physical hardware.
#[derive(Debug, Clone)]
#[derive(Debug, Clone, Serialize)]
pub struct LogicalHost {
/// The IP address of this logical host.
pub ip: IpAddress,
@@ -124,3 +179,23 @@ fn increment_ip(ip: IpAddress, increment: u32) -> Option<IpAddress> {
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use serde_json;
#[test]
fn test_serialize_local_folder() {
let url = Url::LocalFolder("path/to/folder".to_string());
let serialized = serde_json::to_string(&url).unwrap();
assert_eq!(serialized, "\"path/to/folder\"");
}
#[test]
fn test_serialize_url() {
let url = Url::Url(url::Url::parse("https://example.com").unwrap());
let serialized = serde_json::to_string(&url).unwrap();
assert_eq!(serialized, "\"https://example.com/\"");
}
}

View File

@@ -1,11 +1,12 @@
use std::{net::Ipv4Addr, str::FromStr};
use std::{net::Ipv4Addr, str::FromStr, sync::Arc};
use async_trait::async_trait;
use harmony_types::net::MacAddress;
use serde::Serialize;
use crate::executors::ExecutorError;
use super::{IpAddress, LogicalHost};
use super::{IpAddress, LogicalHost, k8s::K8sClient};
#[derive(Debug)]
pub struct DHCPStaticEntry {
@@ -40,9 +41,13 @@ impl std::fmt::Debug for dyn Firewall {
pub struct NetworkDomain {
pub name: String,
}
#[async_trait]
pub trait K8sclient: Send + Sync {
async fn k8s_client(&self) -> Result<Arc<K8sClient>, String>;
}
#[async_trait]
pub trait DhcpServer: Send + Sync {
pub trait DhcpServer: Send + Sync + std::fmt::Debug {
async fn add_static_mapping(&self, entry: &DHCPStaticEntry) -> Result<(), ExecutorError>;
async fn remove_static_mapping(&self, mac: &MacAddress) -> Result<(), ExecutorError>;
async fn list_static_mappings(&self) -> Vec<(MacAddress, IpAddress)>;
@@ -56,21 +61,11 @@ pub trait DhcpServer: Send + Sync {
async fn commit_config(&self) -> Result<(), ExecutorError>;
}
impl std::fmt::Debug for dyn DhcpServer {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.write_fmt(format_args!("DhcpServer {}", self.get_ip()))
}
}
#[async_trait]
pub trait DnsServer: Send + Sync {
async fn register_dhcp_leases(&self, register: bool) -> Result<(), ExecutorError>;
async fn register_hosts(&self, hosts: Vec<DnsRecord>) -> Result<(), ExecutorError>;
fn remove_record(
&mut self,
name: &str,
record_type: DnsRecordType,
) -> Result<(), ExecutorError>;
fn remove_record(&self, name: &str, record_type: DnsRecordType) -> Result<(), ExecutorError>;
async fn list_records(&self) -> Vec<DnsRecord>;
fn get_ip(&self) -> IpAddress;
fn get_host(&self) -> LogicalHost;
@@ -121,7 +116,7 @@ pub enum Action {
Deny,
}
#[derive(Clone, Debug, PartialEq, Eq)]
#[derive(Clone, Debug, PartialEq, Eq, Serialize)]
pub enum DnsRecordType {
A,
AAAA,
@@ -142,7 +137,7 @@ impl std::fmt::Display for DnsRecordType {
}
}
#[derive(Clone, Debug, PartialEq, Eq)]
#[derive(Clone, Debug, PartialEq, Eq, Serialize)]
pub struct DnsRecord {
pub host: String,
pub domain: String,
@@ -258,7 +253,7 @@ mod test {
}
fn remove_record(
&mut self,
&self,
_name: &str,
_record_type: DnsRecordType,
) -> Result<(), ExecutorError> {

View File

@@ -3,8 +3,9 @@ use crate::topology::IpAddress;
use derive_new::new;
use harmony_types::net::MacAddress;
use log::info;
use serde::Serialize;
#[derive(new)]
#[derive(new, Serialize)]
pub struct HPIlo {
ip_address: Option<IpAddress>,
mac_address: Option<MacAddress>,

View File

@@ -2,8 +2,9 @@ use crate::hardware::ManagementInterface;
use derive_new::new;
use harmony_types::net::MacAddress;
use log::info;
use serde::Serialize;
#[derive(new)]
#[derive(new, Serialize)]
pub struct IntelAmtManagement {
mac_address: MacAddress,
}

View File

@@ -30,7 +30,7 @@ impl DnsServer for OPNSenseFirewall {
}
fn remove_record(
&mut self,
&self,
_name: &str,
_record_type: crate::topology::DnsRecordType,
) -> Result<(), ExecutorError> {

View File

@@ -22,7 +22,7 @@ impl HttpServer for OPNSenseFirewall {
.await
.map_err(|e| ExecutorError::UnexpectedError(e.to_string()))?;
}
Url::Remote(_url) => todo!(),
Url::Url(_url) => todo!(),
}
Ok(())
}

View File

@@ -1,7 +1,8 @@
use crate::hardware::ManagementInterface;
use derive_new::new;
use serde::Serialize;
#[derive(new)]
#[derive(new, Serialize)]
pub struct OPNSenseManagementInterface {}
impl ManagementInterface for OPNSenseManagementInterface {

View File

@@ -22,7 +22,7 @@ impl TftpServer for OPNSenseFirewall {
.await
.map_err(|e| ExecutorError::UnexpectedError(e.to_string()))?;
}
Url::Remote(url) => todo!("This url is not supported yet {url}"),
Url::Url(url) => todo!("This url is not supported yet {url}"),
}
Ok(())
}

View File

@@ -1,19 +1,18 @@
use std::sync::Arc;
use async_trait::async_trait;
use derive_new::new;
use log::info;
use serde::Serialize;
use crate::{
domain::{data::Version, interpret::InterpretStatus},
interpret::{Interpret, InterpretError, InterpretName, Outcome},
inventory::Inventory,
topology::{DHCPStaticEntry, HAClusterTopology, HostBinding, IpAddress},
topology::{DHCPStaticEntry, DhcpServer, HostBinding, IpAddress, Topology},
};
use crate::domain::score::Score;
#[derive(Debug, new, Clone)]
#[derive(Debug, new, Clone, Serialize)]
pub struct DhcpScore {
pub host_binding: Vec<HostBinding>,
pub next_server: Option<IpAddress>,
@@ -23,18 +22,14 @@ pub struct DhcpScore {
pub filenameipxe: Option<String>,
}
impl Score for DhcpScore {
fn create_interpret(&self) -> Box<dyn Interpret> {
impl<T: Topology + DhcpServer> Score<T> for DhcpScore {
fn create_interpret(&self) -> Box<dyn Interpret<T>> {
Box::new(DhcpInterpret::new(self.clone()))
}
fn name(&self) -> String {
"DhcpScore".to_string()
}
fn clone_box(&self) -> Box<dyn Score> {
Box::new(self.clone())
}
}
// https://docs.opnsense.org/manual/dhcp.html#advanced-settings
@@ -55,10 +50,10 @@ impl DhcpInterpret {
status: InterpretStatus::QUEUED,
}
}
async fn add_static_entries(
async fn add_static_entries<D: DhcpServer>(
&self,
_inventory: &Inventory,
topology: &HAClusterTopology,
dhcp_server: &D,
) -> Result<Outcome, InterpretError> {
let dhcp_entries: Vec<DHCPStaticEntry> = self
.score
@@ -81,7 +76,6 @@ impl DhcpInterpret {
.collect();
info!("DHCPStaticEntry : {:?}", dhcp_entries);
let dhcp_server = Arc::new(topology.dhcp_server.clone());
info!("DHCP server : {:?}", dhcp_server);
let number_new_entries = dhcp_entries.len();
@@ -99,14 +93,13 @@ impl DhcpInterpret {
))
}
async fn set_pxe_options(
async fn set_pxe_options<D: DhcpServer>(
&self,
_inventory: &Inventory,
topology: &HAClusterTopology,
dhcp_server: &D,
) -> Result<Outcome, InterpretError> {
let next_server_outcome = match self.score.next_server {
Some(next_server) => {
let dhcp_server = Arc::new(topology.dhcp_server.clone());
dhcp_server.set_next_server(next_server).await?;
Outcome::new(
InterpretStatus::SUCCESS,
@@ -118,7 +111,6 @@ impl DhcpInterpret {
let boot_filename_outcome = match &self.score.boot_filename {
Some(boot_filename) => {
let dhcp_server = Arc::new(topology.dhcp_server.clone());
dhcp_server.set_boot_filename(&boot_filename).await?;
Outcome::new(
InterpretStatus::SUCCESS,
@@ -130,7 +122,6 @@ impl DhcpInterpret {
let filename_outcome = match &self.score.filename {
Some(filename) => {
let dhcp_server = Arc::new(topology.dhcp_server.clone());
dhcp_server.set_filename(&filename).await?;
Outcome::new(
InterpretStatus::SUCCESS,
@@ -142,7 +133,6 @@ impl DhcpInterpret {
let filename64_outcome = match &self.score.filename64 {
Some(filename64) => {
let dhcp_server = Arc::new(topology.dhcp_server.clone());
dhcp_server.set_filename64(&filename64).await?;
Outcome::new(
InterpretStatus::SUCCESS,
@@ -154,7 +144,6 @@ impl DhcpInterpret {
let filenameipxe_outcome = match &self.score.filenameipxe {
Some(filenameipxe) => {
let dhcp_server = Arc::new(topology.dhcp_server.clone());
dhcp_server.set_filenameipxe(&filenameipxe).await?;
Outcome::new(
InterpretStatus::SUCCESS,
@@ -184,7 +173,7 @@ impl DhcpInterpret {
}
#[async_trait]
impl Interpret for DhcpInterpret {
impl<T: DhcpServer> Interpret<T> for DhcpInterpret {
fn get_name(&self) -> InterpretName {
InterpretName::OPNSenseDHCP
}
@@ -204,15 +193,15 @@ impl Interpret for DhcpInterpret {
async fn execute(
&self,
inventory: &Inventory,
topology: &HAClusterTopology,
topology: &T,
) -> Result<Outcome, InterpretError> {
info!("Executing {} on inventory {inventory:?}", self.get_name());
info!("Executing DhcpInterpret on inventory {inventory:?}");
self.set_pxe_options(inventory, topology).await?;
self.add_static_entries(inventory, topology).await?;
topology.dhcp_server.commit_config().await?;
topology.commit_config().await?;
Ok(Outcome::new(
InterpretStatus::SUCCESS,

View File

@@ -1,33 +1,30 @@
use async_trait::async_trait;
use derive_new::new;
use log::info;
use serde::Serialize;
use crate::{
data::Version,
interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
inventory::Inventory,
score::Score,
topology::{DnsRecord, HAClusterTopology},
topology::{DnsRecord, DnsServer, Topology},
};
#[derive(Debug, new, Clone)]
#[derive(Debug, new, Clone, Serialize)]
pub struct DnsScore {
dns_entries: Vec<DnsRecord>,
register_dhcp_leases: Option<bool>,
}
impl Score for DnsScore {
fn create_interpret(&self) -> Box<dyn Interpret> {
impl<T: Topology + DnsServer> Score<T> for DnsScore {
fn create_interpret(&self) -> Box<dyn Interpret<T>> {
Box::new(DnsInterpret::new(self.clone()))
}
fn name(&self) -> String {
"DnsScore".to_string()
}
fn clone_box(&self) -> Box<dyn Score> {
Box::new(self.clone())
}
}
// https://docs.opnsense.org/manual/dhcp.html#advanced-settings
@@ -48,12 +45,11 @@ impl DnsInterpret {
status: InterpretStatus::QUEUED,
}
}
async fn serve_dhcp_entries(
async fn serve_dhcp_entries<T: DnsServer>(
&self,
_inventory: &Inventory,
topology: &HAClusterTopology,
dns: &T,
) -> Result<Outcome, InterpretError> {
let dns = topology.dns_server.clone();
if let Some(register) = self.score.register_dhcp_leases {
dns.register_dhcp_leases(register).await?;
}
@@ -64,15 +60,12 @@ impl DnsInterpret {
))
}
async fn ensure_hosts_registered(
async fn ensure_hosts_registered<D: DnsServer>(
&self,
topology: &HAClusterTopology,
dns_server: &D,
) -> Result<Outcome, InterpretError> {
let entries = &self.score.dns_entries;
topology
.dns_server
.ensure_hosts_registered(entries.clone())
.await?;
dns_server.ensure_hosts_registered(entries.clone()).await?;
Ok(Outcome::new(
InterpretStatus::SUCCESS,
@@ -85,7 +78,7 @@ impl DnsInterpret {
}
#[async_trait]
impl Interpret for DnsInterpret {
impl<T: Topology + DnsServer> Interpret<T> for DnsInterpret {
fn get_name(&self) -> InterpretName {
InterpretName::OPNSenseDns
}
@@ -105,14 +98,17 @@ impl Interpret for DnsInterpret {
async fn execute(
&self,
inventory: &Inventory,
topology: &HAClusterTopology,
topology: &T,
) -> Result<Outcome, InterpretError> {
info!("Executing {} on inventory {inventory:?}", self.get_name());
info!(
"Executing {} on inventory {inventory:?}",
<DnsInterpret as Interpret<T>>::get_name(self)
);
self.serve_dhcp_entries(inventory, topology).await?;
self.ensure_hosts_registered(&topology).await?;
self.ensure_hosts_registered(topology).await?;
topology.dns_server.commit_config().await?;
topology.commit_config().await?;
Ok(Outcome::new(
InterpretStatus::SUCCESS,

View File

@@ -1,20 +1,21 @@
use async_trait::async_trait;
use serde::Serialize;
use crate::{
data::Version,
interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
inventory::Inventory,
score::Score,
topology::HAClusterTopology,
topology::Topology,
};
/// Score that always errors. This is only useful for development/testing purposes. It does nothing
/// except returning Err(InterpretError) when interpreted.
#[derive(Debug, Clone)]
#[derive(Debug, Clone, Serialize)]
pub struct ErrorScore;
impl Score for ErrorScore {
fn create_interpret(&self) -> Box<dyn crate::interpret::Interpret> {
impl<T: Topology> Score<T> for ErrorScore {
fn create_interpret(&self) -> Box<dyn crate::interpret::Interpret<T>> {
Box::new(DummyInterpret {
result: Err(InterpretError::new("Error Score default error".to_string())),
status: InterpretStatus::QUEUED,
@@ -24,19 +25,15 @@ impl Score for ErrorScore {
fn name(&self) -> String {
"ErrorScore".to_string()
}
fn clone_box(&self) -> Box<dyn Score> {
Box::new(self.clone())
}
}
/// Score that always succeeds. This is only useful for development/testing purposes. It does nothing
/// except returning Ok(Outcome::success) when interpreted.
#[derive(Debug, Clone)]
#[derive(Debug, Clone, Serialize)]
pub struct SuccessScore;
impl Score for SuccessScore {
fn create_interpret(&self) -> Box<dyn crate::interpret::Interpret> {
impl<T: Topology> Score<T> for SuccessScore {
fn create_interpret(&self) -> Box<dyn Interpret<T>> {
Box::new(DummyInterpret {
result: Ok(Outcome::success("SuccessScore default success".to_string())),
status: InterpretStatus::QUEUED,
@@ -46,10 +43,6 @@ impl Score for SuccessScore {
fn name(&self) -> String {
"SuccessScore".to_string()
}
fn clone_box(&self) -> Box<dyn Score> {
Box::new(self.clone())
}
}
/// An interpret that only returns the result it is given when built. It does nothing else. Only
@@ -61,7 +54,7 @@ struct DummyInterpret {
}
#[async_trait]
impl Interpret for DummyInterpret {
impl<T: Topology> Interpret<T> for DummyInterpret {
fn get_name(&self) -> InterpretName {
InterpretName::Dummy
}
@@ -81,7 +74,7 @@ impl Interpret for DummyInterpret {
async fn execute(
&self,
_inventory: &Inventory,
_topology: &HAClusterTopology,
_topology: &T,
) -> Result<Outcome, InterpretError> {
self.result.clone()
}
@@ -89,21 +82,17 @@ impl Interpret for DummyInterpret {
/// Score that always panics. This is only useful for development/testing purposes. It does nothing
/// except panic! with an error message when interpreted
#[derive(Debug, Clone)]
#[derive(Debug, Clone, Serialize)]
pub struct PanicScore;
impl Score for PanicScore {
fn create_interpret(&self) -> Box<dyn crate::interpret::Interpret> {
impl<T: Topology> Score<T> for PanicScore {
fn create_interpret(&self) -> Box<dyn Interpret<T>> {
Box::new(PanicInterpret {})
}
fn name(&self) -> String {
"PanicScore".to_string()
}
fn clone_box(&self) -> Box<dyn Score> {
Box::new(self.clone())
}
}
/// An interpret that always panics when executed. Useful for development/testing purposes.
@@ -111,7 +100,7 @@ impl Score for PanicScore {
struct PanicInterpret;
#[async_trait]
impl Interpret for PanicInterpret {
impl<T: Topology> Interpret<T> for PanicInterpret {
fn get_name(&self) -> InterpretName {
InterpretName::Panic
}
@@ -131,7 +120,7 @@ impl Interpret for PanicInterpret {
async fn execute(
&self,
_inventory: &Inventory,
_topology: &HAClusterTopology,
_topology: &T,
) -> Result<Outcome, InterpretError> {
panic!("Panic interpret always panics when executed")
}

View File

@@ -0,0 +1,157 @@
use crate::data::{Id, Version};
use crate::interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome};
use crate::inventory::Inventory;
use crate::score::Score;
use crate::topology::{HelmCommand, Topology};
use async_trait::async_trait;
use helm_wrapper_rs;
use helm_wrapper_rs::blocking::{DefaultHelmExecutor, HelmExecutor};
use log::info;
pub use non_blank_string_rs::NonBlankString;
use serde::Serialize;
use std::collections::HashMap;
use std::path::Path;
use std::str::FromStr;
use temp_file::TempFile;
/// Declarative description of a Helm chart release to install or upgrade.
#[derive(Debug, Clone, Serialize)]
pub struct HelmChartScore {
    // Target namespace. NOTE(review): `None` currently hits a todo!() in the
    // interpret's execute — confirm intended fallback behavior.
    pub namespace: Option<NonBlankString>,
    // Helm release name, expected unique within the namespace.
    pub release_name: NonBlankString,
    // Chart reference as understood by the helm executor.
    pub chart_name: NonBlankString,
    // Chart version to pin; `None` defers version selection to helm.
    pub chart_version: Option<NonBlankString>,
    // Individual value overrides passed to the helm executor on top of
    // `values_yaml`.
    pub values_overrides: Option<HashMap<NonBlankString, String>>,
    // Raw values.yaml content; written to a temp file and handed to helm.
    pub values_yaml: Option<String>,
    // When true, `--create-namespace` is appended to the helm options.
    pub create_namespace: bool,
    /// Whether to run `helm upgrade --install` under the hood or only install when not present
    pub install_only: bool,
}
impl<T: Topology + HelmCommand> Score<T> for HelmChartScore {
    /// Wraps a clone of this score in a `HelmChartInterpret` ready to execute.
    fn create_interpret(&self) -> Box<dyn Interpret<T>> {
        let interpret = HelmChartInterpret {
            score: self.clone(),
        };
        Box::new(interpret)
    }

    fn name(&self) -> String {
        format!("{} {} HelmChartScore", self.release_name, self.chart_name)
    }
}
/// Interpret that applies a `HelmChartScore` by shelling out to helm
/// via `helm_wrapper_rs`.
#[derive(Debug, Serialize)]
pub struct HelmChartInterpret {
    // The score being applied; cloned from the originating `HelmChartScore`.
    pub score: HelmChartScore,
}
#[async_trait]
impl<T: Topology + HelmCommand> Interpret<T> for HelmChartInterpret {
    /// Installs or upgrades the scored Helm release.
    ///
    /// Flow: resolve namespace → materialize optional values.yaml to a temp
    /// file → optionally skip when the release already exists
    /// (`install_only`) → run install_or_upgrade and map helm's deploy
    /// status to an `Outcome`.
    async fn execute(
        &self,
        _inventory: &Inventory,
        _topology: &T,
    ) -> Result<Outcome, InterpretError> {
        // A missing namespace is currently unsupported and will panic here.
        let ns = self
            .score
            .namespace
            .as_ref()
            .unwrap_or_else(|| todo!("Get namespace from active kubernetes cluster"));

        // `tf` is declared in this scope so the temp file outlives the
        // borrow held by `yaml_path` until helm has consumed it.
        let tf: TempFile;
        let yaml_path: Option<&Path> = match self.score.values_yaml.as_ref() {
            Some(yaml_str) => {
                tf = temp_file::with_contents(yaml_str.as_bytes());
                Some(tf.path())
            }
            None => None,
        };

        let helm_executor = DefaultHelmExecutor::new();

        let mut helm_options = Vec::new();

        if self.score.create_namespace {
            helm_options.push(NonBlankString::from_str("--create-namespace").unwrap());
        }

        if self.score.install_only {
            // List existing releases to decide whether to skip installation.
            let chart_list = match helm_executor.list(Some(ns)) {
                Ok(charts) => charts,
                Err(e) => {
                    return Err(InterpretError::new(format!(
                        "Failed to list scores in namespace {:?} because of error : {}",
                        self.score.namespace, e
                    )));
                }
            };

            if chart_list
                .iter()
                .any(|item| item.name == self.score.release_name.to_string())
            {
                info!(
                    "Release '{}' already exists in namespace '{}'. Skipping installation as install_only is true.",
                    self.score.release_name, ns
                );
                return Ok(Outcome::new(
                    InterpretStatus::SUCCESS,
                    format!(
                        "Helm Chart '{}' already installed to namespace {ns} and install_only=true",
                        self.score.release_name
                    ),
                ));
            } else {
                info!(
                    "Release '{}' not found in namespace '{}'. Proceeding with installation.",
                    self.score.release_name, ns
                );
            }
        }

        let res = helm_executor.install_or_upgrade(
            &ns,
            &self.score.release_name,
            &self.score.chart_name,
            self.score.chart_version.as_ref(),
            self.score.values_overrides.as_ref(),
            yaml_path,
            Some(&helm_options),
        );

        let status = match res {
            Ok(status) => status,
            Err(err) => return Err(InterpretError::new(err.to_string())),
        };

        // Map helm's deploy status onto Harmony's outcome model: pending
        // states are surfaced as RUNNING rather than failures.
        match status {
            helm_wrapper_rs::HelmDeployStatus::Deployed => Ok(Outcome::new(
                InterpretStatus::SUCCESS,
                "Helm Chart deployed".to_string(),
            )),
            helm_wrapper_rs::HelmDeployStatus::PendingInstall => Ok(Outcome::new(
                InterpretStatus::RUNNING,
                "Helm Chart Pending install".to_string(),
            )),
            helm_wrapper_rs::HelmDeployStatus::PendingUpgrade => Ok(Outcome::new(
                InterpretStatus::RUNNING,
                "Helm Chart pending upgrade".to_string(),
            )),
            helm_wrapper_rs::HelmDeployStatus::Failed => Err(InterpretError::new(
                "Failed to install helm chart".to_string(),
            )),
        }
    }

    // The metadata accessors below are unimplemented stubs; calling any of
    // them panics.
    fn get_name(&self) -> InterpretName {
        todo!()
    }

    fn get_version(&self) -> Version {
        todo!()
    }

    fn get_status(&self) -> InterpretStatus {
        todo!()
    }

    fn get_children(&self) -> Vec<Id> {
        todo!()
    }
}

View File

@@ -0,0 +1 @@
pub mod chart;

View File

@@ -1,31 +1,28 @@
use async_trait::async_trait;
use derive_new::new;
use serde::Serialize;
use crate::{
data::{Id, Version},
interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
inventory::Inventory,
score::Score,
topology::{HAClusterTopology, Url},
topology::{HttpServer, Topology, Url},
};
#[derive(Debug, new, Clone)]
#[derive(Debug, new, Clone, Serialize)]
pub struct HttpScore {
files_to_serve: Url,
}
impl Score for HttpScore {
fn create_interpret(&self) -> Box<dyn Interpret> {
impl<T: Topology + HttpServer> Score<T> for HttpScore {
fn create_interpret(&self) -> Box<dyn Interpret<T>> {
Box::new(HttpInterpret::new(self.clone()))
}
fn name(&self) -> String {
"HttpScore".to_string()
}
fn clone_box(&self) -> Box<dyn Score> {
Box::new(self.clone())
}
}
#[derive(Debug, new, Clone)]
@@ -34,13 +31,12 @@ pub struct HttpInterpret {
}
#[async_trait]
impl Interpret for HttpInterpret {
impl<T: Topology + HttpServer> Interpret<T> for HttpInterpret {
async fn execute(
&self,
_inventory: &Inventory,
topology: &HAClusterTopology,
http_server: &T,
) -> Result<Outcome, InterpretError> {
let http_server = &topology.http_server;
http_server.ensure_initialized().await?;
// http_server.set_ip(topology.router.get_gateway()).await?;
http_server.serve_files(&self.score.files_to_serve).await?;

View File

@@ -1,44 +1,41 @@
use async_trait::async_trait;
use derive_new::new;
use serde::Serialize;
use crate::{
data::{Id, Version},
interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
inventory::Inventory,
score::Score,
topology::HAClusterTopology,
topology::Topology,
};
#[derive(Debug, new, Clone)]
#[derive(Debug, new, Clone, Serialize)]
pub struct IpxeScore {
//files_to_serve: Url,
}
impl Score for IpxeScore {
fn create_interpret(&self) -> Box<dyn Interpret> {
impl<T: Topology> Score<T> for IpxeScore {
fn create_interpret(&self) -> Box<dyn Interpret<T>> {
Box::new(IpxeInterpret::new(self.clone()))
}
fn name(&self) -> String {
"IpxeScore".to_string()
}
fn clone_box(&self) -> Box<dyn Score> {
Box::new(self.clone())
}
}
#[derive(Debug, new, Clone)]
pub struct IpxeInterpret {
score: IpxeScore,
_score: IpxeScore,
}
#[async_trait]
impl Interpret for IpxeInterpret {
impl<T: Topology> Interpret<T> for IpxeInterpret {
async fn execute(
&self,
_inventory: &Inventory,
_topology: &HAClusterTopology,
_topology: &T,
) -> Result<Outcome, InterpretError> {
/*
let http_server = &topology.http_server;
@@ -48,12 +45,7 @@ impl Interpret for IpxeInterpret {
self.score.files_to_serve
)))
*/
Ok(Outcome::success(format!(
"Success running {}",
self.score.name()
)))
//Ok(Outcome::success("Success".to_string()))
todo!();
}
fn get_name(&self) -> InterpretName {

View File

@@ -0,0 +1,82 @@
use std::path::PathBuf;
use async_trait::async_trait;
use log::info;
use serde::Serialize;
use crate::{
config::HARMONY_CONFIG_DIR,
data::{Id, Version},
interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
inventory::Inventory,
score::Score,
topology::Topology,
};
/// Score describing a local K3D cluster installation.
#[derive(Debug, Clone, Serialize)]
pub struct K3DInstallationScore {
    // Directory where the k3d tooling/state is installed.
    pub installation_path: PathBuf,
    // Name of the k3d cluster to create or reuse.
    pub cluster_name: String,
}
impl Default for K3DInstallationScore {
    /// Installs under the Harmony config directory and names the cluster
    /// "harmony".
    fn default() -> Self {
        let installation_path = HARMONY_CONFIG_DIR.join("k3d");
        Self {
            installation_path,
            cluster_name: String::from("harmony"),
        }
    }
}
impl<T: Topology> Score<T> for K3DInstallationScore {
    /// Wraps a clone of this score in a `K3dInstallationInterpret`.
    fn create_interpret(&self) -> Box<dyn crate::interpret::Interpret<T>> {
        Box::new(K3dInstallationInterpret {
            score: self.clone(),
        })
    }

    // Was `todo!()`, which made any caller of `Score::name` panic; return the
    // score's type name, consistent with the other Score implementations.
    fn name(&self) -> String {
        "K3DInstallationScore".to_string()
    }
}
/// Interpret that ensures a local K3D cluster exists via the `k3d_rs` crate.
#[derive(Debug)]
pub struct K3dInstallationInterpret {
    // The score being applied; cloned from the originating K3DInstallationScore.
    score: K3DInstallationScore,
}
#[async_trait]
impl<T: Topology> Interpret<T> for K3dInstallationInterpret {
    /// Ensures the configured k3d cluster is installed; inventory and
    /// topology are unused since k3d always targets the local machine.
    async fn execute(
        &self,
        _inventory: &Inventory,
        _topology: &T,
    ) -> Result<Outcome, InterpretError> {
        let k3d = k3d_rs::K3d::new(
            self.score.installation_path.clone(),
            Some(self.score.cluster_name.clone()),
        );

        // `ensure_installed` is presumably idempotent (install-or-reuse) —
        // TODO confirm against k3d_rs docs.
        match k3d.ensure_installed().await {
            Ok(_client) => {
                let msg = format!("k3d cluster {} is installed ", self.score.cluster_name);
                info!("{msg}");
                Ok(Outcome::success(msg))
            }
            Err(msg) => Err(InterpretError::new(format!(
                "K3dInstallationInterpret failed to ensure k3d is installed : {msg}"
            ))),
        }
    }

    fn get_name(&self) -> InterpretName {
        InterpretName::K3dInstallation
    }

    // The remaining metadata accessors are unimplemented stubs; calling any
    // of them panics.
    fn get_version(&self) -> Version {
        todo!()
    }

    fn get_status(&self) -> InterpretStatus {
        todo!()
    }

    fn get_children(&self) -> Vec<Id> {
        todo!()
    }
}

View File

@@ -0,0 +1,2 @@
mod install;
pub use install::*;

View File

@@ -1,19 +1,26 @@
use k8s_openapi::api::apps::v1::Deployment;
use serde::Serialize;
use serde_json::json;
use crate::{interpret::Interpret, score::Score};
use crate::{
interpret::Interpret,
score::Score,
topology::{K8sclient, Topology},
};
use super::resource::{K8sResourceInterpret, K8sResourceScore};
#[derive(Debug, Clone)]
#[derive(Debug, Clone, Serialize)]
pub struct K8sDeploymentScore {
pub name: String,
pub image: String,
pub namespace: Option<String>,
pub env_vars: serde_json::Value,
}
impl Score for K8sDeploymentScore {
fn create_interpret(&self) -> Box<dyn Interpret> {
let deployment: Deployment = serde_json::from_value(json!(
impl<T: Topology + K8sclient> Score<T> for K8sDeploymentScore {
fn create_interpret(&self) -> Box<dyn Interpret<T>> {
let deployment = json!(
{
"metadata": {
"name": self.name
@@ -33,26 +40,25 @@ impl Score for K8sDeploymentScore {
"spec": {
"containers": [
{
"image": self.image,
"name": self.image
"image": self.image,
"name": self.name,
"imagePullPolicy": "IfNotPresent",
"env": self.env_vars,
}
]
}
}
}
}
))
.unwrap();
);
let deployment: Deployment = serde_json::from_value(deployment).unwrap();
Box::new(K8sResourceInterpret {
score: K8sResourceScore::single(deployment.clone()),
score: K8sResourceScore::single(deployment.clone(), self.namespace.clone()),
})
}
fn name(&self) -> String {
"K8sDeploymentScore".to_string()
}
fn clone_box(&self) -> Box<dyn Score> {
Box::new(self.clone())
}
}

View File

@@ -1,2 +1,3 @@
pub mod deployment;
pub mod namespace;
pub mod resource;

View File

@@ -0,0 +1,46 @@
use k8s_openapi::api::core::v1::Namespace;
use non_blank_string_rs::NonBlankString;
use serde::Serialize;
use serde_json::json;
use crate::{
interpret::Interpret,
score::Score,
topology::{K8sclient, Topology},
};
/// Declares a Kubernetes `Namespace` to be ensured on the target cluster.
#[derive(Debug, Clone, Serialize)]
pub struct K8sNamespaceScore {
    // Namespace name; the `None` case has no defined behavior yet (the
    // interpret construction hits a `todo!()` for it).
    pub name: Option<NonBlankString>,
}
impl<T: Topology + K8sclient> Score<T> for K8sNamespaceScore {
    /// Currently unimplemented: validates that the namespace manifest
    /// deserializes into a `Namespace`, then always panics via `todo!()`
    /// because only namespace-scoped resources are supported so far.
    fn create_interpret(&self) -> Box<dyn Interpret<T>> {
        let name = match &self.name {
            Some(name) => name,
            // No behavior defined yet when no namespace is given.
            None => todo!(
                "Return NoOp interpret when no namespace specified or something that makes sense"
            ),
        };
        // Built only to prove the manifest is well-formed; unused until
        // cluster-scoped resources are supported (see `todo!` below).
        let _namespace: Namespace = serde_json::from_value(json!(
            {
                "apiVersion": "v1",
                "kind": "Namespace",
                "metadata": {
                    "name": name,
                },
            }
        ))
        .unwrap();
        todo!(
            "We currently only support namespaced ressources (see Scope = NamespaceResourceScope)"
        );
        // Box::new(K8sResourceInterpret {
        //     score: K8sResourceScore::single(namespace.clone()),
        // })
    }

    fn name(&self) -> String {
        "K8sNamespaceScore".to_string()
    }
}

View File

@@ -1,25 +1,27 @@
use async_trait::async_trait;
use k8s_openapi::NamespaceResourceScope;
use kube::Resource;
use serde::de::DeserializeOwned;
use serde::{Serialize, de::DeserializeOwned};
use crate::{
data::{Id, Version},
interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
inventory::Inventory,
score::Score,
topology::HAClusterTopology,
topology::{K8sclient, Topology},
};
#[derive(Debug, Clone)]
#[derive(Debug, Clone, Serialize)]
pub struct K8sResourceScore<K: Resource + std::fmt::Debug> {
pub resource: Vec<K>,
pub namespace: Option<String>,
}
impl<K: Resource + std::fmt::Debug> K8sResourceScore<K> {
pub fn single(resource: K) -> Self {
pub fn single(resource: K, namespace: Option<String>) -> Self {
Self {
resource: vec![resource],
namespace,
}
}
}
@@ -34,21 +36,18 @@ impl<
+ 'static
+ Send
+ Clone,
> Score for K8sResourceScore<K>
T: Topology,
> Score<T> for K8sResourceScore<K>
where
<K as kube::Resource>::DynamicType: Default,
{
fn create_interpret(&self) -> Box<dyn Interpret> {
fn create_interpret(&self) -> Box<dyn Interpret<T>> {
todo!()
}
fn name(&self) -> String {
"K8sResourceScore".to_string()
}
fn clone_box(&self) -> Box<dyn Score> {
Box::new(self.clone())
}
}
#[derive(Debug)]
@@ -66,20 +65,21 @@ impl<
+ Default
+ Send
+ Sync,
> Interpret for K8sResourceInterpret<K>
T: Topology + K8sclient,
> Interpret<T> for K8sResourceInterpret<K>
where
<K as kube::Resource>::DynamicType: Default,
{
async fn execute(
&self,
_inventory: &Inventory,
topology: &HAClusterTopology,
topology: &T,
) -> Result<Outcome, InterpretError> {
topology
.oc_client()
.k8s_client()
.await
.expect("Environment should provide enough information to instanciate a client")
.apply_namespaced(&self.score.resource)
.apply_namespaced(&self.score.resource, self.score.namespace.as_deref())
.await?;
Ok(Outcome::success(

385
harmony/src/modules/lamp.rs Normal file
View File

@@ -0,0 +1,385 @@
use convert_case::{Case, Casing};
use dockerfile_builder::instruction::{CMD, COPY, ENV, EXPOSE, FROM, RUN, WORKDIR};
use dockerfile_builder::{Dockerfile, instruction_builder::EnvBuilder};
use non_blank_string_rs::NonBlankString;
use serde_json::json;
use std::collections::HashMap;
use std::fs;
use std::path::{Path, PathBuf};
use std::str::FromStr;
use async_trait::async_trait;
use log::{debug, info};
use serde::Serialize;
use crate::config::{REGISTRY_PROJECT, REGISTRY_URL};
use crate::topology::HelmCommand;
use crate::{
data::{Id, Version},
interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
inventory::Inventory,
modules::k8s::deployment::K8sDeploymentScore,
score::Score,
topology::{K8sclient, Topology, Url},
};
use super::helm::chart::HelmChartScore;
/// Describes a LAMP (Linux/Apache/MySQL/PHP) application deployment.
#[derive(Debug, Clone, Serialize)]
pub struct LAMPScore {
    // Logical application name; used to derive the image tag
    // (`<name>-php-apache`), the DB release (`<name>-database`) and the
    // generated secret name.
    pub name: String,
    // Public URL for the application — not yet consumed by the interpret
    // in this file (TODO confirm intended use).
    pub domain: Url,
    // Project-level configuration (sources, SSL, database sizing).
    pub config: LAMPConfig,
    // PHP version; its major.minor selects the `php:<ver>-apache` base image.
    pub php_version: Version,
}
/// Configuration knobs for a LAMP deployment.
#[derive(Debug, Clone, Serialize)]
pub struct LAMPConfig {
    // Directory containing the PHP application sources; also used as the
    // Docker build context.
    pub project_root: PathBuf,
    // Whether SSL should be enabled — not consumed by the code visible in
    // this file (TODO confirm where it takes effect).
    pub ssl_enabled: bool,
    // Optional MariaDB volume size, forwarded as the helm value
    // `primary.persistence.size`.
    pub database_size: Option<String>,
}
impl Default for LAMPConfig {
fn default() -> Self {
LAMPConfig {
project_root: Path::new("./src").to_path_buf(),
ssl_enabled: true,
database_size: None,
}
}
}
impl<T: Topology + K8sclient + HelmCommand> Score<T> for LAMPScore {
    /// Wraps this score in a `LAMPInterpret` targeting the default
    /// "harmony-lamp" namespace.
    fn create_interpret(&self) -> Box<dyn Interpret<T>> {
        let interpret = LAMPInterpret {
            score: self.clone(),
            namespace: String::from("harmony-lamp"),
        };
        Box::new(interpret)
    }

    fn name(&self) -> String {
        "LampScore".to_string()
    }
}
/// Interpret that builds, pushes and deploys the LAMP stack described by a
/// `LAMPScore`.
#[derive(Debug)]
pub struct LAMPInterpret {
    // The score whose settings drive the deployment.
    score: LAMPScore,
    // Kubernetes namespace targeted by all deployed resources.
    namespace: String,
}
#[async_trait]
impl<T: Topology + K8sclient + HelmCommand> Interpret<T> for LAMPInterpret {
    /// Builds and pushes the LAMP docker image, deploys the MariaDB chart,
    /// then deploys the application container.
    ///
    /// NOTE(review): this currently ends in an unconditional `todo!()`, so it
    /// always panics after performing the deployment steps — confirm this is
    /// intentional work-in-progress.
    async fn execute(
        &self,
        inventory: &Inventory,
        topology: &T,
    ) -> Result<Outcome, InterpretError> {
        let image_name = match self.build_docker_image() {
            Ok(name) => name,
            Err(e) => {
                return Err(InterpretError::new(format!(
                    "Could not build LAMP docker image {e}"
                )));
            }
        };
        info!("LAMP docker image built {image_name}");
        let remote_name = match self.push_docker_image(&image_name) {
            Ok(remote_name) => remote_name,
            Err(e) => {
                return Err(InterpretError::new(format!(
                    "Could not push docker image {e}"
                )));
            }
        };
        info!("LAMP docker image pushed to {remote_name}");
        info!("Deploying database");
        self.deploy_database(inventory, topology).await?;
        // The DB release is `<name>-database` (see deploy_database);
        // `<name>-database-mariadb` is presumably the secret the chart
        // generates for it — verify against the chart's conventions.
        let base_name = self.score.name.to_case(Case::Kebab);
        let secret_name = format!("{}-database-mariadb", base_name);
        let deployment_score = K8sDeploymentScore {
            name: <LAMPScore as Score<T>>::name(&self.score).to_case(Case::Kebab),
            image: remote_name,
            namespace: self.get_namespace().map(|nbs| nbs.to_string()),
            // Wire the app container to the database via env vars; the
            // password is read from the chart-managed secret.
            env_vars: json!([
                {
                    "name": "MYSQL_PASSWORD",
                    "valueFrom": {
                        "secretKeyRef": {
                            "name": secret_name,
                            "key": "mariadb-root-password"
                        }
                    }
                },
                {
                    "name": "MYSQL_HOST",
                    "value": secret_name
                },
            ]),
        };
        info!("Deploying score {deployment_score:#?}");
        deployment_score
            .create_interpret()
            .execute(inventory, topology)
            .await?;
        info!("LAMP deployment_score {deployment_score:?}");
        todo!("1. [x] Use HelmChartScore to deploy mariadb
2. [x] Use deploymentScore to deploy lamp docker container
3. for remote clusters, push the image to some registry (use nationtech's for demos? push to the cluster's registry?)");
    }

    // NOTE(review): the accessors below are unimplemented stubs and will panic
    // if called.
    fn get_name(&self) -> InterpretName {
        todo!()
    }

    fn get_version(&self) -> Version {
        todo!()
    }

    fn get_status(&self) -> InterpretStatus {
        todo!()
    }

    fn get_children(&self) -> Vec<Id> {
        todo!()
    }
}
impl LAMPInterpret {
async fn deploy_database<T: Topology + K8sclient + HelmCommand>(
&self,
inventory: &Inventory,
topology: &T,
) -> Result<Outcome, InterpretError> {
let mut values_overrides = HashMap::new();
if let Some(database_size) = self.score.config.database_size.clone() {
values_overrides.insert(
NonBlankString::from_str("primary.persistence.size").unwrap(),
database_size,
);
}
let score = HelmChartScore {
namespace: self.get_namespace(),
release_name: NonBlankString::from_str(&format!("{}-database", self.score.name))
.unwrap(),
chart_name: NonBlankString::from_str(
"oci://registry-1.docker.io/bitnamicharts/mariadb",
)
.unwrap(),
chart_version: None,
values_overrides: Some(values_overrides),
create_namespace: true,
install_only: true,
values_yaml: None,
};
score.create_interpret().execute(inventory, topology).await
}
fn build_dockerfile(&self, score: &LAMPScore) -> Result<PathBuf, Box<dyn std::error::Error>> {
let mut dockerfile = Dockerfile::new();
// Use the PHP version from the score to determine the base image
let php_version = score.php_version.to_string();
let php_major_minor = php_version
.split('.')
.take(2)
.collect::<Vec<&str>>()
.join(".");
// Base image selection - using official PHP image with Apache
dockerfile.push(FROM::from(format!("php:{}-apache", php_major_minor)));
// Set environment variables for PHP configuration
dockerfile.push(ENV::from("PHP_MEMORY_LIMIT=256M"));
dockerfile.push(ENV::from("PHP_MAX_EXECUTION_TIME=30"));
dockerfile.push(
EnvBuilder::builder()
.key("PHP_ERROR_REPORTING")
.value("\"E_ERROR | E_WARNING | E_PARSE\"")
.build()
.unwrap(),
);
// Install necessary PHP extensions and dependencies
dockerfile.push(RUN::from(
"apt-get update && \
apt-get install -y --no-install-recommends \
libfreetype6-dev \
libjpeg62-turbo-dev \
libpng-dev \
libzip-dev \
unzip \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/*",
));
dockerfile.push(RUN::from(
"docker-php-ext-configure gd --with-freetype --with-jpeg && \
docker-php-ext-install -j$(nproc) \
gd \
mysqli \
pdo_mysql \
zip \
opcache",
));
// Copy PHP configuration
dockerfile.push(RUN::from("mkdir -p /usr/local/etc/php/conf.d/"));
// Create and copy a custom PHP configuration
let php_config = r#"
memory_limit = ${PHP_MEMORY_LIMIT}
max_execution_time = ${PHP_MAX_EXECUTION_TIME}
error_reporting = ${PHP_ERROR_REPORTING}
display_errors = Off
log_errors = On
error_log = /dev/stderr
date.timezone = UTC
; Opcache configuration for production
opcache.enable=1
opcache.memory_consumption=128
opcache.interned_strings_buffer=8
opcache.max_accelerated_files=4000
opcache.revalidate_freq=2
opcache.fast_shutdown=1
"#;
// Save this configuration to a temporary file within the project root
let config_path = Path::new(&score.config.project_root).join("docker-php.ini");
fs::write(&config_path, php_config)?;
// Reference the file within the Docker context (where the build runs)
dockerfile.push(COPY::from(
"docker-php.ini /usr/local/etc/php/conf.d/docker-php.ini",
));
// Security hardening
dockerfile.push(RUN::from(
"a2enmod headers && \
a2enmod rewrite && \
sed -i 's/ServerTokens OS/ServerTokens Prod/' /etc/apache2/conf-enabled/security.conf && \
sed -i 's/ServerSignature On/ServerSignature Off/' /etc/apache2/conf-enabled/security.conf"
));
// Set env vars
dockerfile.push(RUN::from(
"echo 'PassEnv MYSQL_PASSWORD' >> /etc/apache2/sites-available/000-default.conf \
&& echo 'PassEnv MYSQL_USER' >> /etc/apache2/sites-available/000-default.conf \
&& echo 'PassEnv MYSQL_HOST' >> /etc/apache2/sites-available/000-default.conf",
));
// Create a dedicated user for running Apache
dockerfile.push(RUN::from(
"groupadd -g 1000 appuser && \
useradd -u 1000 -g appuser -m -s /bin/bash appuser && \
chown -R appuser:appuser /var/www/html",
));
// Set the working directory
dockerfile.push(WORKDIR::from("/var/www/html"));
// Copy application code from the project root to the container
// Note: In Dockerfile, the COPY context is relative to the build context
// We'll handle the actual context in the build_docker_image method
dockerfile.push(COPY::from(". /var/www/html"));
// Fix permissions
dockerfile.push(RUN::from("chown -R appuser:appuser /var/www/html"));
// Expose Apache port
dockerfile.push(EXPOSE::from("80/tcp"));
// Set the default command
dockerfile.push(CMD::from("apache2-foreground"));
// Save the Dockerfile to disk in the project root
let dockerfile_path = Path::new(&score.config.project_root).join("Dockerfile");
fs::write(&dockerfile_path, dockerfile.to_string())?;
Ok(dockerfile_path)
}
fn check_output(
&self,
output: &std::process::Output,
msg: &str,
) -> Result<(), Box<dyn std::error::Error>> {
if !output.status.success() {
return Err(format!("{msg}: {}", String::from_utf8_lossy(&output.stderr)).into());
}
Ok(())
}
fn push_docker_image(&self, image_name: &str) -> Result<String, Box<dyn std::error::Error>> {
let full_tag = format!("{}/{}/{}", *REGISTRY_URL, *REGISTRY_PROJECT, &image_name);
let output = std::process::Command::new("docker")
.args(["tag", image_name, &full_tag])
.output()?;
self.check_output(&output, "Tagging docker image failed")?;
debug!(
"docker tag output {} {}",
String::from_utf8_lossy(&output.stdout),
String::from_utf8_lossy(&output.stderr)
);
let output = std::process::Command::new("docker")
.args(["push", &full_tag])
.output()?;
self.check_output(&output, "Pushing docker image failed")?;
debug!(
"docker push output {} {}",
String::from_utf8_lossy(&output.stdout),
String::from_utf8_lossy(&output.stderr)
);
Ok(full_tag)
}
pub fn build_docker_image(&self) -> Result<String, Box<dyn std::error::Error>> {
info!("Generating Dockerfile");
let dockerfile = self.build_dockerfile(&self.score)?;
info!(
"Building Docker image with file {} from root {}",
dockerfile.to_string_lossy(),
self.score.config.project_root.to_string_lossy()
);
let image_name = format!("{}-php-apache", self.score.name);
let project_root = &self.score.config.project_root;
let output = std::process::Command::new("docker")
.args([
"build",
"--file",
dockerfile.to_str().unwrap(),
"-t",
&image_name,
project_root.to_str().unwrap(),
])
.output()?;
if !output.status.success() {
return Err(format!(
"Failed to build Docker image: {}",
String::from_utf8_lossy(&output.stderr)
)
.into());
}
Ok(image_name)
}
fn get_namespace(&self) -> Option<NonBlankString> {
Some(NonBlankString::from_str(&self.namespace).unwrap())
}
}

View File

@@ -1,15 +1,16 @@
use async_trait::async_trait;
use log::info;
use serde::Serialize;
use crate::{
data::{Id, Version},
interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
inventory::Inventory,
score::Score,
topology::{HAClusterTopology, LoadBalancerService},
topology::{LoadBalancer, LoadBalancerService, Topology},
};
#[derive(Debug, Clone)]
#[derive(Debug, Clone, Serialize)]
pub struct LoadBalancerScore {
pub public_services: Vec<LoadBalancerService>,
pub private_services: Vec<LoadBalancerService>,
@@ -19,18 +20,14 @@ pub struct LoadBalancerScore {
// uuid?
}
impl Score for LoadBalancerScore {
fn create_interpret(&self) -> Box<dyn Interpret> {
impl<T: Topology + LoadBalancer> Score<T> for LoadBalancerScore {
fn create_interpret(&self) -> Box<dyn Interpret<T>> {
Box::new(LoadBalancerInterpret::new(self.clone()))
}
fn name(&self) -> String {
"LoadBalancerScore".to_string()
}
fn clone_box(&self) -> Box<dyn Score> {
Box::new(self.clone())
}
}
#[derive(Debug)]
@@ -51,37 +48,32 @@ impl LoadBalancerInterpret {
}
#[async_trait]
impl Interpret for LoadBalancerInterpret {
impl<T: Topology + LoadBalancer> Interpret<T> for LoadBalancerInterpret {
async fn execute(
&self,
_inventory: &Inventory,
topology: &HAClusterTopology,
load_balancer: &T,
) -> Result<Outcome, InterpretError> {
info!(
"Making sure Load Balancer is initialized: {:?}",
topology.load_balancer.ensure_initialized().await?
load_balancer.ensure_initialized().await?
);
for service in self.score.public_services.iter() {
info!("Ensuring service exists {service:?}");
topology
.load_balancer
.ensure_service_exists(service)
.await?;
load_balancer.ensure_service_exists(service).await?;
}
for service in self.score.private_services.iter() {
info!("Ensuring private service exists {service:?}");
topology
.load_balancer
.ensure_service_exists(service)
.await?;
load_balancer.ensure_service_exists(service).await?;
}
info!("Applying load balancer configuration");
topology.load_balancer.commit_config().await?;
load_balancer.commit_config().await?;
info!("Making a full reload and restart of haproxy");
topology.load_balancer.reload_restart().await?;
load_balancer.reload_restart().await?;
Ok(Outcome::success(format!(
"Load balancer successfully configured {} services",
self.score.public_services.len() + self.score.private_services.len()

View File

@@ -1,8 +1,11 @@
pub mod dhcp;
pub mod dns;
pub mod dummy;
pub mod helm;
pub mod http;
pub mod k3d;
pub mod k8s;
pub mod lamp;
pub mod load_balancer;
pub mod okd;
pub mod opnsense;

View File

@@ -1,12 +1,14 @@
use serde::Serialize;
use crate::{
interpret::Interpret,
inventory::Inventory,
modules::dhcp::DhcpScore,
score::Score,
topology::{HAClusterTopology, HostBinding},
topology::{DhcpServer, HAClusterTopology, HostBinding, Topology},
};
#[derive(Debug, Clone)]
#[derive(Debug, Clone, Serialize)]
pub struct OKDBootstrapDhcpScore {
dhcp_score: DhcpScore,
}
@@ -50,16 +52,12 @@ impl OKDBootstrapDhcpScore {
}
}
impl Score for OKDBootstrapDhcpScore {
fn create_interpret(&self) -> Box<dyn Interpret> {
impl<T: Topology + DhcpServer> Score<T> for OKDBootstrapDhcpScore {
fn create_interpret(&self) -> Box<dyn Interpret<T>> {
self.dhcp_score.create_interpret()
}
fn name(&self) -> String {
"OKDBootstrapDhcpScore".to_string()
}
fn clone_box(&self) -> Box<dyn Score> {
Box::new(self.clone())
}
}

View File

@@ -1,16 +1,18 @@
use std::net::SocketAddr;
use serde::Serialize;
use crate::{
interpret::Interpret,
modules::load_balancer::LoadBalancerScore,
score::Score,
topology::{
BackendServer, HAClusterTopology, HealthCheck, HttpMethod, HttpStatusCode,
LoadBalancerService,
BackendServer, HAClusterTopology, HealthCheck, HttpMethod, HttpStatusCode, LoadBalancer,
LoadBalancerService, Topology,
},
};
#[derive(Debug, Clone)]
#[derive(Debug, Clone, Serialize)]
pub struct OKDBootstrapLoadBalancerScore {
load_balancer_score: LoadBalancerScore,
}
@@ -69,16 +71,12 @@ impl OKDBootstrapLoadBalancerScore {
}
}
impl Score for OKDBootstrapLoadBalancerScore {
fn create_interpret(&self) -> Box<dyn Interpret> {
impl<T: Topology + LoadBalancer> Score<T> for OKDBootstrapLoadBalancerScore {
fn create_interpret(&self) -> Box<dyn Interpret<T>> {
self.load_balancer_score.create_interpret()
}
fn name(&self) -> String {
"OKDBootstrapLoadBalancerScore".to_string()
}
fn clone_box(&self) -> Box<dyn Score> {
Box::new(self.clone())
}
}

View File

@@ -1,12 +1,14 @@
use serde::Serialize;
use crate::{
interpret::Interpret,
inventory::Inventory,
modules::dhcp::DhcpScore,
score::Score,
topology::{HAClusterTopology, HostBinding},
topology::{DhcpServer, HAClusterTopology, HostBinding, Topology},
};
#[derive(Debug, Clone)]
#[derive(Debug, Clone, Serialize)]
pub struct OKDDhcpScore {
dhcp_score: DhcpScore,
}
@@ -60,16 +62,12 @@ impl OKDDhcpScore {
}
}
impl Score for OKDDhcpScore {
fn create_interpret(&self) -> Box<dyn Interpret> {
impl<T: Topology + DhcpServer> Score<T> for OKDDhcpScore {
fn create_interpret(&self) -> Box<dyn Interpret<T>> {
self.dhcp_score.create_interpret()
}
fn name(&self) -> String {
"OKDDhcpScore".to_string()
}
fn clone_box(&self) -> Box<dyn Score> {
Box::new(self.clone())
}
}

View File

@@ -1,11 +1,13 @@
use serde::Serialize;
use crate::{
interpret::Interpret,
modules::dns::DnsScore,
score::Score,
topology::{DnsRecord, DnsRecordType, HAClusterTopology},
topology::{DnsRecord, DnsRecordType, DnsServer, HAClusterTopology, Topology},
};
#[derive(Debug, Clone)]
#[derive(Debug, Clone, Serialize)]
pub struct OKDDnsScore {
dns_score: DnsScore,
}
@@ -40,16 +42,12 @@ impl OKDDnsScore {
}
}
impl Score for OKDDnsScore {
fn create_interpret(&self) -> Box<dyn Interpret> {
impl<T: Topology + DnsServer> Score<T> for OKDDnsScore {
fn create_interpret(&self) -> Box<dyn Interpret<T>> {
self.dns_score.create_interpret()
}
fn name(&self) -> String {
"OKDDnsScore".to_string()
}
fn clone_box(&self) -> Box<dyn Score> {
Box::new(self.clone())
}
}

View File

@@ -1,16 +1,24 @@
use std::net::SocketAddr;
use serde::Serialize;
use crate::{
interpret::Interpret,
modules::load_balancer::LoadBalancerScore,
score::Score,
topology::{
BackendServer, HAClusterTopology, HealthCheck, HttpMethod, HttpStatusCode,
LoadBalancerService,
BackendServer, HAClusterTopology, HealthCheck, HttpMethod, HttpStatusCode, LoadBalancer,
LoadBalancerService, Topology,
},
};
#[derive(Debug, Clone)]
impl std::fmt::Display for OKDLoadBalancerScore {
fn fmt(&self, _f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
todo!()
}
}
#[derive(Debug, Clone, Serialize)]
pub struct OKDLoadBalancerScore {
load_balancer_score: LoadBalancerScore,
}
@@ -80,16 +88,12 @@ impl OKDLoadBalancerScore {
}
}
impl Score for OKDLoadBalancerScore {
fn create_interpret(&self) -> Box<dyn Interpret> {
impl<T: Topology + LoadBalancer> Score<T> for OKDLoadBalancerScore {
fn create_interpret(&self) -> Box<dyn Interpret<T>> {
self.load_balancer_score.create_interpret()
}
fn name(&self) -> String {
"OKDLoadBalancerScore".to_string()
}
fn clone_box(&self) -> Box<dyn Score> {
Box::new(self.clone())
}
}

View File

@@ -1,16 +1,16 @@
use crate::{data::Version, score::Score};
use crate::data::Version;
#[derive(Debug, Clone)]
pub struct OKDUpgradeScore {
current_version: Version,
target_version: Version,
_current_version: Version,
_target_version: Version,
}
impl OKDUpgradeScore {
pub fn new() -> Self {
Self {
current_version: Version::from("4.17.0-okd-scos.0").unwrap(),
target_version: Version::from("").unwrap(),
_current_version: Version::from("4.17.0-okd-scos.0").unwrap(),
_target_version: Version::from("").unwrap(),
}
}
}

View File

@@ -1,6 +1,4 @@
mod shell;
mod upgrade;
pub use shell::*;
pub use upgrade::*;

View File

@@ -1,6 +1,7 @@
use std::sync::Arc;
use async_trait::async_trait;
use serde::Serialize;
use tokio::sync::RwLock;
use crate::{
@@ -8,17 +9,34 @@ use crate::{
interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
inventory::Inventory,
score::Score,
topology::HAClusterTopology,
topology::Topology,
};
#[derive(Debug, Clone)]
pub struct OPNsenseShellCommandScore {
// TODO I am pretty sure we should not hold a direct reference to the
// opnsense_config::Config here.
// This causes a problem with serialization but also could cause many more problems as this
// is mixing concerns of configuration (which is the Responsibility of Scores to define)
// and state/execution which is the responsibility of interprets via topologies to manage
//
// I feel like a better solution would be for this Score/Interpret to require
// Topology + OPNSenseShell trait bindings
pub opnsense: Arc<RwLock<opnsense_config::Config>>,
pub command: String,
}
impl Score for OPNsenseShellCommandScore {
fn create_interpret(&self) -> Box<dyn Interpret> {
impl Serialize for OPNsenseShellCommandScore {
fn serialize<S>(&self, _serializer: S) -> Result<S::Ok, S::Error>
where
S: serde::Serializer,
{
todo!("See comment about moving opnsense_config::Config outside the score")
}
}
impl<T: Topology> Score<T> for OPNsenseShellCommandScore {
fn create_interpret(&self) -> Box<dyn Interpret<T>> {
Box::new(OPNsenseShellInterpret {
status: InterpretStatus::QUEUED,
score: self.clone(),
@@ -28,10 +46,6 @@ impl Score for OPNsenseShellCommandScore {
fn name(&self) -> String {
"OPNSenseShellCommandScore".to_string()
}
fn clone_box(&self) -> Box<dyn Score> {
Box::new(self.clone())
}
}
#[derive(Debug)]
@@ -41,11 +55,11 @@ pub struct OPNsenseShellInterpret {
}
#[async_trait]
impl Interpret for OPNsenseShellInterpret {
impl<T: Topology> Interpret<T> for OPNsenseShellInterpret {
async fn execute(
&self,
_inventory: &Inventory,
_topology: &HAClusterTopology,
_topology: &T,
) -> Result<Outcome, InterpretError> {
let output = self
.score

View File

@@ -1,10 +1,12 @@
use std::sync::Arc;
use serde::Serialize;
use tokio::sync::RwLock;
use crate::{
interpret::{Interpret, InterpretStatus},
score::Score,
topology::Topology,
};
use super::{OPNsenseShellCommandScore, OPNsenseShellInterpret};
@@ -14,8 +16,17 @@ pub struct OPNSenseLaunchUpgrade {
pub opnsense: Arc<RwLock<opnsense_config::Config>>,
}
impl Score for OPNSenseLaunchUpgrade {
fn create_interpret(&self) -> Box<dyn Interpret> {
impl Serialize for OPNSenseLaunchUpgrade {
fn serialize<S>(&self, _serializer: S) -> Result<S::Ok, S::Error>
where
S: serde::Serializer,
{
todo!("See comment in OPNSenseShellCommandScore and apply the same idea here")
}
}
impl<T: Topology> Score<T> for OPNSenseLaunchUpgrade {
fn create_interpret(&self) -> Box<dyn Interpret<T>> {
let score = OPNsenseShellCommandScore {
opnsense: self.opnsense.clone(),
command: "/usr/local/opnsense/scripts/firmware/update.sh".to_string(),
@@ -30,8 +41,4 @@ impl Score for OPNSenseLaunchUpgrade {
fn name(&self) -> String {
"OPNSenseLaunchUpgrade".to_string()
}
fn clone_box(&self) -> Box<dyn Score> {
Box::new(self.clone())
}
}

View File

@@ -1,31 +1,28 @@
use async_trait::async_trait;
use derive_new::new;
use serde::Serialize;
use crate::{
data::{Id, Version},
interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
inventory::Inventory,
score::Score,
topology::{HAClusterTopology, Url},
topology::{Router, TftpServer, Topology, Url},
};
#[derive(Debug, new, Clone)]
#[derive(Debug, new, Clone, Serialize)]
pub struct TftpScore {
files_to_serve: Url,
}
impl Score for TftpScore {
fn create_interpret(&self) -> Box<dyn Interpret> {
impl<T: Topology + TftpServer + Router> Score<T> for TftpScore {
fn create_interpret(&self) -> Box<dyn Interpret<T>> {
Box::new(TftpInterpret::new(self.clone()))
}
fn name(&self) -> String {
"TftpScore".to_string()
}
fn clone_box(&self) -> Box<dyn Score> {
Box::new(self.clone())
}
}
#[derive(Debug, new, Clone)]
@@ -34,18 +31,17 @@ pub struct TftpInterpret {
}
#[async_trait]
impl Interpret for TftpInterpret {
impl<T: Topology + TftpServer + Router> Interpret<T> for TftpInterpret {
async fn execute(
&self,
_inventory: &Inventory,
topology: &HAClusterTopology,
topology: &T,
) -> Result<Outcome, InterpretError> {
let tftp_server = &topology.tftp_server;
tftp_server.ensure_initialized().await?;
tftp_server.set_ip(topology.router.get_gateway()).await?;
tftp_server.serve_files(&self.score.files_to_serve).await?;
tftp_server.commit_config().await?;
tftp_server.reload_restart().await?;
topology.ensure_initialized().await?;
topology.set_ip(topology.get_gateway()).await?;
topology.serve_files(&self.score.files_to_serve).await?;
topology.commit_config().await?;
topology.reload_restart().await?;
Ok(Outcome::success(format!(
"TFTP Server running and serving files from {}",
self.score.files_to_serve