chore: Ran cargo fmt on all code
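
Presumably produced by running the standard formatter from the workspace
root, along the lines of (the exact invocation is not recorded in the
commit):

    cargo fmt --all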

Jean-Gabriel Gill-Couture 2025-01-24 10:45:30 -05:00
parent d6c8650d52
commit 21258cf1af
23 changed files with 72 additions and 55 deletions

View File

@@ -109,11 +109,10 @@ impl From<ExecutorError> for InterpretError {
     }
 }
 
-
-impl From<kube::Error> for InterpretError{
+impl From<kube::Error> for InterpretError {
     fn from(value: kube::Error) -> Self {
         Self {
             msg: format!("InterpretError : {value}"),
         }
     }
 }

View File

@@ -1,7 +1,6 @@
 use derive_new::new;
 use log::info;
-
 use super::{
     interpret::{Interpret, InterpretError, Outcome},
     inventory::Inventory,

View File

@@ -2,7 +2,10 @@ use async_trait::async_trait;
 use harmony_types::net::MacAddress;
 use log::debug;
-use crate::{executors::ExecutorError, topology::{DHCPStaticEntry, DhcpServer, IpAddress, LogicalHost}};
+use crate::{
+    executors::ExecutorError,
+    topology::{DHCPStaticEntry, DhcpServer, IpAddress, LogicalHost},
+};
 use super::OPNSenseFirewall;
@@ -27,10 +30,7 @@ impl DhcpServer for OPNSenseFirewall {
         Ok(())
     }
 
-    async fn remove_static_mapping(
-        &self,
-        _mac: &MacAddress,
-    ) -> Result<(), ExecutorError> {
+    async fn remove_static_mapping(&self, _mac: &MacAddress) -> Result<(), ExecutorError> {
         todo!()
     }

View File

@@ -1,8 +1,11 @@
 use crate::infra::opnsense::Host;
 use crate::infra::opnsense::IpAddress;
 use crate::infra::opnsense::LogicalHost;
+use crate::{
+    executors::ExecutorError,
+    topology::{DnsRecord, DnsServer},
+};
 use async_trait::async_trait;
-use crate::{executors::ExecutorError, topology::{DnsRecord, DnsServer}};
 use super::OPNSenseFirewall;

View File

@@ -1,4 +1,7 @@
-use crate::{executors::ExecutorError, topology::{Firewall, FirewallRule, IpAddress, LogicalHost}};
+use crate::{
+    executors::ExecutorError,
+    topology::{Firewall, FirewallRule, IpAddress, LogicalHost},
+};
 use super::OPNSenseFirewall;

View File

@@ -1,5 +1,5 @@
-use derive_new::new;
 use crate::hardware::ManagementInterface;
+use derive_new::new;
 
 #[derive(new)]
 pub struct OPNSenseManagementInterface {}

View File

@@ -1,10 +1,10 @@
 mod dhcp;
 mod dns;
 mod firewall;
+mod http;
 mod load_balancer;
 mod management;
 mod tftp;
-mod http;
 
 use std::sync::Arc;
 pub use management::*;
@@ -27,12 +27,7 @@ impl OPNSenseFirewall {
         self.host.ip
     }
 
-
-    pub async fn new(
-        host: LogicalHost,
-        port: Option<u16>,
-        username: &str,
-        password: &str,
-    ) -> Self {
+    pub async fn new(host: LogicalHost, port: Option<u16>, username: &str, password: &str) -> Self {
         Self {
             opnsense_config: Arc::new(RwLock::new(
                 opnsense_config::Config::from_credentials(host.ip, port, username, password).await,
View File

@@ -4,6 +4,4 @@ pub mod infra;
 pub mod modules;
 
 #[cfg(test)]
-mod test {
-
-}
+mod test {}

View File

@@ -1,4 +1,3 @@
 use harmony_macros::yaml;
 use k8s_openapi::api::apps::v1::Deployment;
-
 use serde_json::json;

View File

@@ -1,4 +1,2 @@
-pub mod resource;
-
 pub mod deployment;
-
+pub mod resource;

View File

@@ -63,7 +63,7 @@ where
 {
     async fn execute(
         &self,
-        inventory: &Inventory,
+        _inventory: &Inventory,
         topology: &HAClusterTopology,
     ) -> Result<Outcome, InterpretError> {
         topology

View File

@@ -54,12 +54,18 @@ impl Interpret for LoadBalancerInterpret {
         topology.load_balancer.ensure_initialized().await?;
 
         for service in self.score.public_services.iter() {
             info!("Ensuring service exists {service:?}");
-            topology.load_balancer.ensure_service_exists(service).await?;
+            topology
+                .load_balancer
+                .ensure_service_exists(service)
+                .await?;
         }
 
         for service in self.score.private_services.iter() {
             info!("Ensuring private service exists {service:?}");
-            topology.load_balancer.ensure_service_exists(service).await?;
+            topology
+                .load_balancer
+                .ensure_service_exists(service)
+                .await?;
         }
         info!("Applying load balancer configuration");

View File

@@ -1,7 +1,7 @@
 pub mod dhcp;
 pub mod dns;
-pub mod okd;
-pub mod load_balancer;
-pub mod tftp;
 pub mod http;
 pub mod k8s;
+pub mod load_balancer;
+pub mod okd;
+pub mod tftp;

View File

@@ -51,10 +51,7 @@ impl OKDBootstrapLoadBalancerScore {
             },
         }
     }
-    fn topology_to_backend_server(
-        topology: &HAClusterTopology,
-        port: u16,
-    ) -> Vec<BackendServer> {
+    fn topology_to_backend_server(topology: &HAClusterTopology, port: u16) -> Vec<BackendServer> {
         let mut backend: Vec<_> = topology
             .control_plane
             .iter()

View File

@@ -1,7 +1,6 @@
+pub mod bootstrap_dhcp;
+pub mod bootstrap_load_balancer;
 pub mod dhcp;
 pub mod dns;
 pub mod load_balancer;
-pub mod bootstrap_load_balancer;
-pub mod bootstrap_dhcp;
-
 pub mod upgrade;

View File

@@ -3,7 +3,10 @@ use std::sync::Arc;
 use crate::{
     config::{SshConfigManager, SshCredentials, SshOPNSenseShell},
     error::Error,
-    modules::{caddy::CaddyConfig, dhcp::DhcpConfig, dns::DnsConfig, load_balancer::LoadBalancerConfig, tftp::TftpConfig},
+    modules::{
+        caddy::CaddyConfig, dhcp::DhcpConfig, dns::DnsConfig, load_balancer::LoadBalancerConfig,
+        tftp::TftpConfig,
+    },
 };
 use log::{info, trace};
 use opnsense_config_xml::OPNsense;
@@ -50,7 +53,7 @@ impl Config {
         LoadBalancerConfig::new(&mut self.opnsense, self.shell.clone())
     }
 
-    pub async fn upload_files(&self, source: &str, destination: &str) -> Result<String, Error> {
+    pub async fn upload_files(&self, source: &str, destination: &str) -> Result<String, Error> {
         self.shell.upload_folder(source, destination).await
     }

View File

@@ -1,8 +1,8 @@
-mod ssh;
 mod local_file;
+mod ssh;
 
 use async_trait::async_trait;
-pub use ssh::*;
 pub use local_file::*;
+pub use ssh::*;
 
 use crate::Error;

View File

@@ -1,6 +1,6 @@
 mod config;
 mod manager;
 mod shell;
-pub use manager::*;
 pub use config::*;
+pub use manager::*;
 pub use shell::*;

View File

@@ -27,12 +27,15 @@ impl<'a> CaddyConfig<'a> {
     {
         match &mut self.opnsense.pischem.as_mut() {
             Some(pischem) => f(&mut pischem.caddy),
-            None => unimplemented!("Accessing caddy config is not supported when not available yet"),
+            None => {
+                unimplemented!("Accessing caddy config is not supported when not available yet")
+            }
         }
     }
 
     pub fn enable(&mut self, enabled: bool) {
-        self.with_caddy(|caddy| {caddy.general.enabled = enabled as u8;
+        self.with_caddy(|caddy| {
+            caddy.general.enabled = enabled as u8;
             caddy.general.http_port = Some(8080);
             caddy.general.https_port = Some(8443);
         });
@@ -40,8 +43,12 @@ impl<'a> CaddyConfig<'a> {
     pub async fn reload_restart(&self) -> Result<(), Error> {
         self.opnsense_shell.exec("configctl caddy stop").await?;
-        self.opnsense_shell.exec("configctl template reload OPNsense/Caddy").await?;
-        self.opnsense_shell.exec("configctl template reload OPNsense/Caddy/rc.conf.d").await?;
+        self.opnsense_shell
+            .exec("configctl template reload OPNsense/Caddy")
+            .await?;
+        self.opnsense_shell
+            .exec("configctl template reload OPNsense/Caddy/rc.conf.d")
+            .await?;
         self.opnsense_shell.exec("configctl caddy validate").await?;
         self.opnsense_shell.exec("configctl caddy start").await?;
         Ok(())

View File

@@ -1,6 +1,5 @@
 use opnsense_config_xml::{Host, OPNsense};
 
-
 pub struct DnsConfig<'a> {
     opnsense: &'a mut OPNsense,
 }

View File

@@ -59,15 +59,25 @@ impl<'a> LoadBalancerConfig<'a> {
     pub async fn reload_restart(&self) -> Result<(), Error> {
         self.opnsense_shell.exec("configctl haproxy stop").await?;
-        self.opnsense_shell.exec("configctl template reload OPNsense/HAProxy").await?;
-        self.opnsense_shell.exec("configctl template reload OPNsense/Syslog").await?;
-        self.opnsense_shell.exec("/usr/local/sbin/haproxy -c -f /usr/local/etc/haproxy.conf.staging").await?;
+        self.opnsense_shell
+            .exec("configctl template reload OPNsense/HAProxy")
+            .await?;
+        self.opnsense_shell
+            .exec("configctl template reload OPNsense/Syslog")
+            .await?;
+        self.opnsense_shell
+            .exec("/usr/local/sbin/haproxy -c -f /usr/local/etc/haproxy.conf.staging")
+            .await?;
         // This script copies the staging config to production config. I am not 100% sure it is
         // required in the context
-        self.opnsense_shell.exec("/usr/local/opnsense/scripts/OPNsense/HAProxy/setup.sh deploy").await?;
+        self.opnsense_shell
+            .exec("/usr/local/opnsense/scripts/OPNsense/HAProxy/setup.sh deploy")
+            .await?;
-        self.opnsense_shell.exec("configctl haproxy configtest").await?;
+        self.opnsense_shell
+            .exec("configctl haproxy configtest")
+            .await?;
         self.opnsense_shell.exec("configctl haproxy start").await?;
         Ok(())
     }

View File

@@ -1,5 +1,5 @@
+pub mod caddy;
 pub mod dhcp;
 pub mod dns;
 pub mod load_balancer;
 pub mod tftp;
-pub mod caddy;

View File

@@ -41,7 +41,9 @@ impl<'a> TftpConfig<'a> {
     pub async fn reload_restart(&self) -> Result<(), Error> {
         self.opnsense_shell.exec("configctl tftp stop").await?;
-        self.opnsense_shell.exec("configctl template reload OPNsense/Tftp").await?;
+        self.opnsense_shell
+            .exec("configctl template reload OPNsense/Tftp")
+            .await?;
         self.opnsense_shell.exec("configctl tftp start").await?;
         Ok(())
     }