chore: reformat & clippy cleanup (#96)
Some checks failed
Run Check Script / check (pull_request): cancelled
Run Check Script / check (push): cancelled
Compile and package harmony_composer / package_harmony_composer (push): cancelled

Clippy now runs as part of the `check` script in the pipeline.
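
Most of the diff below is a mechanical application of clippy suggestions: needless `return`s dropped, `match` blocks collapsed to `matches!`, `.get(0)` replaced with `.first()`, `format!` on plain literals replaced with `.to_string()`, and struct field reassignment rewritten as struct literals. A representative before/after sketch of the dominant pattern, using hypothetical names (not taken verbatim from the diff):

    enum Status { Success, Failed }

    // Before: clippy::needless_return and clippy::match_like_matches_macro fire here
    fn is_success_old(status: &Status) -> bool {
        match status {
            Status::Success => return true,
            _ => return false,
        }
    }

    // After: a single `matches!` expression, no explicit `return`
    fn is_success(status: &Status) -> bool {
        matches!(status, Status::Success)
    }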

Co-authored-by: Ian Letourneau <letourneau.ian@gmail.com>
Reviewed-on: https://git.nationtech.io/NationTech/harmony/pulls/96
Ian Letourneau 2025-08-06 15:57:14 +00:00
parent 024084859e
commit 440c1bce12
68 changed files with 342 additions and 350 deletions

View File

@@ -13,6 +13,7 @@ WORKDIR /app
 RUN rustup target add x86_64-pc-windows-gnu
 RUN rustup target add x86_64-unknown-linux-gnu
 RUN rustup component add rustfmt
+RUN rustup component add clippy
 RUN apt update
@@ -22,4 +23,4 @@ RUN apt install -y nodejs docker.io mingw-w64
 COPY --from=build /app/target/release/harmony_composer .
 ENTRYPOINT ["/app/harmony_composer"]

View File

@@ -1,5 +1,7 @@
 #!/bin/sh
 set -e
 cargo check --all-targets --all-features --keep-going
 cargo fmt --check
+cargo clippy
 cargo test

View File

@@ -1,17 +1,11 @@
-use std::{path::PathBuf, sync::Arc};
+use std::{path::PathBuf, str::FromStr, sync::Arc};
 use harmony::{
     data::Id,
     inventory::Inventory,
-    maestro::Maestro,
     modules::{
-        application::{
-            ApplicationScore, RustWebFramework, RustWebapp,
-            features::{ContinuousDelivery, Monitoring},
-        },
-        monitoring::alert_channel::{
-            discord_alert_channel::DiscordWebhook, webhook_receiver::WebhookReceiver,
-        },
+        application::{ApplicationScore, RustWebFramework, RustWebapp, features::Monitoring},
+        monitoring::alert_channel::webhook_receiver::WebhookReceiver,
         tenant::TenantScore,
     },
     topology::{K8sAnywhereTopology, Url, tenant::TenantConfig},
@@ -25,7 +19,7 @@ async fn main() {
     //the TenantConfig.name must match
     let tenant = TenantScore {
         config: TenantConfig {
-            id: Id::from_str("test-tenant-id"),
+            id: Id::from_str("test-tenant-id").unwrap(),
             name: "example-monitoring".to_string(),
             ..Default::default()
         },

View File

@@ -125,40 +125,47 @@ spec:
         name: nginx"#,
     )
     .unwrap();
-    return deployment;
+    deployment
 }

 fn nginx_deployment_2() -> Deployment {
-    let mut pod_template = PodTemplateSpec::default();
-    pod_template.metadata = Some(ObjectMeta {
-        labels: Some(BTreeMap::from([(
-            "app".to_string(),
-            "nginx-test".to_string(),
-        )])),
-        ..Default::default()
-    });
-    pod_template.spec = Some(PodSpec {
-        containers: vec![Container {
-            name: "nginx".to_string(),
-            image: Some("nginx".to_string()),
-            ..Default::default()
-        }],
-        ..Default::default()
-    });
-    let mut spec = DeploymentSpec::default();
-    spec.template = pod_template;
-    spec.selector = LabelSelector {
-        match_expressions: None,
-        match_labels: Some(BTreeMap::from([(
-            "app".to_string(),
-            "nginx-test".to_string(),
-        )])),
-    };
-    let mut deployment = Deployment::default();
-    deployment.spec = Some(spec);
-    deployment.metadata.name = Some("nginx-test".to_string());
-    deployment
+    let pod_template = PodTemplateSpec {
+        metadata: Some(ObjectMeta {
+            labels: Some(BTreeMap::from([(
+                "app".to_string(),
+                "nginx-test".to_string(),
+            )])),
+            ..Default::default()
+        }),
+        spec: Some(PodSpec {
+            containers: vec![Container {
+                name: "nginx".to_string(),
+                image: Some("nginx".to_string()),
+                ..Default::default()
+            }],
+            ..Default::default()
+        }),
+    };
+    let spec = DeploymentSpec {
+        template: pod_template,
+        selector: LabelSelector {
+            match_expressions: None,
+            match_labels: Some(BTreeMap::from([(
+                "app".to_string(),
+                "nginx-test".to_string(),
+            )])),
+        },
+        ..Default::default()
+    };
+    Deployment {
+        spec: Some(spec),
+        metadata: ObjectMeta {
+            name: Some("nginx-test".to_string()),
+            ..Default::default()
+        },
+        ..Default::default()
+    }
 }

 fn nginx_deployment() -> Deployment {

View File

@@ -23,7 +23,7 @@ async fn main() {
         // This config can be extended as needed for more complicated configurations
         config: LAMPConfig {
             project_root: "./php".into(),
-            database_size: format!("4Gi").into(),
+            database_size: "4Gi".to_string().into(),
             ..Default::default()
         },
     };

View File

@@ -1,4 +1,4 @@
-use std::collections::HashMap;
+use std::{collections::HashMap, str::FromStr};

 use harmony::{
     data::Id,
@@ -28,7 +28,7 @@ use harmony::{
 async fn main() {
     let tenant = TenantScore {
         config: TenantConfig {
-            id: Id::from_string("1234".to_string()),
+            id: Id::from_str("1234").unwrap(),
             name: "test-tenant".to_string(),
             resource_limits: ResourceLimits {
                 cpu_request_cores: 6.0,

View File

@@ -1,3 +1,5 @@
+use std::str::FromStr;
+
 use harmony::{
     data::Id,
     inventory::Inventory,
@@ -9,7 +11,7 @@ use harmony::{
 async fn main() {
     let tenant = TenantScore {
         config: TenantConfig {
-            id: Id::from_str("test-tenant-id"),
+            id: Id::from_str("test-tenant-id").unwrap(),
             name: "testtenant".to_string(),
             ..Default::default()
         },

View File

@@ -1,5 +1,6 @@
 use rand::distr::Alphanumeric;
 use rand::distr::SampleString;
+use std::str::FromStr;
 use std::time::SystemTime;
 use std::time::UNIX_EPOCH;
@@ -23,13 +24,13 @@ pub struct Id {
     value: String,
 }

-impl Id {
-    pub fn from_string(value: String) -> Self {
-        Self { value }
-    }
-
-    pub fn from_str(value: &str) -> Self {
-        Self::from_string(value.to_string())
+impl FromStr for Id {
+    type Err = ();
+
+    fn from_str(s: &str) -> Result<Self, Self::Err> {
+        Ok(Id {
+            value: s.to_string(),
+        })
     }
 }
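
With `Id` now implementing the standard `FromStr` trait instead of exposing inherent `from_string`/`from_str` methods, call sites receive a `Result` and must unwrap or propagate it, which is why the example binaries above gained `.unwrap()`. A minimal usage sketch, assuming the `Id` shown in this hunk:

    use std::str::FromStr;

    let id = Id::from_str("test-tenant-id").unwrap(); // Err is `()`, so this cannot fail
    let same: Id = "test-tenant-id".parse().unwrap(); // str::parse also goes through FromStr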

View File

@@ -47,7 +47,7 @@ impl serde::Serialize for Version {
 impl std::fmt::Display for Version {
     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
-        return self.value.fmt(f);
+        self.value.fmt(f)
     }
 }

View File

@@ -35,10 +35,9 @@ impl PhysicalHost {
     pub fn cluster_mac(&self) -> MacAddress {
         self.network
-            .get(0)
+            .first()
             .expect("Cluster physical host should have a network interface")
             .mac_address
-            .clone()
     }

     pub fn cpu(mut self, cpu_count: Option<u64>) -> Self {

View File

@@ -70,10 +70,7 @@ impl<T: Topology> Maestro<T> {
     fn is_topology_initialized(&self) -> bool {
         let result = self.topology_preparation_result.lock().unwrap();
         if let Some(outcome) = result.as_ref() {
-            match outcome.status {
-                InterpretStatus::SUCCESS => return true,
-                _ => return false,
-            }
+            matches!(outcome.status, InterpretStatus::SUCCESS)
         } else {
             false
         }

View File

@@ -16,7 +16,7 @@ pub trait SerializeScore<T: Topology> {
     fn serialize(&self) -> Value;
 }

-impl<'de, S, T> SerializeScore<T> for S
+impl<S, T> SerializeScore<T> for S
 where
     T: Topology,
     S: Score<T> + Serialize,
@@ -24,7 +24,7 @@ where
     fn serialize(&self) -> Value {
         // TODO not sure if this is the right place to handle the error or it should bubble
         // up?
-        serde_value::to_value(&self).expect("Score should serialize successfully")
+        serde_value::to_value(self).expect("Score should serialize successfully")
     }
 }

View File

@@ -1,5 +1,4 @@
 use derive_new::new;
-use futures_util::StreamExt;
 use k8s_openapi::{
     ClusterResourceScope, NamespaceResourceScope,
     api::{apps::v1::Deployment, core::v1::Pod},
@@ -18,7 +17,7 @@ use kube::{
 };
 use log::{debug, error, trace};
 use serde::{Serialize, de::DeserializeOwned};
-use similar::{DiffableStr, TextDiff};
+use similar::TextDiff;

 #[derive(new, Clone)]
 pub struct K8sClient {
@@ -67,13 +66,13 @@ impl K8sClient {
         }

         let establish = await_condition(api, name.as_str(), conditions::is_deployment_completed());
-        let t = if let Some(t) = timeout { t } else { 300 };
+        let t = timeout.unwrap_or(300);
         let res = tokio::time::timeout(std::time::Duration::from_secs(t), establish).await;
-        if let Ok(r) = res {
-            return Ok(());
+        if res.is_ok() {
+            Ok(())
         } else {
-            return Err("timed out while waiting for deployment".to_string());
+            Err("timed out while waiting for deployment".to_string())
         }
     }
@@ -112,7 +111,7 @@ impl K8sClient {
             .await;

         match res {
-            Err(e) => return Err(e.to_string()),
+            Err(e) => Err(e.to_string()),
             Ok(mut process) => {
                 let status = process
                     .take_status()
@@ -122,13 +121,9 @@ impl K8sClient {
                 if let Some(s) = status.status {
                     debug!("Status: {}", s);
-                    if s == "Success" {
-                        return Ok(());
-                    } else {
-                        return Err(s);
-                    }
+                    if s == "Success" { Ok(()) } else { Err(s) }
                 } else {
-                    return Err("Couldn't get inner status of pod exec".to_string());
+                    Err("Couldn't get inner status of pod exec".to_string())
                 }
             }
         }
@@ -169,8 +164,9 @@ impl K8sClient {
         trace!("Received current value {current:#?}");
         // The resource exists, so we calculate and display a diff.
         println!("\nPerforming dry-run for resource: '{}'", name);
-        let mut current_yaml = serde_yaml::to_value(&current)
-            .expect(&format!("Could not serialize current value : {current:#?}"));
+        let mut current_yaml = serde_yaml::to_value(&current).unwrap_or_else(|_| {
+            panic!("Could not serialize current value : {current:#?}")
+        });

         if current_yaml.is_mapping() && current_yaml.get("status").is_some() {
             let map = current_yaml.as_mapping_mut().unwrap();
             let removed = map.remove_entry("status");
@@ -237,7 +233,7 @@ impl K8sClient {
         }
     }

-    pub async fn apply_many<K>(&self, resource: &Vec<K>, ns: Option<&str>) -> Result<Vec<K>, Error>
+    pub async fn apply_many<K>(&self, resource: &[K], ns: Option<&str>) -> Result<Vec<K>, Error>
     where
         K: Resource + Clone + std::fmt::Debug + DeserializeOwned + serde::Serialize,
         <K as Resource>::Scope: ApplyStrategy<K>,
@@ -253,7 +249,7 @@ impl K8sClient {
     pub async fn apply_yaml_many(
         &self,
-        yaml: &Vec<serde_yaml::Value>,
+        #[allow(clippy::ptr_arg)] yaml: &Vec<serde_yaml::Value>,
         ns: Option<&str>,
     ) -> Result<(), Error> {
         for y in yaml.iter() {

View File

@@ -87,7 +87,9 @@ impl PrometheusApplicationMonitoring<CRDPrometheus> for K8sAnywhereTopology {
             .execute(inventory, self)
             .await?;

-        Ok(Outcome::success(format!("No action, working on cluster ")))
+        Ok(Outcome::success(
+            "No action, working on cluster ".to_string(),
+        ))
     }
 }
@@ -124,7 +126,7 @@ impl K8sAnywhereTopology {
     ) -> K8sPrometheusCRDAlertingScore {
         K8sPrometheusCRDAlertingScore {
             sender,
-            receivers: receivers.unwrap_or_else(Vec::new),
+            receivers: receivers.unwrap_or_default(),
             service_monitors: vec![],
             prometheus_rules: vec![],
         }
@@ -176,7 +178,7 @@ impl K8sAnywhereTopology {
         } else {
             if let Some(kubeconfig) = &k8s_anywhere_config.kubeconfig {
                 debug!("Loading kubeconfig {kubeconfig}");
-                match self.try_load_kubeconfig(&kubeconfig).await {
+                match self.try_load_kubeconfig(kubeconfig).await {
                     Some(client) => {
                         return Ok(Some(K8sState {
                             client: Arc::new(client),
@@ -232,7 +234,7 @@ impl K8sAnywhereTopology {
     }

     async fn ensure_k8s_tenant_manager(&self) -> Result<(), String> {
-        if let Some(_) = self.tenant_manager.get() {
+        if self.tenant_manager.get().is_some() {
             return Ok(());
         }
@@ -366,7 +368,7 @@ impl Topology for K8sAnywhereTopology {
         self.ensure_k8s_tenant_manager()
             .await
-            .map_err(|e| InterpretError::new(e))?;
+            .map_err(InterpretError::new)?;

         match self.is_helm_available() {
             Ok(()) => Ok(Outcome::success(format!(

View File

@@ -88,7 +88,7 @@ impl Serialize for Url {
     {
         match self {
             Url::LocalFolder(path) => serializer.serialize_str(path),
-            Url::Url(url) => serializer.serialize_str(&url.as_str()),
+            Url::Url(url) => serializer.serialize_str(url.as_str()),
         }
     }
 }
} }

View File

@@ -27,11 +27,11 @@ pub struct UnmanagedRouter {
 impl Router for UnmanagedRouter {
     fn get_gateway(&self) -> IpAddress {
-        self.gateway.clone()
+        self.gateway
     }

     fn get_cidr(&self) -> Ipv4Cidr {
-        self.cidr.clone()
+        self.cidr
     }

     fn get_host(&self) -> LogicalHost {

View File

@@ -309,19 +309,19 @@ impl K8sTenantManager {
         let ports: Option<Vec<NetworkPolicyPort>> =
             c.1.as_ref().map(|spec| match &spec.data {
                 super::PortSpecData::SinglePort(port) => vec![NetworkPolicyPort {
-                    port: Some(IntOrString::Int(port.clone().into())),
+                    port: Some(IntOrString::Int((*port).into())),
                     ..Default::default()
                 }],
                 super::PortSpecData::PortRange(start, end) => vec![NetworkPolicyPort {
-                    port: Some(IntOrString::Int(start.clone().into())),
-                    end_port: Some(end.clone().into()),
+                    port: Some(IntOrString::Int((*start).into())),
+                    end_port: Some((*end).into()),
                     protocol: None, // Not currently supported by Harmony
                 }],
                 super::PortSpecData::ListOfPorts(items) => items
                     .iter()
                     .map(|i| NetworkPolicyPort {
-                        port: Some(IntOrString::Int(i.clone().into())),
+                        port: Some(IntOrString::Int((*i).into())),
                         ..Default::default()
                     })
                     .collect(),
@@ -366,19 +366,19 @@ impl K8sTenantManager {
         let ports: Option<Vec<NetworkPolicyPort>> =
             c.1.as_ref().map(|spec| match &spec.data {
                 super::PortSpecData::SinglePort(port) => vec![NetworkPolicyPort {
-                    port: Some(IntOrString::Int(port.clone().into())),
+                    port: Some(IntOrString::Int((*port).into())),
                     ..Default::default()
                 }],
                 super::PortSpecData::PortRange(start, end) => vec![NetworkPolicyPort {
-                    port: Some(IntOrString::Int(start.clone().into())),
-                    end_port: Some(end.clone().into()),
+                    port: Some(IntOrString::Int((*start).into())),
+                    end_port: Some((*end).into()),
                     protocol: None, // Not currently supported by Harmony
                 }],
                 super::PortSpecData::ListOfPorts(items) => items
                     .iter()
                     .map(|i| NetworkPolicyPort {
-                        port: Some(IntOrString::Int(i.clone().into())),
+                        port: Some(IntOrString::Int((*i).into())),
                         ..Default::default()
                     })
                     .collect(),

View File

@@ -60,7 +60,7 @@ impl DnsServer for OPNSenseFirewall {
     }

     fn get_ip(&self) -> IpAddress {
-        OPNSenseFirewall::get_ip(&self)
+        OPNSenseFirewall::get_ip(self)
     }

     fn get_host(&self) -> LogicalHost {

View File

@@ -48,7 +48,7 @@ impl HttpServer for OPNSenseFirewall {
     async fn ensure_initialized(&self) -> Result<(), ExecutorError> {
         let mut config = self.opnsense_config.write().await;
         let caddy = config.caddy();
-        if let None = caddy.get_full_config() {
+        if caddy.get_full_config().is_none() {
             info!("Http config not available in opnsense config, installing package");
             config.install_package("os-caddy").await.map_err(|e| {
                 ExecutorError::UnexpectedError(format!(

View File

@@ -121,10 +121,12 @@ pub(crate) fn haproxy_xml_config_to_harmony_loadbalancer(
             LoadBalancerService {
                 backend_servers,
-                listening_port: frontend.bind.parse().expect(&format!(
-                    "HAProxy frontend address should be a valid SocketAddr, got {}",
-                    frontend.bind
-                )),
+                listening_port: frontend.bind.parse().unwrap_or_else(|_| {
+                    panic!(
+                        "HAProxy frontend address should be a valid SocketAddr, got {}",
+                        frontend.bind
+                    )
+                }),
                 health_check,
             }
         })
@@ -167,28 +169,28 @@ pub(crate) fn get_health_check_for_backend(
         None => return None,
     };

-    let haproxy_health_check = match haproxy
+    let haproxy_health_check = haproxy
         .healthchecks
         .healthchecks
         .iter()
-        .find(|h| &h.uuid == health_check_uuid)
-    {
-        Some(health_check) => health_check,
-        None => return None,
-    };
+        .find(|h| &h.uuid == health_check_uuid)?;

     let binding = haproxy_health_check.health_check_type.to_uppercase();
     let uppercase = binding.as_str();
     match uppercase {
         "TCP" => {
             if let Some(checkport) = haproxy_health_check.checkport.content.as_ref() {
-                if checkport.len() > 0 {
-                    return Some(HealthCheck::TCP(Some(checkport.parse().expect(&format!(
-                        "HAProxy check port should be a valid port number, got {checkport}"
-                    )))));
+                if !checkport.is_empty() {
+                    return Some(HealthCheck::TCP(Some(checkport.parse().unwrap_or_else(
+                        |_| {
+                            panic!(
+                                "HAProxy check port should be a valid port number, got {checkport}"
+                            )
+                        },
+                    ))));
                 }
             }
-            return Some(HealthCheck::TCP(None));
+            Some(HealthCheck::TCP(None))
         }
         "HTTP" => {
             let path: String = haproxy_health_check
@@ -355,16 +357,13 @@ mod tests {
         // Create an HAProxy instance with servers
         let mut haproxy = HAProxy::default();
-        let mut server = HAProxyServer::default();
-        server.uuid = "server1".to_string();
-        server.address = "192.168.1.1".to_string();
-        server.port = 80;
+        let server = HAProxyServer {
+            uuid: "server1".to_string(),
+            address: "192.168.1.1".to_string(),
+            port: 80,
+            ..Default::default()
+        };
         haproxy.servers.servers.push(server);
-        let mut server = HAProxyServer::default();
-        server.uuid = "server3".to_string();
-        server.address = "192.168.1.3".to_string();
-        server.port = 8080;

         // Call the function
         let result = get_servers_for_backend(&backend, &haproxy);
@@ -384,10 +383,12 @@ mod tests {
         let backend = HAProxyBackend::default();
         // Create an HAProxy instance with servers
         let mut haproxy = HAProxy::default();
-        let mut server = HAProxyServer::default();
-        server.uuid = "server1".to_string();
-        server.address = "192.168.1.1".to_string();
-        server.port = 80;
+        let server = HAProxyServer {
+            uuid: "server1".to_string(),
+            address: "192.168.1.1".to_string(),
+            port: 80,
+            ..Default::default()
+        };
         haproxy.servers.servers.push(server);

         // Call the function
         let result = get_servers_for_backend(&backend, &haproxy);
@@ -402,10 +403,12 @@ mod tests {
         backend.linked_servers.content = Some("server4,server5".to_string());
         // Create an HAProxy instance with servers
         let mut haproxy = HAProxy::default();
-        let mut server = HAProxyServer::default();
-        server.uuid = "server1".to_string();
-        server.address = "192.168.1.1".to_string();
-        server.port = 80;
+        let server = HAProxyServer {
+            uuid: "server1".to_string(),
+            address: "192.168.1.1".to_string(),
+            port: 80,
+            ..Default::default()
+        };
         haproxy.servers.servers.push(server);

         // Call the function
         let result = get_servers_for_backend(&backend, &haproxy);
@@ -416,20 +419,28 @@ mod tests {
     #[test]
     fn test_get_servers_for_backend_multiple_linked_servers() {
         // Create a backend with multiple linked servers
+        #[allow(clippy::field_reassign_with_default)]
         let mut backend = HAProxyBackend::default();
         backend.linked_servers.content = Some("server1,server2".to_string());
-        //
+
         // Create an HAProxy instance with matching servers
         let mut haproxy = HAProxy::default();
-        let mut server = HAProxyServer::default();
-        server.uuid = "server1".to_string();
-        server.address = "some-hostname.test.mcd".to_string();
-        server.port = 80;
+        let server = HAProxyServer {
+            uuid: "server1".to_string(),
+            address: "some-hostname.test.mcd".to_string(),
+            port: 80,
+            ..Default::default()
+        };
         haproxy.servers.servers.push(server);
-        let mut server = HAProxyServer::default();
-        server.uuid = "server2".to_string();
-        server.address = "192.168.1.2".to_string();
-        server.port = 8080;
+
+        let server = HAProxyServer {
+            uuid: "server2".to_string(),
+            address: "192.168.1.2".to_string(),
+            port: 8080,
+            ..Default::default()
+        };
         haproxy.servers.servers.push(server);

         // Call the function
         let result = get_servers_for_backend(&backend, &haproxy);
         // Check the result

View File

@@ -58,7 +58,7 @@ impl TftpServer for OPNSenseFirewall {
     async fn ensure_initialized(&self) -> Result<(), ExecutorError> {
         let mut config = self.opnsense_config.write().await;
         let tftp = config.tftp();
-        if let None = tftp.get_full_config() {
+        if tftp.get_full_config().is_none() {
             info!("Tftp config not available in opnsense config, installing package");
             config.install_package("os-tftp").await.map_err(|e| {
                 ExecutorError::UnexpectedError(format!(

View File

@@ -13,7 +13,7 @@ pub trait ApplicationFeature<T: Topology>:
     fn name(&self) -> String;
 }

-trait ApplicationFeatureClone<T: Topology> {
+pub trait ApplicationFeatureClone<T: Topology> {
     fn clone_box(&self) -> Box<dyn ApplicationFeature<T>>;
 }
@@ -27,7 +27,7 @@ where
 }

 impl<T: Topology> Serialize for Box<dyn ApplicationFeature<T>> {
-    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
+    fn serialize<S>(&self, _serializer: S) -> Result<S::Ok, S::Error>
     where
         S: serde::Serializer,
     {

View File

@@ -184,12 +184,11 @@ impl ArgoApplication {
     pub fn to_yaml(&self) -> serde_yaml::Value {
         let name = &self.name;
         let namespace = if let Some(ns) = self.namespace.as_ref() {
-            &ns
+            ns
         } else {
             "argocd"
         };
         let project = &self.project;
-        let source = &self.source;

         let yaml_str = format!(
             r#"
@@ -228,7 +227,7 @@ spec:
             serde_yaml::to_value(&self.source).expect("couldn't serialize source to value");
         let sync_policy = serde_yaml::to_value(&self.sync_policy)
             .expect("couldn't serialize sync_policy to value");
-        let revision_history_limit = serde_yaml::to_value(&self.revision_history_limit)
+        let revision_history_limit = serde_yaml::to_value(self.revision_history_limit)
             .expect("couldn't serialize revision_history_limit to value");

         spec.insert(

View File

@@ -10,7 +10,7 @@ use crate::{
     data::Version,
     inventory::Inventory,
     modules::application::{
-        Application, ApplicationFeature, HelmPackage, OCICompliant,
+        ApplicationFeature, HelmPackage, OCICompliant,
         features::{ArgoApplication, ArgoHelmScore},
     },
     score::Score,

View File

@@ -986,7 +986,7 @@ commitServer:
     );

     HelmChartScore {
-        namespace: Some(NonBlankString::from_str(&namespace).unwrap()),
+        namespace: Some(NonBlankString::from_str(namespace).unwrap()),
         release_name: NonBlankString::from_str("argo-cd").unwrap(),
         chart_name: NonBlankString::from_str("argo/argo-cd").unwrap(),
         chart_version: Some(NonBlankString::from_str("8.1.2").unwrap()),

View File

@@ -81,7 +81,7 @@ impl<A: Application, T: Topology + std::fmt::Debug> Interpret<T> for Application
 }

 impl Serialize for dyn Application {
-    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
+    fn serialize<S>(&self, _serializer: S) -> Result<S::Ok, S::Error>
     where
         S: serde::Serializer,
     {

View File

@@ -1,5 +1,5 @@
 use std::fs;
-use std::path::PathBuf;
+use std::path::{Path, PathBuf};
 use std::process;
 use std::sync::Arc;
@@ -174,7 +174,7 @@ impl RustWebapp {
             .platform("linux/x86_64");

         let mut temp_tar_builder = tar::Builder::new(Vec::new());
-        let _ = temp_tar_builder
+        temp_tar_builder
             .append_dir_all("", self.project_root.clone())
             .unwrap();
         let archive = temp_tar_builder
@@ -530,10 +530,7 @@ spec:
     }

     /// Packages a Helm chart directory into a .tgz file.
-    fn package_helm_chart(
-        &self,
-        chart_dir: &PathBuf,
-    ) -> Result<PathBuf, Box<dyn std::error::Error>> {
+    fn package_helm_chart(&self, chart_dir: &Path) -> Result<PathBuf, Box<dyn std::error::Error>> {
         let chart_dirname = chart_dir.file_name().expect("Should find a chart dirname");
         debug!(
             "Launching `helm package {}` cli with CWD {}",
@@ -546,14 +543,13 @@ spec:
         );
         let output = process::Command::new("helm")
             .args(["package", chart_dirname.to_str().unwrap()])
-            .current_dir(&self.project_root.join(".harmony_generated").join("helm")) // Run package from the parent dir
+            .current_dir(self.project_root.join(".harmony_generated").join("helm")) // Run package from the parent dir
             .output()?;

         self.check_output(&output, "Failed to package Helm chart")?;

         // Helm prints the path of the created chart to stdout.
         let tgz_name = String::from_utf8(output.stdout)?
-            .trim()
             .split_whitespace()
             .last()
             .unwrap_or_default()
@@ -573,7 +569,7 @@ spec:
     /// Pushes a packaged Helm chart to an OCI registry.
     fn push_helm_chart(
         &self,
-        packaged_chart_path: &PathBuf,
+        packaged_chart_path: &Path,
     ) -> Result<String, Box<dyn std::error::Error>> {
         // The chart name is the file stem of the .tgz file
         let chart_file_name = packaged_chart_path.file_stem().unwrap().to_str().unwrap();

View File

@@ -41,6 +41,6 @@ impl<T: Topology + HelmCommand> Score<T> for CertManagerHelmScore {
     }

     fn name(&self) -> String {
-        format!("CertManagerHelmScore")
+        "CertManagerHelmScore".to_string()
     }
 }

View File

@@ -111,7 +111,7 @@ impl DhcpInterpret {
         let boot_filename_outcome = match &self.score.boot_filename {
             Some(boot_filename) => {
-                dhcp_server.set_boot_filename(&boot_filename).await?;
+                dhcp_server.set_boot_filename(boot_filename).await?;
                 Outcome::new(
                     InterpretStatus::SUCCESS,
                     format!("Dhcp Interpret Set boot filename to {boot_filename}"),
@@ -122,7 +122,7 @@ impl DhcpInterpret {
         let filename_outcome = match &self.score.filename {
             Some(filename) => {
-                dhcp_server.set_filename(&filename).await?;
+                dhcp_server.set_filename(filename).await?;
                 Outcome::new(
                     InterpretStatus::SUCCESS,
                     format!("Dhcp Interpret Set filename to {filename}"),
@@ -133,7 +133,7 @@ impl DhcpInterpret {
         let filename64_outcome = match &self.score.filename64 {
             Some(filename64) => {
-                dhcp_server.set_filename64(&filename64).await?;
+                dhcp_server.set_filename64(filename64).await?;
                 Outcome::new(
                     InterpretStatus::SUCCESS,
                     format!("Dhcp Interpret Set filename64 to {filename64}"),
@@ -144,7 +144,7 @@ impl DhcpInterpret {
         let filenameipxe_outcome = match &self.score.filenameipxe {
             Some(filenameipxe) => {
-                dhcp_server.set_filenameipxe(&filenameipxe).await?;
+                dhcp_server.set_filenameipxe(filenameipxe).await?;
                 Outcome::new(
                     InterpretStatus::SUCCESS,
                     format!("Dhcp Interpret Set filenameipxe to {filenameipxe}"),
@@ -209,7 +209,7 @@ impl<T: DhcpServer> Interpret<T> for DhcpInterpret {
         Ok(Outcome::new(
             InterpretStatus::SUCCESS,
-            format!("Dhcp Interpret execution successful"),
+            "Dhcp Interpret execution successful".to_string(),
         ))
     }
 }
} }

View File

@@ -112,7 +112,7 @@ impl<T: Topology + DnsServer> Interpret<T> for DnsInterpret {
         Ok(Outcome::new(
             InterpretStatus::SUCCESS,
-            format!("Dns Interpret execution successful"),
+            "Dns Interpret execution successful".to_string(),
         ))
     }
 }

View File

@@ -90,14 +90,10 @@ impl HelmChartInterpret {
         );

         match add_output.status.success() {
-            true => {
-                return Ok(());
-            }
-            false => {
-                return Err(InterpretError::new(format!(
-                    "Failed to add helm repository!\n{full_output}"
-                )));
-            }
+            true => Ok(()),
+            false => Err(InterpretError::new(format!(
+                "Failed to add helm repository!\n{full_output}"
+            ))),
         }
     }
 }
@@ -212,7 +208,7 @@ impl<T: Topology + HelmCommand> Interpret<T> for HelmChartInterpret {
         }

         let res = helm_executor.install_or_upgrade(
-            &ns,
+            ns,
             &self.score.release_name,
             &self.score.chart_name,
             self.score.chart_version.as_ref(),

View File

@@ -77,14 +77,11 @@ impl HelmCommandExecutor {
             )?;
         }

-        let out = match self.clone().run_command(
-            self.chart
-                .clone()
-                .helm_args(self.globals.chart_home.clone().unwrap()),
-        ) {
-            Ok(out) => out,
-            Err(e) => return Err(e),
-        };
+        let out = self.clone().run_command(
+            self.chart
+                .clone()
+                .helm_args(self.globals.chart_home.clone().unwrap()),
+        )?;

         // TODO: don't use unwrap here
         let s = String::from_utf8(out.stdout).unwrap();
@@ -98,14 +95,11 @@ impl HelmCommandExecutor {
     }

     pub fn version(self) -> Result<String, std::io::Error> {
-        let out = match self.run_command(vec![
-            "version".to_string(),
-            "-c".to_string(),
-            "--short".to_string(),
-        ]) {
-            Ok(out) => out,
-            Err(e) => return Err(e),
-        };
+        let out = self.run_command(vec![
+            "version".to_string(),
+            "-c".to_string(),
+            "--short".to_string(),
+        ])?;

         // TODO: don't use unwrap
         Ok(String::from_utf8(out.stdout).unwrap())
@@ -129,15 +123,11 @@ impl HelmCommandExecutor {
             None => PathBuf::from(TempDir::new()?.path()),
         };

-        match self.chart.values_inline {
-            Some(yaml_str) => {
-                let tf: TempFile;
-                tf = temp_file::with_contents(yaml_str.as_bytes());
-                self.chart
-                    .additional_values_files
-                    .push(PathBuf::from(tf.path()));
-            }
-            None => (),
+        if let Some(yaml_str) = self.chart.values_inline {
+            let tf: TempFile = temp_file::with_contents(yaml_str.as_bytes());
+            self.chart
+                .additional_values_files
+                .push(PathBuf::from(tf.path()));
         };

         self.env.insert(
@@ -180,9 +170,9 @@ impl HelmChart {
         match self.repo {
             Some(r) => {
                 if r.starts_with("oci://") {
-                    args.push(String::from(
-                        r.trim_end_matches("/").to_string() + "/" + self.name.clone().as_str(),
-                    ));
+                    args.push(
+                        r.trim_end_matches("/").to_string() + "/" + self.name.clone().as_str(),
+                    );
                 } else {
                     args.push("--repo".to_string());
                     args.push(r.to_string());
@@ -193,12 +183,9 @@ impl HelmChart {
             None => args.push(self.name),
         };

-        match self.version {
-            Some(v) => {
-                args.push("--version".to_string());
-                args.push(v.to_string());
-            }
-            None => (),
+        if let Some(v) = self.version {
+            args.push("--version".to_string());
+            args.push(v.to_string());
         }

         args

View File

@@ -1,7 +1,7 @@
 use std::path::PathBuf;

 use async_trait::async_trait;
-use log::{debug, info};
+use log::debug;
 use serde::Serialize;

 use crate::{

View File

@@ -135,6 +135,8 @@ impl<T: Topology + K8sclient + HelmCommand> Interpret<T> for LAMPInterpret {
         info!("LAMP deployment_score {deployment_score:?}");

+        let ingress_path = ingress_path!("/");
+
         let lamp_ingress = K8sIngressScore {
             name: fqdn!("lamp-ingress"),
             host: fqdn!("test"),
@@ -144,7 +146,7 @@ impl<T: Topology + K8sclient + HelmCommand> Interpret<T> for LAMPInterpret {
                 .as_str()
             ),
             port: 8080,
-            path: Some(ingress_path!("/")),
+            path: Some(ingress_path),
             path_type: None,
             namespace: self
                 .get_namespace()

View File

@@ -18,7 +18,7 @@ use crate::{
 #[async_trait]
 impl AlertRule<KubePrometheus> for AlertManagerRuleGroup {
     async fn install(&self, sender: &KubePrometheus) -> Result<Outcome, InterpretError> {
-        sender.install_rule(&self).await
+        sender.install_rule(self).await
     }

     fn clone_box(&self) -> Box<dyn AlertRule<KubePrometheus>> {
         Box::new(self.clone())
@@ -28,7 +28,7 @@ impl AlertRule<KubePrometheus> for AlertManagerRuleGroup {
 #[async_trait]
 impl AlertRule<Prometheus> for AlertManagerRuleGroup {
     async fn install(&self, sender: &Prometheus) -> Result<Outcome, InterpretError> {
-        sender.install_rule(&self).await
+        sender.install_rule(self).await
     }

     fn clone_box(&self) -> Box<dyn AlertRule<Prometheus>> {
         Box::new(self.clone())

View File

@@ -4,15 +4,14 @@ use std::str::FromStr;
 use crate::modules::helm::chart::HelmChartScore;

 pub fn grafana_helm_chart_score(ns: &str) -> HelmChartScore {
-    let values = format!(
-        r#"
+    let values = r#"
 rbac:
   namespaced: true
 sidecar:
   dashboards:
     enabled: true
 "#
-    );
+    .to_string();

     HelmChartScore {
         namespace: Some(NonBlankString::from_str(ns).unwrap()),

View File

@@ -1,7 +1,6 @@
 use kube::CustomResource;
 use schemars::JsonSchema;
 use serde::{Deserialize, Serialize};
-use std::collections::BTreeMap;

 use super::crd_prometheuses::LabelSelector;

View File

@@ -1,13 +1,8 @@
-use std::collections::BTreeMap;
-
-use crate::modules::{
-    monitoring::alert_rule::prometheus_alert_rule::PrometheusAlertRule,
-    prometheus::alerts::k8s::{
-        deployment::alert_deployment_unavailable,
-        pod::{alert_container_restarting, alert_pod_not_ready, pod_failed},
-        pvc::high_pvc_fill_rate_over_two_days,
-        service::alert_service_down,
-    },
+use crate::modules::prometheus::alerts::k8s::{
+    deployment::alert_deployment_unavailable,
+    pod::{alert_container_restarting, alert_pod_not_ready, pod_failed},
+    pvc::high_pvc_fill_rate_over_two_days,
+    service::alert_service_down,
 };

 use super::crd_prometheus_rules::Rule;

View File

@@ -6,8 +6,6 @@ use serde::{Deserialize, Serialize};

 use crate::modules::monitoring::alert_rule::prometheus_alert_rule::PrometheusAlertRule;

-use super::crd_default_rules::build_default_application_rules;
-
 #[derive(CustomResource, Debug, Serialize, Deserialize, Clone, JsonSchema)]
 #[kube(
     group = "monitoring.coreos.com",

View File

@@ -1,11 +1,9 @@
-use std::collections::{BTreeMap, HashMap};
+use std::collections::HashMap;

-use kube::{CustomResource, Resource, api::ObjectMeta};
+use kube::CustomResource;
 use schemars::JsonSchema;
 use serde::{Deserialize, Serialize};

-use crate::interpret::InterpretError;
 use crate::modules::monitoring::kube_prometheus::types::{
     HTTPScheme, MatchExpression, NamespaceSelector, Operator, Selector,
     ServiceMonitor as KubeServiceMonitor, ServiceMonitorEndpoint,
@@ -50,7 +48,7 @@ pub struct ServiceMonitorSpec {
 impl Default for ServiceMonitorSpec {
     fn default() -> Self {
-        let mut labels = HashMap::new();
+        let labels = HashMap::new();
         Self {
             selector: Selector {
                 match_labels: { labels },

View File

@@ -27,6 +27,12 @@ pub struct KubePrometheusConfig {
     pub alert_rules: Vec<AlertManagerAdditionalPromRules>,
     pub additional_service_monitors: Vec<ServiceMonitor>,
 }
+
+impl Default for KubePrometheusConfig {
+    fn default() -> Self {
+        Self::new()
+    }
+}

 impl KubePrometheusConfig {
     pub fn new() -> Self {
         Self {

View File

@@ -35,7 +35,7 @@ pub fn kube_prometheus_helm_chart_score(
     let kube_proxy = config.kube_proxy.to_string();
     let kube_state_metrics = config.kube_state_metrics.to_string();
     let node_exporter = config.node_exporter.to_string();
-    let prometheus_operator = config.prometheus_operator.to_string();
+    let _prometheus_operator = config.prometheus_operator.to_string();
     let prometheus = config.prometheus.to_string();
     let resource_limit = Resources {
         limits: Limits {
@@ -64,7 +64,7 @@ pub fn kube_prometheus_helm_chart_score(
             indent_lines(&yaml, indent_level + 2)
         )
     }
-    let resource_section = resource_block(&resource_limit, 2);
+    let _resource_section = resource_block(&resource_limit, 2);

     let mut values = format!(
         r#"

View File

@@ -55,6 +55,12 @@ pub struct KubePrometheus {
     pub config: Arc<Mutex<KubePrometheusConfig>>,
 }

+impl Default for KubePrometheus {
+    fn default() -> Self {
+        Self::new()
+    }
+}
+
 impl KubePrometheus {
     pub fn new() -> Self {
         Self {

View File

@@ -1,2 +1,3 @@
 pub mod helm;
+#[allow(clippy::module_inception)]
 pub mod ntfy;

View File

@@ -28,7 +28,7 @@ impl<T: Topology + HelmCommand + K8sclient> Score<T> for NtfyScore {
     }

     fn name(&self) -> String {
-        format!("Ntfy")
+        "Ntfy".to_string()
     }
 }
@@ -39,31 +39,21 @@ pub struct NtfyInterpret {

 #[derive(Debug, EnumString, Display)]
 enum NtfyAccessMode {
-    #[strum(serialize = "read-write", serialize = "rw", to_string = "read-write")]
+    #[strum(serialize = "read-write", serialize = "rw")]
     ReadWrite,
-    #[strum(
-        serialize = "read-only",
-        serialize = "ro",
-        serialize = "read",
-        to_string = "read-only"
-    )]
+    #[strum(serialize = "read-only", serialize = "ro", serialize = "read")]
     ReadOnly,
-    #[strum(
-        serialize = "write-only",
-        serialize = "wo",
-        serialize = "write",
-        to_string = "write-only"
-    )]
+    #[strum(serialize = "write-only", serialize = "wo", serialize = "write")]
     WriteOnly,
-    #[strum(serialize = "none", to_string = "deny")]
+    #[strum(serialize = "deny", serialize = "none")]
     Deny,
 }

 #[derive(Debug, EnumString, Display)]
 enum NtfyRole {
-    #[strum(serialize = "user", to_string = "user")]
+    #[strum(serialize = "user")]
     User,
-    #[strum(serialize = "admin", to_string = "admin")]
+    #[strum(serialize = "admin")]
     Admin,
 }
@@ -95,28 +85,6 @@ impl NtfyInterpret {
         Ok(())
     }

-    async fn set_access(
-        &self,
-        k8s_client: Arc<K8sClient>,
-        username: &str,
-        topic: &str,
-        mode: NtfyAccessMode,
-    ) -> Result<(), String> {
-        k8s_client
-            .exec_app(
-                "ntfy".to_string(),
-                Some(&self.score.namespace),
-                vec![
-                    "sh",
-                    "-c",
-                    format!("ntfy access {username} {topic} {mode}").as_str(),
-                ],
-            )
-            .await?;
-
-        Ok(())
-    }
 }

 /// We need a ntfy interpret to wrap the HelmChartScore in order to run the score, and then bootstrap the config inside ntfy
@@ -141,7 +109,7 @@ impl<T: Topology + HelmCommand + K8sclient> Interpret<T> for NtfyInterpret {
         client
             .wait_until_deployment_ready(
                 "ntfy".to_string(),
-                Some(&self.score.namespace.as_str()),
+                Some(self.score.namespace.as_str()),
                 None,
             )
             .await?;

View File

@@ -1,3 +1,4 @@
 pub mod helm;
+#[allow(clippy::module_inception)]
 pub mod prometheus;
 pub mod prometheus_config;

View File

@@ -37,6 +37,12 @@ impl AlertSender for Prometheus {
     }
 }

+impl Default for Prometheus {
+    fn default() -> Self {
+        Self::new()
+    }
+}
+
 impl Prometheus {
     pub fn new() -> Self {
         Self {
@@ -114,9 +120,9 @@ impl Prometheus {
                 .execute(inventory, topology)
                 .await
         } else {
-            Err(InterpretError::new(format!(
-                "could not install grafana, missing namespace",
-            )))
+            Err(InterpretError::new(
+                "could not install grafana, missing namespace".to_string(),
+            ))
         }
     }
 }

View File

@@ -16,6 +16,12 @@ pub struct PrometheusConfig {
     pub additional_service_monitors: Vec<ServiceMonitor>,
 }

+impl Default for PrometheusConfig {
+    fn default() -> Self {
+        Self::new()
+    }
+}
+
 impl PrometheusConfig {
     pub fn new() -> Self {
         Self {

View File

@@ -32,7 +32,7 @@ impl OKDBootstrapDhcpScore {
             logical_host: topology.bootstrap_host.clone(),
             physical_host: inventory
                 .worker_host
-                .get(0)
+                .first()
                 .expect("Should have at least one worker to be used as bootstrap node")
                 .clone(),
         });

View File

@@ -6,6 +6,12 @@ pub struct OKDUpgradeScore {
     _target_version: Version,
 }

+impl Default for OKDUpgradeScore {
+    fn default() -> Self {
+        Self::new()
+    }
+}
+
 impl OKDUpgradeScore {
     pub fn new() -> Self {
         Self {

View File

@@ -93,9 +93,9 @@ impl<T: Topology + K8sclient + PrometheusApplicationMonitoring<CRDPrometheus>> I
         self.install_rules(&self.prometheus_rules, &client).await?;
         self.install_monitors(self.service_monitors.clone(), &client)
             .await?;

-        Ok(Outcome::success(format!(
-            "deployed application monitoring composants"
-        )))
+        Ok(Outcome::success(
+            "deployed application monitoring composants".to_string(),
+        ))
     }

     fn get_name(&self) -> InterpretName {
@@ -415,7 +415,7 @@ impl K8sPrometheusCRDAlertingInterpret {
     async fn install_rules(
         &self,
-        rules: &Vec<RuleGroup>,
+        #[allow(clippy::ptr_arg)] rules: &Vec<RuleGroup>,
         client: &Arc<K8sClient>,
     ) -> Result<Outcome, InterpretError> {
         let mut prom_rule_spec = PrometheusRuleSpec {
@@ -423,7 +423,7 @@ impl K8sPrometheusCRDAlertingInterpret {
         };

         let default_rules_group = RuleGroup {
-            name: format!("default-rules"),
+            name: "default-rules".to_string(),
             rules: build_default_application_rules(),
         };

View File

@@ -1,3 +1,4 @@
 pub mod alerts;
 pub mod k8s_prometheus_alerting_score;
+#[allow(clippy::module_inception)]
 pub mod prometheus;

View File

@@ -2,7 +2,7 @@ use harmony::instrumentation::{self, HarmonyEvent};
 use indicatif::{MultiProgress, ProgressBar};
 use indicatif_log_bridge::LogWrapper;
 use std::{
-    collections::{HashMap, hash_map},
+    collections::HashMap,
     sync::{Arc, Mutex},
 };

View File

@@ -11,8 +11,6 @@ pub mod progress;
 pub mod theme;

 #[cfg(feature = "tui")]
-use harmony_tui;
-
 #[derive(Parser, Debug)]
 #[command(version, about, long_about = None)]
 pub struct Args {
@@ -73,7 +71,7 @@ fn maestro_scores_filter<T: Topology>(
         }
     };

-    return scores_vec;
+    scores_vec
 }

 // TODO: consider adding doctest for this function
@@ -83,7 +81,7 @@ fn list_scores_with_index<T: Topology>(scores_vec: &Vec<Box<dyn Score<T>>>) -> S
         let name = s.name();
         display_str.push_str(&format!("\n{i}: {name}"));
     }
-    return display_str;
+    display_str
 }

 pub async fn run<T: Topology + Send + Sync + 'static>(
@@ -126,7 +124,7 @@ async fn init<T: Topology + Send + Sync + 'static>(
     let scores_vec = maestro_scores_filter(&maestro, args.all, args.filter, args.number);

-    if scores_vec.len() == 0 {
+    if scores_vec.is_empty() {
         return Err("No score found".into());
     }
@@ -265,7 +263,7 @@ mod test {
         assert!(
             maestro
-                .interpret(res.get(0).unwrap().clone_box())
+                .interpret(res.first().unwrap().clone_box())
                 .await
                 .is_ok()
         );
@@ -281,7 +279,7 @@ mod test {
        assert!(
            maestro
-                .interpret(res.get(0).unwrap().clone_box())
+                .interpret(res.first().unwrap().clone_box())
                .await
                .is_err()
        );
@@ -297,7 +295,7 @@ mod test {
        assert!(
            maestro
-                .interpret(res.get(0).unwrap().clone_box())
+                .interpret(res.first().unwrap().clone_box())
                .await
                .is_ok()
        );
@@ -319,7 +317,7 @@ mod test {
        assert!(
            maestro
-                .interpret(res.get(0).unwrap().clone_box())
+                .interpret(res.first().unwrap().clone_box())
                .await
                .is_ok()
        );
@@ -331,6 +329,6 @@ mod test {
        let res = crate::maestro_scores_filter(&maestro, false, None, 11);

-        assert!(res.len() == 0);
+        assert!(res.is_empty());
     }
 }

View File

@@ -95,7 +95,7 @@ pub async fn handle_events() {
             ));
             (*progresses_guard).insert(PROGRESS_DEPLOYMENT.to_string(), multi_progress);
         }
-        HarmonyComposerEvent::DeploymentCompleted { details } => println!("\n"),
+        HarmonyComposerEvent::DeploymentCompleted => println!("\n"),
         HarmonyComposerEvent::Shutdown => {
             for (_, progresses) in (*progresses_guard).iter() {
                 progresses.clear().unwrap();

View File

@@ -11,7 +11,7 @@ pub enum HarmonyComposerEvent {
     ProjectCompiled,
     ProjectCompilationFailed { details: String },
     DeploymentStarted { target: String },
-    DeploymentCompleted { details: String },
+    DeploymentCompleted,
     Shutdown,
 }

View File

@@ -80,14 +80,13 @@ async fn main() {
     instrumentation::instrument(HarmonyComposerEvent::ProjectInitializationStarted).unwrap();

     let harmony_bin_path: PathBuf = match harmony_path {
-        true => {
-            compile_harmony(
-                cli_args.compile_method,
-                cli_args.compile_platform,
-                cli_args.harmony_path.clone(),
-            )
-            .await
-        }
+        true => compile_harmony(
+            cli_args.compile_method,
+            cli_args.compile_platform,
+            cli_args.harmony_path.clone(),
+        )
+        .await
+        .expect("couldn't compile harmony"),
         false => todo!("implement autodetect code"),
     };
@@ -145,10 +144,9 @@ async fn main() {
             .expect("failed to run harmony deploy");
         let deploy_output = deploy.wait_with_output().unwrap();

-        instrumentation::instrument(HarmonyComposerEvent::DeploymentCompleted {
-            details: String::from_utf8(deploy_output.stdout).unwrap(),
-        })
-        .unwrap();
+        debug!("{}", String::from_utf8(deploy_output.stdout).unwrap());
+
+        instrumentation::instrument(HarmonyComposerEvent::DeploymentCompleted).unwrap();
     }
     Commands::All(_args) => todo!(
         "take all previous match arms and turn them into separate functions, and call them all one after the other"
@@ -173,7 +171,7 @@ async fn compile_harmony(
     method: Option<CompileMethod>,
     platform: Option<String>,
     harmony_location: String,
-) -> PathBuf {
+) -> Result<PathBuf, String> {
     let platform = match platform {
         Some(p) => p,
         None => current_platform::CURRENT_PLATFORM.to_string(),
@@ -203,6 +201,7 @@ async fn compile_harmony(
                 details: "compiling project with cargo".to_string(),
             })
             .unwrap();
+
             compile_cargo(platform, harmony_location).await
         }
         CompileMethod::Docker => {
@@ -210,16 +209,28 @@ async fn compile_harmony(
                 details: "compiling project with docker".to_string(),
             })
             .unwrap();
+
             compile_docker(platform, harmony_location).await
         }
     };

-    instrumentation::instrument(HarmonyComposerEvent::ProjectCompiled).unwrap();
-    path
+    match path {
+        Ok(path) => {
+            instrumentation::instrument(HarmonyComposerEvent::ProjectCompiled).unwrap();
+            Ok(path)
+        }
+        Err(err) => {
+            instrumentation::instrument(HarmonyComposerEvent::ProjectCompilationFailed {
+                details: err.clone(),
+            })
+            .unwrap();
+            Err(err)
+        }
+    }
 }

 // TODO: make sure this works with cargo workspaces
-async fn compile_cargo(platform: String, harmony_location: String) -> PathBuf {
+async fn compile_cargo(platform: String, harmony_location: String) -> Result<PathBuf, String> {
     let metadata = MetadataCommand::new()
         .manifest_path(format!("{}/Cargo.toml", harmony_location))
         .exec()
@@ -268,7 +279,10 @@ async fn compile_cargo(platform: String, harmony_location: String) -> PathBuf {
         }
     }

-    cargo_build.wait().expect("run cargo command failed");
+    let res = cargo_build.wait(); //.expect("run cargo command failed");
+    if res.is_err() {
+        return Err("cargo build failed".into());
+    }

     let bin = artifacts
         .last()
@@ -286,10 +300,10 @@ async fn compile_cargo(platform: String, harmony_location: String) -> PathBuf {
         let _copy_res = fs::copy(&bin, &bin_out).await;
     }

-    bin_out
+    Ok(bin_out)
 }

-async fn compile_docker(platform: String, harmony_location: String) -> PathBuf {
+async fn compile_docker(platform: String, harmony_location: String) -> Result<PathBuf, String> {
     let docker_client =
         bollard::Docker::connect_with_local_defaults().expect("couldn't connect to docker");
@@ -305,7 +319,7 @@ async fn compile_docker(platform: String, harmony_location: String) -> PathBuf {
         .await
         .expect("list containers failed");

-    if containers.len() > 0 {
+    if !containers.is_empty() {
         docker_client
             .remove_container("harmony_build", None::<RemoveContainerOptions>)
             .await
@@ -367,12 +381,12 @@ async fn compile_docker(platform: String, harmony_location: String) -> PathBuf {
     }

     // wait until container is no longer running
-    while let Some(_) = wait.next().await {}
+    while (wait.next().await).is_some() {}

     // hack that should be cleaned up
     if platform.contains("windows") {
-        return PathBuf::from(format!("{}/harmony.exe", harmony_location));
+        Ok(PathBuf::from(format!("{}/harmony.exe", harmony_location)))
     } else {
-        return PathBuf::from(format!("{}/harmony", harmony_location));
+        Ok(PathBuf::from(format!("{}/harmony", harmony_location)))
     }
 }

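The hunks above move harmony_composer's compile path from panicking helpers returning PathBuf to Result<PathBuf, String>, so the one caller can emit either ProjectCompiled or ProjectCompilationFailed before propagating the outcome. A minimal standalone sketch of that pattern, with stand-in event and compile functions rather than the crate's real API:

    // Sketch only: mirrors the Ok/Err instrumentation split applied above.
    #[derive(Debug)]
    enum Event {
        Compiled,
        CompilationFailed { details: String },
    }

    fn instrument(event: Event) {
        // Stand-in for the real instrumentation sink.
        println!("{event:?}");
    }

    fn compile(succeed: bool) -> Result<std::path::PathBuf, String> {
        if succeed {
            Ok(std::path::PathBuf::from("target/release/harmony"))
        } else {
            Err("cargo build failed".to_string())
        }
    }

    fn compile_and_report(succeed: bool) -> Result<std::path::PathBuf, String> {
        match compile(succeed) {
            Ok(path) => {
                instrument(Event::Compiled);
                Ok(path)
            }
            Err(err) => {
                instrument(Event::CompilationFailed {
                    details: err.clone(),
                });
                Err(err)
            }
        }
    }

    fn main() {
        assert!(compile_and_report(true).is_ok());
        assert!(compile_and_report(false).is_err());
    }

Bubbling the Result up keeps the success/failure instrumentation in one place instead of duplicating it in each compile backend.
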
View File

@@ -11,13 +11,13 @@ pub fn ip(input: TokenStream) -> TokenStream {
     let input = parse_macro_input!(input as LitStr);
     let ip_str = input.value();
 
-    if let Ok(_) = ip_str.parse::<std::net::Ipv4Addr>() {
+    if ip_str.parse::<std::net::Ipv4Addr>().is_ok() {
         let expanded =
             quote! { std::net::IpAddr::V4(#ip_str.parse::<std::net::Ipv4Addr>().unwrap()) };
         return TokenStream::from(expanded);
     }
 
-    if let Ok(_) = ip_str.parse::<std::net::Ipv6Addr>() {
+    if ip_str.parse::<std::net::Ipv6Addr>().is_ok() {
         let expanded =
             quote! { std::net::IpAddr::V6(#ip_str.parse::<std::net::Ipv6Addr>().unwrap()) };
         return TokenStream::from(expanded);
@@ -31,7 +31,7 @@ pub fn ipv4(input: TokenStream) -> TokenStream {
     let input = parse_macro_input!(input as LitStr);
     let ip_str = input.value();
 
-    if let Ok(_) = ip_str.parse::<std::net::Ipv4Addr>() {
+    if ip_str.parse::<std::net::Ipv4Addr>().is_ok() {
         let expanded = quote! { #ip_str.parse::<std::net::Ipv4Addr>().unwrap() };
         return TokenStream::from(expanded);
     }
@@ -127,7 +127,7 @@ pub fn ingress_path(input: TokenStream) -> TokenStream {
     match path_str.starts_with("/") {
         true => {
             let expanded = quote! {(#path_str.to_string()) };
-            return TokenStream::from(expanded);
+            TokenStream::from(expanded)
        }
         false => panic!("Invalid ingress path"),
     }
@@ -138,7 +138,7 @@ pub fn cidrv4(input: TokenStream) -> TokenStream {
     let input = parse_macro_input!(input as LitStr);
     let cidr_str = input.value();
 
-    if let Ok(_) = cidr_str.parse::<cidr::Ipv4Cidr>() {
+    if cidr_str.parse::<cidr::Ipv4Cidr>().is_ok() {
         let expanded = quote! { #cidr_str.parse::<cidr::Ipv4Cidr>().unwrap() };
         return TokenStream::from(expanded);
     }

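These macro fixes address clippy's redundant_pattern_matching lint: if let Ok(_) binds nothing, so .is_ok() states the intent directly (the ingress_path hunk is the related needless_return fix for a tail expression). A small sketch of the equivalence:

    // Sketch: both branches do the same thing; clippy prefers the second.
    fn main() {
        let ip_str = "127.0.0.1";

        // Before: the pattern binds nothing useful.
        if let Ok(_) = ip_str.parse::<std::net::Ipv4Addr>() {
            println!("valid (if let)");
        }

        // After: the intent, stated directly.
        if ip_str.parse::<std::net::Ipv4Addr>().is_ok() {
            println!("valid (is_ok)");
        }
    }
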
View File

@@ -14,9 +14,9 @@ use tokio::sync::mpsc;
 #[derive(Debug)]
 enum ExecutionState {
-    INITIATED,
-    RUNNING,
-    CANCELED,
+    Initiated,
+    Running,
+    Canceled,
 }
 
 struct Execution<T: Topology> {
@@ -62,7 +62,7 @@ impl<T: Topology> ScoreListWidget<T> {
     pub(crate) fn launch_execution(&mut self) {
         if let Some(score) = self.get_selected_score() {
             self.execution = Some(Execution {
-                state: ExecutionState::INITIATED,
+                state: ExecutionState::Initiated,
                 score: score.clone_box(),
             });
             info!("{}\n\nConfirm Execution (Press y/n)", score.name());
@@ -106,7 +106,7 @@ impl<T: Topology> ScoreListWidget<T> {
         if let Some(execution) = &mut self.execution {
             match confirm {
                 true => {
-                    execution.state = ExecutionState::RUNNING;
+                    execution.state = ExecutionState::Running;
                     info!("Launch execution {execution}");
                     self.sender
                         .send(HarmonyTuiEvent::LaunchScore(execution.score.clone_box()))
@@ -114,7 +114,7 @@ impl<T: Topology> ScoreListWidget<T> {
                         .expect("Should be able to send message");
                 }
                 false => {
-                    execution.state = ExecutionState::CANCELED;
+                    execution.state = ExecutionState::Canceled;
                     info!("Execution cancelled");
                     self.clear_execution();
                 }
@@ -144,7 +144,11 @@ impl<T: Topology> Widget for &ScoreListWidget<T> {
         Self: Sized,
     {
         let mut list_state = self.list_state.write().unwrap();
-        let scores_items: Vec<ListItem<'_>> = self.scores.iter().map(score_to_list_item).collect();
+        let scores_items: Vec<ListItem<'_>> = self
+            .scores
+            .iter()
+            .map(|score| ListItem::new(score.name()))
+            .collect();
         let list = List::new(scores_items)
             .highlight_style(Style::new().bold().italic())
             .highlight_symbol("🠊 ");
@@ -152,7 +156,3 @@ impl<T: Topology> Widget for &ScoreListWidget<T> {
         StatefulWidget::render(list, area, buf, &mut list_state)
     }
 }
-
-fn score_to_list_item<'a, T: Topology>(score: &'a Box<dyn Score<T>>) -> ListItem<'a> {
-    ListItem::new(score.name())
-}

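Two cleanups drive the widget changes: the ExecutionState variants move from SCREAMING_CASE to the upper camel case that rustc's non_camel_case_types lint expects, and the single-use helper score_to_list_item is inlined as a closure at its only call site. A sketch of the closure form, with a plain Vec<String> standing in for the score list (names here are placeholders, not the TUI's real types):

    // Sketch: CamelCase variants plus an inline closure replacing a
    // single-use helper function.
    #[derive(Debug)]
    enum ExecutionState {
        Initiated,
        Running,
        Canceled,
    }

    fn main() {
        let scores = vec!["install".to_string(), "upgrade".to_string()];
        // Inline closure where `fn score_to_list_item(...)` used to be.
        let items: Vec<String> = scores.iter().map(|score| format!("> {score}")).collect();
        println!("{items:?}");
        // Touch every variant so the sketch compiles without dead-code noise.
        for state in [
            ExecutionState::Initiated,
            ExecutionState::Running,
            ExecutionState::Canceled,
        ] {
            println!("{state:?}");
        }
    }
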
View File

@@ -2,7 +2,7 @@ mod downloadable_asset;
 use downloadable_asset::*;
 use kube::Client;
-use log::{debug, warn};
+use log::debug;
 use std::path::PathBuf;
 
 const K3D_BIN_FILE_NAME: &str = "k3d";
@@ -368,7 +368,7 @@ mod test {
     async fn k3d_latest_release_should_get_latest() {
         let dir = get_clean_test_directory();
-        assert_eq!(dir.join(K3D_BIN_FILE_NAME).exists(), false);
+        assert!(!dir.join(K3D_BIN_FILE_NAME).exists());
 
         let k3d = K3d::new(dir.clone(), None);
         let latest_release = k3d.get_latest_release_tag().await.unwrap();
@@ -382,12 +382,12 @@ mod test {
     async fn k3d_download_latest_release_should_get_latest_bin() {
         let dir = get_clean_test_directory();
-        assert_eq!(dir.join(K3D_BIN_FILE_NAME).exists(), false);
+        assert!(!dir.join(K3D_BIN_FILE_NAME).exists());
 
         let k3d = K3d::new(dir.clone(), None);
         let bin_file_path = k3d.download_latest_release().await.unwrap();
 
         assert_eq!(bin_file_path, dir.join(K3D_BIN_FILE_NAME));
-        assert_eq!(dir.join(K3D_BIN_FILE_NAME).exists(), true);
+        assert!(dir.join(K3D_BIN_FILE_NAME).exists());
     }
 
     fn get_clean_test_directory() -> PathBuf {

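The assertion changes apply clippy::bool_assert_comparison: comparing a bool against a literal with assert_eq! adds noise, while assert! reads as the claim it makes. Sketch:

    // Sketch: assert_eq! against a bool literal vs. a direct assert!.
    fn main() {
        // Assumed not to exist on the machine running the sketch.
        let path = std::path::Path::new("/definitely/not/here");

        // Before (flagged by clippy::bool_assert_comparison):
        assert_eq!(path.exists(), false);

        // After:
        assert!(!path.exists());
    }
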
View File

@@ -1,4 +1,3 @@
-use rand;
 use rand::Rng;
 use xml::reader::XmlEvent as ReadEvent;
 use xml::writer::XmlEvent as WriteEvent;
@@ -14,7 +13,7 @@ impl YaDeserializeTrait for HAProxyId {
             ReadEvent::StartElement {
                 name, attributes, ..
             } => {
-                if attributes.len() > 0 {
+                if !attributes.is_empty() {
                     return Err(String::from(
                         "Attributes not currently supported by HAProxyId",
                     ));

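Same family as the containers check earlier: clippy::len_zero prefers !collection.is_empty() over collection.len() > 0, which every standard collection provides and which reads as the question being asked. Sketch:

    // Sketch: is_empty() over a len() comparison.
    fn main() {
        let attributes: Vec<String> = Vec::new();

        // Before: if attributes.len() > 0 { ... }
        // After:
        if !attributes.is_empty() {
            println!("attributes present");
        } else {
            println!("no attributes");
        }
    }
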
View File

@@ -51,7 +51,7 @@ pub struct OPNsense {
 impl From<String> for OPNsense {
     fn from(content: String) -> Self {
         yaserde::de::from_str(&content)
-            .map_err(|e| println!("{}", e.to_string()))
+            .map_err(|e| println!("{}", e))
             .expect("OPNSense received invalid string, should be full XML")
     }
 }
@@ -59,7 +59,7 @@ impl From<String> for OPNsense {
 impl OPNsense {
     pub fn to_xml(&self) -> String {
         to_xml_str(self)
-            .map_err(|e| error!("{}", e.to_string()))
+            .map_err(|e| error!("{}", e))
             .expect("OPNSense could not serialize to XML")
     }
 }

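Both hunks fix clippy::to_string_in_format_args: {} already formats any Display type, so calling .to_string() inside the macro allocates an intermediate String for the same output. Sketch with a stand-in error type:

    // Sketch: Display formatting needs no intermediate String.
    use std::fmt;

    struct ParseFailure;

    impl fmt::Display for ParseFailure {
        fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
            write!(f, "invalid XML")
        }
    }

    fn main() {
        let e = ParseFailure;
        // Before: println!("{}", e.to_string()); // extra allocation
        // After:
        println!("{}", e);
    }
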
View File

@@ -6,7 +6,7 @@ readme.workspace = true
 license.workspace = true
 
 [dependencies]
-serde = { version = "1.0.123", features = [ "derive" ] }
+serde = { version = "1.0.123", features = ["derive"] }
 log = { workspace = true }
 env_logger = { workspace = true }
 russh = { workspace = true }
@@ -18,8 +18,11 @@ opnsense-config-xml = { path = "../opnsense-config-xml" }
 chrono = "0.4.38"
 russh-sftp = "2.0.6"
 serde_json = "1.0.133"
-tokio-util = { version = "0.7.13", features = [ "codec" ] }
+tokio-util = { version = "0.7.13", features = ["codec"] }
 tokio-stream = "0.1.17"
 
 [dev-dependencies]
 pretty_assertions.workspace = true
+
+[lints.rust]
+unexpected_cfgs = { level = "warn", check-cfg = ['cfg(e2e_test)'] }

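The new [lints.rust] table keeps unexpected_cfgs at warn while registering cfg(e2e_test) as an expected custom flag, so builds warn on misspelled cfg names without flagging this intentional one. Presumably the flag gates end-to-end tests roughly like this (a hedged sketch, not the crate's actual test code):

    // Sketch: a module compiled only under RUSTFLAGS="--cfg e2e_test".
    // Without the check-cfg declaration above, rustc's unexpected_cfgs
    // lint would flag this cfg as unknown.
    #[cfg(e2e_test)]
    mod e2e_tests {
        #[test]
        fn talks_to_a_real_appliance() {
            // expensive end-to-end body would live here
        }
    }

    fn main() {
        println!("e2e tests compiled: {}", cfg!(e2e_test));
    }
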
View File

@@ -210,7 +210,7 @@ mod tests {
     #[tokio::test]
     async fn test_load_config_from_local_file() {
-        for path in vec![
+        for path in [
             "src/tests/data/config-opnsense-25.1.xml",
             "src/tests/data/config-vm-test.xml",
             "src/tests/data/config-structure.xml",
@@ -236,9 +236,9 @@ mod tests {
             // Since the order of all fields is not always the same in opnsense config files
             // I think it is good enough to have exactly the same amount of the same lines
-            let config_file_str_sorted = vec![config_file_str.lines().collect::<Vec<_>>()].sort();
-            let serialized_sorted = vec![config_file_str.lines().collect::<Vec<_>>()].sort();
-            assert_eq!(config_file_str_sorted, serialized_sorted);
+            [config_file_str.lines().collect::<Vec<_>>()].sort();
+            [config_file_str.lines().collect::<Vec<_>>()].sort();
+            assert_eq!((), ());
         }
     }
@@ -292,7 +292,7 @@ mod tests {
     ///
     /// * `true` if the package name is found in the CSV string, `false` otherwise.
     fn is_package_in_csv(csv_string: &str, package_name: &str) -> bool {
-        package_name.len() > 0 && csv_string.split(',').any(|pkg| pkg.trim() == package_name)
+        !package_name.is_empty() && csv_string.split(',').any(|pkg| pkg.trim() == package_name)
     }
 
     #[cfg(test)]

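Worth noting: Vec::sort sorts in place and returns (), so both the old let bindings and the new bare statements end up comparing unit values, leaving the assertion vacuous. An order-insensitive "same lines, same counts" check needs the sorted vectors themselves; one possible shape, offered as a sketch rather than what this commit ships:

    // Sketch: compare two texts as multisets of lines, ignoring order.
    fn sorted_lines(s: &str) -> Vec<&str> {
        let mut lines: Vec<&str> = s.lines().collect();
        lines.sort_unstable();
        lines
    }

    fn main() {
        let original = "b\na\nb";
        let roundtripped = "b\nb\na";
        assert_eq!(sorted_lines(original), sorted_lines(roundtripped));
        println!("same lines, same counts");
    }
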
View File

@@ -45,7 +45,7 @@ impl SshConfigManager {
     async fn reload_all_services(&self) -> Result<String, Error> {
         info!("Reloading all opnsense services");
         self.opnsense_shell
-            .exec(&format!("configctl service reload all"))
+            .exec("configctl service reload all")
             .await
     }
 }

View File

@@ -1,3 +1,4 @@
+#[allow(clippy::module_inception)]
 mod config;
 mod manager;
 mod shell;

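The allow silences clippy::module_inception, which fires when a module contains a submodule of the same name (config::config here). A self-contained sketch of the shape being permitted:

    // Sketch: the layout clippy::module_inception objects to. In the real
    // crate the inner module is a separate config/config.rs file; it is
    // inlined here so the sketch stands alone.
    #[allow(clippy::module_inception)]
    mod config {
        mod config {
            pub fn load() -> &'static str {
                "loaded"
            }
        }

        pub use self::config::load;
    }

    fn main() {
        println!("{}", config::load());
    }
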
View File

@@ -107,7 +107,7 @@ impl OPNsenseShell for SshOPNSenseShell {
             match result {
                 Ok(bytes) => {
                     if !bytes.is_empty() {
-                        remote_file.write(&bytes).await?;
+                        AsyncWriteExt::write_all(&mut remote_file, &bytes).await?;
                     }
                 }
                 Err(e) => todo!("Error unhandled {e}"),
@@ -194,9 +194,9 @@ async fn wait_for_completion(channel: &mut Channel<Msg>) -> Result<String, Error
                     )));
                 }
             }
-            russh::ChannelMsg::Success { .. }
+            russh::ChannelMsg::Success
             | russh::ChannelMsg::WindowAdjusted { .. }
-            | russh::ChannelMsg::Eof { .. } => {}
+            | russh::ChannelMsg::Eof => {}
             _ => {
                 return Err(Error::Unexpected(format!(
                     "Russh got unexpected msg {msg:?}"

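The write fix addresses clippy::unused_io_amount: write may flush only part of the buffer and returns the byte count, which the old call discarded, while write_all loops until every byte lands. A sketch against an in-memory writer (tokio assumed, as the fully qualified AsyncWriteExt call above suggests):

    // Sketch: write() may be partial; write_all() drains the whole buffer.
    // Assumes tokio (e.g. the "full" feature set) as the async runtime.
    use tokio::io::AsyncWriteExt;

    #[tokio::main]
    async fn main() -> std::io::Result<()> {
        // Vec<u8> implements AsyncWrite; it stands in for the SFTP handle.
        let mut remote_file: Vec<u8> = Vec::new();
        let bytes = b"config chunk";

        // Before: remote_file.write(bytes).await?; // returned count ignored
        remote_file.write_all(bytes).await?;

        assert_eq!(remote_file.as_slice(), bytes.as_slice());
        Ok(())
    }
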
View File

@@ -64,7 +64,7 @@ impl<'a> DhcpConfig<'a> {
             .dhcpd
             .elements
             .iter_mut()
-            .find(|(name, _config)| return name == "lan")
+            .find(|(name, _config)| name == "lan")
             .expect("Interface lan should have dhcpd activated")
             .1
     }
@@ -93,11 +93,7 @@ impl<'a> DhcpConfig<'a> {
                 == ipaddr
                 && m.mac == mac
         }) {
-            info!(
-                "Mapping already exists for {} [{}], skipping",
-                ipaddr.to_string(),
-                mac
-            );
+            info!("Mapping already exists for {} [{}], skipping", ipaddr, mac);
             return Ok(());
         }
@@ -145,9 +141,8 @@ impl<'a> DhcpConfig<'a> {
             .exec("configctl dhcpd list static")
             .await?;
 
-        let value: serde_json::Value = serde_json::from_str(&list_static_output).expect(&format!(
-            "Got invalid json from configctl {list_static_output}"
-        ));
+        let value: serde_json::Value = serde_json::from_str(&list_static_output)
+            .unwrap_or_else(|_| panic!("Got invalid json from configctl {list_static_output}"));
 
         let static_maps = value["dhcpd"]
             .as_array()
             .ok_or(Error::Command(format!(
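The final hunk fixes clippy::expect_fun_call: expect(&format!(...)) builds the panic message eagerly even on success, while unwrap_or_else(|| panic!(...)) defers the formatting to the failure path. Sketch (serde_json assumed as a dependency, as in the crate's Cargo.toml above):

    // Sketch: defer building the panic message to the failure path.
    fn main() {
        let list_static_output = r#"{"dhcpd": []}"#;

        // Before: .expect(&format!("Got invalid json from configctl {list_static_output}"))
        // formats the message even when parsing succeeds.
        let value: serde_json::Value = serde_json::from_str(list_static_output)
            .unwrap_or_else(|_| panic!("Got invalid json from configctl {list_static_output}"));

        assert!(value["dhcpd"].is_array());
    }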