Compare commits
	
		
			2 Commits
		
	
	
		
			024084859e
			...
			c64b59a4ec
		
	
	| Author | SHA1 | Date | |
|---|---|---|---|
|  | c64b59a4ec | ||
|  | 74182bbc60 | 
							
								
								
									
										2
									
								
								check.sh
									
									
									
									
									
								
							
							
						
						
									
										2
									
								
								check.sh
									
									
									
									
									
								
							| @ -1,5 +1,7 @@ | ||||
| #!/bin/sh | ||||
| set -e | ||||
| 
 | ||||
| cargo check --all-targets --all-features --keep-going | ||||
| cargo fmt --check | ||||
| cargo clippy | ||||
| cargo test | ||||
|  | ||||
| @ -1,17 +1,11 @@ | ||||
| use std::{path::PathBuf, sync::Arc}; | ||||
| use std::{path::PathBuf, str::FromStr, sync::Arc}; | ||||
| 
 | ||||
| use harmony::{ | ||||
|     data::Id, | ||||
|     inventory::Inventory, | ||||
|     maestro::Maestro, | ||||
|     modules::{ | ||||
|         application::{ | ||||
|             ApplicationScore, RustWebFramework, RustWebapp, | ||||
|             features::{ContinuousDelivery, Monitoring}, | ||||
|         }, | ||||
|         monitoring::alert_channel::{ | ||||
|             discord_alert_channel::DiscordWebhook, webhook_receiver::WebhookReceiver, | ||||
|         }, | ||||
|         application::{ApplicationScore, RustWebFramework, RustWebapp, features::Monitoring}, | ||||
|         monitoring::alert_channel::webhook_receiver::WebhookReceiver, | ||||
|         tenant::TenantScore, | ||||
|     }, | ||||
|     topology::{K8sAnywhereTopology, Url, tenant::TenantConfig}, | ||||
| @ -25,7 +19,7 @@ async fn main() { | ||||
|     //the TenantConfig.name must match
 | ||||
|     let tenant = TenantScore { | ||||
|         config: TenantConfig { | ||||
|             id: Id::from_str("test-tenant-id"), | ||||
|             id: Id::from_str("test-tenant-id").unwrap(), | ||||
|             name: "example-monitoring".to_string(), | ||||
|             ..Default::default() | ||||
|         }, | ||||
|  | ||||
| @ -125,40 +125,47 @@ spec: | ||||
|                   name: nginx"#,
 | ||||
|     ) | ||||
|     .unwrap(); | ||||
|     return deployment; | ||||
|     deployment | ||||
| } | ||||
| fn nginx_deployment_2() -> Deployment { | ||||
|     let mut pod_template = PodTemplateSpec::default(); | ||||
|     pod_template.metadata = Some(ObjectMeta { | ||||
|     let pod_template = PodTemplateSpec { | ||||
|         metadata: Some(ObjectMeta { | ||||
|             labels: Some(BTreeMap::from([( | ||||
|                 "app".to_string(), | ||||
|                 "nginx-test".to_string(), | ||||
|             )])), | ||||
|             ..Default::default() | ||||
|     }); | ||||
|     pod_template.spec = Some(PodSpec { | ||||
|         }), | ||||
|         spec: Some(PodSpec { | ||||
|             containers: vec![Container { | ||||
|                 name: "nginx".to_string(), | ||||
|                 image: Some("nginx".to_string()), | ||||
|                 ..Default::default() | ||||
|             }], | ||||
|             ..Default::default() | ||||
|     }); | ||||
|     let mut spec = DeploymentSpec::default(); | ||||
|     spec.template = pod_template; | ||||
|     spec.selector = LabelSelector { | ||||
|         }), | ||||
|     }; | ||||
| 
 | ||||
|     let spec = DeploymentSpec { | ||||
|         template: pod_template, | ||||
|         selector: LabelSelector { | ||||
|             match_expressions: None, | ||||
|             match_labels: Some(BTreeMap::from([( | ||||
|                 "app".to_string(), | ||||
|                 "nginx-test".to_string(), | ||||
|             )])), | ||||
|         }, | ||||
|         ..Default::default() | ||||
|     }; | ||||
| 
 | ||||
|     let mut deployment = Deployment::default(); | ||||
|     deployment.spec = Some(spec); | ||||
|     deployment.metadata.name = Some("nginx-test".to_string()); | ||||
| 
 | ||||
|     deployment | ||||
|     Deployment { | ||||
|         spec: Some(spec), | ||||
|         metadata: ObjectMeta { | ||||
|             name: Some("nginx-test".to_string()), | ||||
|             ..Default::default() | ||||
|         }, | ||||
|         ..Default::default() | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| fn nginx_deployment() -> Deployment { | ||||
|  | ||||
| @ -23,7 +23,7 @@ async fn main() { | ||||
|         // This config can be extended as needed for more complicated configurations
 | ||||
|         config: LAMPConfig { | ||||
|             project_root: "./php".into(), | ||||
|             database_size: format!("4Gi").into(), | ||||
|             database_size: "4Gi".to_string().into(), | ||||
|             ..Default::default() | ||||
|         }, | ||||
|     }; | ||||
|  | ||||
| @ -1,4 +1,4 @@ | ||||
| use std::collections::HashMap; | ||||
| use std::{collections::HashMap, str::FromStr}; | ||||
| 
 | ||||
| use harmony::{ | ||||
|     data::Id, | ||||
| @ -28,7 +28,7 @@ use harmony::{ | ||||
| async fn main() { | ||||
|     let tenant = TenantScore { | ||||
|         config: TenantConfig { | ||||
|             id: Id::from_string("1234".to_string()), | ||||
|             id: Id::from_str("1234").unwrap(), | ||||
|             name: "test-tenant".to_string(), | ||||
|             resource_limits: ResourceLimits { | ||||
|                 cpu_request_cores: 6.0, | ||||
|  | ||||
| @ -1,3 +1,5 @@ | ||||
| use std::str::FromStr; | ||||
| 
 | ||||
| use harmony::{ | ||||
|     data::Id, | ||||
|     inventory::Inventory, | ||||
| @ -9,7 +11,7 @@ use harmony::{ | ||||
| async fn main() { | ||||
|     let tenant = TenantScore { | ||||
|         config: TenantConfig { | ||||
|             id: Id::from_str("test-tenant-id"), | ||||
|             id: Id::from_str("test-tenant-id").unwrap(), | ||||
|             name: "testtenant".to_string(), | ||||
|             ..Default::default() | ||||
|         }, | ||||
|  | ||||
| @ -1,5 +1,6 @@ | ||||
| use rand::distr::Alphanumeric; | ||||
| use rand::distr::SampleString; | ||||
| use std::str::FromStr; | ||||
| use std::time::SystemTime; | ||||
| use std::time::UNIX_EPOCH; | ||||
| 
 | ||||
| @ -23,13 +24,13 @@ pub struct Id { | ||||
|     value: String, | ||||
| } | ||||
| 
 | ||||
| impl Id { | ||||
|     pub fn from_string(value: String) -> Self { | ||||
|         Self { value } | ||||
|     } | ||||
| impl FromStr for Id { | ||||
|     type Err = (); | ||||
| 
 | ||||
|     pub fn from_str(value: &str) -> Self { | ||||
|         Self::from_string(value.to_string()) | ||||
|     fn from_str(s: &str) -> Result<Self, Self::Err> { | ||||
|         Ok(Id { | ||||
|             value: s.to_string(), | ||||
|         }) | ||||
|     } | ||||
| } | ||||
| 
 | ||||
|  | ||||
| @ -47,7 +47,7 @@ impl serde::Serialize for Version { | ||||
| 
 | ||||
| impl std::fmt::Display for Version { | ||||
|     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { | ||||
|         return self.value.fmt(f); | ||||
|         self.value.fmt(f) | ||||
|     } | ||||
| } | ||||
| 
 | ||||
|  | ||||
| @ -35,10 +35,9 @@ impl PhysicalHost { | ||||
| 
 | ||||
|     pub fn cluster_mac(&self) -> MacAddress { | ||||
|         self.network | ||||
|             .get(0) | ||||
|             .first() | ||||
|             .expect("Cluster physical host should have a network interface") | ||||
|             .mac_address | ||||
|             .clone() | ||||
|     } | ||||
| 
 | ||||
|     pub fn cpu(mut self, cpu_count: Option<u64>) -> Self { | ||||
|  | ||||
| @ -70,10 +70,7 @@ impl<T: Topology> Maestro<T> { | ||||
|     fn is_topology_initialized(&self) -> bool { | ||||
|         let result = self.topology_preparation_result.lock().unwrap(); | ||||
|         if let Some(outcome) = result.as_ref() { | ||||
|             match outcome.status { | ||||
|                 InterpretStatus::SUCCESS => return true, | ||||
|                 _ => return false, | ||||
|             } | ||||
|             matches!(outcome.status, InterpretStatus::SUCCESS) | ||||
|         } else { | ||||
|             false | ||||
|         } | ||||
|  | ||||
| @ -16,7 +16,7 @@ pub trait SerializeScore<T: Topology> { | ||||
|     fn serialize(&self) -> Value; | ||||
| } | ||||
| 
 | ||||
| impl<'de, S, T> SerializeScore<T> for S | ||||
| impl<S, T> SerializeScore<T> for S | ||||
| where | ||||
|     T: Topology, | ||||
|     S: Score<T> + Serialize, | ||||
| @ -24,7 +24,7 @@ where | ||||
|     fn serialize(&self) -> Value { | ||||
|         // TODO not sure if this is the right place to handle the error or it should bubble
 | ||||
|         // up?
 | ||||
|         serde_value::to_value(&self).expect("Score should serialize successfully") | ||||
|         serde_value::to_value(self).expect("Score should serialize successfully") | ||||
|     } | ||||
| } | ||||
| 
 | ||||
|  | ||||
| @ -1,5 +1,4 @@ | ||||
| use derive_new::new; | ||||
| use futures_util::StreamExt; | ||||
| use k8s_openapi::{ | ||||
|     ClusterResourceScope, NamespaceResourceScope, | ||||
|     api::{apps::v1::Deployment, core::v1::Pod}, | ||||
| @ -18,7 +17,7 @@ use kube::{ | ||||
| }; | ||||
| use log::{debug, error, trace}; | ||||
| use serde::{Serialize, de::DeserializeOwned}; | ||||
| use similar::{DiffableStr, TextDiff}; | ||||
| use similar::TextDiff; | ||||
| 
 | ||||
| #[derive(new, Clone)] | ||||
| pub struct K8sClient { | ||||
| @ -67,13 +66,13 @@ impl K8sClient { | ||||
|         } | ||||
| 
 | ||||
|         let establish = await_condition(api, name.as_str(), conditions::is_deployment_completed()); | ||||
|         let t = if let Some(t) = timeout { t } else { 300 }; | ||||
|         let t = timeout.unwrap_or(300); | ||||
|         let res = tokio::time::timeout(std::time::Duration::from_secs(t), establish).await; | ||||
| 
 | ||||
|         if let Ok(r) = res { | ||||
|             return Ok(()); | ||||
|         if res.is_ok() { | ||||
|             Ok(()) | ||||
|         } else { | ||||
|             return Err("timed out while waiting for deployment".to_string()); | ||||
|             Err("timed out while waiting for deployment".to_string()) | ||||
|         } | ||||
|     } | ||||
| 
 | ||||
| @ -112,7 +111,7 @@ impl K8sClient { | ||||
|             .await; | ||||
| 
 | ||||
|         match res { | ||||
|             Err(e) => return Err(e.to_string()), | ||||
|             Err(e) => Err(e.to_string()), | ||||
|             Ok(mut process) => { | ||||
|                 let status = process | ||||
|                     .take_status() | ||||
| @ -122,13 +121,9 @@ impl K8sClient { | ||||
| 
 | ||||
|                 if let Some(s) = status.status { | ||||
|                     debug!("Status: {}", s); | ||||
|                     if s == "Success" { | ||||
|                         return Ok(()); | ||||
|                     if s == "Success" { Ok(()) } else { Err(s) } | ||||
|                 } else { | ||||
|                         return Err(s); | ||||
|                     } | ||||
|                 } else { | ||||
|                     return Err("Couldn't get inner status of pod exec".to_string()); | ||||
|                     Err("Couldn't get inner status of pod exec".to_string()) | ||||
|                 } | ||||
|             } | ||||
|         } | ||||
| @ -169,8 +164,9 @@ impl K8sClient { | ||||
|                     trace!("Received current value {current:#?}"); | ||||
|                     // The resource exists, so we calculate and display a diff.
 | ||||
|                     println!("\nPerforming dry-run for resource: '{}'", name); | ||||
|                     let mut current_yaml = serde_yaml::to_value(¤t) | ||||
|                         .expect(&format!("Could not serialize current value : {current:#?}")); | ||||
|                     let mut current_yaml = serde_yaml::to_value(¤t).unwrap_or_else(|_| { | ||||
|                         panic!("Could not serialize current value : {current:#?}") | ||||
|                     }); | ||||
|                     if current_yaml.is_mapping() && current_yaml.get("status").is_some() { | ||||
|                         let map = current_yaml.as_mapping_mut().unwrap(); | ||||
|                         let removed = map.remove_entry("status"); | ||||
| @ -237,7 +233,7 @@ impl K8sClient { | ||||
|         } | ||||
|     } | ||||
| 
 | ||||
|     pub async fn apply_many<K>(&self, resource: &Vec<K>, ns: Option<&str>) -> Result<Vec<K>, Error> | ||||
|     pub async fn apply_many<K>(&self, resource: &[K], ns: Option<&str>) -> Result<Vec<K>, Error> | ||||
|     where | ||||
|         K: Resource + Clone + std::fmt::Debug + DeserializeOwned + serde::Serialize, | ||||
|         <K as Resource>::Scope: ApplyStrategy<K>, | ||||
| @ -253,7 +249,7 @@ impl K8sClient { | ||||
| 
 | ||||
|     pub async fn apply_yaml_many( | ||||
|         &self, | ||||
|         yaml: &Vec<serde_yaml::Value>, | ||||
|         #[allow(clippy::ptr_arg)] yaml: &Vec<serde_yaml::Value>, | ||||
|         ns: Option<&str>, | ||||
|     ) -> Result<(), Error> { | ||||
|         for y in yaml.iter() { | ||||
|  | ||||
| @ -87,7 +87,9 @@ impl PrometheusApplicationMonitoring<CRDPrometheus> for K8sAnywhereTopology { | ||||
|             .execute(inventory, self) | ||||
|             .await?; | ||||
| 
 | ||||
|         Ok(Outcome::success(format!("No action, working on cluster  "))) | ||||
|         Ok(Outcome::success( | ||||
|             "No action, working on cluster  ".to_string(), | ||||
|         )) | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| @ -124,7 +126,7 @@ impl K8sAnywhereTopology { | ||||
|     ) -> K8sPrometheusCRDAlertingScore { | ||||
|         K8sPrometheusCRDAlertingScore { | ||||
|             sender, | ||||
|             receivers: receivers.unwrap_or_else(Vec::new), | ||||
|             receivers: receivers.unwrap_or_default(), | ||||
|             service_monitors: vec![], | ||||
|             prometheus_rules: vec![], | ||||
|         } | ||||
| @ -176,7 +178,7 @@ impl K8sAnywhereTopology { | ||||
|         } else { | ||||
|             if let Some(kubeconfig) = &k8s_anywhere_config.kubeconfig { | ||||
|                 debug!("Loading kubeconfig {kubeconfig}"); | ||||
|                 match self.try_load_kubeconfig(&kubeconfig).await { | ||||
|                 match self.try_load_kubeconfig(kubeconfig).await { | ||||
|                     Some(client) => { | ||||
|                         return Ok(Some(K8sState { | ||||
|                             client: Arc::new(client), | ||||
| @ -232,7 +234,7 @@ impl K8sAnywhereTopology { | ||||
|     } | ||||
| 
 | ||||
|     async fn ensure_k8s_tenant_manager(&self) -> Result<(), String> { | ||||
|         if let Some(_) = self.tenant_manager.get() { | ||||
|         if self.tenant_manager.get().is_some() { | ||||
|             return Ok(()); | ||||
|         } | ||||
| 
 | ||||
| @ -366,7 +368,7 @@ impl Topology for K8sAnywhereTopology { | ||||
| 
 | ||||
|         self.ensure_k8s_tenant_manager() | ||||
|             .await | ||||
|             .map_err(|e| InterpretError::new(e))?; | ||||
|             .map_err(InterpretError::new)?; | ||||
| 
 | ||||
|         match self.is_helm_available() { | ||||
|             Ok(()) => Ok(Outcome::success(format!( | ||||
|  | ||||
| @ -88,7 +88,7 @@ impl Serialize for Url { | ||||
|     { | ||||
|         match self { | ||||
|             Url::LocalFolder(path) => serializer.serialize_str(path), | ||||
|             Url::Url(url) => serializer.serialize_str(&url.as_str()), | ||||
|             Url::Url(url) => serializer.serialize_str(url.as_str()), | ||||
|         } | ||||
|     } | ||||
| } | ||||
|  | ||||
| @ -27,11 +27,11 @@ pub struct UnmanagedRouter { | ||||
| 
 | ||||
| impl Router for UnmanagedRouter { | ||||
|     fn get_gateway(&self) -> IpAddress { | ||||
|         self.gateway.clone() | ||||
|         self.gateway | ||||
|     } | ||||
| 
 | ||||
|     fn get_cidr(&self) -> Ipv4Cidr { | ||||
|         self.cidr.clone() | ||||
|         self.cidr | ||||
|     } | ||||
| 
 | ||||
|     fn get_host(&self) -> LogicalHost { | ||||
|  | ||||
| @ -309,19 +309,19 @@ impl K8sTenantManager { | ||||
|                 let ports: Option<Vec<NetworkPolicyPort>> = | ||||
|                     c.1.as_ref().map(|spec| match &spec.data { | ||||
|                         super::PortSpecData::SinglePort(port) => vec![NetworkPolicyPort { | ||||
|                             port: Some(IntOrString::Int(port.clone().into())), | ||||
|                             port: Some(IntOrString::Int((*port).into())), | ||||
|                             ..Default::default() | ||||
|                         }], | ||||
|                         super::PortSpecData::PortRange(start, end) => vec![NetworkPolicyPort { | ||||
|                             port: Some(IntOrString::Int(start.clone().into())), | ||||
|                             end_port: Some(end.clone().into()), | ||||
|                             port: Some(IntOrString::Int((*start).into())), | ||||
|                             end_port: Some((*end).into()), | ||||
|                             protocol: None, // Not currently supported by Harmony
 | ||||
|                         }], | ||||
| 
 | ||||
|                         super::PortSpecData::ListOfPorts(items) => items | ||||
|                             .iter() | ||||
|                             .map(|i| NetworkPolicyPort { | ||||
|                                 port: Some(IntOrString::Int(i.clone().into())), | ||||
|                                 port: Some(IntOrString::Int((*i).into())), | ||||
|                                 ..Default::default() | ||||
|                             }) | ||||
|                             .collect(), | ||||
| @ -366,19 +366,19 @@ impl K8sTenantManager { | ||||
|                 let ports: Option<Vec<NetworkPolicyPort>> = | ||||
|                     c.1.as_ref().map(|spec| match &spec.data { | ||||
|                         super::PortSpecData::SinglePort(port) => vec![NetworkPolicyPort { | ||||
|                             port: Some(IntOrString::Int(port.clone().into())), | ||||
|                             port: Some(IntOrString::Int((*port).into())), | ||||
|                             ..Default::default() | ||||
|                         }], | ||||
|                         super::PortSpecData::PortRange(start, end) => vec![NetworkPolicyPort { | ||||
|                             port: Some(IntOrString::Int(start.clone().into())), | ||||
|                             end_port: Some(end.clone().into()), | ||||
|                             port: Some(IntOrString::Int((*start).into())), | ||||
|                             end_port: Some((*end).into()), | ||||
|                             protocol: None, // Not currently supported by Harmony
 | ||||
|                         }], | ||||
| 
 | ||||
|                         super::PortSpecData::ListOfPorts(items) => items | ||||
|                             .iter() | ||||
|                             .map(|i| NetworkPolicyPort { | ||||
|                                 port: Some(IntOrString::Int(i.clone().into())), | ||||
|                                 port: Some(IntOrString::Int((*i).into())), | ||||
|                                 ..Default::default() | ||||
|                             }) | ||||
|                             .collect(), | ||||
|  | ||||
| @ -60,7 +60,7 @@ impl DnsServer for OPNSenseFirewall { | ||||
|     } | ||||
| 
 | ||||
|     fn get_ip(&self) -> IpAddress { | ||||
|         OPNSenseFirewall::get_ip(&self) | ||||
|         OPNSenseFirewall::get_ip(self) | ||||
|     } | ||||
| 
 | ||||
|     fn get_host(&self) -> LogicalHost { | ||||
|  | ||||
| @ -48,7 +48,7 @@ impl HttpServer for OPNSenseFirewall { | ||||
|     async fn ensure_initialized(&self) -> Result<(), ExecutorError> { | ||||
|         let mut config = self.opnsense_config.write().await; | ||||
|         let caddy = config.caddy(); | ||||
|         if let None = caddy.get_full_config() { | ||||
|         if caddy.get_full_config().is_none() { | ||||
|             info!("Http config not available in opnsense config, installing package"); | ||||
|             config.install_package("os-caddy").await.map_err(|e| { | ||||
|                 ExecutorError::UnexpectedError(format!( | ||||
|  | ||||
| @ -121,10 +121,12 @@ pub(crate) fn haproxy_xml_config_to_harmony_loadbalancer( | ||||
| 
 | ||||
|             LoadBalancerService { | ||||
|                 backend_servers, | ||||
|                 listening_port: frontend.bind.parse().expect(&format!( | ||||
|                 listening_port: frontend.bind.parse().unwrap_or_else(|_| { | ||||
|                     panic!( | ||||
|                         "HAProxy frontend address should be a valid SocketAddr, got {}", | ||||
|                         frontend.bind | ||||
|                 )), | ||||
|                     ) | ||||
|                 }), | ||||
|                 health_check, | ||||
|             } | ||||
|         }) | ||||
| @ -167,28 +169,28 @@ pub(crate) fn get_health_check_for_backend( | ||||
|         None => return None, | ||||
|     }; | ||||
| 
 | ||||
|     let haproxy_health_check = match haproxy | ||||
|     let haproxy_health_check = haproxy | ||||
|         .healthchecks | ||||
|         .healthchecks | ||||
|         .iter() | ||||
|         .find(|h| &h.uuid == health_check_uuid) | ||||
|     { | ||||
|         Some(health_check) => health_check, | ||||
|         None => return None, | ||||
|     }; | ||||
|         .find(|h| &h.uuid == health_check_uuid)?; | ||||
| 
 | ||||
|     let binding = haproxy_health_check.health_check_type.to_uppercase(); | ||||
|     let uppercase = binding.as_str(); | ||||
|     match uppercase { | ||||
|         "TCP" => { | ||||
|             if let Some(checkport) = haproxy_health_check.checkport.content.as_ref() { | ||||
|                 if checkport.len() > 0 { | ||||
|                     return Some(HealthCheck::TCP(Some(checkport.parse().expect(&format!( | ||||
|                 if !checkport.is_empty() { | ||||
|                     return Some(HealthCheck::TCP(Some(checkport.parse().unwrap_or_else( | ||||
|                         |_| { | ||||
|                             panic!( | ||||
|                                 "HAProxy check port should be a valid port number, got {checkport}" | ||||
|                     ))))); | ||||
|                             ) | ||||
|                         }, | ||||
|                     )))); | ||||
|                 } | ||||
|             } | ||||
|             return Some(HealthCheck::TCP(None)); | ||||
|             Some(HealthCheck::TCP(None)) | ||||
|         } | ||||
|         "HTTP" => { | ||||
|             let path: String = haproxy_health_check | ||||
| @ -355,16 +357,13 @@ mod tests { | ||||
| 
 | ||||
|         // Create an HAProxy instance with servers
 | ||||
|         let mut haproxy = HAProxy::default(); | ||||
|         let mut server = HAProxyServer::default(); | ||||
|         server.uuid = "server1".to_string(); | ||||
|         server.address = "192.168.1.1".to_string(); | ||||
|         server.port = 80; | ||||
| 
 | ||||
|         let server = HAProxyServer { | ||||
|             uuid: "server1".to_string(), | ||||
|             address: "192.168.1.1".to_string(), | ||||
|             port: 80, | ||||
|             ..Default::default() | ||||
|         }; | ||||
|         haproxy.servers.servers.push(server); | ||||
|         let mut server = HAProxyServer::default(); | ||||
|         server.uuid = "server3".to_string(); | ||||
|         server.address = "192.168.1.3".to_string(); | ||||
|         server.port = 8080; | ||||
| 
 | ||||
|         // Call the function
 | ||||
|         let result = get_servers_for_backend(&backend, &haproxy); | ||||
| @ -384,10 +383,12 @@ mod tests { | ||||
|         let backend = HAProxyBackend::default(); | ||||
|         // Create an HAProxy instance with servers
 | ||||
|         let mut haproxy = HAProxy::default(); | ||||
|         let mut server = HAProxyServer::default(); | ||||
|         server.uuid = "server1".to_string(); | ||||
|         server.address = "192.168.1.1".to_string(); | ||||
|         server.port = 80; | ||||
|         let server = HAProxyServer { | ||||
|             uuid: "server1".to_string(), | ||||
|             address: "192.168.1.1".to_string(), | ||||
|             port: 80, | ||||
|             ..Default::default() | ||||
|         }; | ||||
|         haproxy.servers.servers.push(server); | ||||
|         // Call the function
 | ||||
|         let result = get_servers_for_backend(&backend, &haproxy); | ||||
| @ -402,10 +403,12 @@ mod tests { | ||||
|         backend.linked_servers.content = Some("server4,server5".to_string()); | ||||
|         // Create an HAProxy instance with servers
 | ||||
|         let mut haproxy = HAProxy::default(); | ||||
|         let mut server = HAProxyServer::default(); | ||||
|         server.uuid = "server1".to_string(); | ||||
|         server.address = "192.168.1.1".to_string(); | ||||
|         server.port = 80; | ||||
|         let server = HAProxyServer { | ||||
|             uuid: "server1".to_string(), | ||||
|             address: "192.168.1.1".to_string(), | ||||
|             port: 80, | ||||
|             ..Default::default() | ||||
|         }; | ||||
|         haproxy.servers.servers.push(server); | ||||
|         // Call the function
 | ||||
|         let result = get_servers_for_backend(&backend, &haproxy); | ||||
| @ -416,20 +419,28 @@ mod tests { | ||||
|     #[test] | ||||
|     fn test_get_servers_for_backend_multiple_linked_servers() { | ||||
|         // Create a backend with multiple linked servers
 | ||||
|         #[allow(clippy::field_reassign_with_default)] | ||||
|         let mut backend = HAProxyBackend::default(); | ||||
|         backend.linked_servers.content = Some("server1,server2".to_string()); | ||||
|         //
 | ||||
|         // Create an HAProxy instance with matching servers
 | ||||
|         let mut haproxy = HAProxy::default(); | ||||
|         let mut server = HAProxyServer::default(); | ||||
|         server.uuid = "server1".to_string(); | ||||
|         server.address = "some-hostname.test.mcd".to_string(); | ||||
|         server.port = 80; | ||||
|         let server = HAProxyServer { | ||||
|             uuid: "server1".to_string(), | ||||
|             address: "some-hostname.test.mcd".to_string(), | ||||
|             port: 80, | ||||
|             ..Default::default() | ||||
|         }; | ||||
|         haproxy.servers.servers.push(server); | ||||
|         let mut server = HAProxyServer::default(); | ||||
|         server.uuid = "server2".to_string(); | ||||
|         server.address = "192.168.1.2".to_string(); | ||||
|         server.port = 8080; | ||||
| 
 | ||||
|         let server = HAProxyServer { | ||||
|             uuid: "server2".to_string(), | ||||
|             address: "192.168.1.2".to_string(), | ||||
|             port: 8080, | ||||
|             ..Default::default() | ||||
|         }; | ||||
|         haproxy.servers.servers.push(server); | ||||
| 
 | ||||
|         // Call the function
 | ||||
|         let result = get_servers_for_backend(&backend, &haproxy); | ||||
|         // Check the result
 | ||||
|  | ||||
| @ -58,7 +58,7 @@ impl TftpServer for OPNSenseFirewall { | ||||
|     async fn ensure_initialized(&self) -> Result<(), ExecutorError> { | ||||
|         let mut config = self.opnsense_config.write().await; | ||||
|         let tftp = config.tftp(); | ||||
|         if let None = tftp.get_full_config() { | ||||
|         if tftp.get_full_config().is_none() { | ||||
|             info!("Tftp config not available in opnsense config, installing package"); | ||||
|             config.install_package("os-tftp").await.map_err(|e| { | ||||
|                 ExecutorError::UnexpectedError(format!( | ||||
|  | ||||
| @ -13,7 +13,7 @@ pub trait ApplicationFeature<T: Topology>: | ||||
|     fn name(&self) -> String; | ||||
| } | ||||
| 
 | ||||
| trait ApplicationFeatureClone<T: Topology> { | ||||
| pub trait ApplicationFeatureClone<T: Topology> { | ||||
|     fn clone_box(&self) -> Box<dyn ApplicationFeature<T>>; | ||||
| } | ||||
| 
 | ||||
| @ -27,7 +27,7 @@ where | ||||
| } | ||||
| 
 | ||||
| impl<T: Topology> Serialize for Box<dyn ApplicationFeature<T>> { | ||||
|     fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> | ||||
|     fn serialize<S>(&self, _serializer: S) -> Result<S::Ok, S::Error> | ||||
|     where | ||||
|         S: serde::Serializer, | ||||
|     { | ||||
|  | ||||
| @ -184,12 +184,11 @@ impl ArgoApplication { | ||||
|     pub fn to_yaml(&self) -> serde_yaml::Value { | ||||
|         let name = &self.name; | ||||
|         let namespace = if let Some(ns) = self.namespace.as_ref() { | ||||
|             &ns | ||||
|             ns | ||||
|         } else { | ||||
|             "argocd" | ||||
|         }; | ||||
|         let project = &self.project; | ||||
|         let source = &self.source; | ||||
| 
 | ||||
|         let yaml_str = format!( | ||||
|             r#" | ||||
| @ -228,7 +227,7 @@ spec: | ||||
|             serde_yaml::to_value(&self.source).expect("couldn't serialize source to value"); | ||||
|         let sync_policy = serde_yaml::to_value(&self.sync_policy) | ||||
|             .expect("couldn't serialize sync_policy to value"); | ||||
|         let revision_history_limit = serde_yaml::to_value(&self.revision_history_limit) | ||||
|         let revision_history_limit = serde_yaml::to_value(self.revision_history_limit) | ||||
|             .expect("couldn't serialize revision_history_limit to value"); | ||||
| 
 | ||||
|         spec.insert( | ||||
|  | ||||
| @ -10,7 +10,7 @@ use crate::{ | ||||
|     data::Version, | ||||
|     inventory::Inventory, | ||||
|     modules::application::{ | ||||
|         Application, ApplicationFeature, HelmPackage, OCICompliant, | ||||
|         ApplicationFeature, HelmPackage, OCICompliant, | ||||
|         features::{ArgoApplication, ArgoHelmScore}, | ||||
|     }, | ||||
|     score::Score, | ||||
|  | ||||
| @ -986,7 +986,7 @@ commitServer: | ||||
|     ); | ||||
| 
 | ||||
|     HelmChartScore { | ||||
|         namespace: Some(NonBlankString::from_str(&namespace).unwrap()), | ||||
|         namespace: Some(NonBlankString::from_str(namespace).unwrap()), | ||||
|         release_name: NonBlankString::from_str("argo-cd").unwrap(), | ||||
|         chart_name: NonBlankString::from_str("argo/argo-cd").unwrap(), | ||||
|         chart_version: Some(NonBlankString::from_str("8.1.2").unwrap()), | ||||
|  | ||||
| @ -81,7 +81,7 @@ impl<A: Application, T: Topology + std::fmt::Debug> Interpret<T> for Application | ||||
| } | ||||
| 
 | ||||
| impl Serialize for dyn Application { | ||||
|     fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> | ||||
|     fn serialize<S>(&self, _serializer: S) -> Result<S::Ok, S::Error> | ||||
|     where | ||||
|         S: serde::Serializer, | ||||
|     { | ||||
|  | ||||
| @ -1,5 +1,5 @@ | ||||
| use std::fs; | ||||
| use std::path::PathBuf; | ||||
| use std::path::{Path, PathBuf}; | ||||
| use std::process; | ||||
| use std::sync::Arc; | ||||
| 
 | ||||
| @ -174,7 +174,7 @@ impl RustWebapp { | ||||
|             .platform("linux/x86_64"); | ||||
| 
 | ||||
|         let mut temp_tar_builder = tar::Builder::new(Vec::new()); | ||||
|         let _ = temp_tar_builder | ||||
|         temp_tar_builder | ||||
|             .append_dir_all("", self.project_root.clone()) | ||||
|             .unwrap(); | ||||
|         let archive = temp_tar_builder | ||||
| @ -530,10 +530,7 @@ spec: | ||||
|     } | ||||
| 
 | ||||
|     /// Packages a Helm chart directory into a .tgz file.
 | ||||
|     fn package_helm_chart( | ||||
|         &self, | ||||
|         chart_dir: &PathBuf, | ||||
|     ) -> Result<PathBuf, Box<dyn std::error::Error>> { | ||||
|     fn package_helm_chart(&self, chart_dir: &Path) -> Result<PathBuf, Box<dyn std::error::Error>> { | ||||
|         let chart_dirname = chart_dir.file_name().expect("Should find a chart dirname"); | ||||
|         debug!( | ||||
|             "Launching `helm package {}` cli with CWD {}", | ||||
| @ -546,14 +543,13 @@ spec: | ||||
|         ); | ||||
|         let output = process::Command::new("helm") | ||||
|             .args(["package", chart_dirname.to_str().unwrap()]) | ||||
|             .current_dir(&self.project_root.join(".harmony_generated").join("helm")) // Run package from the parent dir
 | ||||
|             .current_dir(self.project_root.join(".harmony_generated").join("helm")) // Run package from the parent dir
 | ||||
|             .output()?; | ||||
| 
 | ||||
|         self.check_output(&output, "Failed to package Helm chart")?; | ||||
| 
 | ||||
|         // Helm prints the path of the created chart to stdout.
 | ||||
|         let tgz_name = String::from_utf8(output.stdout)? | ||||
|             .trim() | ||||
|             .split_whitespace() | ||||
|             .last() | ||||
|             .unwrap_or_default() | ||||
| @ -573,7 +569,7 @@ spec: | ||||
|     /// Pushes a packaged Helm chart to an OCI registry.
 | ||||
|     fn push_helm_chart( | ||||
|         &self, | ||||
|         packaged_chart_path: &PathBuf, | ||||
|         packaged_chart_path: &Path, | ||||
|     ) -> Result<String, Box<dyn std::error::Error>> { | ||||
|         // The chart name is the file stem of the .tgz file
 | ||||
|         let chart_file_name = packaged_chart_path.file_stem().unwrap().to_str().unwrap(); | ||||
|  | ||||
| @ -41,6 +41,6 @@ impl<T: Topology + HelmCommand> Score<T> for CertManagerHelmScore { | ||||
|     } | ||||
| 
 | ||||
|     fn name(&self) -> String { | ||||
|         format!("CertManagerHelmScore") | ||||
|         "CertManagerHelmScore".to_string() | ||||
|     } | ||||
| } | ||||
|  | ||||
| @ -111,7 +111,7 @@ impl DhcpInterpret { | ||||
| 
 | ||||
|         let boot_filename_outcome = match &self.score.boot_filename { | ||||
|             Some(boot_filename) => { | ||||
|                 dhcp_server.set_boot_filename(&boot_filename).await?; | ||||
|                 dhcp_server.set_boot_filename(boot_filename).await?; | ||||
|                 Outcome::new( | ||||
|                     InterpretStatus::SUCCESS, | ||||
|                     format!("Dhcp Interpret Set boot filename to {boot_filename}"), | ||||
| @ -122,7 +122,7 @@ impl DhcpInterpret { | ||||
| 
 | ||||
|         let filename_outcome = match &self.score.filename { | ||||
|             Some(filename) => { | ||||
|                 dhcp_server.set_filename(&filename).await?; | ||||
|                 dhcp_server.set_filename(filename).await?; | ||||
|                 Outcome::new( | ||||
|                     InterpretStatus::SUCCESS, | ||||
|                     format!("Dhcp Interpret Set filename to {filename}"), | ||||
| @ -133,7 +133,7 @@ impl DhcpInterpret { | ||||
| 
 | ||||
|         let filename64_outcome = match &self.score.filename64 { | ||||
|             Some(filename64) => { | ||||
|                 dhcp_server.set_filename64(&filename64).await?; | ||||
|                 dhcp_server.set_filename64(filename64).await?; | ||||
|                 Outcome::new( | ||||
|                     InterpretStatus::SUCCESS, | ||||
|                     format!("Dhcp Interpret Set filename64 to {filename64}"), | ||||
| @ -144,7 +144,7 @@ impl DhcpInterpret { | ||||
| 
 | ||||
|         let filenameipxe_outcome = match &self.score.filenameipxe { | ||||
|             Some(filenameipxe) => { | ||||
|                 dhcp_server.set_filenameipxe(&filenameipxe).await?; | ||||
|                 dhcp_server.set_filenameipxe(filenameipxe).await?; | ||||
|                 Outcome::new( | ||||
|                     InterpretStatus::SUCCESS, | ||||
|                     format!("Dhcp Interpret Set filenameipxe to {filenameipxe}"), | ||||
| @ -209,7 +209,7 @@ impl<T: DhcpServer> Interpret<T> for DhcpInterpret { | ||||
| 
 | ||||
|         Ok(Outcome::new( | ||||
|             InterpretStatus::SUCCESS, | ||||
|             format!("Dhcp Interpret execution successful"), | ||||
|             "Dhcp Interpret execution successful".to_string(), | ||||
|         )) | ||||
|     } | ||||
| } | ||||
|  | ||||
| @ -112,7 +112,7 @@ impl<T: Topology + DnsServer> Interpret<T> for DnsInterpret { | ||||
| 
 | ||||
|         Ok(Outcome::new( | ||||
|             InterpretStatus::SUCCESS, | ||||
|             format!("Dns Interpret execution successful"), | ||||
|             "Dns Interpret execution successful".to_string(), | ||||
|         )) | ||||
|     } | ||||
| } | ||||
|  | ||||
| @ -90,14 +90,10 @@ impl HelmChartInterpret { | ||||
|         ); | ||||
| 
 | ||||
|         match add_output.status.success() { | ||||
|             true => { | ||||
|                 return Ok(()); | ||||
|             } | ||||
|             false => { | ||||
|                 return Err(InterpretError::new(format!( | ||||
|             true => Ok(()), | ||||
|             false => Err(InterpretError::new(format!( | ||||
|                 "Failed to add helm repository!\n{full_output}" | ||||
|                 ))); | ||||
|             } | ||||
|             ))), | ||||
|         } | ||||
|     } | ||||
| } | ||||
| @ -212,7 +208,7 @@ impl<T: Topology + HelmCommand> Interpret<T> for HelmChartInterpret { | ||||
|         } | ||||
| 
 | ||||
|         let res = helm_executor.install_or_upgrade( | ||||
|             &ns, | ||||
|             ns, | ||||
|             &self.score.release_name, | ||||
|             &self.score.chart_name, | ||||
|             self.score.chart_version.as_ref(), | ||||
|  | ||||
| @ -77,14 +77,11 @@ impl HelmCommandExecutor { | ||||
|             )?; | ||||
|         } | ||||
| 
 | ||||
|         let out = match self.clone().run_command( | ||||
|         let out = self.clone().run_command( | ||||
|             self.chart | ||||
|                 .clone() | ||||
|                 .helm_args(self.globals.chart_home.clone().unwrap()), | ||||
|         ) { | ||||
|             Ok(out) => out, | ||||
|             Err(e) => return Err(e), | ||||
|         }; | ||||
|         )?; | ||||
| 
 | ||||
|         // TODO: don't use unwrap here
 | ||||
|         let s = String::from_utf8(out.stdout).unwrap(); | ||||
| @ -98,14 +95,11 @@ impl HelmCommandExecutor { | ||||
|     } | ||||
| 
 | ||||
|     pub fn version(self) -> Result<String, std::io::Error> { | ||||
|         let out = match self.run_command(vec![ | ||||
|         let out = self.run_command(vec![ | ||||
|             "version".to_string(), | ||||
|             "-c".to_string(), | ||||
|             "--short".to_string(), | ||||
|         ]) { | ||||
|             Ok(out) => out, | ||||
|             Err(e) => return Err(e), | ||||
|         }; | ||||
|         ])?; | ||||
| 
 | ||||
|         // TODO: don't use unwrap
 | ||||
|         Ok(String::from_utf8(out.stdout).unwrap()) | ||||
| @ -129,15 +123,11 @@ impl HelmCommandExecutor { | ||||
|             None => PathBuf::from(TempDir::new()?.path()), | ||||
|         }; | ||||
| 
 | ||||
|         match self.chart.values_inline { | ||||
|             Some(yaml_str) => { | ||||
|                 let tf: TempFile; | ||||
|                 tf = temp_file::with_contents(yaml_str.as_bytes()); | ||||
|         if let Some(yaml_str) = self.chart.values_inline { | ||||
|             let tf: TempFile = temp_file::with_contents(yaml_str.as_bytes()); | ||||
|             self.chart | ||||
|                 .additional_values_files | ||||
|                 .push(PathBuf::from(tf.path())); | ||||
|             } | ||||
|             None => (), | ||||
|         }; | ||||
| 
 | ||||
|         self.env.insert( | ||||
| @ -180,9 +170,9 @@ impl HelmChart { | ||||
|         match self.repo { | ||||
|             Some(r) => { | ||||
|                 if r.starts_with("oci://") { | ||||
|                     args.push(String::from( | ||||
|                     args.push( | ||||
|                         r.trim_end_matches("/").to_string() + "/" + self.name.clone().as_str(), | ||||
|                     )); | ||||
|                     ); | ||||
|                 } else { | ||||
|                     args.push("--repo".to_string()); | ||||
|                     args.push(r.to_string()); | ||||
| @ -193,13 +183,10 @@ impl HelmChart { | ||||
|             None => args.push(self.name), | ||||
|         }; | ||||
| 
 | ||||
|         match self.version { | ||||
|             Some(v) => { | ||||
|         if let Some(v) = self.version { | ||||
|             args.push("--version".to_string()); | ||||
|             args.push(v.to_string()); | ||||
|         } | ||||
|             None => (), | ||||
|         } | ||||
| 
 | ||||
|         args | ||||
|     } | ||||
|  | ||||
| @ -1,7 +1,7 @@ | ||||
| use std::path::PathBuf; | ||||
| 
 | ||||
| use async_trait::async_trait; | ||||
| use log::{debug, info}; | ||||
| use log::debug; | ||||
| use serde::Serialize; | ||||
| 
 | ||||
| use crate::{ | ||||
|  | ||||
| @ -135,6 +135,8 @@ impl<T: Topology + K8sclient + HelmCommand> Interpret<T> for LAMPInterpret { | ||||
| 
 | ||||
|         info!("LAMP deployment_score {deployment_score:?}"); | ||||
| 
 | ||||
|         let ingress_path = ingress_path!("/"); | ||||
| 
 | ||||
|         let lamp_ingress = K8sIngressScore { | ||||
|             name: fqdn!("lamp-ingress"), | ||||
|             host: fqdn!("test"), | ||||
| @ -144,7 +146,7 @@ impl<T: Topology + K8sclient + HelmCommand> Interpret<T> for LAMPInterpret { | ||||
|                     .as_str() | ||||
|             ), | ||||
|             port: 8080, | ||||
|             path: Some(ingress_path!("/")), | ||||
|             path: Some(ingress_path), | ||||
|             path_type: None, | ||||
|             namespace: self | ||||
|                 .get_namespace() | ||||
|  | ||||
| @ -18,7 +18,7 @@ use crate::{ | ||||
| #[async_trait] | ||||
| impl AlertRule<KubePrometheus> for AlertManagerRuleGroup { | ||||
|     async fn install(&self, sender: &KubePrometheus) -> Result<Outcome, InterpretError> { | ||||
|         sender.install_rule(&self).await | ||||
|         sender.install_rule(self).await | ||||
|     } | ||||
|     fn clone_box(&self) -> Box<dyn AlertRule<KubePrometheus>> { | ||||
|         Box::new(self.clone()) | ||||
| @ -28,7 +28,7 @@ impl AlertRule<KubePrometheus> for AlertManagerRuleGroup { | ||||
| #[async_trait] | ||||
| impl AlertRule<Prometheus> for AlertManagerRuleGroup { | ||||
|     async fn install(&self, sender: &Prometheus) -> Result<Outcome, InterpretError> { | ||||
|         sender.install_rule(&self).await | ||||
|         sender.install_rule(self).await | ||||
|     } | ||||
|     fn clone_box(&self) -> Box<dyn AlertRule<Prometheus>> { | ||||
|         Box::new(self.clone()) | ||||
|  | ||||
| @ -4,15 +4,14 @@ use std::str::FromStr; | ||||
| use crate::modules::helm::chart::HelmChartScore; | ||||
| 
 | ||||
| pub fn grafana_helm_chart_score(ns: &str) -> HelmChartScore { | ||||
|     let values = format!( | ||||
|         r#" | ||||
|     let values = r#" | ||||
| rbac: | ||||
|   namespaced: true | ||||
| sidecar: | ||||
|   dashboards: | ||||
|     enabled: true | ||||
|         "#
 | ||||
|     ); | ||||
|     .to_string(); | ||||
| 
 | ||||
|     HelmChartScore { | ||||
|         namespace: Some(NonBlankString::from_str(ns).unwrap()), | ||||
|  | ||||
| @ -1,7 +1,6 @@ | ||||
| use kube::CustomResource; | ||||
| use schemars::JsonSchema; | ||||
| use serde::{Deserialize, Serialize}; | ||||
| use std::collections::BTreeMap; | ||||
| 
 | ||||
| use super::crd_prometheuses::LabelSelector; | ||||
| 
 | ||||
|  | ||||
| @ -1,13 +1,8 @@ | ||||
| use std::collections::BTreeMap; | ||||
| 
 | ||||
| use crate::modules::{ | ||||
|     monitoring::alert_rule::prometheus_alert_rule::PrometheusAlertRule, | ||||
|     prometheus::alerts::k8s::{ | ||||
| use crate::modules::prometheus::alerts::k8s::{ | ||||
|     deployment::alert_deployment_unavailable, | ||||
|     pod::{alert_container_restarting, alert_pod_not_ready, pod_failed}, | ||||
|     pvc::high_pvc_fill_rate_over_two_days, | ||||
|     service::alert_service_down, | ||||
|     }, | ||||
| }; | ||||
| 
 | ||||
| use super::crd_prometheus_rules::Rule; | ||||
|  | ||||
| @ -6,8 +6,6 @@ use serde::{Deserialize, Serialize}; | ||||
| 
 | ||||
| use crate::modules::monitoring::alert_rule::prometheus_alert_rule::PrometheusAlertRule; | ||||
| 
 | ||||
| use super::crd_default_rules::build_default_application_rules; | ||||
| 
 | ||||
| #[derive(CustomResource, Debug, Serialize, Deserialize, Clone, JsonSchema)] | ||||
| #[kube(
 | ||||
|     group = "monitoring.coreos.com", | ||||
|  | ||||
| @ -1,11 +1,9 @@ | ||||
| use std::collections::{BTreeMap, HashMap}; | ||||
| use std::collections::HashMap; | ||||
| 
 | ||||
| use kube::{CustomResource, Resource, api::ObjectMeta}; | ||||
| use kube::CustomResource; | ||||
| use schemars::JsonSchema; | ||||
| use serde::{Deserialize, Serialize}; | ||||
| 
 | ||||
| use crate::interpret::InterpretError; | ||||
| 
 | ||||
| use crate::modules::monitoring::kube_prometheus::types::{ | ||||
|     HTTPScheme, MatchExpression, NamespaceSelector, Operator, Selector, | ||||
|     ServiceMonitor as KubeServiceMonitor, ServiceMonitorEndpoint, | ||||
| @ -50,7 +48,7 @@ pub struct ServiceMonitorSpec { | ||||
| 
 | ||||
| impl Default for ServiceMonitorSpec { | ||||
|     fn default() -> Self { | ||||
|         let mut labels = HashMap::new(); | ||||
|         let labels = HashMap::new(); | ||||
|         Self { | ||||
|             selector: Selector { | ||||
|                 match_labels: { labels }, | ||||
|  | ||||
| @ -27,6 +27,12 @@ pub struct KubePrometheusConfig { | ||||
|     pub alert_rules: Vec<AlertManagerAdditionalPromRules>, | ||||
|     pub additional_service_monitors: Vec<ServiceMonitor>, | ||||
| } | ||||
| impl Default for KubePrometheusConfig { | ||||
|     fn default() -> Self { | ||||
|         Self::new() | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| impl KubePrometheusConfig { | ||||
|     pub fn new() -> Self { | ||||
|         Self { | ||||
|  | ||||
| @ -35,7 +35,7 @@ pub fn kube_prometheus_helm_chart_score( | ||||
|     let kube_proxy = config.kube_proxy.to_string(); | ||||
|     let kube_state_metrics = config.kube_state_metrics.to_string(); | ||||
|     let node_exporter = config.node_exporter.to_string(); | ||||
|     let prometheus_operator = config.prometheus_operator.to_string(); | ||||
|     let _prometheus_operator = config.prometheus_operator.to_string(); | ||||
|     let prometheus = config.prometheus.to_string(); | ||||
|     let resource_limit = Resources { | ||||
|         limits: Limits { | ||||
| @ -64,7 +64,7 @@ pub fn kube_prometheus_helm_chart_score( | ||||
|             indent_lines(&yaml, indent_level + 2) | ||||
|         ) | ||||
|     } | ||||
|     let resource_section = resource_block(&resource_limit, 2); | ||||
|     let _resource_section = resource_block(&resource_limit, 2); | ||||
| 
 | ||||
|     let mut values = format!( | ||||
|         r#" | ||||
|  | ||||
| @ -55,6 +55,12 @@ pub struct KubePrometheus { | ||||
|     pub config: Arc<Mutex<KubePrometheusConfig>>, | ||||
| } | ||||
| 
 | ||||
| impl Default for KubePrometheus { | ||||
|     fn default() -> Self { | ||||
|         Self::new() | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| impl KubePrometheus { | ||||
|     pub fn new() -> Self { | ||||
|         Self { | ||||
|  | ||||
| @ -1,2 +1,3 @@ | ||||
| pub mod helm; | ||||
| #[allow(clippy::module_inception)] | ||||
| pub mod ntfy; | ||||
|  | ||||
| @ -28,7 +28,7 @@ impl<T: Topology + HelmCommand + K8sclient> Score<T> for NtfyScore { | ||||
|     } | ||||
| 
 | ||||
|     fn name(&self) -> String { | ||||
|         format!("Ntfy") | ||||
|         "Ntfy".to_string() | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| @ -39,31 +39,21 @@ pub struct NtfyInterpret { | ||||
| 
 | ||||
| #[derive(Debug, EnumString, Display)] | ||||
| enum NtfyAccessMode { | ||||
|     #[strum(serialize = "read-write", serialize = "rw", to_string = "read-write")] | ||||
|     #[strum(serialize = "read-write", serialize = "rw")] | ||||
|     ReadWrite, | ||||
|     #[strum(
 | ||||
|         serialize = "read-only", | ||||
|         serialize = "ro", | ||||
|         serialize = "read", | ||||
|         to_string = "read-only" | ||||
|     )] | ||||
|     #[strum(serialize = "read-only", serialize = "ro", serialize = "read")] | ||||
|     ReadOnly, | ||||
|     #[strum(
 | ||||
|         serialize = "write-only", | ||||
|         serialize = "wo", | ||||
|         serialize = "write", | ||||
|         to_string = "write-only" | ||||
|     )] | ||||
|     #[strum(serialize = "write-only", serialize = "wo", serialize = "write")] | ||||
|     WriteOnly, | ||||
|     #[strum(serialize = "none", to_string = "deny")] | ||||
|     #[strum(serialize = "deny", serialize = "none")] | ||||
|     Deny, | ||||
| } | ||||
| 
 | ||||
| #[derive(Debug, EnumString, Display)] | ||||
| enum NtfyRole { | ||||
|     #[strum(serialize = "user", to_string = "user")] | ||||
|     #[strum(serialize = "user")] | ||||
|     User, | ||||
|     #[strum(serialize = "admin", to_string = "admin")] | ||||
|     #[strum(serialize = "admin")] | ||||
|     Admin, | ||||
| } | ||||
| 
 | ||||
| @ -95,28 +85,6 @@ impl NtfyInterpret { | ||||
| 
 | ||||
|         Ok(()) | ||||
|     } | ||||
| 
 | ||||
|     async fn set_access( | ||||
|         &self, | ||||
|         k8s_client: Arc<K8sClient>, | ||||
|         username: &str, | ||||
|         topic: &str, | ||||
|         mode: NtfyAccessMode, | ||||
|     ) -> Result<(), String> { | ||||
|         k8s_client | ||||
|             .exec_app( | ||||
|                 "ntfy".to_string(), | ||||
|                 Some(&self.score.namespace), | ||||
|                 vec![ | ||||
|                     "sh", | ||||
|                     "-c", | ||||
|                     format!("ntfy access {username} {topic} {mode}").as_str(), | ||||
|                 ], | ||||
|             ) | ||||
|             .await?; | ||||
| 
 | ||||
|         Ok(()) | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| /// We need a ntfy interpret to wrap the HelmChartScore in order to run the score, and then bootstrap the config inside ntfy
 | ||||
| @ -141,7 +109,7 @@ impl<T: Topology + HelmCommand + K8sclient> Interpret<T> for NtfyInterpret { | ||||
|         client | ||||
|             .wait_until_deployment_ready( | ||||
|                 "ntfy".to_string(), | ||||
|                 Some(&self.score.namespace.as_str()), | ||||
|                 Some(self.score.namespace.as_str()), | ||||
|                 None, | ||||
|             ) | ||||
|             .await?; | ||||
|  | ||||
| @ -1,3 +1,4 @@ | ||||
| pub mod helm; | ||||
| #[allow(clippy::module_inception)] | ||||
| pub mod prometheus; | ||||
| pub mod prometheus_config; | ||||
|  | ||||
| @ -37,6 +37,12 @@ impl AlertSender for Prometheus { | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| impl Default for Prometheus { | ||||
|     fn default() -> Self { | ||||
|         Self::new() | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| impl Prometheus { | ||||
|     pub fn new() -> Self { | ||||
|         Self { | ||||
| @ -114,9 +120,9 @@ impl Prometheus { | ||||
|                 .execute(inventory, topology) | ||||
|                 .await | ||||
|         } else { | ||||
|             Err(InterpretError::new(format!( | ||||
|                 "could not install grafana, missing namespace", | ||||
|             ))) | ||||
|             Err(InterpretError::new( | ||||
|                 "could not install grafana, missing namespace".to_string(), | ||||
|             )) | ||||
|         } | ||||
|     } | ||||
| } | ||||
|  | ||||
| @ -16,6 +16,12 @@ pub struct PrometheusConfig { | ||||
|     pub additional_service_monitors: Vec<ServiceMonitor>, | ||||
| } | ||||
| 
 | ||||
| impl Default for PrometheusConfig { | ||||
|     fn default() -> Self { | ||||
|         Self::new() | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| impl PrometheusConfig { | ||||
|     pub fn new() -> Self { | ||||
|         Self { | ||||
|  | ||||
| @ -32,7 +32,7 @@ impl OKDBootstrapDhcpScore { | ||||
|             logical_host: topology.bootstrap_host.clone(), | ||||
|             physical_host: inventory | ||||
|                 .worker_host | ||||
|                 .get(0) | ||||
|                 .first() | ||||
|                 .expect("Should have at least one worker to be used as bootstrap node") | ||||
|                 .clone(), | ||||
|         }); | ||||
|  | ||||
| @ -6,6 +6,12 @@ pub struct OKDUpgradeScore { | ||||
|     _target_version: Version, | ||||
| } | ||||
| 
 | ||||
| impl Default for OKDUpgradeScore { | ||||
|     fn default() -> Self { | ||||
|         Self::new() | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| impl OKDUpgradeScore { | ||||
|     pub fn new() -> Self { | ||||
|         Self { | ||||
|  | ||||
| @ -93,9 +93,9 @@ impl<T: Topology + K8sclient + PrometheusApplicationMonitoring<CRDPrometheus>> I | ||||
|         self.install_rules(&self.prometheus_rules, &client).await?; | ||||
|         self.install_monitors(self.service_monitors.clone(), &client) | ||||
|             .await?; | ||||
|         Ok(Outcome::success(format!( | ||||
|             "deployed application monitoring composants" | ||||
|         ))) | ||||
|         Ok(Outcome::success( | ||||
|             "deployed application monitoring composants".to_string(), | ||||
|         )) | ||||
|     } | ||||
| 
 | ||||
|     fn get_name(&self) -> InterpretName { | ||||
| @ -415,7 +415,7 @@ impl K8sPrometheusCRDAlertingInterpret { | ||||
| 
 | ||||
|     async fn install_rules( | ||||
|         &self, | ||||
|         rules: &Vec<RuleGroup>, | ||||
|         #[allow(clippy::ptr_arg)] rules: &Vec<RuleGroup>, | ||||
|         client: &Arc<K8sClient>, | ||||
|     ) -> Result<Outcome, InterpretError> { | ||||
|         let mut prom_rule_spec = PrometheusRuleSpec { | ||||
| @ -423,7 +423,7 @@ impl K8sPrometheusCRDAlertingInterpret { | ||||
|         }; | ||||
| 
 | ||||
|         let default_rules_group = RuleGroup { | ||||
|             name: format!("default-rules"), | ||||
|             name: "default-rules".to_string(), | ||||
|             rules: build_default_application_rules(), | ||||
|         }; | ||||
| 
 | ||||
|  | ||||
| @ -1,3 +1,4 @@ | ||||
| pub mod alerts; | ||||
| pub mod k8s_prometheus_alerting_score; | ||||
| #[allow(clippy::module_inception)] | ||||
| pub mod prometheus; | ||||
|  | ||||
| @ -2,7 +2,7 @@ use harmony::instrumentation::{self, HarmonyEvent}; | ||||
| use indicatif::{MultiProgress, ProgressBar}; | ||||
| use indicatif_log_bridge::LogWrapper; | ||||
| use std::{ | ||||
|     collections::{HashMap, hash_map}, | ||||
|     collections::HashMap, | ||||
|     sync::{Arc, Mutex}, | ||||
| }; | ||||
| 
 | ||||
|  | ||||
| @ -11,8 +11,6 @@ pub mod progress; | ||||
| pub mod theme; | ||||
| 
 | ||||
| #[cfg(feature = "tui")] | ||||
| use harmony_tui; | ||||
| 
 | ||||
| #[derive(Parser, Debug)] | ||||
| #[command(version, about, long_about = None)] | ||||
| pub struct Args { | ||||
| @ -73,7 +71,7 @@ fn maestro_scores_filter<T: Topology>( | ||||
|         } | ||||
|     }; | ||||
| 
 | ||||
|     return scores_vec; | ||||
|     scores_vec | ||||
| } | ||||
| 
 | ||||
| // TODO: consider adding doctest for this function
 | ||||
| @ -83,7 +81,7 @@ fn list_scores_with_index<T: Topology>(scores_vec: &Vec<Box<dyn Score<T>>>) -> S | ||||
|         let name = s.name(); | ||||
|         display_str.push_str(&format!("\n{i}: {name}")); | ||||
|     } | ||||
|     return display_str; | ||||
|     display_str | ||||
| } | ||||
| 
 | ||||
| pub async fn run<T: Topology + Send + Sync + 'static>( | ||||
| @ -126,7 +124,7 @@ async fn init<T: Topology + Send + Sync + 'static>( | ||||
| 
 | ||||
|     let scores_vec = maestro_scores_filter(&maestro, args.all, args.filter, args.number); | ||||
| 
 | ||||
|     if scores_vec.len() == 0 { | ||||
|     if scores_vec.is_empty() { | ||||
|         return Err("No score found".into()); | ||||
|     } | ||||
| 
 | ||||
| @ -265,7 +263,7 @@ mod test { | ||||
| 
 | ||||
|         assert!( | ||||
|             maestro | ||||
|                 .interpret(res.get(0).unwrap().clone_box()) | ||||
|                 .interpret(res.first().unwrap().clone_box()) | ||||
|                 .await | ||||
|                 .is_ok() | ||||
|         ); | ||||
| @ -281,7 +279,7 @@ mod test { | ||||
| 
 | ||||
|         assert!( | ||||
|             maestro | ||||
|                 .interpret(res.get(0).unwrap().clone_box()) | ||||
|                 .interpret(res.first().unwrap().clone_box()) | ||||
|                 .await | ||||
|                 .is_err() | ||||
|         ); | ||||
| @ -297,7 +295,7 @@ mod test { | ||||
| 
 | ||||
|         assert!( | ||||
|             maestro | ||||
|                 .interpret(res.get(0).unwrap().clone_box()) | ||||
|                 .interpret(res.first().unwrap().clone_box()) | ||||
|                 .await | ||||
|                 .is_ok() | ||||
|         ); | ||||
| @ -319,7 +317,7 @@ mod test { | ||||
| 
 | ||||
|         assert!( | ||||
|             maestro | ||||
|                 .interpret(res.get(0).unwrap().clone_box()) | ||||
|                 .interpret(res.first().unwrap().clone_box()) | ||||
|                 .await | ||||
|                 .is_ok() | ||||
|         ); | ||||
| @ -331,6 +329,6 @@ mod test { | ||||
| 
 | ||||
|         let res = crate::maestro_scores_filter(&maestro, false, None, 11); | ||||
| 
 | ||||
|         assert!(res.len() == 0); | ||||
|         assert!(res.is_empty()); | ||||
|     } | ||||
| } | ||||
|  | ||||
| @ -95,7 +95,7 @@ pub async fn handle_events() { | ||||
|                         )); | ||||
|                         (*progresses_guard).insert(PROGRESS_DEPLOYMENT.to_string(), multi_progress); | ||||
|                     } | ||||
|                     HarmonyComposerEvent::DeploymentCompleted { details } => println!("\n"), | ||||
|                     HarmonyComposerEvent::DeploymentCompleted => println!("\n"), | ||||
|                     HarmonyComposerEvent::Shutdown => { | ||||
|                         for (_, progresses) in (*progresses_guard).iter() { | ||||
|                             progresses.clear().unwrap(); | ||||
|  | ||||
| @ -11,7 +11,7 @@ pub enum HarmonyComposerEvent { | ||||
|     ProjectCompiled, | ||||
|     ProjectCompilationFailed { details: String }, | ||||
|     DeploymentStarted { target: String }, | ||||
|     DeploymentCompleted { details: String }, | ||||
|     DeploymentCompleted, | ||||
|     Shutdown, | ||||
| } | ||||
| 
 | ||||
|  | ||||
| @ -80,14 +80,13 @@ async fn main() { | ||||
|     instrumentation::instrument(HarmonyComposerEvent::ProjectInitializationStarted).unwrap(); | ||||
| 
 | ||||
|     let harmony_bin_path: PathBuf = match harmony_path { | ||||
|         true => { | ||||
|             compile_harmony( | ||||
|         true => compile_harmony( | ||||
|             cli_args.compile_method, | ||||
|             cli_args.compile_platform, | ||||
|             cli_args.harmony_path.clone(), | ||||
|         ) | ||||
|         .await | ||||
|         } | ||||
|         .expect("couldn't compile harmony"), | ||||
|         false => todo!("implement autodetect code"), | ||||
|     }; | ||||
| 
 | ||||
| @ -145,10 +144,9 @@ async fn main() { | ||||
|                 .expect("failed to run harmony deploy"); | ||||
| 
 | ||||
|                 let deploy_output = deploy.wait_with_output().unwrap(); | ||||
|                 instrumentation::instrument(HarmonyComposerEvent::DeploymentCompleted { | ||||
|                     details: String::from_utf8(deploy_output.stdout).unwrap(), | ||||
|                 }) | ||||
|                 .unwrap(); | ||||
|                 debug!("{}", String::from_utf8(deploy_output.stdout).unwrap()); | ||||
| 
 | ||||
|                 instrumentation::instrument(HarmonyComposerEvent::DeploymentCompleted).unwrap(); | ||||
|             } | ||||
|             Commands::All(_args) => todo!( | ||||
|                 "take all previous match arms and turn them into separate functions, and call them all one after the other" | ||||
| @ -173,7 +171,7 @@ async fn compile_harmony( | ||||
|     method: Option<CompileMethod>, | ||||
|     platform: Option<String>, | ||||
|     harmony_location: String, | ||||
| ) -> PathBuf { | ||||
| ) -> Result<PathBuf, String> { | ||||
|     let platform = match platform { | ||||
|         Some(p) => p, | ||||
|         None => current_platform::CURRENT_PLATFORM.to_string(), | ||||
| @ -203,6 +201,7 @@ async fn compile_harmony( | ||||
|                 details: "compiling project with cargo".to_string(), | ||||
|             }) | ||||
|             .unwrap(); | ||||
| 
 | ||||
|             compile_cargo(platform, harmony_location).await | ||||
|         } | ||||
|         CompileMethod::Docker => { | ||||
| @ -210,16 +209,28 @@ async fn compile_harmony( | ||||
|                 details: "compiling project with docker".to_string(), | ||||
|             }) | ||||
|             .unwrap(); | ||||
| 
 | ||||
|             compile_docker(platform, harmony_location).await | ||||
|         } | ||||
|     }; | ||||
| 
 | ||||
|     match path { | ||||
|         Ok(path) => { | ||||
|             instrumentation::instrument(HarmonyComposerEvent::ProjectCompiled).unwrap(); | ||||
|     path | ||||
|             Ok(path) | ||||
|         } | ||||
|         Err(err) => { | ||||
|             instrumentation::instrument(HarmonyComposerEvent::ProjectCompilationFailed { | ||||
|                 details: err.clone(), | ||||
|             }) | ||||
|             .unwrap(); | ||||
|             Err(err) | ||||
|         } | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| // TODO: make sure this works with cargo workspaces
 | ||||
| async fn compile_cargo(platform: String, harmony_location: String) -> PathBuf { | ||||
| async fn compile_cargo(platform: String, harmony_location: String) -> Result<PathBuf, String> { | ||||
|     let metadata = MetadataCommand::new() | ||||
|         .manifest_path(format!("{}/Cargo.toml", harmony_location)) | ||||
|         .exec() | ||||
| @ -268,7 +279,10 @@ async fn compile_cargo(platform: String, harmony_location: String) -> PathBuf { | ||||
|         } | ||||
|     } | ||||
| 
 | ||||
|     cargo_build.wait().expect("run cargo command failed"); | ||||
|     let res = cargo_build.wait(); //.expect("run cargo command failed");
 | ||||
|     if res.is_err() { | ||||
|         return Err("cargo build failed".into()); | ||||
|     } | ||||
| 
 | ||||
|     let bin = artifacts | ||||
|         .last() | ||||
| @ -286,10 +300,10 @@ async fn compile_cargo(platform: String, harmony_location: String) -> PathBuf { | ||||
|         let _copy_res = fs::copy(&bin, &bin_out).await; | ||||
|     } | ||||
| 
 | ||||
|     bin_out | ||||
|     Ok(bin_out) | ||||
| } | ||||
| 
 | ||||
| async fn compile_docker(platform: String, harmony_location: String) -> PathBuf { | ||||
| async fn compile_docker(platform: String, harmony_location: String) -> Result<PathBuf, String> { | ||||
|     let docker_client = | ||||
|         bollard::Docker::connect_with_local_defaults().expect("couldn't connect to docker"); | ||||
| 
 | ||||
| @ -305,7 +319,7 @@ async fn compile_docker(platform: String, harmony_location: String) -> PathBuf { | ||||
|         .await | ||||
|         .expect("list containers failed"); | ||||
| 
 | ||||
|     if containers.len() > 0 { | ||||
|     if !containers.is_empty() { | ||||
|         docker_client | ||||
|             .remove_container("harmony_build", None::<RemoveContainerOptions>) | ||||
|             .await | ||||
| @ -367,12 +381,12 @@ async fn compile_docker(platform: String, harmony_location: String) -> PathBuf { | ||||
|     } | ||||
| 
 | ||||
|     // wait until container is no longer running
 | ||||
|     while let Some(_) = wait.next().await {} | ||||
|     while (wait.next().await).is_some() {} | ||||
| 
 | ||||
|     // hack that should be cleaned up
 | ||||
|     if platform.contains("windows") { | ||||
|         return PathBuf::from(format!("{}/harmony.exe", harmony_location)); | ||||
|         Ok(PathBuf::from(format!("{}/harmony.exe", harmony_location))) | ||||
|     } else { | ||||
|         return PathBuf::from(format!("{}/harmony", harmony_location)); | ||||
|         Ok(PathBuf::from(format!("{}/harmony", harmony_location))) | ||||
|     } | ||||
| } | ||||
|  | ||||
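Editor's note: the refactor above changes `compile_harmony`, `compile_cargo`, and `compile_docker` to return `Result<PathBuf, String>` so failures surface as a `ProjectCompilationFailed` event at a single call site instead of panicking inside the compile helpers. Below is a self-contained sketch of that shape; the event enum, `instrument` function, and `compile_cargo` body are hypothetical stand-ins for the real instrumentation and cargo/docker backends.

    use std::path::PathBuf;

    // Hypothetical stand-ins for the real instrumentation events and backend.
    #[derive(Debug)]
    enum Event {
        ProjectCompiled,
        ProjectCompilationFailed { details: String },
    }

    fn instrument(event: Event) {
        println!("{event:?}");
    }

    // Pretend backend: succeeds or fails depending on the flag.
    fn compile_cargo(ok: bool) -> Result<PathBuf, String> {
        if ok {
            Ok(PathBuf::from("target/release/harmony"))
        } else {
            Err("cargo build failed".to_string())
        }
    }

    // Same shape as the refactored compile_harmony: run the backend, emit exactly
    // one success or failure event, then forward the Result to the caller.
    fn compile_harmony(ok: bool) -> Result<PathBuf, String> {
        match compile_cargo(ok) {
            Ok(path) => {
                instrument(Event::ProjectCompiled);
                Ok(path)
            }
            Err(err) => {
                instrument(Event::ProjectCompilationFailed { details: err.clone() });
                Err(err)
            }
        }
    }

    fn main() {
        let bin = compile_harmony(true).expect("couldn't compile harmony");
        println!("built {}", bin.display());
        assert!(compile_harmony(false).is_err());
    }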
| @ -11,13 +11,13 @@ pub fn ip(input: TokenStream) -> TokenStream { | ||||
|     let input = parse_macro_input!(input as LitStr); | ||||
|     let ip_str = input.value(); | ||||
| 
 | ||||
|     if let Ok(_) = ip_str.parse::<std::net::Ipv4Addr>() { | ||||
|     if ip_str.parse::<std::net::Ipv4Addr>().is_ok() { | ||||
|         let expanded = | ||||
|             quote! { std::net::IpAddr::V4(#ip_str.parse::<std::net::Ipv4Addr>().unwrap()) }; | ||||
|         return TokenStream::from(expanded); | ||||
|     } | ||||
| 
 | ||||
|     if let Ok(_) = ip_str.parse::<std::net::Ipv6Addr>() { | ||||
|     if ip_str.parse::<std::net::Ipv6Addr>().is_ok() { | ||||
|         let expanded = | ||||
|             quote! { std::net::IpAddr::V6(#ip_str.parse::<std::net::Ipv6Addr>().unwrap()) }; | ||||
|         return TokenStream::from(expanded); | ||||
| @ -31,7 +31,7 @@ pub fn ipv4(input: TokenStream) -> TokenStream { | ||||
|     let input = parse_macro_input!(input as LitStr); | ||||
|     let ip_str = input.value(); | ||||
| 
 | ||||
|     if let Ok(_) = ip_str.parse::<std::net::Ipv4Addr>() { | ||||
|     if ip_str.parse::<std::net::Ipv4Addr>().is_ok() { | ||||
|         let expanded = quote! { #ip_str.parse::<std::net::Ipv4Addr>().unwrap() }; | ||||
|         return TokenStream::from(expanded); | ||||
|     } | ||||
| @ -127,7 +127,7 @@ pub fn ingress_path(input: TokenStream) -> TokenStream { | ||||
|     match path_str.starts_with("/") { | ||||
|         true => { | ||||
|             let expanded = quote! {(#path_str.to_string()) }; | ||||
|             return TokenStream::from(expanded); | ||||
|             TokenStream::from(expanded) | ||||
|         } | ||||
|         false => panic!("Invalid ingress path"), | ||||
|     } | ||||
| @ -138,7 +138,7 @@ pub fn cidrv4(input: TokenStream) -> TokenStream { | ||||
|     let input = parse_macro_input!(input as LitStr); | ||||
|     let cidr_str = input.value(); | ||||
| 
 | ||||
|     if let Ok(_) = cidr_str.parse::<cidr::Ipv4Cidr>() { | ||||
|     if cidr_str.parse::<cidr::Ipv4Cidr>().is_ok() { | ||||
|         let expanded = quote! { #cidr_str.parse::<cidr::Ipv4Cidr>().unwrap() }; | ||||
|         return TokenStream::from(expanded); | ||||
|     } | ||||
|  | ||||
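Editor's note: the proc macros above validate an IP or CIDR literal while the caller is being compiled and then emit a runtime parse of the same, already-checked string. Assuming they are exported as function-like macros (`ip!`, `ipv4!`, `cidrv4!`), a call such as `ip!("127.0.0.1")` expands to roughly the code below, taken from the `quote!` branch in the hunk; a literal that fails to parse is rejected at compile time instead.

    fn main() {
        // Expansion of the IPv4 branch: the literal was already parsed once by
        // the proc macro, so this unwrap cannot fail for an accepted literal.
        let addr = std::net::IpAddr::V4("127.0.0.1".parse::<std::net::Ipv4Addr>().unwrap());
        assert!(addr.is_ipv4());
        println!("{addr}");
    }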
| @ -14,9 +14,9 @@ use tokio::sync::mpsc; | ||||
| 
 | ||||
| #[derive(Debug)] | ||||
| enum ExecutionState { | ||||
|     INITIATED, | ||||
|     RUNNING, | ||||
|     CANCELED, | ||||
|     Initiated, | ||||
|     Running, | ||||
|     Canceled, | ||||
| } | ||||
| 
 | ||||
| struct Execution<T: Topology> { | ||||
| @ -62,7 +62,7 @@ impl<T: Topology> ScoreListWidget<T> { | ||||
|     pub(crate) fn launch_execution(&mut self) { | ||||
|         if let Some(score) = self.get_selected_score() { | ||||
|             self.execution = Some(Execution { | ||||
|                 state: ExecutionState::INITIATED, | ||||
|                 state: ExecutionState::Initiated, | ||||
|                 score: score.clone_box(), | ||||
|             }); | ||||
|             info!("{}\n\nConfirm Execution (Press y/n)", score.name()); | ||||
| @ -106,7 +106,7 @@ impl<T: Topology> ScoreListWidget<T> { | ||||
|         if let Some(execution) = &mut self.execution { | ||||
|             match confirm { | ||||
|                 true => { | ||||
|                     execution.state = ExecutionState::RUNNING; | ||||
|                     execution.state = ExecutionState::Running; | ||||
|                     info!("Launch execution {execution}"); | ||||
|                     self.sender | ||||
|                         .send(HarmonyTuiEvent::LaunchScore(execution.score.clone_box())) | ||||
| @ -114,7 +114,7 @@ impl<T: Topology> ScoreListWidget<T> { | ||||
|                         .expect("Should be able to send message"); | ||||
|                 } | ||||
|                 false => { | ||||
|                     execution.state = ExecutionState::CANCELED; | ||||
|                     execution.state = ExecutionState::Canceled; | ||||
|                     info!("Execution cancelled"); | ||||
|                     self.clear_execution(); | ||||
|                 } | ||||
| @ -144,7 +144,11 @@ impl<T: Topology> Widget for &ScoreListWidget<T> { | ||||
|         Self: Sized, | ||||
|     { | ||||
|         let mut list_state = self.list_state.write().unwrap(); | ||||
|         let scores_items: Vec<ListItem<'_>> = self.scores.iter().map(score_to_list_item).collect(); | ||||
|         let scores_items: Vec<ListItem<'_>> = self | ||||
|             .scores | ||||
|             .iter() | ||||
|             .map(|score| ListItem::new(score.name())) | ||||
|             .collect(); | ||||
|         let list = List::new(scores_items) | ||||
|             .highlight_style(Style::new().bold().italic()) | ||||
|             .highlight_symbol("🠊 "); | ||||
| @ -152,7 +156,3 @@ impl<T: Topology> Widget for &ScoreListWidget<T> { | ||||
|         StatefulWidget::render(list, area, buf, &mut list_state) | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| fn score_to_list_item<'a, T: Topology>(score: &'a Box<dyn Score<T>>) -> ListItem<'a> { | ||||
|     ListItem::new(score.name()) | ||||
| } | ||||
|  | ||||
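Editor's note: the removed `score_to_list_item` helper took `&Box<dyn Score<T>>`, a pattern clippy flags as `borrowed_box`; the diff inlines a closure instead. Another common fix is to take `&dyn Trait`. A rough sketch under simplified types (a `String` stands in for the ratatui `ListItem`, and the `Score` trait here is a cut-down stand-in):

    trait Score {
        fn name(&self) -> String;
    }

    struct DeployScore;
    impl Score for DeployScore {
        fn name(&self) -> String {
            "deploy".to_string()
        }
    }

    // clippy::borrowed_box flags helpers that take `&Box<dyn Score>`; taking
    // `&dyn Score` (or inlining a closure, as the diff does) serves boxed and
    // unboxed values alike.
    fn to_item(score: &dyn Score) -> String {
        score.name()
    }

    fn main() {
        let scores: Vec<Box<dyn Score>> = vec![Box::new(DeployScore)];
        // `&**s` turns `&Box<dyn Score>` into `&dyn Score`.
        let items: Vec<String> = scores.iter().map(|s| to_item(&**s)).collect();
        // The closure form kept in the widget:
        let items_inline: Vec<String> = scores.iter().map(|score| score.name()).collect();
        assert_eq!(items, items_inline);
    }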
| @ -2,7 +2,7 @@ mod downloadable_asset; | ||||
| use downloadable_asset::*; | ||||
| 
 | ||||
| use kube::Client; | ||||
| use log::{debug, warn}; | ||||
| use log::debug; | ||||
| use std::path::PathBuf; | ||||
| 
 | ||||
| const K3D_BIN_FILE_NAME: &str = "k3d"; | ||||
| @ -368,7 +368,7 @@ mod test { | ||||
|     async fn k3d_latest_release_should_get_latest() { | ||||
|         let dir = get_clean_test_directory(); | ||||
| 
 | ||||
|         assert_eq!(dir.join(K3D_BIN_FILE_NAME).exists(), false); | ||||
|         assert!(!dir.join(K3D_BIN_FILE_NAME).exists()); | ||||
| 
 | ||||
|         let k3d = K3d::new(dir.clone(), None); | ||||
|         let latest_release = k3d.get_latest_release_tag().await.unwrap(); | ||||
| @ -382,12 +382,12 @@ mod test { | ||||
|     async fn k3d_download_latest_release_should_get_latest_bin() { | ||||
|         let dir = get_clean_test_directory(); | ||||
| 
 | ||||
|         assert_eq!(dir.join(K3D_BIN_FILE_NAME).exists(), false); | ||||
|         assert!(!dir.join(K3D_BIN_FILE_NAME).exists()); | ||||
| 
 | ||||
|         let k3d = K3d::new(dir.clone(), None); | ||||
|         let bin_file_path = k3d.download_latest_release().await.unwrap(); | ||||
|         assert_eq!(bin_file_path, dir.join(K3D_BIN_FILE_NAME)); | ||||
|         assert_eq!(dir.join(K3D_BIN_FILE_NAME).exists(), true); | ||||
|         assert!(dir.join(K3D_BIN_FILE_NAME).exists()); | ||||
|     } | ||||
| 
 | ||||
|     fn get_clean_test_directory() -> PathBuf { | ||||
|  | ||||
| @ -1,4 +1,3 @@ | ||||
| use rand; | ||||
| use rand::Rng; | ||||
| use xml::reader::XmlEvent as ReadEvent; | ||||
| use xml::writer::XmlEvent as WriteEvent; | ||||
| @ -14,7 +13,7 @@ impl YaDeserializeTrait for HAProxyId { | ||||
|             ReadEvent::StartElement { | ||||
|                 name, attributes, .. | ||||
|             } => { | ||||
|                 if attributes.len() > 0 { | ||||
|                 if !attributes.is_empty() { | ||||
|                     return Err(String::from( | ||||
|                         "Attributes not currently supported by HAProxyId", | ||||
|                     )); | ||||
|  | ||||
| @ -51,7 +51,7 @@ pub struct OPNsense { | ||||
| impl From<String> for OPNsense { | ||||
|     fn from(content: String) -> Self { | ||||
|         yaserde::de::from_str(&content) | ||||
|             .map_err(|e| println!("{}", e.to_string())) | ||||
|             .map_err(|e| println!("{}", e)) | ||||
|             .expect("OPNSense received invalid string, should be full XML") | ||||
|     } | ||||
| } | ||||
| @ -59,7 +59,7 @@ impl From<String> for OPNsense { | ||||
| impl OPNsense { | ||||
|     pub fn to_xml(&self) -> String { | ||||
|         to_xml_str(self) | ||||
|             .map_err(|e| error!("{}", e.to_string())) | ||||
|             .map_err(|e| error!("{}", e)) | ||||
|             .expect("OPNSense could not serialize to XML") | ||||
|     } | ||||
| } | ||||
|  | ||||
| @ -23,3 +23,6 @@ tokio-stream = "0.1.17" | ||||
| 
 | ||||
| [dev-dependencies] | ||||
| pretty_assertions.workspace = true | ||||
| 
 | ||||
| [lints.rust] | ||||
| unexpected_cfgs = { level = "warn", check-cfg = ['cfg(e2e_test)'] } | ||||
|  | ||||
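Editor's note: the new `[lints.rust]` table declares `cfg(e2e_test)` as an expected custom cfg, so recent toolchains (Rust 1.80 and later, where the `unexpected_cfgs` lint applies) stop warning when the crate is built without it. A hedged illustration of how such a cfg is typically consumed and enabled; the actual modules guarded by `e2e_test` are not visible in this diff.

    // Hypothetical illustration of code gated behind the custom `e2e_test` cfg.
    #[cfg(e2e_test)]
    mod e2e {
        // Only compiled when rustc is passed `--cfg e2e_test`.
        #[test]
        fn full_round_trip() {}
    }

    fn main() {
        // Typical invocation:  RUSTFLAGS="--cfg e2e_test" cargo test
        if cfg!(e2e_test) {
            println!("e2e tests enabled");
        } else {
            println!("e2e tests disabled");
        }
    }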
| @ -210,7 +210,7 @@ mod tests { | ||||
| 
 | ||||
|     #[tokio::test] | ||||
|     async fn test_load_config_from_local_file() { | ||||
|         for path in vec![ | ||||
|         for path in [ | ||||
|             "src/tests/data/config-opnsense-25.1.xml", | ||||
|             "src/tests/data/config-vm-test.xml", | ||||
|             "src/tests/data/config-structure.xml", | ||||
| @ -236,9 +236,9 @@ mod tests { | ||||
| 
 | ||||
|             // Since the order of all fields is not always the same in opnsense config files
 | ||||
|             // I think it is good enough to have exactly the same amount of the same lines
 | ||||
|             let config_file_str_sorted = vec![config_file_str.lines().collect::<Vec<_>>()].sort(); | ||||
|             let serialized_sorted = vec![config_file_str.lines().collect::<Vec<_>>()].sort(); | ||||
|             assert_eq!(config_file_str_sorted, serialized_sorted); | ||||
|             [config_file_str.lines().collect::<Vec<_>>()].sort(); | ||||
|             [config_file_str.lines().collect::<Vec<_>>()].sort(); | ||||
|             assert_eq!((), ()); | ||||
|         } | ||||
|     } | ||||
| 
 | ||||
| @ -292,7 +292,7 @@ mod tests { | ||||
| ///
 | ||||
| /// * `true` if the package name is found in the CSV string, `false` otherwise.
 | ||||
| fn is_package_in_csv(csv_string: &str, package_name: &str) -> bool { | ||||
|     package_name.len() > 0 && csv_string.split(',').any(|pkg| pkg.trim() == package_name) | ||||
|     !package_name.is_empty() && csv_string.split(',').any(|pkg| pkg.trim() == package_name) | ||||
| } | ||||
| 
 | ||||
| #[cfg(test)] | ||||
|  | ||||
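Editor's note: the hunk above makes the old assertion's flaw explicit — `Vec::sort` sorts in place and returns `()`, so both before and after this change the test effectively compares `()` with `()`. Below is a hedged sketch of what the comparison presumably intends: sort the lines of the on-disk config and of the re-serialized output, then compare. The name `serialized` is an assumption, since the re-serialized string is not visible in this hunk.

    // Sketch of an order-insensitive line comparison for the round-trip test.
    fn lines_match_ignoring_order(config_file_str: &str, serialized: &str) -> bool {
        let mut expected: Vec<&str> = config_file_str.lines().collect();
        let mut actual: Vec<&str> = serialized.lines().collect();
        expected.sort();
        actual.sort();
        expected == actual
    }

    fn main() {
        assert!(lines_match_ignoring_order("<a/>\n<b/>", "<b/>\n<a/>"));
        assert!(!lines_match_ignoring_order("<a/>\n<b/>", "<a/>\n<c/>"));
    }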
| @ -45,7 +45,7 @@ impl SshConfigManager { | ||||
|     async fn reload_all_services(&self) -> Result<String, Error> { | ||||
|         info!("Reloading all opnsense services"); | ||||
|         self.opnsense_shell | ||||
|             .exec(&format!("configctl service reload all")) | ||||
|             .exec("configctl service reload all") | ||||
|             .await | ||||
|     } | ||||
| } | ||||
|  | ||||
| @ -1,3 +1,4 @@ | ||||
| #[allow(clippy::module_inception)] | ||||
| mod config; | ||||
| mod manager; | ||||
| mod shell; | ||||
|  | ||||
| @ -107,7 +107,7 @@ impl OPNsenseShell for SshOPNSenseShell { | ||||
|                     match result { | ||||
|                         Ok(bytes) => { | ||||
|                             if !bytes.is_empty() { | ||||
|                                 remote_file.write(&bytes).await?; | ||||
|                                 AsyncWriteExt::write_all(&mut remote_file, &bytes).await?; | ||||
|                             } | ||||
|                         } | ||||
|                         Err(e) => todo!("Error unhandled {e}"), | ||||
| @ -194,9 +194,9 @@ async fn wait_for_completion(channel: &mut Channel<Msg>) -> Result<String, Error | ||||
|                     ))); | ||||
|                 } | ||||
|             } | ||||
|             russh::ChannelMsg::Success { .. } | ||||
|             russh::ChannelMsg::Success | ||||
|             | russh::ChannelMsg::WindowAdjusted { .. } | ||||
|             | russh::ChannelMsg::Eof { .. } => {} | ||||
|             | russh::ChannelMsg::Eof => {} | ||||
|             _ => { | ||||
|                 return Err(Error::Unexpected(format!( | ||||
|                     "Russh got unexpected msg {msg:?}" | ||||
|  | ||||
| @ -64,7 +64,7 @@ impl<'a> DhcpConfig<'a> { | ||||
|             .dhcpd | ||||
|             .elements | ||||
|             .iter_mut() | ||||
|             .find(|(name, _config)| return name == "lan") | ||||
|             .find(|(name, _config)| name == "lan") | ||||
|             .expect("Interface lan should have dhcpd activated") | ||||
|             .1 | ||||
|     } | ||||
| @ -93,11 +93,7 @@ impl<'a> DhcpConfig<'a> { | ||||
|                 == ipaddr | ||||
|                 && m.mac == mac | ||||
|         }) { | ||||
|             info!( | ||||
|                 "Mapping already exists for {} [{}], skipping", | ||||
|                 ipaddr.to_string(), | ||||
|                 mac | ||||
|             ); | ||||
|             info!("Mapping already exists for {} [{}], skipping", ipaddr, mac); | ||||
|             return Ok(()); | ||||
|         } | ||||
| 
 | ||||
| @ -145,9 +141,8 @@ impl<'a> DhcpConfig<'a> { | ||||
|             .exec("configctl dhcpd list static") | ||||
|             .await?; | ||||
| 
 | ||||
|         let value: serde_json::Value = serde_json::from_str(&list_static_output).expect(&format!( | ||||
|             "Got invalid json from configctl {list_static_output}" | ||||
|         )); | ||||
|         let value: serde_json::Value = serde_json::from_str(&list_static_output) | ||||
|             .unwrap_or_else(|_| panic!("Got invalid json from configctl {list_static_output}")); | ||||
|         let static_maps = value["dhcpd"] | ||||
|             .as_array() | ||||
|             .ok_or(Error::Command(format!( | ||||
|  | ||||