Compare commits

9 Commits: feat/insta...759a9287d3

| Author | SHA1 | Date |
|---|---|---|
| | 759a9287d3 | |
| | 24922321b1 | |
| | 4ff57062ae | |
| | 50ce54ea66 | |
| | 827a49e56b | |
| | cf84f2cce8 | |
| | a12d12aa4f | |
| | cefb65933a | |
| | 95cfc03518 | |
19 Cargo.lock (generated)
@@ -1804,6 +1804,25 @@ dependencies = [
"url",
]

[[package]]
name = "example-okd-cluster-alerts"
version = "0.1.0"
dependencies = [
"brocade",
"cidr",
"env_logger",
"harmony",
"harmony_cli",
"harmony_macros",
"harmony_secret",
"harmony_secret_derive",
"harmony_types",
"log",
"serde",
"tokio",
"url",
]

[[package]]
name = "example-okd-install"
version = "0.1.0"
@@ -106,7 +106,6 @@ async fn main() {
name: "wk2".to_string(),
},
],
node_exporter: opnsense.clone(),
switch_client: switch_client.clone(),
};
@@ -1,9 +1,10 @@
[package]
name = "example-opnsense-node-exporter"
name = "example-okd-cluster-alerts"
edition = "2024"
version.workspace = true
readme.workspace = true
license.workspace = true
publish = false

[dependencies]
harmony = { path = "../../harmony" }
@@ -18,4 +19,4 @@ log = { workspace = true }
env_logger = { workspace = true }
url = { workspace = true }
serde.workspace = true
async-trait.workspace = true
brocade = { path = "../../brocade" }
26 examples/okd_cluster_alerts/src/main.rs (new file)
@@ -0,0 +1,26 @@
use harmony::{
inventory::Inventory,
modules::monitoring::{
alert_channel::discord_alert_channel::DiscordWebhook,
okd::cluster_monitoring::OpenshiftClusterAlertScore,
},
topology::K8sAnywhereTopology,
};
use harmony_macros::hurl;

#[tokio::main]
async fn main() {
harmony_cli::run(
Inventory::autoload(),
K8sAnywhereTopology::from_env(),
vec![Box::new(OpenshiftClusterAlertScore {
receivers: vec![Box::new(DiscordWebhook {
name: "discord-webhook-example".to_string(),
url: hurl!("http://something.o"),
})],
})],
None,
)
.await
.unwrap();
}
@@ -80,7 +80,6 @@ pub async fn get_topology() -> HAClusterTopology {
name: "bootstrap".to_string(),
},
workers: vec![],
node_exporter: opnsense.clone(),
switch_client: switch_client.clone(),
}
}
@@ -75,7 +75,6 @@ pub async fn get_topology() -> HAClusterTopology {
name: "cp0".to_string(),
},
workers: vec![],
node_exporter: opnsense.clone(),
switch_client: switch_client.clone(),
}
}
@@ -78,7 +78,6 @@ async fn main() {
name: "cp0".to_string(),
},
workers: vec![],
node_exporter: opnsense.clone(),
switch_client: switch_client.clone(),
};
@@ -1,79 +0,0 @@
use std::{
net::{IpAddr, Ipv4Addr},
sync::Arc,
};

use async_trait::async_trait;
use cidr::Ipv4Cidr;
use harmony::{
executors::ExecutorError,
hardware::{HostCategory, Location, PhysicalHost, SwitchGroup},
infra::opnsense::OPNSenseManagementInterface,
inventory::Inventory,
modules::opnsense::node_exporter::NodeExporterScore,
topology::{
HAClusterTopology, LogicalHost, PreparationError, PreparationOutcome, Topology,
UnmanagedRouter, node_exporter::NodeExporter,
},
};
use harmony_macros::{ip, ipv4, mac_address};

struct OpnSenseTopology {
node_exporter: Arc<dyn NodeExporter>,
}

#[async_trait]
impl Topology for OpnSenseTopology {
async fn ensure_ready(&self) -> Result<PreparationOutcome, PreparationError> {
Ok(PreparationOutcome::Success {
details: "Success".to_string(),
})
}
fn name(&self) -> &str {
"OpnsenseTopology"
}
}

#[async_trait]
impl NodeExporter for OpnSenseTopology {
async fn ensure_initialized(&self) -> Result<(), ExecutorError> {
self.node_exporter.ensure_initialized().await
}

async fn commit_config(&self) -> Result<(), ExecutorError> {
self.node_exporter.commit_config().await
}

async fn reload_restart(&self) -> Result<(), ExecutorError> {
self.node_exporter.reload_restart().await
}
}

#[tokio::main]
async fn main() {
let firewall = harmony::topology::LogicalHost {
ip: ip!("192.168.1.1"),
name: String::from("fw0"),
};

let opnsense = Arc::new(
harmony::infra::opnsense::OPNSenseFirewall::new(firewall, None, "root", "opnsense").await,
);

let topology = OpnSenseTopology {
node_exporter: opnsense.clone(),
};

let inventory = Inventory::empty();

let node_exporter_score = NodeExporterScore {};

harmony_cli::run(
inventory,
topology,
vec![Box::new(node_exporter_score)],
None,
)
.await
.unwrap();
}
@@ -8,15 +8,12 @@ use kube::api::ObjectMeta;
use log::debug;
use log::info;

use crate::modules::okd::crd::nmstate::{self, NodeNetworkConfigurationPolicy};
use crate::topology::PxeOptions;
use crate::{data::FileContent, modules::okd::crd::nmstate::NMState};
use crate::{
executors::ExecutorError, modules::okd::crd::nmstate::NodeNetworkConfigurationPolicySpec,
};
use crate::{
modules::okd::crd::nmstate::{self, NodeNetworkConfigurationPolicy},
topology::node_exporter::NodeExporter,
};

use super::{
DHCPStaticEntry, DhcpServer, DnsRecord, DnsRecordType, DnsServer, Firewall, HostNetworkConfig,
@@ -38,7 +35,6 @@ pub struct HAClusterTopology {
pub tftp_server: Arc<dyn TftpServer>,
pub http_server: Arc<dyn HttpServer>,
pub dns_server: Arc<dyn DnsServer>,
pub node_exporter: Arc<dyn NodeExporter>,
pub switch_client: Arc<dyn SwitchClient>,
pub bootstrap_host: LogicalHost,
pub control_plane: Vec<LogicalHost>,
@@ -301,7 +297,6 @@ impl HAClusterTopology {
tftp_server: dummy_infra.clone(),
http_server: dummy_infra.clone(),
dns_server: dummy_infra.clone(),
node_exporter: dummy_infra.clone(),
switch_client: dummy_infra.clone(),
bootstrap_host: dummy_host,
control_plane: vec![],
@@ -479,23 +474,6 @@ impl Switch for HAClusterTopology {
self.configure_bond(config).await?;
self.configure_port_channel(config).await
}

//TODO add snmp here
}

#[async_trait]
impl NodeExporter for HAClusterTopology {
async fn ensure_initialized(&self) -> Result<(), ExecutorError> {
self.node_exporter.ensure_initialized().await
}

async fn commit_config(&self) -> Result<(), ExecutorError> {
self.node_exporter.commit_config().await
}

async fn reload_restart(&self) -> Result<(), ExecutorError> {
self.node_exporter.reload_restart().await
}
}

#[derive(Debug)]
@@ -685,21 +663,6 @@ impl DnsServer for DummyInfra {
}
}

#[async_trait]
impl NodeExporter for DummyInfra {
async fn ensure_initialized(&self) -> Result<(), ExecutorError> {
unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA)
}

async fn commit_config(&self) -> Result<(), ExecutorError> {
unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA)
}

async fn reload_restart(&self) -> Result<(), ExecutorError> {
unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA)
}
}

#[async_trait]
impl SwitchClient for DummyInfra {
async fn setup(&self) -> Result<(), SwitchError> {
@@ -13,7 +13,7 @@ use kube::{
Client, Config, Discovery, Error, Resource,
api::{Api, AttachParams, DeleteParams, ListParams, Patch, PatchParams, ResourceExt},
config::{KubeConfigOptions, Kubeconfig},
core::ErrorResponse,
core::{DynamicResourceScope, ErrorResponse},
discovery::{ApiCapabilities, Scope},
error::DiscoveryError,
runtime::reflector::Lookup,
@@ -94,6 +94,23 @@ impl K8sClient {
resource.get(name).await
}

pub async fn get_secret_json_value(
&self,
name: &str,
namespace: Option<&str>,
) -> Result<DynamicObject, Error> {
self.get_resource_json_value(
name,
namespace,
&GroupVersionKind {
group: "".to_string(),
version: "v1".to_string(),
kind: "Secret".to_string(),
},
)
.await
}

pub async fn get_deployment(
&self,
name: &str,
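For context, a hypothetical call site for the new `get_secret_json_value` helper could look like the sketch below. The surrounding function and namespace are assumptions for illustration, not part of this diff.

```rust
// Hypothetical usage sketch of K8sClient::get_secret_json_value.
async fn read_alertmanager_secret(client: &K8sClient) -> Result<(), kube::Error> {
    // Fetches the core/v1 Secret as an untyped DynamicObject.
    let secret = client
        .get_secret_json_value("alertmanager-main", Some("openshift-monitoring"))
        .await?;
    // The raw JSON payload (including the base64-encoded `data` map) lives in `secret.data`.
    println!("{:#}", secret.data);
    Ok(())
}
```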
@@ -337,6 +354,169 @@ impl K8sClient {
}
}

fn get_api_for_dynamic_object(
&self,
object: &DynamicObject,
ns: Option<&str>,
) -> Result<Api<DynamicObject>, Error> {
let api_resource = object
.types
.as_ref()
.and_then(|t| {
let parts: Vec<&str> = t.api_version.split('/').collect();
match parts.as_slice() {
[version] => Some(ApiResource::from_gvk(&GroupVersionKind::gvk(
"", version, &t.kind,
))),
[group, version] => Some(ApiResource::from_gvk(&GroupVersionKind::gvk(
group, version, &t.kind,
))),
_ => None,
}
})
.ok_or_else(|| {
Error::BuildRequest(kube::core::request::Error::Validation(
"Invalid apiVersion in DynamicObject {object:#?}".to_string(),
))
})?;

match ns {
Some(ns) => Ok(Api::namespaced_with(self.client.clone(), ns, &api_resource)),
None => Ok(Api::default_namespaced_with(
self.client.clone(),
&api_resource,
)),
}
}

pub async fn apply_dynamic_many(
&self,
resource: &[DynamicObject],
namespace: Option<&str>,
force_conflicts: bool,
) -> Result<Vec<DynamicObject>, Error> {
let mut result = Vec::new();
for r in resource.iter() {
result.push(self.apply_dynamic(r, namespace, force_conflicts).await?);
}

Ok(result)
}

/// Apply DynamicObject resource to the cluster
pub async fn apply_dynamic(
&self,
resource: &DynamicObject,
namespace: Option<&str>,
force_conflicts: bool,
) -> Result<DynamicObject, Error> {
// Build API for this dynamic object
let api = self.get_api_for_dynamic_object(resource, namespace)?;
let name = resource
.metadata
.name
.as_ref()
.ok_or_else(|| {
Error::BuildRequest(kube::core::request::Error::Validation(
"DynamicObject must have metadata.name".to_string(),
))
})?
.as_str();

debug!(
"Applying dynamic resource kind={:?} apiVersion={:?} name='{}' ns={:?}",
resource.types.as_ref().map(|t| &t.kind),
resource.types.as_ref().map(|t| &t.api_version),
name,
namespace
);
trace!(
"Dynamic resource payload:\n{:#}",
serde_json::to_value(resource).unwrap_or(serde_json::Value::Null)
);

// Using same field manager as in apply()
let mut patch_params = PatchParams::apply("harmony");
patch_params.force = force_conflicts;

if *crate::config::DRY_RUN {
// Dry-run path: fetch current, show diff, and return appropriate object
match api.get(name).await {
Ok(current) => {
trace!("Received current dynamic value {current:#?}");

println!("\nPerforming dry-run for resource: '{}'", name);

// Serialize current and new, and strip status from current if present
let mut current_yaml =
serde_yaml::to_value(&current).unwrap_or_else(|_| serde_yaml::Value::Null);
if let Some(map) = current_yaml.as_mapping_mut() {
if map.contains_key(&serde_yaml::Value::String("status".to_string())) {
let removed =
map.remove(&serde_yaml::Value::String("status".to_string()));
trace!("Removed status from current dynamic object: {:?}", removed);
} else {
trace!(
"Did not find status entry for current dynamic object {}/{}",
current.metadata.namespace.as_deref().unwrap_or(""),
current.metadata.name.as_deref().unwrap_or("")
);
}
}

let current_yaml = serde_yaml::to_string(&current_yaml)
.unwrap_or_else(|_| "Failed to serialize current resource".to_string());
let new_yaml = serde_yaml::to_string(resource)
.unwrap_or_else(|_| "Failed to serialize new resource".to_string());

if current_yaml == new_yaml {
println!("No changes detected.");
return Ok(current);
}

println!("Changes detected:");
let diff = TextDiff::from_lines(&current_yaml, &new_yaml);
for change in diff.iter_all_changes() {
let sign = match change.tag() {
similar::ChangeTag::Delete => "-",
similar::ChangeTag::Insert => "+",
similar::ChangeTag::Equal => " ",
};
print!("{}{}", sign, change);
}

// Return the incoming resource as the would-be applied state
Ok(resource.clone())
}
Err(Error::Api(ErrorResponse { code: 404, .. })) => {
println!("\nPerforming dry-run for new resource: '{}'", name);
println!(
"Resource does not exist. It would be created with the following content:"
);
let new_yaml = serde_yaml::to_string(resource)
.unwrap_or_else(|_| "Failed to serialize new resource".to_string());
for line in new_yaml.lines() {
println!("+{}", line);
}
Ok(resource.clone())
}
Err(e) => {
error!("Failed to get dynamic resource '{}': {}", name, e);
Err(e)
}
}
} else {
// Real apply via server-side apply
debug!("Patching (server-side apply) dynamic resource '{}'", name);
api.patch(name, &patch_params, &Patch::Apply(resource))
.await
.map_err(|e| {
error!("Failed to apply dynamic resource '{}': {}", name, e);
e
})
}
}

/// Apply a resource in namespace
///
/// See `kubectl apply` for more information on the expected behavior of this function
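A hypothetical call site for the new `apply_dynamic` helper is sketched below; the object being built (a ConfigMap named `demo-config`) is an assumption for illustration only.

```rust
// Hypothetical usage sketch for the new server-side-apply helper.
use kube::api::{ApiResource, DynamicObject, GroupVersionKind};

async fn apply_example(client: &K8sClient) -> Result<(), kube::Error> {
    // DynamicObject::new copies apiVersion/kind from the ApiResource, which is
    // what get_api_for_dynamic_object later reads back.
    let gvk = GroupVersionKind::gvk("", "v1", "ConfigMap");
    let mut obj = DynamicObject::new("demo-config", &ApiResource::from_gvk(&gvk));
    obj.data = serde_json::json!({ "data": { "hello": "world" } });
    // force_conflicts = true claims fields currently owned by other field managers.
    let applied = client.apply_dynamic(&obj, Some("default"), true).await?;
    println!("applied {}", applied.metadata.name.unwrap_or_default());
    Ok(())
}
```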
@@ -1,6 +1,5 @@
mod ha_cluster;
pub mod ingress;
pub mod node_exporter;
use harmony_types::net::IpAddress;
mod host_binding;
mod http;
@@ -1,17 +0,0 @@
use async_trait::async_trait;

use crate::executors::ExecutorError;

#[async_trait]
pub trait NodeExporter: Send + Sync {
async fn ensure_initialized(&self) -> Result<(), ExecutorError>;
async fn commit_config(&self) -> Result<(), ExecutorError>;
async fn reload_restart(&self) -> Result<(), ExecutorError>;
}

//TODO complete this impl
impl std::fmt::Debug for dyn NodeExporter {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.write_fmt(format_args!("NodeExporter ",))
}
}
@@ -1,6 +1,7 @@
use std::any::Any;

use async_trait::async_trait;
use kube::api::DynamicObject;
use log::debug;

use crate::{
@@ -76,6 +77,14 @@ pub trait AlertReceiver<S: AlertSender>: std::fmt::Debug + Send + Sync {
fn name(&self) -> String;
fn clone_box(&self) -> Box<dyn AlertReceiver<S>>;
fn as_any(&self) -> &dyn Any;
fn as_alertmanager_receiver(&self) -> Result<AlertManagerReceiver, String>;
}

#[derive(Debug)]
pub struct AlertManagerReceiver {
pub receiver_config: serde_json::Value,
// FIXME we should not leak k8s here. DynamicObject is k8s specific
pub additional_ressources: Vec<DynamicObject>,
}

#[async_trait]
@@ -14,7 +14,7 @@ use k8s_openapi::{
},
apimachinery::pkg::util::intstr::IntOrString,
};
use kube::Resource;
use kube::{api::DynamicObject, Resource};
use log::debug;
use serde::de::DeserializeOwned;
use serde_json::json;
182 harmony/src/infra/kube.rs (new file)
@@ -0,0 +1,182 @@
use k8s_openapi::Resource as K8sResource;
use kube::api::{ApiResource, DynamicObject, GroupVersionKind};
use kube::core::TypeMeta;
use serde::Serialize;
use serde::de::DeserializeOwned;
use serde_json::Value;

/// Convert a typed Kubernetes resource `K` into a `DynamicObject`.
///
/// Requirements:
/// - `K` must be a k8s_openapi resource (provides static GVK via `Resource`).
/// - `K` must have standard Kubernetes shape (metadata + payload fields).
///
/// Notes:
/// - We set `types` (apiVersion/kind) and copy `metadata`.
/// - We place the remaining top-level fields into `obj.data` as JSON.
/// - Scope is not encoded on the object itself; you still need the corresponding
///   `DynamicResource` (derived from K::group/version/kind) when constructing an Api.
///
/// Example usage:
/// let dyn_obj = kube_resource_to_dynamic(secret)?;
/// let api: Api<DynamicObject> = Api::namespaced_with(client, "ns", &dr);
/// api.patch(&dyn_obj.name_any(), &PatchParams::apply("mgr"), &Patch::Apply(dyn_obj)).await?;
pub fn kube_resource_to_dynamic<K>(res: &K) -> Result<DynamicObject, String>
where
K: K8sResource + Serialize + DeserializeOwned,
{
// Serialize the typed resource to JSON so we can split metadata and payload
let mut v = serde_json::to_value(res).map_err(|e| format!("Failed to serialize : {e}"))?;
let obj = v
.as_object_mut()
.ok_or_else(|| "expected object JSON".to_string())?;

// Extract and parse metadata into kube::core::ObjectMeta
let metadata_value = obj
.remove("metadata")
.ok_or_else(|| "missing metadata".to_string())?;
let metadata: kube::core::ObjectMeta = serde_json::from_value(metadata_value)
.map_err(|e| format!("Failed to deserialize : {e}"))?;

// Name is required for DynamicObject::new; prefer metadata.name
let name = metadata
.name
.clone()
.ok_or_else(|| "metadata.name is required".to_string())?;

// Remaining fields (spec/status/data/etc.) become the dynamic payload
let payload = Value::Object(obj.clone());

// Construct the DynamicObject
let mut dyn_obj = DynamicObject::new(
&name,
&ApiResource::from_gvk(&GroupVersionKind::gvk(K::GROUP, K::VERSION, K::KIND)),
);
dyn_obj.types = Some(TypeMeta {
api_version: api_version_for::<K>(),
kind: K::KIND.into(),
});

// Preserve namespace/labels/annotations/etc.
dyn_obj.metadata = metadata;

// Attach payload
dyn_obj.data = payload;

Ok(dyn_obj)
}

/// Helper: compute apiVersion string ("group/version" or "v1" for core).
fn api_version_for<K>() -> String
where
K: K8sResource,
{
let group = K::GROUP;
let version = K::VERSION;
if group.is_empty() {
version.to_string() // core/v1 => "v1"
} else {
format!("{}/{}", group, version)
}
}
#[cfg(test)]
mod test {
use super::*;
use k8s_openapi::api::{
apps::v1::{Deployment, DeploymentSpec},
core::v1::{PodTemplateSpec, Secret},
};
use kube::api::ObjectMeta;
use pretty_assertions::assert_eq;

#[test]
fn secret_to_dynamic_roundtrip() {
// Create a sample Secret resource
let mut secret = Secret {
metadata: ObjectMeta {
name: Some("my-secret".to_string()),
..Default::default()
},
type_: Some("kubernetes.io/service-account-token".to_string()),
..Default::default()
};

// Convert to DynamicResource
let dynamic: DynamicObject =
kube_resource_to_dynamic(&secret).expect("Failed to convert Secret to DynamicResource");

// Serialize both the original and dynamic resources to Value
let original_value = serde_json::to_value(&secret).expect("Failed to serialize Secret");
let dynamic_value =
serde_json::to_value(&dynamic).expect("Failed to serialize DynamicResource");

// Assert that they are identical
assert_eq!(original_value, dynamic_value);

secret.metadata.namespace = Some("false".to_string());
let modified_value = serde_json::to_value(&secret).expect("Failed to serialize Secret");
assert_ne!(modified_value, dynamic_value);
}

#[test]
fn deployment_to_dynamic_roundtrip() {
// Create a sample Deployment with nested structures
let mut deployment = Deployment {
metadata: ObjectMeta {
name: Some("my-deployment".to_string()),
labels: Some({
let mut map = std::collections::BTreeMap::new();
map.insert("app".to_string(), "nginx".to_string());
map
}),
..Default::default()
},
spec: Some(DeploymentSpec {
replicas: Some(3),
selector: Default::default(),
template: PodTemplateSpec {
metadata: Some(ObjectMeta {
labels: Some({
let mut map = std::collections::BTreeMap::new();
map.insert("app".to_string(), "nginx".to_string());
map
}),
..Default::default()
}),
spec: Some(Default::default()), // PodSpec with empty containers for simplicity
},
..Default::default()
}),
..Default::default()
};

let dynamic = kube_resource_to_dynamic(&deployment).expect("Failed to convert Deployment");

let original_value = serde_json::to_value(&deployment).unwrap();
let dynamic_value = serde_json::to_value(&dynamic).unwrap();

assert_eq!(original_value, dynamic_value);

assert_eq!(
dynamic.data.get("spec").unwrap().get("replicas").unwrap(),
3
);
assert_eq!(
dynamic
.data
.get("spec")
.unwrap()
.get("template")
.unwrap()
.get("metadata")
.unwrap()
.get("labels")
.unwrap()
.get("app")
.unwrap()
.as_str()
.unwrap(),
"nginx".to_string()
);
}
}
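Taken together with the `apply_dynamic` addition above, a typed resource can be pushed through the dynamic apply path. The sketch below is illustrative only; the Secret name, namespace, and error mapping are assumptions, not part of this change.

```rust
// Sketch: convert a typed Secret and apply it via the dynamic client helpers.
use k8s_openapi::api::core::v1::Secret;
use kube::api::ObjectMeta;

async fn apply_typed_as_dynamic(client: &K8sClient) -> Result<(), String> {
    let secret = Secret {
        metadata: ObjectMeta {
            name: Some("discord-webhook-example-secret".to_string()),
            namespace: Some("openshift-monitoring".to_string()),
            ..Default::default()
        },
        ..Default::default()
    };
    // kube_resource_to_dynamic preserves metadata and moves the rest into `data`.
    let dynamic = kube_resource_to_dynamic(&secret)?;
    client
        .apply_dynamic(&dynamic, Some("openshift-monitoring"), true)
        .await
        .map_err(|e| format!("apply failed: {e}"))?;
    Ok(())
}
```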
@@ -3,5 +3,6 @@ pub mod executors;
pub mod hp_ilo;
pub mod intel_amt;
pub mod inventory;
pub mod kube;
pub mod opnsense;
mod sqlx;
@@ -4,7 +4,6 @@ mod firewall;
mod http;
mod load_balancer;
mod management;
pub mod node_exporter;
mod tftp;
use std::sync::Arc;
@@ -1,47 +0,0 @@
use async_trait::async_trait;
use log::debug;

use crate::{
executors::ExecutorError, infra::opnsense::OPNSenseFirewall,
topology::node_exporter::NodeExporter,
};

#[async_trait]
impl NodeExporter for OPNSenseFirewall {
async fn ensure_initialized(&self) -> Result<(), ExecutorError> {
let mut config = self.opnsense_config.write().await;
let node_exporter = config.node_exporter();
if let Some(config) = node_exporter.get_full_config() {
debug!(
"Node exporter available in opnsense config, assuming it is already installed. {config:?}"
);
} else {
config
.install_package("os-node_exporter")
.await
.map_err(|e| {
ExecutorError::UnexpectedError(format!("Executor failed when trying to install os-node_exporter package with error {e:?}"
))
})?;
}

config
.node_exporter()
.enable(true)
.map_err(|e| ExecutorError::UnexpectedError(e.to_string()))?;
Ok(())
}
async fn commit_config(&self) -> Result<(), ExecutorError> {
OPNSenseFirewall::commit_config(self).await
}

async fn reload_restart(&self) -> Result<(), ExecutorError> {
self.opnsense_config
.write()
.await
.node_exporter()
.reload_restart()
.await
.map_err(|e| ExecutorError::UnexpectedError(e.to_string()))
}
}
@@ -3,16 +3,20 @@ use std::collections::BTreeMap;

use async_trait::async_trait;
use k8s_openapi::api::core::v1::Secret;
use kube::api::ObjectMeta;
use kube::Resource;
use kube::api::{DynamicObject, ObjectMeta};
use log::debug;
use serde::Serialize;
use serde_json::json;
use serde_yaml::{Mapping, Value};

use crate::infra::kube::kube_resource_to_dynamic;
use crate::modules::monitoring::kube_prometheus::crd::crd_alertmanager_config::{
AlertmanagerConfig, AlertmanagerConfigSpec, CRDPrometheus,
};
use crate::modules::monitoring::kube_prometheus::crd::rhob_alertmanager_config::RHOBObservability;
use crate::modules::monitoring::okd::OpenshiftClusterAlertSender;
use crate::topology::oberservability::monitoring::AlertManagerReceiver;
use crate::{
interpret::{InterpretError, Outcome},
modules::monitoring::{
@@ -28,14 +32,25 @@ use harmony_types::net::Url;

#[derive(Debug, Clone, Serialize)]
pub struct DiscordWebhook {
// FIXME use a stricter type as this is used as a k8s resource name. It could also be converted
// to remove whitespace and other invalid characters, but this is a potential bug that is not
// very easy to figure out for beginners.
//
// It gives out error messages like this :
//
// [2025-10-30 15:10:49 ERROR harmony::domain::topology::k8s] Failed to get dynamic resource 'Webhook example-secret': Failed to build request: failed to build request: invalid uri character
// [2025-10-30 15:10:49 ERROR harmony_cli::cli_logger] ⚠️ InterpretError : Failed to build request: failed to build request: invalid uri character
// [2025-10-30 15:10:49 DEBUG harmony::domain::maestro] Got result Err(InterpretError { msg: "InterpretError : Failed to build request: failed to build request: invalid uri character" })
// [2025-10-30 15:10:49 INFO harmony_cli::cli_logger] 🎼 Harmony completed
//
// thread 'main' panicked at examples/okd_cluster_alerts/src/main.rs:25:6:
// called `Result::unwrap()` on an `Err` value: InterpretError { msg: "InterpretError : Failed to build request: failed to build request: invalid uri character" }
pub name: String,
pub url: Url,
}

#[async_trait]
impl AlertReceiver<RHOBObservability> for DiscordWebhook {
async fn install(&self, sender: &RHOBObservability) -> Result<Outcome, InterpretError> {
let ns = sender.namespace.clone();
impl DiscordWebhook {
fn get_receiver_config(&self) -> Result<AlertManagerReceiver, String> {
let secret_name = format!("{}-secret", self.name.clone());
let webhook_key = format!("{}", self.url.clone());
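The FIXME above notes that `name` flows straight into a Kubernetes object name and that whitespace or other invalid characters surface only as a confusing "invalid uri character" error. One possible mitigation (not part of this PR, shown purely as a sketch) would be to sanitize the receiver name before deriving the Secret name:

```rust
// Hypothetical sanitizer sketch; not part of this change. It lowercases the
// receiver name and replaces characters that are invalid in a Kubernetes
// object name (roughly an RFC 1123 label: lowercase alphanumerics and '-').
fn sanitize_k8s_name(raw: &str) -> String {
    let mut out: String = raw
        .to_lowercase()
        .chars()
        .map(|c| if c.is_ascii_alphanumeric() || c == '-' { c } else { '-' })
        .collect();
    out.truncate(63); // Kubernetes names of this kind are limited to 63 characters
    out.trim_matches('-').to_string()
}
```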
@@ -52,14 +67,10 @@ impl AlertReceiver<RHOBObservability> for DiscordWebhook {
..Default::default()
};

let _ = sender.client.apply(&secret, Some(&ns)).await;
let spec = crate::modules::monitoring::kube_prometheus::crd::rhob_alertmanager_config::AlertmanagerConfigSpec {
data: json!({
"route": {
"receiver": self.name,
},
"receivers": [
{
Ok(AlertManagerReceiver {
additional_ressources: vec![kube_resource_to_dynamic(&secret)?],

receiver_config: json!({
"name": self.name,
"discordConfigs": [
{
@@ -71,7 +82,59 @@ impl AlertReceiver<RHOBObservability> for DiscordWebhook {
"message": "{{ template \"discord.default.message\" . }}"
}
]
}),
})
}
}

#[async_trait]
impl AlertReceiver<OpenshiftClusterAlertSender> for DiscordWebhook {
async fn install(
&self,
sender: &OpenshiftClusterAlertSender,
) -> Result<Outcome, InterpretError> {
todo!()
}

fn name(&self) -> String {
self.name.clone()
}

fn clone_box(&self) -> Box<dyn AlertReceiver<OpenshiftClusterAlertSender>> {
Box::new(self.clone())
}

fn as_any(&self) -> &dyn Any {
todo!()
}

fn as_alertmanager_receiver(&self) -> Result<AlertManagerReceiver, String> {
self.get_receiver_config()
}
}

#[async_trait]
impl AlertReceiver<RHOBObservability> for DiscordWebhook {
fn as_alertmanager_receiver(&self) -> Result<AlertManagerReceiver, String> {
todo!()
}

async fn install(&self, sender: &RHOBObservability) -> Result<Outcome, InterpretError> {
let ns = sender.namespace.clone();

let config = self.get_receiver_config()?;
for resource in config.additional_ressources.iter() {
todo!("can I apply a dynamicresource");
// sender.client.apply(resource, Some(&ns)).await;
}

let spec = crate::modules::monitoring::kube_prometheus::crd::rhob_alertmanager_config::AlertmanagerConfigSpec {
data: json!({
"route": {
"receiver": self.name,
},
"receivers": [
config.receiver_config
]
}),
};
@@ -122,6 +185,9 @@ impl AlertReceiver<RHOBObservability> for DiscordWebhook {

#[async_trait]
impl AlertReceiver<CRDPrometheus> for DiscordWebhook {
fn as_alertmanager_receiver(&self) -> Result<AlertManagerReceiver, String> {
todo!()
}
async fn install(&self, sender: &CRDPrometheus) -> Result<Outcome, InterpretError> {
let ns = sender.namespace.clone();
let secret_name = format!("{}-secret", self.name.clone());
@@ -200,6 +266,9 @@ impl AlertReceiver<CRDPrometheus> for DiscordWebhook {

#[async_trait]
impl AlertReceiver<Prometheus> for DiscordWebhook {
fn as_alertmanager_receiver(&self) -> Result<AlertManagerReceiver, String> {
todo!()
}
async fn install(&self, sender: &Prometheus) -> Result<Outcome, InterpretError> {
sender.install_receiver(self).await
}
@@ -226,6 +295,9 @@ impl PrometheusReceiver for DiscordWebhook {

#[async_trait]
impl AlertReceiver<KubePrometheus> for DiscordWebhook {
fn as_alertmanager_receiver(&self) -> Result<AlertManagerReceiver, String> {
todo!()
}
async fn install(&self, sender: &KubePrometheus) -> Result<Outcome, InterpretError> {
sender.install_receiver(self).await
}
@@ -19,7 +19,7 @@ use crate::{
},
prometheus::prometheus::{Prometheus, PrometheusReceiver},
},
topology::oberservability::monitoring::AlertReceiver,
topology::oberservability::monitoring::{AlertManagerReceiver, AlertReceiver},
};
use harmony_types::net::Url;
@@ -31,6 +31,9 @@ pub struct WebhookReceiver {

#[async_trait]
impl AlertReceiver<RHOBObservability> for WebhookReceiver {
fn as_alertmanager_receiver(&self) -> Result<AlertManagerReceiver, String> {
todo!()
}
async fn install(&self, sender: &RHOBObservability) -> Result<Outcome, InterpretError> {
let spec = crate::modules::monitoring::kube_prometheus::crd::rhob_alertmanager_config::AlertmanagerConfigSpec {
data: json!({
@@ -97,6 +100,9 @@ impl AlertReceiver<RHOBObservability> for WebhookReceiver {

#[async_trait]
impl AlertReceiver<CRDPrometheus> for WebhookReceiver {
fn as_alertmanager_receiver(&self) -> Result<AlertManagerReceiver, String> {
todo!()
}
async fn install(&self, sender: &CRDPrometheus) -> Result<Outcome, InterpretError> {
let spec = crate::modules::monitoring::kube_prometheus::crd::crd_alertmanager_config::AlertmanagerConfigSpec {
data: json!({
@@ -158,6 +164,9 @@ impl AlertReceiver<CRDPrometheus> for WebhookReceiver {

#[async_trait]
impl AlertReceiver<Prometheus> for WebhookReceiver {
fn as_alertmanager_receiver(&self) -> Result<AlertManagerReceiver, String> {
todo!()
}
async fn install(&self, sender: &Prometheus) -> Result<Outcome, InterpretError> {
sender.install_receiver(self).await
}
@@ -184,6 +193,9 @@ impl PrometheusReceiver for WebhookReceiver {

#[async_trait]
impl AlertReceiver<KubePrometheus> for WebhookReceiver {
fn as_alertmanager_receiver(&self) -> Result<AlertManagerReceiver, String> {
todo!()
}
async fn install(&self, sender: &KubePrometheus) -> Result<Outcome, InterpretError> {
sender.install_receiver(self).await
}
214 harmony/src/modules/monitoring/okd/cluster_monitoring.rs (new file)
@@ -0,0 +1,214 @@
use base64::prelude::*;

use async_trait::async_trait;
use harmony_types::id::Id;
use kube::api::DynamicObject;
use log::{debug, info, trace};
use serde::Serialize;

use crate::{
data::Version,
interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
inventory::Inventory,
modules::monitoring::okd::OpenshiftClusterAlertSender,
score::Score,
topology::{K8sclient, Topology, oberservability::monitoring::AlertReceiver},
};

impl Clone for Box<dyn AlertReceiver<OpenshiftClusterAlertSender>> {
fn clone(&self) -> Self {
self.clone_box()
}
}

impl Serialize for Box<dyn AlertReceiver<OpenshiftClusterAlertSender>> {
fn serialize<S>(&self, _serializer: S) -> Result<S::Ok, S::Error>
where
S: serde::Serializer,
{
todo!()
}
}

#[derive(Debug, Clone, Serialize)]
pub struct OpenshiftClusterAlertScore {
pub receivers: Vec<Box<dyn AlertReceiver<OpenshiftClusterAlertSender>>>,
}

impl<T: Topology + K8sclient> Score<T> for OpenshiftClusterAlertScore {
fn name(&self) -> String {
"ClusterAlertScore".to_string()
}

#[doc(hidden)]
fn create_interpret(&self) -> Box<dyn Interpret<T>> {
Box::new(OpenshiftClusterAlertInterpret {
receivers: self.receivers.clone(),
})
}
}

#[derive(Debug)]
pub struct OpenshiftClusterAlertInterpret {
receivers: Vec<Box<dyn AlertReceiver<OpenshiftClusterAlertSender>>>,
}

#[async_trait]
impl<T: Topology + K8sclient> Interpret<T> for OpenshiftClusterAlertInterpret {
async fn execute(
&self,
_inventory: &Inventory,
topology: &T,
) -> Result<Outcome, InterpretError> {
let client = topology.k8s_client().await?;
let openshift_monitoring_namespace = "openshift-monitoring";

let mut alertmanager_main_secret: DynamicObject = client
.get_secret_json_value("alertmanager-main", Some(openshift_monitoring_namespace))
.await?;
trace!("Got secret {alertmanager_main_secret:#?}");

let data: &mut serde_json::Value = &mut alertmanager_main_secret.data;
trace!("Alertmanager-main secret data {data:#?}");
let data_obj = data
.get_mut("data")
.ok_or(InterpretError::new(
"Missing 'data' field in alertmanager-main secret.".to_string(),
))?
.as_object_mut()
.ok_or(InterpretError::new(
"'data' field in alertmanager-main secret is expected to be an object ."
.to_string(),
))?;

let config_b64 = data_obj
.get("alertmanager.yaml")
.ok_or(InterpretError::new(
"Missing 'alertmanager.yaml' in alertmanager-main secret data".to_string(),
))?
.as_str()
.unwrap_or("");
trace!("Config base64 {config_b64}");

let config_bytes = BASE64_STANDARD.decode(config_b64).unwrap_or_default();

let mut am_config: serde_yaml::Value =
serde_yaml::from_str(&String::from_utf8(config_bytes).unwrap_or_default())
.unwrap_or_default();

debug!("Current alertmanager config {am_config:#?}");

let existing_receivers_sequence = if let Some(receivers) = am_config.get_mut("receivers") {
match receivers.as_sequence_mut() {
Some(seq) => seq,
None => {
return Err(InterpretError::new(format!(
"Expected alertmanager config receivers to be a sequence, got {:?}",
receivers
)));
}
}
} else {
&mut serde_yaml::Sequence::default()
};

let mut additional_resources = vec![];

for custom_receiver in &self.receivers {
let name = custom_receiver.name();
let alertmanager_receiver = custom_receiver.as_alertmanager_receiver()?;

let json_value = alertmanager_receiver.receiver_config;

let yaml_string = serde_json::to_string(&json_value).map_err(|e| {
InterpretError::new(format!("Failed to serialize receiver config: {}", e))
})?;

let yaml_value: serde_yaml::Value =
serde_yaml::from_str(&yaml_string).map_err(|e| {
InterpretError::new(format!("Failed to parse receiver config as YAML: {}", e))
})?;

if let Some(idx) = existing_receivers_sequence.iter().position(|r| {
r.get("name")
.and_then(|n| n.as_str())
.map_or(false, |n| n == name)
}) {
info!("Replacing existing AlertManager receiver: {}", name);
existing_receivers_sequence[idx] = yaml_value;
} else {
debug!("Adding new AlertManager receiver: {}", name);
existing_receivers_sequence.push(yaml_value);
}

additional_resources.push(alertmanager_receiver.additional_ressources);
}

debug!("Current alertmanager config {am_config:#?}");
// TODO
// - save new version of alertmanager config
// - write additional ressources to the cluster
let am_config = serde_yaml::to_string(&am_config).map_err(|e| {
InterpretError::new(format!(
"Failed to serialize new alertmanager config to string : {e}"
))
})?;

let mut am_config_b64 = String::new();
BASE64_STANDARD.encode_string(am_config, &mut am_config_b64);

// TODO put update configmap value and save new value
data_obj.insert(
"alertmanager.yaml".to_string(),
serde_json::Value::String(am_config_b64),
);

// https://kubernetes.io/docs/reference/using-api/server-side-apply/#field-management
alertmanager_main_secret.metadata.managed_fields = None;

trace!("Applying new alertmanager_main_secret {alertmanager_main_secret:#?}");
client
.apply_dynamic(
&alertmanager_main_secret,
Some(openshift_monitoring_namespace),
true,
)
.await?;

let additional_resources = additional_resources.concat();
trace!("Applying additional ressources for alert receivers {additional_resources:#?}");
client
.apply_dynamic_many(
&additional_resources,
Some(openshift_monitoring_namespace),
true,
)
.await?;

Ok(Outcome::success(format!(
"Successfully configured {} cluster alert receivers: {}",
self.receivers.len(),
self.receivers
.iter()
.map(|r| r.name())
.collect::<Vec<_>>()
.join(", ")
)))
}

fn get_name(&self) -> InterpretName {
InterpretName::Custom("OpenshiftClusterAlertInterpret")
}

fn get_version(&self) -> Version {
todo!()
}

fn get_status(&self) -> InterpretStatus {
todo!()
}

fn get_children(&self) -> Vec<Id> {
todo!()
}
}
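The core of this interpret is the decode / modify / re-encode cycle on the `alertmanager.yaml` key of the `alertmanager-main` secret. A minimal standalone sketch of that cycle is shown below; the receiver entry appended here is an assumption for illustration, and error handling is simplified compared to the interpret above.

```rust
// Minimal sketch of the base64 round-trip used on alertmanager.yaml.
use base64::prelude::*;

fn roundtrip_alertmanager_yaml(encoded: &str) -> Result<String, String> {
    // Decode the base64 payload stored in the Secret's data map.
    let bytes = BASE64_STANDARD.decode(encoded).map_err(|e| e.to_string())?;
    let text = String::from_utf8(bytes).map_err(|e| e.to_string())?;
    let mut config: serde_yaml::Value = serde_yaml::from_str(&text).map_err(|e| e.to_string())?;

    // Append a receiver entry to the existing `receivers` sequence, if present.
    if let Some(seq) = config.get_mut("receivers").and_then(|r| r.as_sequence_mut()) {
        seq.push(serde_yaml::from_str("name: discord-webhook-example").map_err(|e| e.to_string())?);
    }

    // Re-serialize and re-encode so the value can be written back into the Secret.
    let updated = serde_yaml::to_string(&config).map_err(|e| e.to_string())?;
    Ok(BASE64_STANDARD.encode(updated))
}
```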
90 harmony/src/modules/monitoring/okd/config.rs (new file)
@@ -0,0 +1,90 @@
use std::{collections::BTreeMap, sync::Arc};

use crate::{
interpret::{InterpretError, Outcome},
topology::k8s::K8sClient,
};
use k8s_openapi::api::core::v1::ConfigMap;
use kube::api::ObjectMeta;

pub(crate) struct Config;

impl Config {
pub async fn create_cluster_monitoring_config_cm(
client: &Arc<K8sClient>,
) -> Result<Outcome, InterpretError> {
let mut data = BTreeMap::new();
data.insert(
"config.yaml".to_string(),
r#"
enableUserWorkload: true
alertmanagerMain:
enableUserAlertmanagerConfig: true
"#
.to_string(),
);

let cm = ConfigMap {
metadata: ObjectMeta {
name: Some("cluster-monitoring-config".to_string()),
namespace: Some("openshift-monitoring".to_string()),
..Default::default()
},
data: Some(data),
..Default::default()
};
client.apply(&cm, Some("openshift-monitoring")).await?;

Ok(Outcome::success(
"updated cluster-monitoring-config-map".to_string(),
))
}

pub async fn create_user_workload_monitoring_config_cm(
client: &Arc<K8sClient>,
) -> Result<Outcome, InterpretError> {
let mut data = BTreeMap::new();
data.insert(
"config.yaml".to_string(),
r#"
alertmanager:
enabled: true
enableAlertmanagerConfig: true
"#
.to_string(),
);
let cm = ConfigMap {
metadata: ObjectMeta {
name: Some("user-workload-monitoring-config".to_string()),
namespace: Some("openshift-user-workload-monitoring".to_string()),
..Default::default()
},
data: Some(data),
..Default::default()
};
client
.apply(&cm, Some("openshift-user-workload-monitoring"))
.await?;

Ok(Outcome::success(
"updated openshift-user-monitoring-config-map".to_string(),
))
}

pub async fn verify_user_workload(client: &Arc<K8sClient>) -> Result<Outcome, InterpretError> {
let namespace = "openshift-user-workload-monitoring";
let alertmanager_name = "alertmanager-user-workload-0";
let prometheus_name = "prometheus-user-workload-0";
client
.wait_for_pod_ready(alertmanager_name, Some(namespace))
.await?;
client
.wait_for_pod_ready(prometheus_name, Some(namespace))
.await?;

Ok(Outcome::success(format!(
"pods: {}, {} ready in ns: {}",
alertmanager_name, prometheus_name, namespace
)))
}
}
@@ -1,16 +1,13 @@
use std::{collections::BTreeMap, sync::Arc};

use crate::{
data::Version,
interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
inventory::Inventory,
modules::monitoring::okd::config::Config,
score::Score,
topology::{K8sclient, Topology, k8s::K8sClient},
topology::{K8sclient, Topology},
};
use async_trait::async_trait;
use harmony_types::id::Id;
use k8s_openapi::api::core::v1::ConfigMap;
use kube::api::ObjectMeta;
use serde::Serialize;

#[derive(Clone, Debug, Serialize)]
@@ -37,10 +34,9 @@ impl<T: Topology + K8sclient> Interpret<T> for OpenshiftUserWorkloadMonitoringIn
topology: &T,
) -> Result<Outcome, InterpretError> {
let client = topology.k8s_client().await.unwrap();
self.update_cluster_monitoring_config_cm(&client).await?;
self.update_user_workload_monitoring_config_cm(&client)
.await?;
self.verify_user_workload(&client).await?;
Config::create_cluster_monitoring_config_cm(&client).await?;
Config::create_user_workload_monitoring_config_cm(&client).await?;
Config::verify_user_workload(&client).await?;
Ok(Outcome::success(
"successfully enabled user-workload-monitoring".to_string(),
))
@@ -62,88 +58,3 @@ impl<T: Topology + K8sclient> Interpret<T> for OpenshiftUserWorkloadMonitoringIn
todo!()
}
}

impl OpenshiftUserWorkloadMonitoringInterpret {
pub async fn update_cluster_monitoring_config_cm(
&self,
client: &Arc<K8sClient>,
) -> Result<Outcome, InterpretError> {
let mut data = BTreeMap::new();
data.insert(
"config.yaml".to_string(),
r#"
enableUserWorkload: true
alertmanagerMain:
enableUserAlertmanagerConfig: true
"#
.to_string(),
);

let cm = ConfigMap {
metadata: ObjectMeta {
name: Some("cluster-monitoring-config".to_string()),
namespace: Some("openshift-monitoring".to_string()),
..Default::default()
},
data: Some(data),
..Default::default()
};
client.apply(&cm, Some("openshift-monitoring")).await?;

Ok(Outcome::success(
"updated cluster-monitoring-config-map".to_string(),
))
}

pub async fn update_user_workload_monitoring_config_cm(
&self,
client: &Arc<K8sClient>,
) -> Result<Outcome, InterpretError> {
let mut data = BTreeMap::new();
data.insert(
"config.yaml".to_string(),
r#"
alertmanager:
enabled: true
enableAlertmanagerConfig: true
"#
.to_string(),
);
let cm = ConfigMap {
metadata: ObjectMeta {
name: Some("user-workload-monitoring-config".to_string()),
namespace: Some("openshift-user-workload-monitoring".to_string()),
..Default::default()
},
data: Some(data),
..Default::default()
};
client
.apply(&cm, Some("openshift-user-workload-monitoring"))
.await?;

Ok(Outcome::success(
"updated openshift-user-monitoring-config-map".to_string(),
))
}

pub async fn verify_user_workload(
&self,
client: &Arc<K8sClient>,
) -> Result<Outcome, InterpretError> {
let namespace = "openshift-user-workload-monitoring";
let alertmanager_name = "alertmanager-user-workload-0";
let prometheus_name = "prometheus-user-workload-0";
client
.wait_for_pod_ready(alertmanager_name, Some(namespace))
.await?;
client
.wait_for_pod_ready(prometheus_name, Some(namespace))
.await?;

Ok(Outcome::success(format!(
"pods: {}, {} ready in ns: {}",
alertmanager_name, prometheus_name, namespace
)))
}
}
@@ -1 +1,14 @@
use crate::topology::oberservability::monitoring::AlertSender;

pub mod cluster_monitoring;
pub(crate) mod config;
pub mod enable_user_workload;

#[derive(Debug)]
pub struct OpenshiftClusterAlertSender;

impl AlertSender for OpenshiftClusterAlertSender {
fn name(&self) -> String {
"OpenshiftClusterAlertSender".to_string()
}
}
@@ -1,4 +1,3 @@
pub mod node_exporter;
mod shell;
mod upgrade;
pub use shell::*;
@@ -1,70 +0,0 @@
use async_trait::async_trait;
use harmony_types::id::Id;
use log::info;
use serde::Serialize;

use crate::{
data::Version,
interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
inventory::Inventory,
score::Score,
topology::{Topology, node_exporter::NodeExporter},
};

#[derive(Debug, Clone, Serialize)]
pub struct NodeExporterScore {}

impl<T: Topology + NodeExporter> Score<T> for NodeExporterScore {
fn name(&self) -> String {
"NodeExporterScore".to_string()
}

fn create_interpret(&self) -> Box<dyn Interpret<T>> {
Box::new(NodeExporterInterpret {})
}
}

#[derive(Debug)]
pub struct NodeExporterInterpret {}

#[async_trait]
impl<T: Topology + NodeExporter> Interpret<T> for NodeExporterInterpret {
async fn execute(
&self,
_inventory: &Inventory,
node_exporter: &T,
) -> Result<Outcome, InterpretError> {
info!(
"Making sure node exporter is initiailized: {:?}",
node_exporter.ensure_initialized().await?
);

info!("Applying Node Exporter configuration");

node_exporter.commit_config().await?;

info!("Reloading and restarting Node Exporter");

node_exporter.reload_restart().await?;

Ok(Outcome::success(format!(
"NodeExporter successfully configured"
)))
}

fn get_name(&self) -> InterpretName {
InterpretName::Custom("NodeExporter")
}

fn get_version(&self) -> Version {
todo!()
}

fn get_status(&self) -> InterpretStatus {
todo!()
}

fn get_children(&self) -> Vec<Id> {
todo!()
}
}
@@ -9,7 +9,7 @@ pub struct Interface {
pub physical_interface_name: String,
pub descr: Option<MaybeString>,
pub mtu: Option<MaybeString>,
pub enable: MaybeString,
pub enable: Option<MaybeString>,
pub lock: Option<MaybeString>,
#[yaserde(rename = "spoofmac")]
pub spoof_mac: Option<MaybeString>,
@@ -134,19 +134,15 @@ mod test {
<interfaces>
<paul>
<if></if>
<enable/>
</paul>
<anotherpaul>
<if></if>
<enable/>
</anotherpaul>
<thirdone>
<if></if>
<enable/>
</thirdone>
<andgofor4>
<if></if>
<enable/>
</andgofor4>
</interfaces>
<bar>foo</bar>
@@ -17,7 +17,7 @@ pub struct OPNsense {
pub interfaces: NamedList<Interface>,
pub dhcpd: NamedList<DhcpInterface>,
pub snmpd: Snmpd,
pub syslog: Option<Syslog>,
pub syslog: Syslog,
pub nat: Nat,
pub filter: Filters,
pub load_balancer: Option<LoadBalancer>,
@@ -190,7 +190,7 @@ pub struct System {
pub webgui: WebGui,
pub usevirtualterminal: u8,
pub disablenatreflection: Option<String>,
pub disableconsolemenu: Option<u8>,
pub disableconsolemenu: u8,
pub disablevlanhwfilter: u8,
pub disablechecksumoffloading: u8,
pub disablesegmentationoffloading: u8,
@@ -216,7 +216,7 @@ pub struct System {
pub maximumfrags: Option<MaybeString>,
pub aliasesresolveinterval: Option<MaybeString>,
pub maximumtableentries: Option<MaybeString>,
pub language: Option<String>,
pub language: String,
pub dnsserver: Option<MaybeString>,
pub dns1gw: Option<String>,
pub dns2gw: Option<String>,
@@ -233,16 +233,16 @@
#[derive(Default, PartialEq, Debug, YaSerialize, YaDeserialize)]
pub struct Ssh {
pub group: String,
pub noauto: Option<u8>,
pub interfaces: Option<MaybeString>,
pub kex: Option<MaybeString>,
pub ciphers: Option<MaybeString>,
pub macs: Option<MaybeString>,
pub keys: Option<MaybeString>,
pub enabled: Option<String>,
pub passwordauth: Option<u8>,
pub keysig: Option<MaybeString>,
pub permitrootlogin: Option<u8>,
pub noauto: u8,
pub interfaces: MaybeString,
pub kex: MaybeString,
pub ciphers: MaybeString,
pub macs: MaybeString,
pub keys: MaybeString,
pub enabled: String,
pub passwordauth: u8,
pub keysig: MaybeString,
pub permitrootlogin: u8,
pub rekeylimit: Option<MaybeString>,
}
@@ -306,11 +306,11 @@ pub struct WebGui {
pub protocol: String,
#[yaserde(rename = "ssl-certref")]
pub ssl_certref: String,
pub port: Option<MaybeString>,
pub port: MaybeString,
#[yaserde(rename = "ssl-ciphers")]
pub ssl_ciphers: Option<MaybeString>,
pub interfaces: Option<MaybeString>,
pub compression: Option<MaybeString>,
pub ssl_ciphers: MaybeString,
pub interfaces: MaybeString,
pub compression: MaybeString,
pub nohttpreferercheck: Option<u8>,
}
@@ -433,7 +433,7 @@ pub struct OPNsenseXmlSection {
#[yaserde(rename = "Interfaces")]
pub interfaces: Option<ConfigInterfaces>,
#[yaserde(rename = "NodeExporter")]
pub node_exporter: Option<NodeExporter>,
pub node_exporter: Option<RawXml>,
#[yaserde(rename = "Kea")]
pub kea: Option<RawXml>,
pub monit: Option<Monit>,
@@ -1595,21 +1595,3 @@ pub struct Ifgroups {
#[yaserde(attribute = true)]
pub version: String,
}

#[derive(Default, PartialEq, Debug, YaSerialize, YaDeserialize)]
pub struct NodeExporter {
pub enabled: u8,
pub listenaddress: Option<MaybeString>,
pub listenport: u16,
pub cpu: u8,
pub exec: u8,
pub filesystem: u8,
pub loadavg: u8,
pub meminfo: u8,
pub netdev: u8,
pub time: u8,
pub devstat: u8,
pub interrupts: u8,
pub ntp: u8,
pub zfs: u8,
}
@@ -5,8 +5,7 @@ use crate::{
error::Error,
modules::{
caddy::CaddyConfig, dhcp_legacy::DhcpConfigLegacyISC, dns::UnboundDnsConfig,
dnsmasq::DhcpConfigDnsMasq, load_balancer::LoadBalancerConfig,
node_exporter::NodeExporterConfig, tftp::TftpConfig,
dnsmasq::DhcpConfigDnsMasq, load_balancer::LoadBalancerConfig, tftp::TftpConfig,
},
};
use log::{debug, info, trace, warn};
@@ -14,7 +13,6 @@ use opnsense_config_xml::OPNsense;
use russh::client;
use serde::Serialize;
use sha2::Digest;
use tokio::time::{sleep, Duration};

use super::{ConfigManager, OPNsenseShell};
@@ -73,10 +71,6 @@ impl Config {
LoadBalancerConfig::new(&mut self.opnsense, self.shell.clone())
}

pub fn node_exporter(&mut self) -> NodeExporterConfig<'_> {
NodeExporterConfig::new(&mut self.opnsense, self.shell.clone())
}

pub async fn upload_files(&self, source: &str, destination: &str) -> Result<String, Error> {
self.shell.upload_folder(source, destination).await
}
@@ -156,8 +150,7 @@ impl Config {

async fn reload_config(&mut self) -> Result<(), Error> {
info!("Reloading opnsense live config");
let (opnsense, _sha2) = Self::get_opnsense_instance(self.repository.clone()).await?;
self.opnsense = opnsense;
let (opnsense, sha2) = Self::get_opnsense_instance(self.repository.clone()).await?;
Ok(())
}
@@ -4,5 +4,4 @@ pub mod dhcp_legacy;
pub mod dns;
pub mod dnsmasq;
pub mod load_balancer;
pub mod node_exporter;
pub mod tftp;
@@ -1,54 +0,0 @@
use std::sync::Arc;

use opnsense_config_xml::{NodeExporter, OPNsense};

use crate::{config::OPNsenseShell, Error};

pub struct NodeExporterConfig<'a> {
opnsense: &'a mut OPNsense,
opnsense_shell: Arc<dyn OPNsenseShell>,
}

impl<'a> NodeExporterConfig<'a> {
pub fn new(opnsense: &'a mut OPNsense, opnsense_shell: Arc<dyn OPNsenseShell>) -> Self {
Self {
opnsense,
opnsense_shell,
}
}

pub fn get_full_config(&self) -> &Option<NodeExporter> {
&self.opnsense.opnsense.node_exporter
}

fn with_node_exporter<F, R>(&mut self, f: F) -> Result<R, &'static str>
where
F: FnOnce(&mut NodeExporter) -> R,
{
match &mut self.opnsense.opnsense.node_exporter.as_mut() {
Some(node_exporter) => Ok(f(node_exporter)),
None => Err("node exporter is not yet installed"),
}
}

pub fn enable(&mut self, enabled: bool) -> Result<(), &'static str> {
self.with_node_exporter(|node_exporter| node_exporter.enabled = enabled as u8)
.map(|_| ())
}

pub async fn reload_restart(&self) -> Result<(), Error> {
self.opnsense_shell
.exec("configctl node_exporter stop")
.await?;
self.opnsense_shell
.exec("configctl template reload OPNsense/NodeExporter")
.await?;
self.opnsense_shell
.exec("configctl node_exporter configtest")
.await?;
self.opnsense_shell
.exec("configctl node_exporter start")
.await?;
Ok(())
}
}