working on ntfy score
This commit is contained in:
parent
80e209d333
commit
6a29969c7f
729
Cargo.lock
generated
729
Cargo.lock
generated
File diff suppressed because it is too large
Load Diff
18
Cargo.toml
18
Cargo.toml
@ -24,13 +24,25 @@ log = "0.4"
|
||||
env_logger = "0.11"
|
||||
derive-new = "0.7"
|
||||
async-trait = "0.1"
|
||||
tokio = { version = "1.40", features = ["io-std", "fs", "macros", "rt-multi-thread"] }
|
||||
tokio = { version = "1.40", features = [
|
||||
"io-std",
|
||||
"fs",
|
||||
"macros",
|
||||
"rt-multi-thread",
|
||||
] }
|
||||
cidr = { features = ["serde"], version = "0.2" }
|
||||
russh = "0.45"
|
||||
russh-keys = "0.45"
|
||||
rand = "0.8"
|
||||
url = "2.5"
|
||||
kube = "0.98"
|
||||
kube = { version = "0.98", features = [
|
||||
"config",
|
||||
"client",
|
||||
"runtime",
|
||||
"rustls-tls",
|
||||
"ws",
|
||||
"jsonpatch",
|
||||
] }
|
||||
k8s-openapi = { version = "0.24", features = ["v1_30"] }
|
||||
serde_yaml = "0.9"
|
||||
serde-value = "0.7"
|
||||
@ -39,4 +51,4 @@ inquire = "0.7"
|
||||
convert_case = "0.8"
|
||||
chrono = "0.4"
|
||||
similar = "2"
|
||||
uuid = { version = "1.11", features = [ "v4", "fast-rng", "macro-diagnostics" ] }
|
||||
uuid = { version = "1.11", features = ["v4", "fast-rng", "macro-diagnostics"] }
|
||||
|
@ -15,7 +15,7 @@ log = { workspace = true }
|
||||
env_logger = { workspace = true }
|
||||
url = { workspace = true }
|
||||
kube = "0.98.0"
|
||||
k8s-openapi = { version = "0.24.0", features = [ "v1_30" ] }
|
||||
k8s-openapi = { version = "0.25.0", features = ["v1_30"] }
|
||||
http = "1.2.0"
|
||||
serde_yaml = "0.9.34"
|
||||
inquire.workspace = true
|
||||
|
12
examples/ntfy/Cargo.toml
Normal file
12
examples/ntfy/Cargo.toml
Normal file
@ -0,0 +1,12 @@
|
||||
[package]
|
||||
name = "example-ntfy"
|
||||
edition = "2024"
|
||||
version.workspace = true
|
||||
readme.workspace = true
|
||||
license.workspace = true
|
||||
|
||||
[dependencies]
|
||||
harmony = { version = "0.1.0", path = "../../harmony" }
|
||||
harmony_cli = { version = "0.1.0", path = "../../harmony_cli" }
|
||||
tokio.workspace = true
|
||||
url.workspace = true
|
43
examples/ntfy/src/main.rs
Normal file
43
examples/ntfy/src/main.rs
Normal file
@ -0,0 +1,43 @@
|
||||
use std::{collections::HashMap, str::FromStr};
|
||||
|
||||
use harmony::{
|
||||
inventory::Inventory,
|
||||
maestro::Maestro,
|
||||
modules::helm::chart::{HelmChartScore, HelmRepository, NonBlankString},
|
||||
topology::K8sAnywhereTopology,
|
||||
};
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() {
|
||||
let mut ntfy_overrides: HashMap<NonBlankString, String> = HashMap::new();
|
||||
ntfy_overrides.insert(
|
||||
NonBlankString::from_str("image.tag").unwrap(),
|
||||
"v2.12.0".to_string(),
|
||||
);
|
||||
|
||||
let ntfy_chart = HelmChartScore {
|
||||
namespace: Some(NonBlankString::from_str("monitoring").unwrap()),
|
||||
release_name: NonBlankString::from_str("ntfy").unwrap(),
|
||||
chart_name: NonBlankString::from_str("sarab97/ntfy").unwrap(),
|
||||
chart_version: Some(NonBlankString::from_str("0.1.7").unwrap()),
|
||||
values_overrides: Some(ntfy_overrides),
|
||||
values_yaml: None,
|
||||
create_namespace: true,
|
||||
install_only: false,
|
||||
repository: Some(HelmRepository::new(
|
||||
"sarab97".to_string(),
|
||||
url::Url::parse("https://charts.sarabsingh.com").unwrap(),
|
||||
true,
|
||||
)),
|
||||
};
|
||||
|
||||
let mut maestro = Maestro::<K8sAnywhereTopology>::initialize(
|
||||
Inventory::autoload(),
|
||||
K8sAnywhereTopology::from_env(),
|
||||
)
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
maestro.register_all(vec![Box::new(ntfy_chart)]);
|
||||
harmony_cli::init(maestro, None).await.unwrap();
|
||||
}
|
@ -54,3 +54,4 @@ fqdn = { version = "0.4.6", features = [
|
||||
temp-dir = "0.1.14"
|
||||
dyn-clone = "1.0.19"
|
||||
similar.workspace = true
|
||||
futures-util = "0.3.31"
|
||||
|
@ -1,14 +1,28 @@
|
||||
use derive_new::new;
|
||||
use k8s_openapi::{ClusterResourceScope, NamespaceResourceScope};
|
||||
use futures_util::TryStreamExt;
|
||||
use k8s_openapi::{
|
||||
ClusterResourceScope, NamespaceResourceScope,
|
||||
api::{apps::v1::Deployment, core::v1::Pod},
|
||||
};
|
||||
use kube::runtime::conditions;
|
||||
use kube::runtime::wait::{Condition, await_condition};
|
||||
use kube::{
|
||||
Api, Client, Config, Error, Resource,
|
||||
api::{Patch, PatchParams},
|
||||
Client, Config, Error, Resource,
|
||||
api::{
|
||||
Api, AttachParams, AttachedProcess, DeleteParams, ListParams, Patch, PatchParams,
|
||||
PostParams, ResourceExt, WatchEvent, WatchParams,
|
||||
},
|
||||
config::{KubeConfigOptions, Kubeconfig},
|
||||
core::ErrorResponse,
|
||||
runtime::{
|
||||
WatchStreamExt, metadata_watcher,
|
||||
reflector::Lookup,
|
||||
watcher::{self, watch_object},
|
||||
},
|
||||
};
|
||||
use log::{debug, error, trace};
|
||||
use serde::de::DeserializeOwned;
|
||||
use similar::TextDiff;
|
||||
use similar::{DiffableStr, TextDiff};
|
||||
|
||||
#[derive(new)]
|
||||
pub struct K8sClient {
|
||||
@ -22,6 +36,65 @@ impl K8sClient {
|
||||
})
|
||||
}
|
||||
|
||||
pub async fn wait_until_deployment_ready(
|
||||
&self,
|
||||
name: String,
|
||||
namespace: Option<&str>,
|
||||
) -> Result<(), Error> {
|
||||
let api: Api<Deployment>;
|
||||
|
||||
if let Some(ns) = namespace {
|
||||
api = Api::namespaced(self.client.clone(), ns);
|
||||
} else {
|
||||
api = Api::default_namespaced(self.client.clone());
|
||||
}
|
||||
|
||||
// need to upgrade to latest kube-rs version https://docs.rs/kube-runtime/latest/kube_runtime/wait/conditions/fn.is_deployment_completed.html
|
||||
let establish = await_condition(api, name.as_str(), conditions::is_deployment_completed());
|
||||
let _ = tokio::time::timeout(std::time::Duration::from_secs(300), establish).await?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn exec_pod(
|
||||
&self,
|
||||
name: String,
|
||||
namespace: Option<&str>,
|
||||
command: Vec<String>,
|
||||
) -> Result<(), String> {
|
||||
let api: Api<Pod>;
|
||||
|
||||
if let Some(ns) = namespace {
|
||||
api = Api::namespaced(self.client.clone(), ns);
|
||||
} else {
|
||||
api = Api::default_namespaced(self.client.clone());
|
||||
}
|
||||
let pod_list = api
|
||||
.list(&ListParams::default().labels(format!("app.kubernetes.io/name={name}").as_str()))
|
||||
.await
|
||||
.expect("couldn't get list of pods");
|
||||
|
||||
if pod_list.items.len() > 1 {
|
||||
return Err("too many pods".into());
|
||||
} else {
|
||||
api.exec(
|
||||
pod_list
|
||||
.items
|
||||
.first()
|
||||
.expect("couldn't get pod")
|
||||
.name()
|
||||
.expect("couldn't get pod name")
|
||||
.into_owned()
|
||||
.as_str(),
|
||||
command,
|
||||
&AttachParams::default(),
|
||||
)
|
||||
.await;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Apply a resource in namespace
|
||||
///
|
||||
/// See `kubectl apply` for more information on the expected behavior of this function
|
||||
|
@ -1,9 +1,6 @@
|
||||
use serde::Serialize;
|
||||
|
||||
use crate::modules::monitoring::{
|
||||
alert_rule::prometheus_alert_rule::AlertManagerRuleGroup,
|
||||
kube_prometheus::types::{AlertManagerAdditionalPromRules, AlertManagerChannelConfig},
|
||||
};
|
||||
use crate::modules::monitoring::kube_prometheus::types::{AlertManagerAdditionalPromRules, AlertManagerChannelConfig};
|
||||
|
||||
#[derive(Debug, Clone, Serialize)]
|
||||
pub struct KubePrometheusConfig {
|
||||
|
@ -1,3 +1,4 @@
|
||||
pub mod alert_channel;
|
||||
pub mod alert_rule;
|
||||
pub mod kube_prometheus;
|
||||
pub mod ntfy;
|
||||
|
6
harmony/src/modules/monitoring/ntfy/helm/config.rs
Normal file
6
harmony/src/modules/monitoring/ntfy/helm/config.rs
Normal file
@ -0,0 +1,6 @@
|
||||
use serde::Serialize;
|
||||
|
||||
/// Configuration for the ntfy Helm installation.
#[derive(Debug, Clone, Serialize)]
pub struct NtfyConfig {
    // Kubernetes namespace the ntfy Helm release is installed into.
    pub namespace: String,
}
|
2
harmony/src/modules/monitoring/ntfy/helm/mod.rs
Normal file
2
harmony/src/modules/monitoring/ntfy/helm/mod.rs
Normal file
@ -0,0 +1,2 @@
|
||||
pub mod config;
|
||||
pub mod ntfy_helm_chart;
|
91
harmony/src/modules/monitoring/ntfy/helm/ntfy_helm_chart.rs
Normal file
91
harmony/src/modules/monitoring/ntfy/helm/ntfy_helm_chart.rs
Normal file
@ -0,0 +1,91 @@
|
||||
use non_blank_string_rs::NonBlankString;
|
||||
use std::{
|
||||
str::FromStr,
|
||||
sync::{Arc, Mutex},
|
||||
};
|
||||
|
||||
use crate::modules::{
|
||||
helm::chart::{HelmChartScore, HelmRepository},
|
||||
monitoring::ntfy::helm::config::NtfyConfig,
|
||||
};
|
||||
|
||||
pub fn ntfy_helm_chart_score(config: Arc<Mutex<NtfyConfig>>) -> HelmChartScore {
|
||||
let config = config.lock().unwrap();
|
||||
|
||||
let values = format!(
|
||||
r#"
|
||||
replicaCount: 1
|
||||
|
||||
image:
|
||||
repository: binwiederhier/ntfy
|
||||
pullPolicy: IfNotPresent
|
||||
# Overrides the image tag whose default is the chart appVersion.
|
||||
tag: "v2.12.0"
|
||||
|
||||
serviceAccount:
|
||||
# Specifies whether a service account should be created
|
||||
create: true
|
||||
# Annotations to add to the service account
|
||||
# annotations:
|
||||
# The name of the service account to use.
|
||||
# If not set and create is true, a name is generated using the fullname template
|
||||
# name: ""
|
||||
|
||||
service:
|
||||
type: ClusterIP
|
||||
port: 80
|
||||
|
||||
ingress:
|
||||
enabled: false
|
||||
# annotations:
|
||||
# kubernetes.io/ingress.class: nginx
|
||||
# kubernetes.io/tls-acme: "true"
|
||||
hosts:
|
||||
- host: ntfy.host.com
|
||||
paths:
|
||||
- path: /
|
||||
pathType: ImplementationSpecific
|
||||
tls: []
|
||||
# - secretName: chart-example-tls
|
||||
# hosts:
|
||||
# - chart-example.local
|
||||
|
||||
|
||||
autoscaling:
|
||||
enabled: false
|
||||
|
||||
config:
|
||||
enabled: true
|
||||
data:
|
||||
# base-url: "https://ntfy.something.com"
|
||||
auth-file: "/var/cache/ntfy/user.db"
|
||||
auth-default-access: "deny-all"
|
||||
cache-file: "/var/cache/ntfy/cache.db"
|
||||
attachment-cache-dir: "/var/cache/ntfy/attachments"
|
||||
behind-proxy: true
|
||||
# web-root: "disable"
|
||||
enable-signup: false
|
||||
enable-login: "true"
|
||||
|
||||
persistence:
|
||||
enabled: true
|
||||
size: 200Mi
|
||||
"#,
|
||||
);
|
||||
|
||||
HelmChartScore {
|
||||
namespace: Some(NonBlankString::from_str(&config.namespace).unwrap()),
|
||||
release_name: NonBlankString::from_str("ntfy").unwrap(),
|
||||
chart_name: NonBlankString::from_str("sarab97/ntfy").unwrap(),
|
||||
chart_version: Some(NonBlankString::from_str("0.1.7").unwrap()),
|
||||
values_overrides: None,
|
||||
values_yaml: Some(values.to_string()),
|
||||
create_namespace: true,
|
||||
install_only: false,
|
||||
repository: Some(HelmRepository::new(
|
||||
"sarab97".to_string(),
|
||||
url::Url::parse("https://charts.sarabsingh.com").unwrap(),
|
||||
true,
|
||||
)),
|
||||
}
|
||||
}
|
2
harmony/src/modules/monitoring/ntfy/mod.rs
Normal file
2
harmony/src/modules/monitoring/ntfy/mod.rs
Normal file
@ -0,0 +1,2 @@
|
||||
pub mod helm;
|
||||
pub mod ntfy;
|
33
harmony/src/modules/monitoring/ntfy/ntfy.rs
Normal file
33
harmony/src/modules/monitoring/ntfy/ntfy.rs
Normal file
@ -0,0 +1,33 @@
|
||||
use std::sync::{Arc, Mutex};
|
||||
|
||||
use crate::{
|
||||
interpret::{InterpretError, Outcome},
|
||||
inventory::Inventory,
|
||||
modules::monitoring::ntfy::helm::{config::NtfyConfig, ntfy_helm_chart::ntfy_helm_chart_score},
|
||||
score::Score,
|
||||
topology::{HelmCommand, K8sclient, Topology},
|
||||
};
|
||||
|
||||
pub struct Ntfy {
|
||||
pub config: Arc<Mutex<NtfyConfig>>,
|
||||
}
|
||||
|
||||
impl Ntfy {
|
||||
async fn install_ntfy<T: Topology + HelmCommand + K8sclient + Send + Sync>(
|
||||
&self,
|
||||
inventory: &Inventory,
|
||||
topology: &T,
|
||||
) -> Result<Outcome, InterpretError> {
|
||||
let result = ntfy_helm_chart_score(self.config.clone())
|
||||
.create_interpret()
|
||||
.execute(inventory, topology)
|
||||
.await;
|
||||
|
||||
let client = topology.k8s_client().await.expect("couldn't get k8s client");
|
||||
|
||||
client.wait_until_deployment_ready("ntfy", self.config.get_mut().expect("couldn't get config").namespace);
|
||||
client.
|
||||
|
||||
result
|
||||
}
|
||||
}
|
Loading…
Reference in New Issue
Block a user