Compare commits


3 Commits

160 changed files with 1504 additions and 7984 deletions

View File

@@ -9,7 +9,7 @@ jobs:
check:
runs-on: docker
container:
image: hub.nationtech.io/harmony/harmony_composer:latest
image: hub.nationtech.io/harmony/harmony_composer:latest@sha256:eb0406fcb95c63df9b7c4b19bc50ad7914dd8232ce98e9c9abef628e07c69386
steps:
- name: Checkout code
uses: actions/checkout@v4

View File

@@ -7,7 +7,7 @@ on:
jobs:
package_harmony_composer:
container:
image: hub.nationtech.io/harmony/harmony_composer:latest
image: hub.nationtech.io/harmony/harmony_composer:latest@sha256:eb0406fcb95c63df9b7c4b19bc50ad7914dd8232ce98e9c9abef628e07c69386
runs-on: dind
steps:
- name: Checkout code
@@ -45,14 +45,14 @@ jobs:
-H "Authorization: token ${{ secrets.GITEATOKEN }}" \
"https://git.nationtech.io/api/v1/repos/nationtech/harmony/releases/tags/snapshot-latest" \
| jq -r '.id // empty')
if [ -n "$RELEASE_ID" ]; then
# Delete existing release
curl -X DELETE \
-H "Authorization: token ${{ secrets.GITEATOKEN }}" \
"https://git.nationtech.io/api/v1/repos/nationtech/harmony/releases/$RELEASE_ID"
fi
# Create new release
RESPONSE=$(curl -X POST \
-H "Authorization: token ${{ secrets.GITEATOKEN }}" \
@@ -65,7 +65,7 @@ jobs:
"prerelease": true
}' \
"https://git.nationtech.io/api/v1/repos/nationtech/harmony/releases")
echo "RELEASE_ID=$(echo $RESPONSE | jq -r '.id')" >> $GITHUB_ENV
- name: Upload Linux binary

.gitignore (vendored, 29 changed lines)
View File

@@ -1,25 +1,4 @@
### General ###
private_repos/
### Harmony ###
harmony.log
### Helm ###
# Chart dependencies
**/charts/*.tgz
### Rust ###
# Generated by Cargo
# will have compiled files and executables
debug/
target/
# Remove Cargo.lock from gitignore if creating an executable, leave it for libraries
# More information here https://doc.rust-lang.org/cargo/guide/cargo-toml-vs-cargo-lock.html
Cargo.lock
# These are backup files generated by rustfmt
**/*.rs.bk
# MSVC Windows builds of rustc generate these, which store debugging information
*.pdb
target
private_repos
log/
*.tgz

Cargo.lock (generated, 861 changed lines)

File diff suppressed because it is too large.

View File

@@ -12,9 +12,6 @@ members = [
"harmony_cli",
"k3d",
"harmony_composer",
"harmony_inventory_agent",
"harmony_secret_derive",
"harmony_secret",
]
[workspace.package]
@@ -23,7 +20,7 @@ readme = "README.md"
license = "GNU AGPL v3"
[workspace.dependencies]
log = { version = "0.4", features = ["kv"] }
log = "0.4"
env_logger = "0.11"
derive-new = "0.7"
async-trait = "0.1"
@@ -55,13 +52,3 @@ convert_case = "0.8"
chrono = "0.4"
similar = "2"
uuid = { version = "1.11", features = ["v4", "fast-rng", "macro-diagnostics"] }
pretty_assertions = "1.4.1"
tempfile = "3.20.0"
bollard = "0.19.1"
base64 = "0.22.1"
tar = "0.4.44"
lazy_static = "1.5.0"
directories = "6.0.0"
thiserror = "2.0.14"
serde = { version = "1.0.209", features = ["derive", "rc"] }
serde_json = "1.0.127"

View File

@@ -1,4 +1,4 @@
FROM docker.io/rust:1.89.0 AS build
FROM docker.io/rust:1.87.0 AS build
WORKDIR /app
@@ -6,14 +6,13 @@ COPY . .
RUN cargo build --release --bin harmony_composer
FROM docker.io/rust:1.89.0
FROM docker.io/rust:1.87.0
WORKDIR /app
RUN rustup target add x86_64-pc-windows-gnu
RUN rustup target add x86_64-unknown-linux-gnu
RUN rustup component add rustfmt
RUN rustup component add clippy
RUN apt update
@@ -23,4 +22,4 @@ RUN apt install -y nodejs docker.io mingw-w64
COPY --from=build /app/target/release/harmony_composer .
ENTRYPOINT ["/app/harmony_composer"]
ENTRYPOINT ["/app/harmony_composer"]

View File

@@ -1,6 +1,5 @@
# Harmony : Open-source infrastructure orchestration that treats your platform like first-class code
_By [NationTech](https://nationtech.io)_
# Harmony : Open-source infrastructure orchestration that treats your platform like first-class code.
*By [NationTech](https://nationtech.io)*
[![Build](https://git.nationtech.io/NationTech/harmony/actions/workflows/check.yml/badge.svg)](https://git.nationtech.io/nationtech/harmony)
[![License](https://img.shields.io/badge/license-AGPLv3-blue?style=flat-square)](LICENSE)
@@ -24,11 +23,11 @@ From a **developer laptop** to a **global production cluster**, a single **sourc
Infrastructure is essential, but it shouldn't be your core business. Harmony is built on three guiding principles that make modern platforms reliable, repeatable, and easy to reason about.
| Principle | What it means for you |
| -------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
| **Infrastructure as Resilient Code** | Replace sprawling YAML and bash scripts with type-safe Rust. Test, refactor, and version your platform just like application code. |
| **Prove It Works — Before You Deploy** | Harmony uses the compiler to verify that your application's needs match the target environment's capabilities at **compile-time**, eliminating an entire class of runtime outages. |
| **One Unified Model** | Software and infrastructure are a single system. Harmony models them together, enabling deep automation—from bare-metal servers to Kubernetes workloads—with zero context switching. |
| Principle | What it means for you |
|-----------|-----------------------|
| **Infrastructure as Resilient Code** | Replace sprawling YAML and bash scripts with type-safe Rust. Test, refactor, and version your platform just like application code. |
| **Prove It Works — Before You Deploy** | Harmony uses the compiler to verify that your application's needs match the target environment's capabilities at **compile-time**, eliminating an entire class of runtime outages. |
| **One Unified Model** | Software and infrastructure are a single system. Harmony models them together, enabling deep automation—from bare-metal servers to Kubernetes workloads—with zero context switching. |
These principles surface as simple, ergonomic Rust APIs that let teams focus on their product while trusting the platform underneath.
@@ -64,20 +63,22 @@ async fn main() {
},
};
// 2. Enhance with extra scores (monitoring, CI/CD, …)
// 2. Pick where it should run
let mut maestro = Maestro::<K8sAnywhereTopology>::initialize(
Inventory::autoload(), // auto-detect hardware / kube-config
K8sAnywhereTopology::from_env(), // local k3d, CI, staging, prod…
)
.await
.unwrap();
// 3. Enhance with extra scores (monitoring, CI/CD, …)
let mut monitoring = MonitoringAlertingStackScore::new();
monitoring.namespace = Some(lamp_stack.config.namespace.clone());
// 3. Run your scores on the desired topology & inventory
harmony_cli::run(
Inventory::autoload(), // auto-detect hardware / kube-config
K8sAnywhereTopology::from_env(), // local k3d, CI, staging, prod…
vec![
Box::new(lamp_stack),
Box::new(monitoring)
],
None
).await.unwrap();
maestro.register_all(vec![Box::new(lamp_stack), Box::new(monitoring)]);
// 4. Launch an interactive CLI / TUI
harmony_cli::init(maestro, None).await.unwrap();
}
```
@@ -93,13 +94,13 @@ Harmony analyses the code, shows an execution plan in a TUI, and applies it once
## 3 · Core Concepts
| Term | One-liner |
| ---------------- | ---------------------------------------------------------------------------------------------------- |
| **Score<T>** | Declarative description of the desired state (e.g., `LAMPScore`). |
| **Interpret<T>** | Imperative logic that realises a `Score` on a specific environment. |
| **Topology** | An environment (local k3d, AWS, bare-metal) exposing verified _Capabilities_ (Kubernetes, DNS, …). |
| **Maestro** | Orchestrator that compiles Scores + Topology, ensuring all capabilities line up **at compile-time**. |
| **Inventory** | Optional catalogue of physical assets for bare-metal and edge deployments. |
| Term | One-liner |
|------|-----------|
| **Score<T>** | Declarative description of the desired state (e.g., `LAMPScore`). |
| **Interpret<T>** | Imperative logic that realises a `Score` on a specific environment. |
| **Topology** | An environment (local k3d, AWS, bare-metal) exposing verified *Capabilities* (Kubernetes, DNS, …). |
| **Maestro** | Orchestrator that compiles Scores + Topology, ensuring all capabilities line up **at compile-time**. |
| **Inventory** | Optional catalogue of physical assets for bare-metal and edge deployments. |
A visual overview is in the diagram below.
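To make the table concrete, here is a minimal sketch of a custom `Score` and the shape of its `Interpret`, based only on the trait signatures visible elsewhere in this compare. `HelloScore`, `HelloInterpret`, and their fields are hypothetical, the `Interpret` implementation is not shown, and the `CloneBoxScore` / `SerializeScore` plumbing the real trait bounds require is omitted:

```rust
use harmony::{interpret::Interpret, score::Score, topology::Topology};

// Declarative: *what* we want. Hypothetical example type; the real trait
// also requires Debug, Clone and Serialize plumbing.
#[derive(Debug, Clone, serde::Serialize)]
struct HelloScore {
    message: String,
}

impl<T: Topology> Score<T> for HelloScore {
    fn name(&self) -> String {
        "HelloScore".to_string()
    }

    // Imperative: *how* to realise it. `HelloInterpret` (not shown) would
    // implement `Interpret<T>` and perform the actual work in its
    // `execute(&self, inventory, topology)` method.
    fn create_interpret(&self) -> Box<dyn Interpret<T>> {
        Box::new(HelloInterpret {
            message: self.message.clone(),
        })
    }
}
```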
@@ -111,9 +112,9 @@ A visual overview is in the diagram below.
Prerequisites:
- Rust
- Docker (if you deploy locally)
- `kubectl` / `helm` for Kubernetes-based topologies
* Rust
* Docker (if you deploy locally)
* `kubectl` / `helm` for Kubernetes-based topologies
```bash
git clone https://git.nationtech.io/nationtech/harmony
@@ -125,15 +126,15 @@ cargo build --release # builds the CLI, TUI and libraries
## 5 · Learning More
- **Architectural Decision Records**: dive into the rationale
- [ADR-001 · Why Rust](adr/001-rust.md)
- [ADR-003 · Infrastructure Abstractions](adr/003-infrastructure-abstractions.md)
- [ADR-006 · Secret Management](adr/006-secret-management.md)
* **Architectural Decision Records**: dive into the rationale
- [ADR-001 · Why Rust](adr/001-rust.md)
- [ADR-003 · Infrastructure Abstractions](adr/003-infrastructure-abstractions.md)
- [ADR-006 · Secret Management](adr/006-secret-management.md)
- [ADR-011 · Multi-Tenant Cluster](adr/011-multi-tenant-cluster.md)
- **Extending Harmony**: write new Scores / Interprets, add hardware like OPNsense firewalls, or embed Harmony in your own tooling (`/docs`).
* **Extending Harmony**: write new Scores / Interprets, add hardware like OPNsense firewalls, or embed Harmony in your own tooling (`/docs`).
- **Community**: discussions and roadmap live in [GitLab issues](https://git.nationtech.io/nationtech/harmony/-/issues). PRs, ideas, and feedback are welcome!
* **Community**: discussions and roadmap live in [GitLab issues](https://git.nationtech.io/nationtech/harmony/-/issues). PRs, ideas, and feedback are welcome!
---
@@ -147,4 +148,4 @@ See [LICENSE](LICENSE) for the full text.
---
_Made with ❤️ & 🦀 by NationTech and the Harmony community_
*Made with ❤️ & 🦀 by NationTech and the Harmony community*

View File

@@ -1,7 +1,5 @@
#!/bin/sh
set -e
cargo check --all-targets --all-features --keep-going
cargo fmt --check
cargo clippy
cargo test

View File

@@ -1,14 +0,0 @@
[package]
name = "example-application-monitoring-with-tenant"
edition = "2024"
version.workspace = true
readme.workspace = true
license.workspace = true
[dependencies]
env_logger.workspace = true
harmony = { version = "0.1.0", path = "../../harmony" }
harmony_cli = { version = "0.1.0", path = "../../harmony_cli" }
logging = "0.1.0"
tokio.workspace = true
url.workspace = true

View File

@@ -1,55 +0,0 @@
use std::{path::PathBuf, str::FromStr, sync::Arc};
use harmony::{
data::Id,
inventory::Inventory,
modules::{
application::{ApplicationScore, RustWebFramework, RustWebapp, features::Monitoring},
monitoring::alert_channel::webhook_receiver::WebhookReceiver,
tenant::TenantScore,
},
topology::{K8sAnywhereTopology, Url, tenant::TenantConfig},
};
#[tokio::main]
async fn main() {
//TODO there is a bug where the application is deployed into the namespace matching the
//application name and the tenant is created in the namespace matching the tenant name.
//In order for the application to be deployed in the tenant namespace, the application.name
//and the TenantConfig.name must match.
let tenant = TenantScore {
config: TenantConfig {
id: Id::from_str("test-tenant-id").unwrap(),
name: "example-monitoring".to_string(),
..Default::default()
},
};
let application = Arc::new(RustWebapp {
name: "example-monitoring".to_string(),
domain: Url::Url(url::Url::parse("https://rustapp.harmony.example.com").unwrap()),
project_root: PathBuf::from("./examples/rust/webapp"),
framework: Some(RustWebFramework::Leptos),
});
let webhook_receiver = WebhookReceiver {
name: "sample-webhook-receiver".to_string(),
url: Url::Url(url::Url::parse("https://webhook-doesnt-exist.com").unwrap()),
};
let app = ApplicationScore {
features: vec![Box::new(Monitoring {
alert_receiver: vec![Box::new(webhook_receiver)],
application: application.clone(),
})],
application,
};
harmony_cli::run(
Inventory::autoload(),
K8sAnywhereTopology::from_env(),
vec![Box::new(tenant), Box::new(app)],
None,
)
.await
.unwrap();
}

View File

@@ -1,21 +1,20 @@
use harmony::{
inventory::Inventory,
maestro::Maestro,
modules::dummy::{ErrorScore, PanicScore, SuccessScore},
topology::LocalhostTopology,
};
#[tokio::main]
async fn main() {
harmony_cli::run(
Inventory::autoload(),
LocalhostTopology::new(),
vec![
Box::new(SuccessScore {}),
Box::new(ErrorScore {}),
Box::new(PanicScore {}),
],
None,
)
.await
.unwrap();
let inventory = Inventory::autoload();
let topology = LocalhostTopology::new();
let mut maestro = Maestro::initialize(inventory, topology).await.unwrap();
maestro.register_all(vec![
Box::new(SuccessScore {}),
Box::new(ErrorScore {}),
Box::new(PanicScore {}),
]);
harmony_cli::init(maestro, None).await.unwrap();
}

View File

@@ -125,47 +125,40 @@ spec:
name: nginx"#,
)
.unwrap();
deployment
return deployment;
}
fn nginx_deployment_2() -> Deployment {
let pod_template = PodTemplateSpec {
metadata: Some(ObjectMeta {
labels: Some(BTreeMap::from([(
"app".to_string(),
"nginx-test".to_string(),
)])),
let mut pod_template = PodTemplateSpec::default();
pod_template.metadata = Some(ObjectMeta {
labels: Some(BTreeMap::from([(
"app".to_string(),
"nginx-test".to_string(),
)])),
..Default::default()
});
pod_template.spec = Some(PodSpec {
containers: vec![Container {
name: "nginx".to_string(),
image: Some("nginx".to_string()),
..Default::default()
}),
spec: Some(PodSpec {
containers: vec![Container {
name: "nginx".to_string(),
image: Some("nginx".to_string()),
..Default::default()
}],
..Default::default()
}),
}],
..Default::default()
});
let mut spec = DeploymentSpec::default();
spec.template = pod_template;
spec.selector = LabelSelector {
match_expressions: None,
match_labels: Some(BTreeMap::from([(
"app".to_string(),
"nginx-test".to_string(),
)])),
};
let spec = DeploymentSpec {
template: pod_template,
selector: LabelSelector {
match_expressions: None,
match_labels: Some(BTreeMap::from([(
"app".to_string(),
"nginx-test".to_string(),
)])),
},
..Default::default()
};
let mut deployment = Deployment::default();
deployment.spec = Some(spec);
deployment.metadata.name = Some("nginx-test".to_string());
Deployment {
spec: Some(spec),
metadata: ObjectMeta {
name: Some("nginx-test".to_string()),
..Default::default()
},
..Default::default()
}
deployment
}
fn nginx_deployment() -> Deployment {

View File

@@ -1,6 +1,7 @@
use harmony::{
data::Version,
inventory::Inventory,
maestro::Maestro,
modules::lamp::{LAMPConfig, LAMPScore},
topology::{K8sAnywhereTopology, Url},
};
@@ -23,7 +24,7 @@ async fn main() {
// This config can be extended as needed for more complicated configurations
config: LAMPConfig {
project_root: "./php".into(),
database_size: "4Gi".to_string().into(),
database_size: format!("4Gi").into(),
..Default::default()
},
};
@@ -42,13 +43,15 @@ async fn main() {
// K8sAnywhereTopology as it is the most automatic one that enables you to easily deploy
// locally, to development environment from a CI, to staging, and to production with settings
// that automatically adapt to each environment grade.
harmony_cli::run(
let mut maestro = Maestro::<K8sAnywhereTopology>::initialize(
Inventory::autoload(),
K8sAnywhereTopology::from_env(),
vec![Box::new(lamp_stack)],
None,
)
.await
.unwrap();
maestro.register_all(vec![Box::new(lamp_stack)]);
// Here we bootstrap the CLI, this gives some nice features if you need them
harmony_cli::init(maestro, None).await.unwrap();
}
// That's it, end of the infra as code.

View File

@@ -2,6 +2,7 @@ use std::collections::HashMap;
use harmony::{
inventory::Inventory,
maestro::Maestro,
modules::{
monitoring::{
alert_channel::discord_alert_channel::DiscordWebhook,
@@ -50,8 +51,8 @@ async fn main() {
let service_monitor_endpoint = ServiceMonitorEndpoint {
port: Some("80".to_string()),
path: Some("/metrics".to_string()),
scheme: Some(HTTPScheme::HTTP),
path: "/metrics".to_string(),
scheme: HTTPScheme::HTTP,
..Default::default()
};
@@ -73,13 +74,13 @@ async fn main() {
rules: vec![Box::new(additional_rules), Box::new(additional_rules2)],
service_monitors: vec![service_monitor],
};
harmony_cli::run(
let mut maestro = Maestro::<K8sAnywhereTopology>::initialize(
Inventory::autoload(),
K8sAnywhereTopology::from_env(),
vec![Box::new(alerting_score)],
None,
)
.await
.unwrap();
maestro.register_all(vec![Box::new(alerting_score)]);
harmony_cli::init(maestro, None).await.unwrap();
}

View File

@@ -1,8 +1,9 @@
use std::{collections::HashMap, str::FromStr};
use std::collections::HashMap;
use harmony::{
data::Id,
inventory::Inventory,
maestro::Maestro,
modules::{
monitoring::{
alert_channel::discord_alert_channel::DiscordWebhook,
@@ -28,7 +29,7 @@ use harmony::{
async fn main() {
let tenant = TenantScore {
config: TenantConfig {
id: Id::from_str("1234").unwrap(),
id: Id::from_string("1234".to_string()),
name: "test-tenant".to_string(),
resource_limits: ResourceLimits {
cpu_request_cores: 6.0,
@@ -53,8 +54,8 @@ async fn main() {
let service_monitor_endpoint = ServiceMonitorEndpoint {
port: Some("80".to_string()),
path: Some("/metrics".to_string()),
scheme: Some(HTTPScheme::HTTP),
path: "/metrics".to_string(),
scheme: HTTPScheme::HTTP,
..Default::default()
};
@@ -77,13 +78,13 @@ async fn main() {
rules: vec![Box::new(additional_rules)],
service_monitors: vec![service_monitor],
};
harmony_cli::run(
let mut maestro = Maestro::<K8sAnywhereTopology>::initialize(
Inventory::autoload(),
K8sAnywhereTopology::from_env(),
vec![Box::new(tenant), Box::new(alerting_score)],
None,
)
.await
.unwrap();
maestro.register_all(vec![Box::new(tenant), Box::new(alerting_score)]);
harmony_cli::init(maestro, None).await.unwrap();
}

View File

@@ -8,8 +8,9 @@ use harmony::{
hardware::{FirewallGroup, HostCategory, Location, PhysicalHost, SwitchGroup},
infra::opnsense::OPNSenseManagementInterface,
inventory::Inventory,
maestro::Maestro,
modules::{
http::StaticFilesHttpScore,
http::HttpScore,
ipxe::IpxeScore,
okd::{
bootstrap_dhcp::OKDBootstrapDhcpScore,
@@ -125,25 +126,20 @@ async fn main() {
harmony::modules::okd::load_balancer::OKDLoadBalancerScore::new(&topology);
let tftp_score = TftpScore::new(Url::LocalFolder("./data/watchguard/tftpboot".to_string()));
let http_score = StaticFilesHttpScore::new(Url::LocalFolder(
let http_score = HttpScore::new(Url::LocalFolder(
"./data/watchguard/pxe-http-files".to_string(),
));
let ipxe_score = IpxeScore::new();
harmony_tui::run(
inventory,
topology,
vec![
Box::new(dns_score),
Box::new(bootstrap_dhcp_score),
Box::new(bootstrap_load_balancer_score),
Box::new(load_balancer_score),
Box::new(tftp_score),
Box::new(http_score),
Box::new(ipxe_score),
Box::new(dhcp_score),
],
)
.await
.unwrap();
let mut maestro = Maestro::initialize(inventory, topology).await.unwrap();
maestro.register_all(vec![
Box::new(dns_score),
Box::new(bootstrap_dhcp_score),
Box::new(bootstrap_load_balancer_score),
Box::new(load_balancer_score),
Box::new(tftp_score),
Box::new(http_score),
Box::new(ipxe_score),
Box::new(dhcp_score),
]);
harmony_tui::init(maestro).await.unwrap();
}

View File

@@ -1,18 +1,19 @@
use harmony::{
inventory::Inventory, modules::monitoring::ntfy::ntfy::NtfyScore, topology::K8sAnywhereTopology,
inventory::Inventory, maestro::Maestro, modules::monitoring::ntfy::ntfy::NtfyScore,
topology::K8sAnywhereTopology,
};
#[tokio::main]
async fn main() {
harmony_cli::run(
let mut maestro = Maestro::<K8sAnywhereTopology>::initialize(
Inventory::autoload(),
K8sAnywhereTopology::from_env(),
vec![Box::new(NtfyScore {
namespace: "monitoring".to_string(),
host: "localhost".to_string(),
})],
None,
)
.await
.unwrap();
maestro.register_all(vec![Box::new(NtfyScore {
namespace: "monitoring".to_string(),
})]);
harmony_cli::init(maestro, None).await.unwrap();
}

View File

@@ -8,9 +8,10 @@ use harmony::{
hardware::{FirewallGroup, HostCategory, Location, PhysicalHost, SwitchGroup},
infra::opnsense::OPNSenseManagementInterface,
inventory::Inventory,
maestro::Maestro,
modules::{
dummy::{ErrorScore, PanicScore, SuccessScore},
http::StaticFilesHttpScore,
http::HttpScore,
okd::{dhcp::OKDDhcpScore, dns::OKDDnsScore, load_balancer::OKDLoadBalancerScore},
opnsense::OPNsenseShellCommandScore,
tftp::TftpScore,
@@ -80,28 +81,23 @@ async fn main() {
let load_balancer_score = OKDLoadBalancerScore::new(&topology);
let tftp_score = TftpScore::new(Url::LocalFolder("./data/watchguard/tftpboot".to_string()));
let http_score = StaticFilesHttpScore::new(Url::LocalFolder(
let http_score = HttpScore::new(Url::LocalFolder(
"./data/watchguard/pxe-http-files".to_string(),
));
harmony_tui::run(
inventory,
topology,
vec![
Box::new(dns_score),
Box::new(dhcp_score),
Box::new(load_balancer_score),
Box::new(tftp_score),
Box::new(http_score),
Box::new(OPNsenseShellCommandScore {
opnsense: opnsense.get_opnsense_config(),
command: "touch /tmp/helloharmonytouching".to_string(),
}),
Box::new(SuccessScore {}),
Box::new(ErrorScore {}),
Box::new(PanicScore {}),
],
)
.await
.unwrap();
let mut maestro = Maestro::initialize(inventory, topology).await.unwrap();
maestro.register_all(vec![
Box::new(dns_score),
Box::new(dhcp_score),
Box::new(load_balancer_score),
Box::new(tftp_score),
Box::new(http_score),
Box::new(OPNsenseShellCommandScore {
opnsense: opnsense.get_opnsense_config(),
command: "touch /tmp/helloharmonytouching".to_string(),
}),
Box::new(SuccessScore {}),
Box::new(ErrorScore {}),
Box::new(PanicScore {}),
]);
harmony_tui::init(maestro).await.unwrap();
}

View File

@@ -1,3 +0,0 @@
Dockerfile.harmony
.harmony_generated
harmony

View File

@@ -12,4 +12,3 @@ tokio = { workspace = true }
log = { workspace = true }
env_logger = { workspace = true }
url = { workspace = true }
base64.workspace = true

View File

@@ -2,13 +2,18 @@ use std::{path::PathBuf, sync::Arc};
use harmony::{
inventory::Inventory,
maestro::Maestro,
modules::{
application::{
ApplicationScore, RustWebFramework, RustWebapp,
features::{ContinuousDelivery, Monitoring},
features::{ContinuousDelivery, PrometheusMonitoring},
},
monitoring::alert_channel::{
discord_alert_channel::DiscordWebhook, webhook_receiver::WebhookReceiver,
monitoring::{
alert_channel::discord_alert_channel::DiscordWebhook,
alert_rule::prometheus_alert_rule::AlertManagerRuleGroup,
},
prometheus::alerts::k8s::{
pod::pod_in_failed_state, pvc::high_pvc_fill_rate_over_two_days,
},
},
topology::{K8sAnywhereTopology, Url},
@@ -16,43 +21,42 @@ use harmony::{
#[tokio::main]
async fn main() {
env_logger::init();
let application = Arc::new(RustWebapp {
name: "harmony-example-rust-webapp".to_string(),
domain: Url::Url(url::Url::parse("https://rustapp.harmony.example.com").unwrap()),
project_root: PathBuf::from("./webapp"), // Relative from 'harmony-path' param
project_root: PathBuf::from("./examples/rust/webapp"),
framework: Some(RustWebFramework::Leptos),
});
let discord_receiver = DiscordWebhook {
name: "test-discord".to_string(),
url: Url::Url(url::Url::parse("https://discord.doesnt.exist.com").unwrap()),
};
let webhook_receiver = WebhookReceiver {
name: "sample-webhook-receiver".to_string(),
url: Url::Url(url::Url::parse("https://webhook-doesnt-exist.com").unwrap()),
};
let pod_failed = pod_in_failed_state();
let pod_failed_2 = pod_in_failed_state();
let pod_failed_3 = pod_in_failed_state();
let additional_rules = AlertManagerRuleGroup::new("pod-alerts", vec![pod_failed]);
let additional_rules_2 = AlertManagerRuleGroup::new("pod-alerts-2", vec![pod_failed_2, pod_failed_3]);
let app = ApplicationScore {
features: vec![
Box::new(ContinuousDelivery {
//Box::new(ContinuousDelivery {
// application: application.clone(),
//}),
Box::new(PrometheusMonitoring {
application: application.clone(),
alert_receivers: vec![Box::new(DiscordWebhook {
name: "dummy-discord".to_string(),
url: Url::Url(url::Url::parse("https://discord.doesnt.exist.com").unwrap()),
})],
alert_rules: vec![Box::new(additional_rules), Box::new(additional_rules_2)],
}),
Box::new(Monitoring {
application: application.clone(),
alert_receiver: vec![Box::new(discord_receiver), Box::new(webhook_receiver)],
}),
// TODO add backups, multisite ha, etc
// TODO add monitoring, backups, multisite ha, etc
],
application,
};
harmony_cli::run(
Inventory::autoload(),
K8sAnywhereTopology::from_env(),
vec![Box::new(app)],
None,
)
.await
.unwrap();
let topology = K8sAnywhereTopology::from_env();
let mut maestro = Maestro::initialize(Inventory::autoload(), topology)
.await
.unwrap();
maestro.register_all(vec![Box::new(app)]);
harmony_cli::init(maestro, None).await.unwrap();
}

View File

@@ -0,0 +1,16 @@
FROM rust:bookworm as builder
RUN apt-get update && apt-get install -y --no-install-recommends clang wget && wget https://github.com/cargo-bins/cargo-binstall/releases/latest/download/cargo-binstall-x86_64-unknown-linux-musl.tgz && tar -xvf cargo-binstall-x86_64-unknown-linux-musl.tgz && cp cargo-binstall /usr/local/cargo/bin && rm cargo-binstall-x86_64-unknown-linux-musl.tgz cargo-binstall && apt-get clean && rm -rf /var/lib/apt/lists/*
RUN cargo binstall cargo-leptos -y
RUN rustup target add wasm32-unknown-unknown
WORKDIR /app
COPY . .
RUN cargo leptos build --release -vv
FROM debian:bookworm-slim
RUN groupadd -r appgroup && useradd -r -s /bin/false -g appgroup appuser
ENV LEPTOS_SITE_ADDR=0.0.0.0:3000
EXPOSE 3000/tcp
WORKDIR /home/appuser
COPY --from=builder /app/target/site/pkg /home/appuser/pkg
COPY --from=builder /app/target/release/harmony-example-rust-webapp /home/appuser/harmony-example-rust-webapp
USER appuser
CMD /home/appuser/harmony-example-rust-webapp

View File

@@ -1,8 +1,7 @@
use std::str::FromStr;
use harmony::{
data::Id,
inventory::Inventory,
maestro::Maestro,
modules::tenant::TenantScore,
topology::{K8sAnywhereTopology, tenant::TenantConfig},
};
@@ -11,20 +10,21 @@ use harmony::{
async fn main() {
let tenant = TenantScore {
config: TenantConfig {
id: Id::from_str("test-tenant-id").unwrap(),
id: Id::from_str("test-tenant-id"),
name: "testtenant".to_string(),
..Default::default()
},
};
harmony_cli::run(
let mut maestro = Maestro::<K8sAnywhereTopology>::initialize(
Inventory::autoload(),
K8sAnywhereTopology::from_env(),
vec![Box::new(tenant)],
None,
)
.await
.unwrap();
maestro.register_all(vec![Box::new(tenant)]);
harmony_cli::init(maestro, None).await.unwrap();
}
// TODO write tests

View File

@@ -2,6 +2,7 @@ use std::net::{SocketAddr, SocketAddrV4};
use harmony::{
inventory::Inventory,
maestro::Maestro,
modules::{
dns::DnsScore,
dummy::{ErrorScore, PanicScore, SuccessScore},
@@ -15,19 +16,18 @@ use harmony_macros::ipv4;
#[tokio::main]
async fn main() {
harmony_tui::run(
Inventory::autoload(),
DummyInfra {},
vec![
Box::new(SuccessScore {}),
Box::new(ErrorScore {}),
Box::new(PanicScore {}),
Box::new(DnsScore::new(vec![], None)),
Box::new(build_large_score()),
],
)
.await
.unwrap();
let inventory = Inventory::autoload();
let topology = DummyInfra {};
let mut maestro = Maestro::initialize(inventory, topology).await.unwrap();
maestro.register_all(vec![
Box::new(SuccessScore {}),
Box::new(ErrorScore {}),
Box::new(PanicScore {}),
Box::new(DnsScore::new(vec![], None)),
Box::new(build_large_score()),
]);
harmony_tui::init(maestro).await.unwrap();
}
fn build_large_score() -> LoadBalancerScore {

View File

@@ -5,9 +5,6 @@ version.workspace = true
readme.workspace = true
license.workspace = true
[features]
testing = []
[dependencies]
rand = "0.9"
hex = "0.4"
@@ -16,8 +13,8 @@ reqwest = { version = "0.11", features = ["blocking", "json"] }
russh = "0.45.0"
rust-ipmi = "0.1.1"
semver = "1.0.23"
serde.workspace = true
serde_json.workspace = true
serde = { version = "1.0.209", features = ["derive", "rc"] }
serde_json = "1.0.127"
tokio.workspace = true
derive-new.workspace = true
log.workspace = true
@@ -30,28 +27,29 @@ harmony_macros = { path = "../harmony_macros" }
harmony_types = { path = "../harmony_types" }
uuid.workspace = true
url.workspace = true
kube = { workspace = true, features = ["derive"] }
kube.workspace = true
k8s-openapi.workspace = true
serde_yaml.workspace = true
http.workspace = true
serde-value.workspace = true
inquire.workspace = true
helm-wrapper-rs = "0.4.0"
non-blank-string-rs = "1.0.4"
k3d-rs = { path = "../k3d" }
directories.workspace = true
lazy_static.workspace = true
directories = "6.0.0"
lazy_static = "1.5.0"
dockerfile_builder = "0.1.5"
temp-file = "0.1.9"
convert_case.workspace = true
email_address = "0.2.9"
chrono.workspace = true
fqdn = { version = "0.4.6", features = [
"domain-label-cannot-start-or-end-with-hyphen",
"domain-label-length-limited-to-63",
"domain-name-without-special-chars",
"domain-name-length-limited-to-255",
"punycode",
"serde",
"domain-label-cannot-start-or-end-with-hyphen",
"domain-label-length-limited-to-63",
"domain-name-without-special-chars",
"domain-name-length-limited-to-255",
"punycode",
"serde",
] }
temp-dir = "0.1.14"
dyn-clone = "1.0.19"
@@ -59,15 +57,4 @@ similar.workspace = true
futures-util = "0.3.31"
tokio-util = "0.7.15"
strum = { version = "0.27.1", features = ["derive"] }
tempfile.workspace = true
serde_with = "3.14.0"
schemars = "0.8.22"
kube-derive = "1.1.0"
bollard.workspace = true
tar.workspace = true
base64.workspace = true
once_cell = "1.21.3"
harmony-secret-derive = { version = "0.1.0", path = "../harmony_secret_derive" }
[dev-dependencies]
pretty_assertions.workspace = true
tempfile = "3.20.0"

Binary file not shown.

View File

@@ -11,5 +11,5 @@ lazy_static! {
pub static ref REGISTRY_PROJECT: String =
std::env::var("HARMONY_REGISTRY_PROJECT").unwrap_or_else(|_| "harmony".to_string());
pub static ref DRY_RUN: bool =
std::env::var("HARMONY_DRY_RUN").is_ok_and(|value| value.parse().unwrap_or(false));
std::env::var("HARMONY_DRY_RUN").map_or(true, |value| value.parse().unwrap_or(true));
}
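To pin down the behavioural difference between the two expressions above, here is a small self-contained sketch; `strict` and `safe` are hypothetical helpers written only to mirror the two variants:

```rust
// `strict` mirrors `is_ok_and(|v| v.parse().unwrap_or(false))`: dry run is opt-in.
// `safe` mirrors `map_or(true, |v| v.parse().unwrap_or(true))`: dry run is opt-out.
fn strict(var: Option<&str>) -> bool {
    var.is_some_and(|value| value.parse().unwrap_or(false))
}

fn safe(var: Option<&str>) -> bool {
    var.map_or(true, |value| value.parse().unwrap_or(true))
}

fn main() {
    assert!(!strict(None)); // unset: real run
    assert!(safe(None)); // unset: dry run
    assert!(!strict(Some("garbage"))); // unparsable: real run
    assert!(safe(Some("garbage"))); // unparsable: dry run
    assert!(strict(Some("true")));
    assert!(!safe(Some("false")));
}
```

In short, the `map_or(true, …)` variant fails safe: unless `HARMONY_DRY_RUN` is explicitly set to a parseable `false`, Harmony stays in dry-run mode.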

View File

@@ -1,6 +1,5 @@
use rand::distr::Alphanumeric;
use rand::distr::SampleString;
use std::str::FromStr;
use std::time::SystemTime;
use std::time::UNIX_EPOCH;
@@ -24,13 +23,13 @@ pub struct Id {
value: String,
}
impl FromStr for Id {
type Err = ();
impl Id {
pub fn from_string(value: String) -> Self {
Self { value }
}
fn from_str(s: &str) -> Result<Self, Self::Err> {
Ok(Id {
value: s.to_string(),
})
pub fn from_str(value: &str) -> Self {
Self::from_string(value.to_string())
}
}

View File

@@ -47,7 +47,7 @@ impl serde::Serialize for Version {
impl std::fmt::Display for Version {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
self.value.fmt(f)
return self.value.fmt(f);
}
}

View File

@@ -35,9 +35,10 @@ impl PhysicalHost {
pub fn cluster_mac(&self) -> MacAddress {
self.network
.first()
.get(0)
.expect("Cluster physical host should have a network interface")
.mac_address
.clone()
}
pub fn cpu(mut self, cpu_count: Option<u64>) -> Self {

View File

@@ -1,82 +0,0 @@
use log::debug;
use once_cell::sync::Lazy;
use tokio::sync::broadcast;
use crate::modules::application::ApplicationFeatureStatus;
use super::{
interpret::{InterpretError, Outcome},
topology::TopologyStatus,
};
#[derive(Debug, Clone)]
pub enum HarmonyEvent {
HarmonyStarted,
HarmonyFinished,
InterpretExecutionStarted {
execution_id: String,
topology: String,
interpret: String,
score: String,
message: String,
},
InterpretExecutionFinished {
execution_id: String,
topology: String,
interpret: String,
score: String,
outcome: Result<Outcome, InterpretError>,
},
TopologyStateChanged {
topology: String,
status: TopologyStatus,
message: Option<String>,
},
ApplicationFeatureStateChanged {
topology: String,
application: String,
feature: String,
status: ApplicationFeatureStatus,
},
}
static HARMONY_EVENT_BUS: Lazy<broadcast::Sender<HarmonyEvent>> = Lazy::new(|| {
// TODO: Adjust channel capacity
let (tx, _rx) = broadcast::channel(100);
tx
});
pub fn instrument(event: HarmonyEvent) -> Result<(), &'static str> {
if cfg!(any(test, feature = "testing")) {
let _ = event; // Suppress the "unused variable" warning for `event`
Ok(())
} else {
match HARMONY_EVENT_BUS.send(event) {
Ok(_) => Ok(()),
Err(_) => Err("send error: no subscribers"),
}
}
}
pub async fn subscribe<F, Fut>(name: &str, mut handler: F)
where
F: FnMut(HarmonyEvent) -> Fut + Send + 'static,
Fut: Future<Output = bool> + Send,
{
let mut rx = HARMONY_EVENT_BUS.subscribe();
debug!("[{name}] Service started. Listening for events...");
loop {
match rx.recv().await {
Ok(event) => {
if !handler(event).await {
debug!("[{name}] Handler requested exit.");
break;
}
}
Err(broadcast::error::RecvError::Lagged(n)) => {
debug!("[{name}] Lagged behind by {n} messages.");
}
Err(_) => break,
}
}
}
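For reference, a subscriber to the bus deleted above might have looked like the following hedged sketch (the module path and the "logger" name are assumptions; in a real program the subscriber would be spawned before any events are published):

```rust
use harmony::instrumentation::{self, HarmonyEvent};

#[tokio::main]
async fn main() {
    // The handler runs for every event and keeps listening until it returns false.
    let listener = tokio::spawn(instrumentation::subscribe("logger", |event| async move {
        match event {
            HarmonyEvent::HarmonyFinished => false, // stop listening
            other => {
                println!("event: {other:?}");
                true
            }
        }
    }));

    // Give the subscriber time to attach, then publish.
    tokio::time::sleep(std::time::Duration::from_millis(50)).await;
    instrumentation::instrument(HarmonyEvent::HarmonyStarted).ok();
    instrumentation::instrument(HarmonyEvent::HarmonyFinished).ok();
    listener.await.unwrap();
}
```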

View File

@@ -7,7 +7,6 @@ use super::{
data::{Id, Version},
executors::ExecutorError,
inventory::Inventory,
topology::PreparationError,
};
pub enum InterpretName {
@@ -24,14 +23,6 @@ pub enum InterpretName {
TenantInterpret,
Application,
ArgoCD,
Alerting,
Ntfy,
HelmChart,
HelmCommand,
K8sResource,
Lamp,
ApplicationMonitoring,
K8sPrometheusCrdAlerting,
}
impl std::fmt::Display for InterpretName {
@@ -50,14 +41,6 @@ impl std::fmt::Display for InterpretName {
InterpretName::TenantInterpret => f.write_str("Tenant"),
InterpretName::Application => f.write_str("Application"),
InterpretName::ArgoCD => f.write_str("ArgoCD"),
InterpretName::Alerting => f.write_str("Alerting"),
InterpretName::Ntfy => f.write_str("Ntfy"),
InterpretName::HelmChart => f.write_str("HelmChart"),
InterpretName::HelmCommand => f.write_str("HelmCommand"),
InterpretName::K8sResource => f.write_str("K8sResource"),
InterpretName::Lamp => f.write_str("LAMP"),
InterpretName::ApplicationMonitoring => f.write_str("ApplicationMonitoring"),
InterpretName::K8sPrometheusCrdAlerting => f.write_str("K8sPrometheusCrdAlerting"),
}
}
}
@@ -130,14 +113,6 @@ impl std::fmt::Display for InterpretError {
}
impl Error for InterpretError {}
impl From<PreparationError> for InterpretError {
fn from(value: PreparationError) -> Self {
Self {
msg: format!("InterpretError : {value}"),
}
}
}
impl From<ExecutorError> for InterpretError {
fn from(value: ExecutorError) -> Self {
Self {

View File

@@ -1,14 +1,12 @@
use std::sync::{Arc, RwLock};
use std::sync::{Arc, Mutex, RwLock};
use log::{debug, warn};
use crate::topology::TopologyStatus;
use log::{info, warn};
use super::{
interpret::{InterpretError, Outcome},
interpret::{InterpretError, InterpretStatus, Outcome},
inventory::Inventory,
score::Score,
topology::{PreparationError, PreparationOutcome, Topology, TopologyState},
topology::Topology,
};
type ScoreVec<T> = Vec<Box<dyn Score<T>>>;
@@ -17,7 +15,7 @@ pub struct Maestro<T: Topology> {
inventory: Inventory,
topology: T,
scores: Arc<RwLock<ScoreVec<T>>>,
topology_state: TopologyState,
topology_preparation_result: Mutex<Option<Outcome>>,
}
impl<T: Topology> Maestro<T> {
@@ -25,46 +23,36 @@ impl<T: Topology> Maestro<T> {
///
/// This should rarely be used. Most of the time Maestro::initialize should be used instead.
pub fn new_without_initialization(inventory: Inventory, topology: T) -> Self {
let topology_name = topology.name().to_string();
Self {
inventory,
topology,
scores: Arc::new(RwLock::new(Vec::new())),
topology_state: TopologyState::new(topology_name),
topology_preparation_result: None.into(),
}
}
pub async fn initialize(inventory: Inventory, topology: T) -> Result<Self, PreparationError> {
let mut instance = Self::new_without_initialization(inventory, topology);
pub async fn initialize(inventory: Inventory, topology: T) -> Result<Self, InterpretError> {
let instance = Self::new_without_initialization(inventory, topology);
instance.prepare_topology().await?;
Ok(instance)
}
/// Ensures the associated Topology is ready for operations.
/// Delegates the readiness check and potential setup actions to the Topology.
async fn prepare_topology(&mut self) -> Result<PreparationOutcome, PreparationError> {
self.topology_state.prepare();
pub async fn prepare_topology(&self) -> Result<Outcome, InterpretError> {
info!("Ensuring topology '{}' is ready...", self.topology.name());
let outcome = self.topology.ensure_ready().await?;
info!(
"Topology '{}' readiness check complete: {}",
self.topology.name(),
outcome.status
);
let result = self.topology.ensure_ready().await;
match result {
Ok(outcome) => {
match outcome.clone() {
PreparationOutcome::Success { details } => {
self.topology_state.success(details);
}
PreparationOutcome::Noop => {
self.topology_state.noop();
}
};
Ok(outcome)
}
Err(err) => {
self.topology_state.error(err.to_string());
Err(err)
}
}
self.topology_preparation_result
.lock()
.unwrap()
.replace(outcome.clone());
Ok(outcome)
}
pub fn register_all(&mut self, mut scores: ScoreVec<T>) {
@@ -73,7 +61,15 @@ impl<T: Topology> Maestro<T> {
}
fn is_topology_initialized(&self) -> bool {
self.topology_state.status == TopologyStatus::Success
let result = self.topology_preparation_result.lock().unwrap();
if let Some(outcome) = result.as_ref() {
match outcome.status {
InterpretStatus::SUCCESS => return true,
_ => return false,
}
} else {
false
}
}
pub async fn interpret(&self, score: Box<dyn Score<T>>) -> Result<Outcome, InterpretError> {
@@ -84,9 +80,11 @@ impl<T: Topology> Maestro<T> {
self.topology.name(),
);
}
debug!("Interpreting score {score:?}");
let result = score.interpret(&self.inventory, &self.topology).await;
debug!("Got result {result:?}");
info!("Running score {score:?}");
let interpret = score.create_interpret();
info!("Launching interpret {interpret:?}");
let result = interpret.execute(&self.inventory, &self.topology).await;
info!("Got result {result:?}");
result
}

View File

@@ -3,7 +3,6 @@ pub mod data;
pub mod executors;
pub mod filter;
pub mod hardware;
pub mod instrumentation;
pub mod interpret;
pub mod inventory;
pub mod maestro;

View File

@@ -1,62 +1,22 @@
use std::collections::BTreeMap;
use async_trait::async_trait;
use serde::Serialize;
use serde_value::Value;
use super::{
data::Id,
instrumentation::{self, HarmonyEvent},
interpret::{Interpret, InterpretError, Outcome},
inventory::Inventory,
topology::Topology,
};
use super::{interpret::Interpret, topology::Topology};
#[async_trait]
pub trait Score<T: Topology>:
std::fmt::Debug + ScoreToString<T> + Send + Sync + CloneBoxScore<T> + SerializeScore<T>
{
async fn interpret(
&self,
inventory: &Inventory,
topology: &T,
) -> Result<Outcome, InterpretError> {
let id = Id::default();
let interpret = self.create_interpret();
instrumentation::instrument(HarmonyEvent::InterpretExecutionStarted {
execution_id: id.clone().to_string(),
topology: topology.name().into(),
interpret: interpret.get_name().to_string(),
score: self.name(),
message: format!("{} running...", interpret.get_name()),
})
.unwrap();
let result = interpret.execute(inventory, topology).await;
instrumentation::instrument(HarmonyEvent::InterpretExecutionFinished {
execution_id: id.clone().to_string(),
topology: topology.name().into(),
interpret: interpret.get_name().to_string(),
score: self.name(),
outcome: result.clone(),
})
.unwrap();
result
}
fn name(&self) -> String;
#[doc(hidden)]
fn create_interpret(&self) -> Box<dyn Interpret<T>>;
fn name(&self) -> String;
}
pub trait SerializeScore<T: Topology> {
fn serialize(&self) -> Value;
}
impl<S, T> SerializeScore<T> for S
impl<'de, S, T> SerializeScore<T> for S
where
T: Topology,
S: Score<T> + Serialize,
@@ -64,7 +24,7 @@ where
fn serialize(&self) -> Value {
// TODO not sure if this is the right place to handle the error or it should bubble
// up?
serde_value::to_value(self).expect("Score should serialize successfully")
serde_value::to_value(&self).expect("Score should serialize successfully")
}
}

View File

@@ -4,6 +4,8 @@ use harmony_types::net::MacAddress;
use log::info;
use crate::executors::ExecutorError;
use crate::interpret::InterpretError;
use crate::interpret::Outcome;
use super::DHCPStaticEntry;
use super::DhcpServer;
@@ -17,8 +19,6 @@ use super::K8sclient;
use super::LoadBalancer;
use super::LoadBalancerService;
use super::LogicalHost;
use super::PreparationError;
use super::PreparationOutcome;
use super::Router;
use super::TftpServer;
@@ -48,7 +48,7 @@ impl Topology for HAClusterTopology {
fn name(&self) -> &str {
"HAClusterTopology"
}
async fn ensure_ready(&self) -> Result<PreparationOutcome, PreparationError> {
async fn ensure_ready(&self) -> Result<Outcome, InterpretError> {
todo!(
"ensure_ready, not entirely sure what it should do here, probably something like verify that the hosts are reachable and all services are up and ready."
)
@@ -241,15 +241,13 @@ pub struct DummyInfra;
#[async_trait]
impl Topology for DummyInfra {
fn name(&self) -> &str {
"DummyInfra"
todo!()
}
async fn ensure_ready(&self) -> Result<PreparationOutcome, PreparationError> {
async fn ensure_ready(&self) -> Result<Outcome, InterpretError> {
let dummy_msg = "This is a dummy infrastructure that does nothing";
info!("{dummy_msg}");
Ok(PreparationOutcome::Success {
details: dummy_msg.into(),
})
Ok(Outcome::success(dummy_msg.to_string()))
}
}

View File

@@ -1,11 +1,12 @@
use derive_new::new;
use futures_util::StreamExt;
use k8s_openapi::{
ClusterResourceScope, NamespaceResourceScope,
api::{apps::v1::Deployment, core::v1::Pod},
};
use kube::{
Client, Config, Error, Resource,
api::{Api, AttachParams, DeleteParams, ListParams, Patch, PatchParams, ResourceExt},
api::{Api, AttachParams, ListParams, Patch, PatchParams, ResourceExt},
config::{KubeConfigOptions, Kubeconfig},
core::ErrorResponse,
runtime::reflector::Lookup,
@@ -16,25 +17,14 @@ use kube::{
runtime::wait::await_condition,
};
use log::{debug, error, trace};
use serde::{Serialize, de::DeserializeOwned};
use serde_json::json;
use similar::TextDiff;
use tokio::io::AsyncReadExt;
use serde::de::DeserializeOwned;
use similar::{DiffableStr, TextDiff};
#[derive(new, Clone)]
pub struct K8sClient {
client: Client,
}
impl Serialize for K8sClient {
fn serialize<S>(&self, _serializer: S) -> Result<S::Ok, S::Error>
where
S: serde::Serializer,
{
todo!()
}
}
impl std::fmt::Debug for K8sClient {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
// This is a poor man's debug implementation for now as kube::Client does not provide much
@@ -53,66 +43,6 @@ impl K8sClient {
})
}
pub async fn get_deployment(
&self,
name: &str,
namespace: Option<&str>,
) -> Result<Option<Deployment>, Error> {
let deps: Api<Deployment> = if let Some(ns) = namespace {
Api::namespaced(self.client.clone(), ns)
} else {
Api::default_namespaced(self.client.clone())
};
Ok(deps.get_opt(name).await?)
}
pub async fn get_pod(&self, name: &str, namespace: Option<&str>) -> Result<Option<Pod>, Error> {
let pods: Api<Pod> = if let Some(ns) = namespace {
Api::namespaced(self.client.clone(), ns)
} else {
Api::default_namespaced(self.client.clone())
};
Ok(pods.get_opt(name).await?)
}
pub async fn scale_deployment(
&self,
name: &str,
namespace: Option<&str>,
replicas: u32,
) -> Result<(), Error> {
let deployments: Api<Deployment> = if let Some(ns) = namespace {
Api::namespaced(self.client.clone(), ns)
} else {
Api::default_namespaced(self.client.clone())
};
let patch = json!({
"spec": {
"replicas": replicas
}
});
let pp = PatchParams::default();
let scale = Patch::Apply(&patch);
deployments.patch_scale(name, &pp, &scale).await?;
Ok(())
}
pub async fn delete_deployment(
&self,
name: &str,
namespace: Option<&str>,
) -> Result<(), Error> {
let deployments: Api<Deployment> = if let Some(ns) = namespace {
Api::namespaced(self.client.clone(), ns)
} else {
Api::default_namespaced(self.client.clone())
};
let delete_params = DeleteParams::default();
deployments.delete(name, &delete_params).await?;
Ok(())
}
pub async fn wait_until_deployment_ready(
&self,
name: String,
@@ -128,75 +58,13 @@ impl K8sClient {
}
let establish = await_condition(api, name.as_str(), conditions::is_deployment_completed());
let t = timeout.unwrap_or(300);
let t = if let Some(t) = timeout { t } else { 300 };
let res = tokio::time::timeout(std::time::Duration::from_secs(t), establish).await;
if res.is_ok() {
Ok(())
if let Ok(r) = res {
return Ok(());
} else {
Err("timed out while waiting for deployment".to_string())
}
}
/// Will execute a command in the first pod found that matches the specified label
/// '{label}={name}'
pub async fn exec_app_capture_output(
&self,
name: String,
label: String,
namespace: Option<&str>,
command: Vec<&str>,
) -> Result<String, String> {
let api: Api<Pod>;
if let Some(ns) = namespace {
api = Api::namespaced(self.client.clone(), ns);
} else {
api = Api::default_namespaced(self.client.clone());
}
let pod_list = api
.list(&ListParams::default().labels(format!("{label}={name}").as_str()))
.await
.expect("couldn't get list of pods");
let res = api
.exec(
pod_list
.items
.first()
.expect("couldn't get pod")
.name()
.expect("couldn't get pod name")
.into_owned()
.as_str(),
command,
&AttachParams::default().stdout(true).stderr(true),
)
.await;
match res {
Err(e) => Err(e.to_string()),
Ok(mut process) => {
let status = process
.take_status()
.expect("Couldn't get status")
.await
.expect("Couldn't unwrap status");
if let Some(s) = status.status {
let mut stdout_buf = String::new();
if let Some(mut stdout) = process.stdout().take() {
stdout.read_to_string(&mut stdout_buf).await;
}
debug!("Status: {} - {:?}", s, status.details);
if s == "Success" {
Ok(stdout_buf)
} else {
Err(s)
}
} else {
Err("Couldn't get inner status of pod exec".to_string())
}
}
return Err("timed out while waiting for deployment".to_string());
}
}
@@ -235,7 +103,7 @@ impl K8sClient {
.await;
match res {
Err(e) => Err(e.to_string()),
Err(e) => return Err(e.to_string()),
Ok(mut process) => {
let status = process
.take_status()
@@ -244,10 +112,14 @@ impl K8sClient {
.expect("Couldn't unwrap status");
if let Some(s) = status.status {
debug!("Status: {} - {:?}", s, status.details);
if s == "Success" { Ok(()) } else { Err(s) }
debug!("Status: {}", s);
if s == "Success" {
return Ok(());
} else {
return Err(s);
}
} else {
Err("Couldn't get inner status of pod exec".to_string())
return Err("Couldn't get inner status of pod exec".to_string());
}
}
}
@@ -288,9 +160,8 @@ impl K8sClient {
trace!("Received current value {current:#?}");
// The resource exists, so we calculate and display a diff.
println!("\nPerforming dry-run for resource: '{}'", name);
let mut current_yaml = serde_yaml::to_value(&current).unwrap_or_else(|_| {
panic!("Could not serialize current value : {current:#?}")
});
let mut current_yaml = serde_yaml::to_value(&current)
.expect(&format!("Could not serialize current value : {current:#?}"));
if current_yaml.is_mapping() && current_yaml.get("status").is_some() {
let map = current_yaml.as_mapping_mut().unwrap();
let removed = map.remove_entry("status");
@@ -357,7 +228,7 @@ impl K8sClient {
}
}
pub async fn apply_many<K>(&self, resource: &[K], ns: Option<&str>) -> Result<Vec<K>, Error>
pub async fn apply_many<K>(&self, resource: &Vec<K>, ns: Option<&str>) -> Result<Vec<K>, Error>
where
K: Resource + Clone + std::fmt::Debug + DeserializeOwned + serde::Serialize,
<K as Resource>::Scope: ApplyStrategy<K>,
@@ -373,7 +244,7 @@ impl K8sClient {
pub async fn apply_yaml_many(
&self,
#[allow(clippy::ptr_arg)] yaml: &Vec<serde_yaml::Value>,
yaml: &Vec<serde_yaml::Value>,
ns: Option<&str>,
) -> Result<(), Error> {
for y in yaml.iter() {
@@ -389,33 +260,17 @@ impl K8sClient {
) -> Result<(), Error> {
let obj: DynamicObject = serde_yaml::from_value(yaml.clone()).expect("TODO do not unwrap");
let name = obj.metadata.name.as_ref().expect("YAML must have a name");
let namespace = obj
.metadata
.namespace
.as_ref()
.expect("YAML must have a namespace");
let api_version = yaml
.get("apiVersion")
.expect("couldn't get apiVersion from YAML")
.as_str()
.expect("couldn't get apiVersion as str");
let kind = yaml
.get("kind")
.expect("couldn't get kind from YAML")
.as_str()
.expect("couldn't get kind as str");
let split: Vec<&str> = api_version.splitn(2, "/").collect();
let g = split[0];
let v = split[1];
let gvk = GroupVersionKind::gvk(g, v, kind);
let api_resource = ApiResource::from_gvk(&gvk);
let namespace = match ns {
Some(n) => n,
None => obj
.metadata
.namespace
.as_ref()
.expect("YAML must have a namespace"),
};
// 4. Define the API resource type using the GVK from the object.
// The plural name 'applications' is taken from your CRD definition.
error!("This only supports argocd application harcoded, very rrrong");
let gvk = GroupVersionKind::gvk("argoproj.io", "v1alpha1", "Application");
let api_resource = ApiResource::from_gvk_with_plural(&gvk, "applications");
// 5. Create a dynamic API client for this resource type.
let api: Api<DynamicObject> =

View File

@@ -1,46 +1,30 @@
use std::{process::Command, sync::Arc};
use async_trait::async_trait;
use inquire::Confirm;
use log::{debug, info, warn};
use serde::Serialize;
use tokio::sync::OnceCell;
use crate::{
executors::ExecutorError,
interpret::InterpretStatus,
interpret::{InterpretError, Outcome},
inventory::Inventory,
modules::{
k3d::K3DInstallationScore,
monitoring::kube_prometheus::crd::{
crd_alertmanager_config::CRDPrometheus,
prometheus_operator::prometheus_operator_helm_chart_score,
},
prometheus::{
k8s_prometheus_alerting_score::K8sPrometheusCRDAlertingScore,
prometheus::PrometheusApplicationMonitoring,
},
},
score::Score,
maestro::Maestro,
modules::k3d::K3DInstallationScore,
topology::LocalhostTopology,
};
use super::{
DeploymentTarget, HelmCommand, K8sclient, MultiTargetTopology, PreparationError,
PreparationOutcome, Topology,
DeploymentTarget, HelmCommand, K8sclient, MultiTargetTopology, Topology,
k8s::K8sClient,
oberservability::monitoring::AlertReceiver,
tenant::{
TenantConfig, TenantManager,
k8s::K8sTenantManager,
network_policy::{
K3dNetworkPolicyStrategy, NetworkPolicyStrategy, NoopNetworkPolicyStrategy,
},
},
tenant::{TenantConfig, TenantManager, k8s::K8sTenantManager},
};
#[derive(Clone, Debug)]
struct K8sState {
client: Arc<K8sClient>,
source: K8sSource,
_source: K8sSource,
message: String,
}
@@ -74,42 +58,8 @@ impl K8sclient for K8sAnywhereTopology {
}
}
#[async_trait]
impl PrometheusApplicationMonitoring<CRDPrometheus> for K8sAnywhereTopology {
async fn install_prometheus(
&self,
sender: &CRDPrometheus,
inventory: &Inventory,
receivers: Option<Vec<Box<dyn AlertReceiver<CRDPrometheus>>>>,
) -> Result<PreparationOutcome, PreparationError> {
let po_result = self.ensure_prometheus_operator(sender).await?;
if po_result == PreparationOutcome::Noop {
debug!("Skipping Prometheus CR installation due to missing operator.");
return Ok(po_result);
}
let result = self
.get_k8s_prometheus_application_score(sender.clone(), receivers)
.await
.interpret(inventory, self)
.await;
match result {
Ok(outcome) => match outcome.status {
InterpretStatus::SUCCESS => Ok(PreparationOutcome::Success {
details: outcome.message,
}),
InterpretStatus::NOOP => Ok(PreparationOutcome::Noop),
_ => Err(PreparationError::new(outcome.message)),
},
Err(err) => Err(PreparationError::new(err.to_string())),
}
}
}
impl Serialize for K8sAnywhereTopology {
fn serialize<S>(&self, _serializer: S) -> Result<S::Ok, S::Error>
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: serde::Serializer,
{
@@ -134,19 +84,6 @@ impl K8sAnywhereTopology {
}
}
async fn get_k8s_prometheus_application_score(
&self,
sender: CRDPrometheus,
receivers: Option<Vec<Box<dyn AlertReceiver<CRDPrometheus>>>>,
) -> K8sPrometheusCRDAlertingScore {
K8sPrometheusCRDAlertingScore {
sender,
receivers: receivers.unwrap_or_default(),
service_monitors: vec![],
prometheus_rules: vec![],
}
}
fn is_helm_available(&self) -> Result<(), String> {
let version_result = Command::new("helm")
.arg("version")
@@ -157,8 +94,9 @@ impl K8sAnywhereTopology {
return Err("Failed to run 'helm -version'".to_string());
}
// Print the version output
let version_output = String::from_utf8_lossy(&version_result.stdout);
debug!("Helm version: {}", version_output.trim());
println!("Helm version: {}", version_output.trim());
Ok(())
}
@@ -175,42 +113,33 @@ impl K8sAnywhereTopology {
K3DInstallationScore::default()
}
async fn try_install_k3d(&self) -> Result<(), PreparationError> {
let result = self
.get_k3d_installation_score()
.interpret(&Inventory::empty(), self)
.await;
match result {
Ok(outcome) => match outcome.status {
InterpretStatus::SUCCESS => Ok(()),
InterpretStatus::NOOP => Ok(()),
_ => Err(PreparationError::new(outcome.message)),
},
Err(err) => Err(PreparationError::new(err.to_string())),
}
async fn try_install_k3d(&self) -> Result<(), InterpretError> {
let maestro = Maestro::initialize(Inventory::autoload(), LocalhostTopology::new()).await?;
let k3d_score = self.get_k3d_installation_score();
maestro.interpret(Box::new(k3d_score)).await?;
Ok(())
}
async fn try_get_or_install_k8s_client(&self) -> Result<Option<K8sState>, PreparationError> {
async fn try_get_or_install_k8s_client(&self) -> Result<Option<K8sState>, InterpretError> {
let k8s_anywhere_config = &self.config;
// TODO this deserves some refactoring, it is becoming a bit hard to figure out
// be careful when making modifications here
if k8s_anywhere_config.use_local_k3d {
debug!("Using local k3d cluster because of use_local_k3d set to true");
info!("Using local k3d cluster because of use_local_k3d set to true");
} else {
if let Some(kubeconfig) = &k8s_anywhere_config.kubeconfig {
debug!("Loading kubeconfig {kubeconfig}");
match self.try_load_kubeconfig(kubeconfig).await {
match self.try_load_kubeconfig(&kubeconfig).await {
Some(client) => {
return Ok(Some(K8sState {
client: Arc::new(client),
source: K8sSource::Kubeconfig,
_source: K8sSource::Kubeconfig,
message: format!("Loaded k8s client from kubeconfig {kubeconfig}"),
}));
}
None => {
return Err(PreparationError::new(format!(
return Err(InterpretError::new(format!(
"Failed to load kubeconfig from {kubeconfig}"
)));
}
@@ -229,13 +158,22 @@ impl K8sAnywhereTopology {
}
if !k8s_anywhere_config.autoinstall {
warn!(
"Installation cancelled, K8sAnywhere could not initialize a valid Kubernetes client"
);
return Ok(None);
debug!("Autoinstall confirmation prompt");
let confirmation = Confirm::new( "Harmony autoinstallation is not activated, do you wish to launch autoinstallation? : ")
.with_default(false)
.prompt()
.expect("Unexpected prompt error");
debug!("Autoinstall confirmation {confirmation}");
if !confirmation {
warn!(
"Installation cancelled, K8sAnywhere could not initialize a valid Kubernetes client"
);
return Ok(None);
}
}
debug!("Starting K8sAnywhere installation");
info!("Starting K8sAnywhere installation");
self.try_install_k3d().await?;
let k3d_score = self.get_k3d_installation_score();
// I feel like having to rely on the k3d_rs crate here is a smell
@@ -247,8 +185,8 @@ impl K8sAnywhereTopology {
let state = match k3d.get_client().await {
Ok(client) => K8sState {
client: Arc::new(K8sClient::new(client)),
source: K8sSource::LocalK3d,
message: "K8s client ready".to_string(),
_source: K8sSource::LocalK3d,
message: "Successfully installed K3D cluster and acquired client".to_string(),
},
Err(_) => todo!(),
};
@@ -256,21 +194,15 @@ impl K8sAnywhereTopology {
Ok(Some(state))
}
async fn ensure_k8s_tenant_manager(&self, k8s_state: &K8sState) -> Result<(), String> {
if self.tenant_manager.get().is_some() {
async fn ensure_k8s_tenant_manager(&self) -> Result<(), String> {
if let Some(_) = self.tenant_manager.get() {
return Ok(());
}
self.tenant_manager
.get_or_try_init(async || -> Result<K8sTenantManager, String> {
let k8s_client = self.k8s_client().await?;
let network_policy_strategy: Box<dyn NetworkPolicyStrategy> = match k8s_state.source
{
K8sSource::LocalK3d => Box::new(K3dNetworkPolicyStrategy::new()),
K8sSource::Kubeconfig => Box::new(NoopNetworkPolicyStrategy::new()),
};
Ok(K8sTenantManager::new(k8s_client, network_policy_strategy))
Ok(K8sTenantManager::new(k8s_client))
})
.await?;
@@ -285,55 +217,6 @@ impl K8sAnywhereTopology {
)),
}
}
async fn ensure_prometheus_operator(
&self,
sender: &CRDPrometheus,
) -> Result<PreparationOutcome, PreparationError> {
let status = Command::new("sh")
.args(["-c", "kubectl get crd -A | grep -i prometheuses"])
.status()
.map_err(|e| PreparationError::new(format!("could not connect to cluster: {}", e)))?;
if !status.success() {
if let Some(Some(k8s_state)) = self.k8s_state.get() {
match k8s_state.source {
K8sSource::LocalK3d => {
debug!("installing prometheus operator");
let op_score =
prometheus_operator_helm_chart_score(sender.namespace.clone());
let result = op_score.interpret(&Inventory::empty(), self).await;
return match result {
Ok(outcome) => match outcome.status {
InterpretStatus::SUCCESS => Ok(PreparationOutcome::Success {
details: "installed prometheus operator".into(),
}),
InterpretStatus::NOOP => Ok(PreparationOutcome::Noop),
_ => Err(PreparationError::new(
"failed to install prometheus operator (unknown error)".into(),
)),
},
Err(err) => Err(PreparationError::new(err.to_string())),
};
}
K8sSource::Kubeconfig => {
debug!("unable to install prometheus operator, contact cluster admin");
return Ok(PreparationOutcome::Noop);
}
}
} else {
warn!("Unable to detect k8s_state. Skipping Prometheus Operator install.");
return Ok(PreparationOutcome::Noop);
}
}
debug!("Prometheus operator is already present, skipping install");
Ok(PreparationOutcome::Success {
details: "prometheus operator present in cluster".into(),
})
}
}
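The CRD probe above shells out through `sh -c` and relies on `grep`'s exit status: `grep` exits non-zero when nothing matches, so a failed status means the CRD is absent. A generalized sketch of the same probe:

```rust
use std::process::Command;

// Returns Ok(true) when `kubectl get crd` lists a CRD matching the fragment.
fn crd_exists(name_fragment: &str) -> std::io::Result<bool> {
    let status = Command::new("sh")
        .arg("-c")
        .arg(format!("kubectl get crd -A | grep -i {name_fragment}"))
        .status()?;
    Ok(status.success())
}
```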
#[derive(Clone, Debug)]
@@ -354,7 +237,7 @@ pub struct K8sAnywhereConfig {
///
/// When enabled, autoinstall will set up a K3D cluster on the localhost. https://k3d.io/stable/
///
/// Default: true
/// Default: false
pub autoinstall: bool,
/// Whether to use local k3d cluster.
@@ -373,7 +256,7 @@ impl K8sAnywhereConfig {
use_system_kubeconfig: std::env::var("HARMONY_USE_SYSTEM_KUBECONFIG")
.map_or_else(|_| false, |v| v.parse().ok().unwrap_or(false)),
autoinstall: std::env::var("HARMONY_AUTOINSTALL")
.map_or_else(|_| true, |v| v.parse().ok().unwrap_or(false)),
.map_or_else(|_| false, |v| v.parse().ok().unwrap_or(false)),
// TODO harmony_profile should be managed at a more core level than this
harmony_profile: std::env::var("HARMONY_PROFILE").map_or_else(
|_| "dev".to_string(),
@@ -391,25 +274,26 @@ impl Topology for K8sAnywhereTopology {
"K8sAnywhereTopology"
}
async fn ensure_ready(&self) -> Result<PreparationOutcome, PreparationError> {
async fn ensure_ready(&self) -> Result<Outcome, InterpretError> {
let k8s_state = self
.k8s_state
.get_or_try_init(|| self.try_get_or_install_k8s_client())
.await?;
let k8s_state: &K8sState = k8s_state.as_ref().ok_or(PreparationError::new(
"no K8s client could be found or installed".to_string(),
let k8s_state: &K8sState = k8s_state.as_ref().ok_or(InterpretError::new(
"No K8s client could be found or installed".to_string(),
))?;
self.ensure_k8s_tenant_manager(k8s_state)
self.ensure_k8s_tenant_manager()
.await
.map_err(PreparationError::new)?;
.map_err(|e| InterpretError::new(e))?;
match self.is_helm_available() {
Ok(()) => Ok(PreparationOutcome::Success {
details: format!("{} + helm available", k8s_state.message.clone()),
}),
Err(e) => Err(PreparationError::new(format!("helm unavailable: {}", e))),
Ok(()) => Ok(Outcome::success(format!(
"{} + helm available",
k8s_state.message.clone()
))),
Err(e) => Err(InterpretError::new(format!("helm unavailable: {}", e))),
}
}
}

View File

@@ -1,7 +1,9 @@
use async_trait::async_trait;
use derive_new::new;
use super::{HelmCommand, PreparationError, PreparationOutcome, Topology};
use crate::interpret::{InterpretError, Outcome};
use super::{HelmCommand, Topology};
#[derive(new)]
pub struct LocalhostTopology;
@@ -12,10 +14,10 @@ impl Topology for LocalhostTopology {
"LocalHostTopology"
}
async fn ensure_ready(&self) -> Result<PreparationOutcome, PreparationError> {
Ok(PreparationOutcome::Success {
details: "Localhost is Chuck Norris, always ready.".into(),
})
async fn ensure_ready(&self) -> Result<Outcome, InterpretError> {
Ok(Outcome::success(
"Localhost is Chuck Norris, always ready.".to_string(),
))
}
}

View File

@@ -6,7 +6,6 @@ mod k8s_anywhere;
mod localhost;
pub mod oberservability;
pub mod tenant;
use derive_new::new;
pub use k8s_anywhere::*;
pub use localhost::*;
pub mod k8s;
@@ -27,13 +26,10 @@ pub use tftp::*;
mod helm_command;
pub use helm_command::*;
use super::{
executors::ExecutorError,
instrumentation::{self, HarmonyEvent},
};
use std::error::Error;
use std::net::IpAddr;
use super::interpret::{InterpretError, Outcome};
/// Represents a logical view of an infrastructure environment providing specific capabilities.
///
/// A Topology acts as a self-contained "package" responsible for managing access
@@ -61,128 +57,9 @@ pub trait Topology: Send + Sync {
/// * **Internal Orchestration:** For complex topologies, this method might manage dependencies on other sub-topologies, ensuring *their* `ensure_ready` is called first. Using nested `Maestros` to run setup `Scores` against these sub-topologies is the recommended pattern for non-trivial bootstrapping, allowing reuse of Harmony's core orchestration logic.
///
/// # Returns
/// - `Ok(PreparationOutcome)`: Indicates the topology is now ready. The `Outcome` status might be `SUCCESS` if actions were taken, or `NOOP` if it was already ready. The message should provide context.
/// - `Err(PreparationError)`: Indicates the topology could not reach a ready state due to configuration issues, discovery failures, bootstrap errors, or unsupported environments.
async fn ensure_ready(&self) -> Result<PreparationOutcome, PreparationError>;
}
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum PreparationOutcome {
Success { details: String },
Noop,
}
#[derive(Debug, Clone, new)]
pub struct PreparationError {
msg: String,
}
impl std::fmt::Display for PreparationError {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.write_str(&self.msg)
}
}
impl Error for PreparationError {}
impl From<ExecutorError> for PreparationError {
fn from(value: ExecutorError) -> Self {
Self {
msg: format!("InterpretError : {value}"),
}
}
}
impl From<kube::Error> for PreparationError {
fn from(value: kube::Error) -> Self {
Self {
msg: format!("PreparationError : {value}"),
}
}
}
impl From<String> for PreparationError {
fn from(value: String) -> Self {
Self {
msg: format!("PreparationError : {value}"),
}
}
}
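The three `From` impls above are what make `?` ergonomic in preparation code: executor, kube, and plain-string failures all lift into `PreparationError` without explicit `map_err`. A minimal sketch (`kube::Client::apiserver_version` is a real method; `ensure_nonempty` is a hypothetical helper for illustration):

```rust
async fn probe_cluster(client: &kube::Client) -> Result<(), PreparationError> {
    let info = client.apiserver_version().await?; // kube::Error -> PreparationError
    ensure_nonempty(&info.major)?; // String -> PreparationError
    Ok(())
}

// Hypothetical helper: returns a plain String error so `?` exercises From<String>.
fn ensure_nonempty(major: &str) -> Result<(), String> {
    if major.is_empty() {
        return Err("apiserver returned no version info".to_string());
    }
    Ok(())
}
```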
#[derive(Clone, Debug, PartialEq)]
pub enum TopologyStatus {
Queued,
Preparing,
Success,
Noop,
Error,
}
pub struct TopologyState {
pub topology: String,
pub status: TopologyStatus,
}
impl TopologyState {
pub fn new(topology: String) -> Self {
let instance = Self {
topology,
status: TopologyStatus::Queued,
};
instrumentation::instrument(HarmonyEvent::TopologyStateChanged {
topology: instance.topology.clone(),
status: instance.status.clone(),
message: None,
})
.unwrap();
instance
}
pub fn prepare(&mut self) {
self.status = TopologyStatus::Preparing;
instrumentation::instrument(HarmonyEvent::TopologyStateChanged {
topology: self.topology.clone(),
status: self.status.clone(),
message: None,
})
.unwrap();
}
pub fn success(&mut self, message: String) {
self.status = TopologyStatus::Success;
instrumentation::instrument(HarmonyEvent::TopologyStateChanged {
topology: self.topology.clone(),
status: self.status.clone(),
message: Some(message),
})
.unwrap();
}
pub fn noop(&mut self) {
self.status = TopologyStatus::Noop;
instrumentation::instrument(HarmonyEvent::TopologyStateChanged {
topology: self.topology.clone(),
status: self.status.clone(),
message: None,
})
.unwrap();
}
pub fn error(&mut self, message: String) {
self.status = TopologyStatus::Error;
instrumentation::instrument(HarmonyEvent::TopologyStateChanged {
topology: self.topology.clone(),
status: self.status.clone(),
message: Some(message),
})
.unwrap();
}
/// - `Ok(Outcome)`: Indicates the topology is now ready. The `Outcome` status might be `SUCCESS` if actions were taken, or `NOOP` if it was already ready. The message should provide context.
/// - `Err(TopologyError)`: Indicates the topology could not reach a ready state due to configuration issues, discovery failures, bootstrap errors, or unsupported environments.
async fn ensure_ready(&self) -> Result<Outcome, InterpretError>;
}
#[derive(Debug)]
@@ -211,7 +88,7 @@ impl Serialize for Url {
{
match self {
Url::LocalFolder(path) => serializer.serialize_str(path),
Url::Url(url) => serializer.serialize_str(url.as_str()),
Url::Url(url) => serializer.serialize_str(&url.as_str()),
}
}
}

View File

@@ -11,7 +11,7 @@ use crate::{
};
#[async_trait]
pub trait AlertSender: Send + Sync + std::fmt::Debug {
pub trait AlertSender: Any + Send + Sync + std::fmt::Debug {
fn name(&self) -> String;
}
@@ -45,7 +45,7 @@ impl<S: AlertSender + Installable<T>, T: Topology> Interpret<T> for AlertingInte
}
fn get_name(&self) -> InterpretName {
InterpretName::Alerting
todo!()
}
fn get_version(&self) -> Version {
@@ -64,9 +64,7 @@ impl<S: AlertSender + Installable<T>, T: Topology> Interpret<T> for AlertingInte
#[async_trait]
pub trait AlertReceiver<S: AlertSender>: std::fmt::Debug + Send + Sync {
async fn install(&self, sender: &S) -> Result<Outcome, InterpretError>;
fn name(&self) -> String;
fn clone_box(&self) -> Box<dyn AlertReceiver<S>>;
fn as_any(&self) -> &dyn Any;
}
#[async_trait]
@@ -76,6 +74,6 @@ pub trait AlertRule<S: AlertSender>: std::fmt::Debug + Send + Sync {
}
#[async_trait]
pub trait ScrapeTarget<S: AlertSender> {
pub trait ScrapeTarger<S: AlertSender> {
async fn install(&self, sender: &S) -> Result<(), InterpretError>;
}
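Against the slimmed-down trait above (this diff drops `name`, `clone_box`, and `as_any` from `AlertReceiver`), a receiver implementation reduces to a single async method. A minimal illustrative sketch:

```rust
use async_trait::async_trait;

#[derive(Debug)]
struct LogOnlyReceiver;

#[async_trait]
impl<S: AlertSender> AlertReceiver<S> for LogOnlyReceiver {
    async fn install(&self, sender: &S) -> Result<Outcome, InterpretError> {
        // A real receiver would register itself with the sender here.
        Ok(Outcome::success(format!("wired receiver to {}", sender.name())))
    }
}
```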

View File

@@ -27,11 +27,11 @@ pub struct UnmanagedRouter {
impl Router for UnmanagedRouter {
fn get_gateway(&self) -> IpAddress {
self.gateway
self.gateway.clone()
}
fn get_cidr(&self) -> Ipv4Cidr {
self.cidr
self.cidr.clone()
}
fn get_host(&self) -> LogicalHost {

View File

@@ -15,38 +15,36 @@ use k8s_openapi::{
apimachinery::pkg::util::intstr::IntOrString,
};
use kube::Resource;
use log::debug;
use log::{debug, info, warn};
use serde::de::DeserializeOwned;
use serde_json::json;
use tokio::sync::OnceCell;
use super::{TenantConfig, TenantManager, network_policy::NetworkPolicyStrategy};
use super::{TenantConfig, TenantManager};
#[derive(Debug)]
#[derive(Clone, Debug)]
pub struct K8sTenantManager {
k8s_client: Arc<K8sClient>,
k8s_tenant_config: Arc<OnceCell<TenantConfig>>,
network_policy_strategy: Box<dyn NetworkPolicyStrategy>,
}
impl K8sTenantManager {
pub fn new(
client: Arc<K8sClient>,
network_policy_strategy: Box<dyn NetworkPolicyStrategy>,
) -> Self {
pub fn new(client: Arc<K8sClient>) -> Self {
Self {
k8s_client: client,
k8s_tenant_config: Arc::new(OnceCell::new()),
network_policy_strategy,
}
}
}
impl K8sTenantManager {
fn get_namespace_name(&self, config: &TenantConfig) -> String {
config.name.clone()
}
fn ensure_constraints(&self, _namespace: &Namespace) -> Result<(), ExecutorError> {
// TODO: Ensure constraints are applied to namespace (https://git.nationtech.io/NationTech/harmony/issues/98)
warn!("Validate that when tenant already exists (by id) that name has not changed");
warn!("Make sure other Tenant constraints are respected by this k8s implementation");
Ok(())
}
@@ -221,6 +219,24 @@ impl K8sTenantManager {
}
]
},
{
"to": [
{
"ipBlock": {
"cidr": "10.43.0.1/32",
}
}
]
},
{
"to": [
{
"ipBlock": {
"cidr": "172.23.0.0/16",
}
}
]
},
{
"to": [
{
@@ -288,19 +304,19 @@ impl K8sTenantManager {
let ports: Option<Vec<NetworkPolicyPort>> =
c.1.as_ref().map(|spec| match &spec.data {
super::PortSpecData::SinglePort(port) => vec![NetworkPolicyPort {
port: Some(IntOrString::Int((*port).into())),
port: Some(IntOrString::Int(port.clone().into())),
..Default::default()
}],
super::PortSpecData::PortRange(start, end) => vec![NetworkPolicyPort {
port: Some(IntOrString::Int((*start).into())),
end_port: Some((*end).into()),
port: Some(IntOrString::Int(start.clone().into())),
end_port: Some(end.clone().into()),
protocol: None, // Not currently supported by Harmony
}],
super::PortSpecData::ListOfPorts(items) => items
.iter()
.map(|i| NetworkPolicyPort {
port: Some(IntOrString::Int((*i).into())),
port: Some(IntOrString::Int(i.clone().into())),
..Default::default()
})
.collect(),
@@ -345,19 +361,19 @@ impl K8sTenantManager {
let ports: Option<Vec<NetworkPolicyPort>> =
c.1.as_ref().map(|spec| match &spec.data {
super::PortSpecData::SinglePort(port) => vec![NetworkPolicyPort {
port: Some(IntOrString::Int((*port).into())),
port: Some(IntOrString::Int(port.clone().into())),
..Default::default()
}],
super::PortSpecData::PortRange(start, end) => vec![NetworkPolicyPort {
port: Some(IntOrString::Int((*start).into())),
end_port: Some((*end).into()),
port: Some(IntOrString::Int(start.clone().into())),
end_port: Some(end.clone().into()),
protocol: None, // Not currently supported by Harmony
}],
super::PortSpecData::ListOfPorts(items) => items
.iter()
.map(|i| NetworkPolicyPort {
port: Some(IntOrString::Int((*i).into())),
port: Some(IntOrString::Int(i.clone().into())),
..Default::default()
})
.collect(),
@@ -390,27 +406,12 @@ impl K8sTenantManager {
}
}
impl Clone for K8sTenantManager {
fn clone(&self) -> Self {
Self {
k8s_client: self.k8s_client.clone(),
k8s_tenant_config: self.k8s_tenant_config.clone(),
network_policy_strategy: self.network_policy_strategy.clone_box(),
}
}
}
#[async_trait]
impl TenantManager for K8sTenantManager {
async fn provision_tenant(&self, config: &TenantConfig) -> Result<(), ExecutorError> {
let namespace = self.build_namespace(config)?;
let resource_quota = self.build_resource_quota(config)?;
let network_policy = self.build_network_policy(config)?;
let network_policy = self
.network_policy_strategy
.adjust_policy(network_policy, config);
let resource_limit_range = self.build_limit_range(config)?;
self.ensure_constraints(&namespace)?;
@@ -427,14 +428,13 @@ impl TenantManager for K8sTenantManager {
debug!("Creating network_policy for tenant {}", config.name);
self.apply_resource(network_policy, config).await?;
debug!(
info!(
"Success provisionning K8s tenant id {} name {}",
config.id, config.name
);
self.store_config(config);
Ok(())
}
async fn get_tenant_config(&self) -> Option<TenantConfig> {
self.k8s_tenant_config.get().cloned()
}

View File

@@ -1,11 +1,11 @@
pub mod k8s;
mod manager;
pub mod network_policy;
use std::str::FromStr;
use crate::data::Id;
pub use manager::*;
use serde::{Deserialize, Serialize};
use std::str::FromStr;
use crate::data::Id;
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] // Assuming serde for Scores
pub struct TenantConfig {

View File

@@ -1,120 +0,0 @@
use k8s_openapi::api::networking::v1::{
IPBlock, NetworkPolicy, NetworkPolicyEgressRule, NetworkPolicyPeer, NetworkPolicySpec,
};
use super::TenantConfig;
pub trait NetworkPolicyStrategy: Send + Sync + std::fmt::Debug {
fn clone_box(&self) -> Box<dyn NetworkPolicyStrategy>;
fn adjust_policy(&self, policy: NetworkPolicy, config: &TenantConfig) -> NetworkPolicy;
}
#[derive(Clone, Debug)]
pub struct NoopNetworkPolicyStrategy {}
impl NoopNetworkPolicyStrategy {
pub fn new() -> Self {
Self {}
}
}
impl Default for NoopNetworkPolicyStrategy {
fn default() -> Self {
Self::new()
}
}
impl NetworkPolicyStrategy for NoopNetworkPolicyStrategy {
fn clone_box(&self) -> Box<dyn NetworkPolicyStrategy> {
Box::new(self.clone())
}
fn adjust_policy(&self, policy: NetworkPolicy, _config: &TenantConfig) -> NetworkPolicy {
policy
}
}
#[derive(Clone, Debug)]
pub struct K3dNetworkPolicyStrategy {}
impl K3dNetworkPolicyStrategy {
pub fn new() -> Self {
Self {}
}
}
impl Default for K3dNetworkPolicyStrategy {
fn default() -> Self {
Self::new()
}
}
impl NetworkPolicyStrategy for K3dNetworkPolicyStrategy {
fn clone_box(&self) -> Box<dyn NetworkPolicyStrategy> {
Box::new(self.clone())
}
fn adjust_policy(&self, policy: NetworkPolicy, _config: &TenantConfig) -> NetworkPolicy {
let mut egress = policy
.spec
.clone()
.unwrap_or_default()
.egress
.clone()
.unwrap_or_default();
egress.push(NetworkPolicyEgressRule {
to: Some(vec![NetworkPolicyPeer {
ip_block: Some(IPBlock {
cidr: "172.18.0.0/16".into(), // TODO: query the IP range https://git.nationtech.io/NationTech/harmony/issues/108
..Default::default()
}),
..Default::default()
}]),
..Default::default()
});
NetworkPolicy {
spec: Some(NetworkPolicySpec {
egress: Some(egress),
..policy.spec.unwrap_or_default()
}),
..policy
}
}
}
#[cfg(test)]
mod tests {
use k8s_openapi::api::networking::v1::{
IPBlock, NetworkPolicy, NetworkPolicyEgressRule, NetworkPolicyPeer, NetworkPolicySpec,
};
use super::{K3dNetworkPolicyStrategy, NetworkPolicyStrategy};
#[test]
pub fn should_add_ip_block_for_k3d_harmony_server() {
let strategy = K3dNetworkPolicyStrategy::new();
let policy =
strategy.adjust_policy(NetworkPolicy::default(), &super::TenantConfig::default());
let expected_policy = NetworkPolicy {
spec: Some(NetworkPolicySpec {
egress: Some(vec![NetworkPolicyEgressRule {
to: Some(vec![NetworkPolicyPeer {
ip_block: Some(IPBlock {
cidr: "172.18.0.0/16".into(),
..Default::default()
}),
..Default::default()
}]),
..Default::default()
}]),
..Default::default()
}),
..Default::default()
};
assert_eq!(expected_policy, policy);
}
}
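This strategy file pairs with the `K8sTenantManager::new(client, strategy)` signature shown earlier in the diff: the environment picks the egress adjustment once, and the manager applies it to every generated policy. A sketch of that selection, assuming a `local_k3d` flag derived from `K8sSource`:

```rust
// Illustrative only: choose the policy adjustment per environment.
fn pick_strategy(local_k3d: bool) -> Box<dyn NetworkPolicyStrategy> {
    if local_k3d {
        Box::new(K3dNetworkPolicyStrategy::new()) // opens egress to the k3d docker network
    } else {
        Box::new(NoopNetworkPolicyStrategy::new()) // leaves the generated policy untouched
    }
}
```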

View File

@@ -60,7 +60,7 @@ impl DnsServer for OPNSenseFirewall {
}
fn get_ip(&self) -> IpAddress {
OPNSenseFirewall::get_ip(self)
OPNSenseFirewall::get_ip(&self)
}
fn get_host(&self) -> LogicalHost {

View File

@@ -48,7 +48,7 @@ impl HttpServer for OPNSenseFirewall {
async fn ensure_initialized(&self) -> Result<(), ExecutorError> {
let mut config = self.opnsense_config.write().await;
let caddy = config.caddy();
if caddy.get_full_config().is_none() {
if let None = caddy.get_full_config() {
info!("Http config not available in opnsense config, installing package");
config.install_package("os-caddy").await.map_err(|e| {
ExecutorError::UnexpectedError(format!(

View File

@@ -121,12 +121,10 @@ pub(crate) fn haproxy_xml_config_to_harmony_loadbalancer(
LoadBalancerService {
backend_servers,
listening_port: frontend.bind.parse().unwrap_or_else(|_| {
panic!(
"HAProxy frontend address should be a valid SocketAddr, got {}",
frontend.bind
)
}),
listening_port: frontend.bind.parse().expect(&format!(
"HAProxy frontend address should be a valid SocketAddr, got {}",
frontend.bind
)),
health_check,
}
})
@@ -169,28 +167,28 @@ pub(crate) fn get_health_check_for_backend(
None => return None,
};
let haproxy_health_check = haproxy
let haproxy_health_check = match haproxy
.healthchecks
.healthchecks
.iter()
.find(|h| &h.uuid == health_check_uuid)?;
.find(|h| &h.uuid == health_check_uuid)
{
Some(health_check) => health_check,
None => return None,
};
let binding = haproxy_health_check.health_check_type.to_uppercase();
let uppercase = binding.as_str();
match uppercase {
"TCP" => {
if let Some(checkport) = haproxy_health_check.checkport.content.as_ref() {
if !checkport.is_empty() {
return Some(HealthCheck::TCP(Some(checkport.parse().unwrap_or_else(
|_| {
panic!(
"HAProxy check port should be a valid port number, got {checkport}"
)
},
))));
if checkport.len() > 0 {
return Some(HealthCheck::TCP(Some(checkport.parse().expect(&format!(
"HAProxy check port should be a valid port number, got {checkport}"
)))));
}
}
Some(HealthCheck::TCP(None))
return Some(HealthCheck::TCP(None));
}
"HTTP" => {
let path: String = haproxy_health_check
@@ -357,13 +355,16 @@ mod tests {
// Create an HAProxy instance with servers
let mut haproxy = HAProxy::default();
let server = HAProxyServer {
uuid: "server1".to_string(),
address: "192.168.1.1".to_string(),
port: 80,
..Default::default()
};
let mut server = HAProxyServer::default();
server.uuid = "server1".to_string();
server.address = "192.168.1.1".to_string();
server.port = 80;
haproxy.servers.servers.push(server);
let mut server = HAProxyServer::default();
server.uuid = "server3".to_string();
server.address = "192.168.1.3".to_string();
server.port = 8080;
// Call the function
let result = get_servers_for_backend(&backend, &haproxy);
@@ -383,12 +384,10 @@ mod tests {
let backend = HAProxyBackend::default();
// Create an HAProxy instance with servers
let mut haproxy = HAProxy::default();
let server = HAProxyServer {
uuid: "server1".to_string(),
address: "192.168.1.1".to_string(),
port: 80,
..Default::default()
};
let mut server = HAProxyServer::default();
server.uuid = "server1".to_string();
server.address = "192.168.1.1".to_string();
server.port = 80;
haproxy.servers.servers.push(server);
// Call the function
let result = get_servers_for_backend(&backend, &haproxy);
@@ -403,12 +402,10 @@ mod tests {
backend.linked_servers.content = Some("server4,server5".to_string());
// Create an HAProxy instance with servers
let mut haproxy = HAProxy::default();
let server = HAProxyServer {
uuid: "server1".to_string(),
address: "192.168.1.1".to_string(),
port: 80,
..Default::default()
};
let mut server = HAProxyServer::default();
server.uuid = "server1".to_string();
server.address = "192.168.1.1".to_string();
server.port = 80;
haproxy.servers.servers.push(server);
// Call the function
let result = get_servers_for_backend(&backend, &haproxy);
@@ -419,28 +416,20 @@ mod tests {
#[test]
fn test_get_servers_for_backend_multiple_linked_servers() {
// Create a backend with multiple linked servers
#[allow(clippy::field_reassign_with_default)]
let mut backend = HAProxyBackend::default();
backend.linked_servers.content = Some("server1,server2".to_string());
//
// Create an HAProxy instance with matching servers
let mut haproxy = HAProxy::default();
let server = HAProxyServer {
uuid: "server1".to_string(),
address: "some-hostname.test.mcd".to_string(),
port: 80,
..Default::default()
};
let mut server = HAProxyServer::default();
server.uuid = "server1".to_string();
server.address = "some-hostname.test.mcd".to_string();
server.port = 80;
haproxy.servers.servers.push(server);
let server = HAProxyServer {
uuid: "server2".to_string(),
address: "192.168.1.2".to_string(),
port: 8080,
..Default::default()
};
let mut server = HAProxyServer::default();
server.uuid = "server2".to_string();
server.address = "192.168.1.2".to_string();
server.port = 8080;
haproxy.servers.servers.push(server);
// Call the function
let result = get_servers_for_backend(&backend, &haproxy);
// Check the result

View File

@@ -58,7 +58,7 @@ impl TftpServer for OPNSenseFirewall {
async fn ensure_initialized(&self) -> Result<(), ExecutorError> {
let mut config = self.opnsense_config.write().await;
let tftp = config.tftp();
if tftp.get_full_config().is_none() {
if let None = tftp.get_full_config() {
info!("Tftp config not available in opnsense config, installing package");
config.install_package("os-tftp").await.map_err(|e| {
ExecutorError::UnexpectedError(format!(

View File

@@ -13,7 +13,7 @@ pub trait ApplicationFeature<T: Topology>:
fn name(&self) -> String;
}
pub trait ApplicationFeatureClone<T: Topology> {
trait ApplicationFeatureClone<T: Topology> {
fn clone_box(&self) -> Box<dyn ApplicationFeature<T>>;
}
@@ -27,7 +27,7 @@ where
}
impl<T: Topology> Serialize for Box<dyn ApplicationFeature<T>> {
fn serialize<S>(&self, _serializer: S) -> Result<S::Ok, S::Error>
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: serde::Serializer,
{

View File

@@ -1,11 +1,13 @@
use std::{backtrace, collections::HashMap};
use k8s_openapi::{Metadata, NamespaceResourceScope, Resource};
use log::debug;
use serde::Serialize;
use serde_with::skip_serializing_none;
use serde_yaml::Value;
use url::Url;
use crate::modules::application::features::CDApplicationConfig;
#[skip_serializing_none]
#[derive(Clone, Debug, Serialize)]
#[serde(rename_all = "camelCase")]
pub struct Helm {
@@ -25,18 +27,13 @@ pub struct Helm {
pub namespace: Option<String>,
}
#[skip_serializing_none]
#[derive(Clone, Debug, Serialize)]
#[serde(rename_all = "camelCase")]
pub struct Source {
// Using a String here because the Url type enforces a URL scheme, while Helm, ArgoCD, etc. do not require one.
// That enforcement is counterproductive: the only way I've found to get OCI working is to use no scheme at all rather than oci://
#[serde(rename = "repoURL")]
pub repo_url: String,
pub repo_url: Url,
pub target_revision: Option<String>,
pub chart: String,
pub helm: Helm,
pub path: String,
}
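The explicit rename on `repo_url` matters because Argo CD spells the field `repoURL`: with `rename_all = "camelCase"` alone, serde emits `repoUrl`, which is exactly what the expected-YAML test further down shows for the rename-free side. A standalone illustration:

```rust
use serde::Serialize;

#[derive(Serialize)]
#[serde(rename_all = "camelCase")]
struct Demo {
    #[serde(rename = "repoURL")] // overrides the camelCase `repoUrl`
    repo_url: String,
}

// serde_yaml::to_string(&Demo { repo_url: "http://test".into() })
// yields `repoURL: http://test`.
```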
#[derive(Clone, Debug, Serialize)]
@@ -70,7 +67,6 @@ pub struct SyncPolicy {
pub retry: Retry,
}
#[skip_serializing_none]
#[derive(Clone, Debug, Serialize)]
#[serde(rename_all = "camelCase")]
pub struct ArgoApplication {
@@ -89,7 +85,7 @@ impl Default for ArgoApplication {
namespace: Default::default(),
project: Default::default(),
source: Source {
repo_url: "http://asdf".to_string(),
repo_url: Url::parse("http://asdf").expect("Couldn't parse to URL"),
target_revision: None,
chart: "".to_string(),
helm: Helm {
@@ -108,7 +104,6 @@ impl Default for ArgoApplication {
api_versions: vec![],
namespace: None,
},
path: "".to_string(),
},
sync_policy: SyncPolicy {
automated: Automated {
@@ -138,10 +133,10 @@ impl From<CDApplicationConfig> for ArgoApplication {
namespace: Some(value.namespace),
project: "default".to_string(),
source: Source {
repo_url: value.helm_chart_repo_url,
target_revision: Some(value.version.to_string()),
chart: value.helm_chart_name.clone(),
path: value.helm_chart_name,
repo_url: Url::parse(value.helm_chart_repo_url.to_string().as_str())
.expect("couldn't convert to URL"),
target_revision: None,
chart: value.helm_chart_name,
helm: Helm {
pass_credentials: None,
parameters: vec![],
@@ -150,7 +145,7 @@ impl From<CDApplicationConfig> for ArgoApplication {
value_files: vec![],
ignore_missing_value_files: None,
values: None,
values_object: value.values_overrides,
values_object: Some(value.values_overrides),
skip_crds: None,
skip_schema_validation: None,
version: None,
@@ -184,11 +179,12 @@ impl ArgoApplication {
pub fn to_yaml(&self) -> serde_yaml::Value {
let name = &self.name;
let namespace = if let Some(ns) = self.namespace.as_ref() {
ns
&ns
} else {
"argocd"
};
let project = &self.project;
let source = &self.source;
let yaml_str = format!(
r#"
@@ -217,7 +213,7 @@ spec:
let mut yaml_value: Value =
serde_yaml::from_str(yaml_str.as_str()).expect("couldn't parse string to YAML");
let spec = yaml_value
let mut spec = yaml_value
.get_mut("spec")
.expect("couldn't get spec from yaml")
.as_mapping_mut()
@@ -227,7 +223,7 @@ spec:
serde_yaml::to_value(&self.source).expect("couldn't serialize source to value");
let sync_policy = serde_yaml::to_value(&self.sync_policy)
.expect("couldn't serialize sync_policy to value");
let revision_history_limit = serde_yaml::to_value(self.revision_history_limit)
let revision_history_limit = serde_yaml::to_value(&self.revision_history_limit)
.expect("couldn't serialize revision_history_limit to value");
spec.insert(
@@ -256,7 +252,7 @@ spec:
#[cfg(test)]
mod tests {
use pretty_assertions::assert_eq;
use url::Url;
use crate::modules::application::features::{
ArgoApplication, Automated, Backoff, Helm, Retry, Source, SyncPolicy,
@@ -269,7 +265,7 @@ mod tests {
namespace: Some("test-ns".to_string()),
project: "test-project".to_string(),
source: Source {
repo_url: "http://test".to_string(),
repo_url: Url::parse("http://test").unwrap(),
target_revision: None,
chart: "test-chart".to_string(),
helm: Helm {
@@ -288,7 +284,6 @@ mod tests {
api_versions: vec![],
namespace: None,
},
path: "".to_string(),
},
sync_policy: SyncPolicy {
automated: Automated {
@@ -320,15 +315,24 @@ spec:
server: https://kubernetes.default.svc
namespace: test-ns
source:
repoURL: http://test
repoUrl: http://test/
targetRevision: null
chart: test-chart
helm:
passCredentials: null
parameters: []
fileParameters: []
releaseName: test-release-neame
valueFiles: []
ignoreMissingValueFiles: null
values: null
valuesObject: null
skipCrds: null
skipSchemaValidation: null
version: null
kubeVersion: null
apiVersions: []
path: ''
namespace: null
syncPolicy:
automated:
prune: false

View File

@@ -1,7 +1,7 @@
use std::{io::Write, process::Command, sync::Arc};
use async_trait::async_trait;
use log::info;
use log::{error, info};
use serde_yaml::Value;
use tempfile::NamedTempFile;
@@ -9,12 +9,15 @@ use crate::{
config::HARMONY_DATA_DIR,
data::Version,
inventory::Inventory,
modules::application::{
ApplicationFeature, HelmPackage, OCICompliant,
features::{ArgoApplication, ArgoHelmScore},
modules::{
application::{
Application, ApplicationFeature, HelmPackage, OCICompliant,
features::{ArgoApplication, ArgoHelmScore},
},
helm::chart::HelmChartScore,
},
score::Score,
topology::{DeploymentTarget, HelmCommand, K8sclient, MultiTargetTopology, Topology},
topology::{DeploymentTarget, HelmCommand, K8sclient, MultiTargetTopology, Topology, Url},
};
/// ContinuousDelivery in Harmony provides this functionality :
@@ -56,8 +59,11 @@ impl<A: OCICompliant + HelmPackage> ContinuousDelivery<A> {
chart_url: String,
image_name: String,
) -> Result<(), String> {
// TODO: This works only with local k3d installations, which is fine only for current demo purposes. We assume usage of K8sAnywhereTopology"
// https://git.nationtech.io/NationTech/harmony/issues/106
error!(
"FIXME This works only with local k3d installations, which is fine only for current demo purposes. We assume usage of K8sAnywhereTopology"
);
error!("TODO hardcoded k3d bin path is wrong");
let k3d_bin_path = (*HARMONY_DATA_DIR).join("k3d").join("k3d");
// --- 1. Import the container image into the k3d cluster ---
info!(
@@ -142,18 +148,24 @@ impl<
async fn ensure_installed(&self, topology: &T) -> Result<(), String> {
let image = self.application.image_name();
// TODO
error!(
"TODO reverse helm chart packaging and docker image build. I put helm package first for faster iterations"
);
// TODO Write CI/CD workflow files
// we can autotedect the CI type using the remote url (default to github action for github
// url, etc..)
// Or ask for it when unknown
let helm_chart = self.application.build_push_helm_package(&image).await?;
info!("Pushed new helm chart {helm_chart}");
// TODO: Make building image configurable/skippable if image already exists (prompt)")
// https://git.nationtech.io/NationTech/harmony/issues/104
let image = self.application.build_push_oci_image().await?;
info!("Pushed new docker image {image}");
// TODO: this is a temporary hack for demo purposes, the deployment target should be driven
info!("Installing ContinuousDelivery feature");
// TODO this is a temporary hack for demo purposes, the deployment target should be driven
// by the topology only and we should not have to know how to perform tasks like this for
// which the topology should be responsible.
//
@@ -166,37 +178,42 @@ impl<
// access it. This forces every Topology to understand the concept of targets though... So
// instead I'll create a new Capability which is MultiTargetTopology and we'll see how it
// goes. It still does not feel right though.
//
// https://git.nationtech.io/NationTech/harmony/issues/106
match topology.current_target() {
DeploymentTarget::LocalDev => {
info!("Deploying {} locally...", self.application.name());
self.deploy_to_local_k3d(self.application.name(), helm_chart, image)
.await?;
}
target => {
info!("Deploying {} to target {target:?}", self.application.name());
info!("Deploying to target {target:?}");
let score = ArgoHelmScore {
namespace: "harmony-example-rust-webapp".to_string(),
namespace: "harmonydemo-staging".to_string(),
openshift: true,
domain: "argo.harmonydemo.apps.ncd0.harmony.mcd".to_string(),
domain: "argo.harmonydemo.apps.st.mcd".to_string(),
argo_apps: vec![ArgoApplication::from(CDApplicationConfig {
// helm pull oci://hub.nationtech.io/harmony/harmony-example-rust-webapp-chart --version 0.1.0
// helm pull oci://hub.nationtech.io/harmony/harmony-example-rust-webapp-chart/harmony-example-rust-webapp-chart --version 0.1.0
version: Version::from("0.1.0").unwrap(),
helm_chart_repo_url: "hub.nationtech.io/harmony".to_string(),
helm_chart_repo_url: Url::Url(url::Url::parse("oci://hub.nationtech.io/harmony/harmony-example-rust-webapp-chart/harmony-example-rust-webapp-chart").unwrap()),
helm_chart_name: "harmony-example-rust-webapp-chart".to_string(),
values_overrides: None,
values_overrides: Value::Null,
name: "harmony-demo-rust-webapp".to_string(),
namespace: "harmony-example-rust-webapp".to_string(),
namespace: "harmonydemo-staging".to_string(),
})],
};
score
.interpret(&Inventory::empty(), topology)
.create_interpret()
.execute(&Inventory::empty(), topology)
.await
.unwrap();
}
};
Ok(())
todo!("1. Create ArgoCD score that installs argo using helm chart, see if Taha's already done it
- [X] Package app (docker image, helm chart)
- [X] Push to registry
- [X] Push only if staging or prod
- [X] Deploy to local k3d when target is local
- [ ] Poke Argo
- [ ] Ensure app is up")
}
fn name(&self) -> String {
"ContinuousDelivery".to_string()
@@ -207,9 +224,9 @@ impl<
/// more CD systems
pub struct CDApplicationConfig {
pub version: Version,
pub helm_chart_repo_url: String,
pub helm_chart_repo_url: Url,
pub helm_chart_name: String,
pub values_overrides: Option<Value>,
pub values_overrides: Value,
pub name: String,
pub namespace: String,
}
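Taken together with the `From<CDApplicationConfig> for ArgoApplication` impl earlier in this diff, the config is meant to be built once and converted. A sketch using the field types from the `String`/`Option<Value>` side of this hunk, with the same demo values it shows:

```rust
fn example_argo_app() -> ArgoApplication {
    let config = CDApplicationConfig {
        version: Version::from("0.1.0").unwrap(),
        helm_chart_repo_url: "hub.nationtech.io/harmony".to_string(),
        helm_chart_name: "harmony-example-rust-webapp-chart".to_string(),
        values_overrides: None,
        name: "harmony-demo-rust-webapp".to_string(),
        namespace: "harmony-example-rust-webapp".to_string(),
    };
    ArgoApplication::from(config)
}
```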

View File

@@ -1,4 +1,5 @@
use async_trait::async_trait;
use k8s_openapi::Resource;
use non_blank_string_rs::NonBlankString;
use serde::Serialize;
use std::str::FromStr;
@@ -49,21 +50,19 @@ impl<T: Topology + K8sclient + HelmCommand> Interpret<T> for ArgoInterpret {
inventory: &Inventory,
topology: &T,
) -> Result<Outcome, InterpretError> {
self.score.interpret(inventory, topology).await?;
self.score
.create_interpret()
.execute(inventory, topology)
.await?;
let k8s_client = topology.k8s_client().await?;
k8s_client
.apply_yaml_many(&self.argo_apps.iter().map(|a| a.to_yaml()).collect(), None)
.await
.unwrap();
Ok(Outcome::success(format!(
"ArgoCD installed with {} {}",
self.argo_apps.len(),
match self.argo_apps.len() {
1 => "application",
_ => "applications",
}
"Successfully installed ArgoCD and {} Applications",
self.argo_apps.len()
)))
}
@@ -646,7 +645,7 @@ server:
# Argo CD server ingress configuration
ingress:
# -- Enable an ingress resource for the Argo CD server
enabled: true
enabled: false
# -- Specific implementation for ingress controller. One of `generic`, `aws` or `gke`
## Additional configuration might be required in related configuration sections
controller: generic
@@ -986,7 +985,7 @@ commitServer:
);
HelmChartScore {
namespace: Some(NonBlankString::from_str(namespace).unwrap()),
namespace: Some(NonBlankString::from_str(&namespace).unwrap()),
release_name: NonBlankString::from_str("argo-cd").unwrap(),
chart_name: NonBlankString::from_str("argo/argo-cd").unwrap(),
chart_version: Some(NonBlankString::from_str("8.1.2").unwrap()),

View File

@@ -1,104 +1,52 @@
use std::sync::Arc;
use crate::modules::application::{Application, ApplicationFeature};
use crate::modules::monitoring::application_monitoring::application_monitoring_score::ApplicationMonitoringScore;
use crate::modules::monitoring::kube_prometheus::crd::crd_alertmanager_config::CRDPrometheus;
use async_trait::async_trait;
use log::info;
use crate::topology::MultiTargetTopology;
use crate::{
inventory::Inventory,
modules::monitoring::{
alert_channel::webhook_receiver::WebhookReceiver, ntfy::ntfy::NtfyScore,
modules::{
application::{Application, ApplicationFeature},
monitoring::{
application_monitoring::k8s_application_monitoring_score::ApplicationPrometheusMonitoringScore,
kube_prometheus::types::{NamespaceSelector, ServiceMonitor}, prometheus::prometheus::Prometheus,
},
},
score::Score,
topology::{HelmCommand, K8sclient, Topology, Url, tenant::TenantManager},
topology::{oberservability::monitoring::{AlertReceiver, AlertRule, AlertSender}, tenant::TenantManager, HelmCommand, K8sclient, Topology},
};
use crate::{
modules::prometheus::prometheus::PrometheusApplicationMonitoring,
topology::oberservability::monitoring::AlertReceiver,
};
use async_trait::async_trait;
use base64::{Engine as _, engine::general_purpose};
use log::{debug, info};
#[derive(Debug, Clone)]
pub struct Monitoring {
pub struct PrometheusMonitoring {
pub application: Arc<dyn Application>,
pub alert_receiver: Vec<Box<dyn AlertReceiver<CRDPrometheus>>>,
pub alert_receivers: Vec<Box<dyn AlertReceiver<Prometheus>>>,
pub alert_rules: Vec<Box<dyn AlertRule<Prometheus>>>,
}
#[async_trait]
impl<
T: Topology
+ HelmCommand
+ 'static
+ TenantManager
+ K8sclient
+ MultiTargetTopology
+ std::fmt::Debug
+ PrometheusApplicationMonitoring<CRDPrometheus>,
> ApplicationFeature<T> for Monitoring
{
impl<T: Topology + HelmCommand + 'static + TenantManager> ApplicationFeature<T> for PrometheusMonitoring {
async fn ensure_installed(&self, topology: &T) -> Result<(), String> {
info!("Ensuring monitoring is available for application");
let namespace = topology
.get_tenant_config()
.await
.map(|ns| ns.name.clone())
.unwrap_or_else(|| self.application.name());
let mut alerting_score = ApplicationMonitoringScore {
sender: CRDPrometheus {
namespace: namespace.clone(),
client: topology.k8s_client().await.unwrap(),
},
application: self.application.clone(),
receivers: self.alert_receiver.clone(),
};
let ntfy = NtfyScore {
namespace: namespace.clone(),
host: "ntfy.harmonydemo.apps.ncd0.harmony.mcd".to_string(),
};
ntfy.interpret(&Inventory::empty(), topology)
.await
.map_err(|e| e.to_string())?;
let ntfy_default_auth_username = "harmony";
let ntfy_default_auth_password = "harmony";
let ntfy_default_auth_header = format!(
"Basic {}",
general_purpose::STANDARD.encode(format!(
"{ntfy_default_auth_username}:{ntfy_default_auth_password}"
))
);
debug!("ntfy_default_auth_header: {ntfy_default_auth_header}");
let ntfy_default_auth_param = general_purpose::STANDARD
.encode(ntfy_default_auth_header)
.replace("=", "");
debug!("ntfy_default_auth_param: {ntfy_default_auth_param}");
let ntfy_receiver = WebhookReceiver {
name: "ntfy-webhook".to_string(),
url: Url::Url(
url::Url::parse(
format!(
"http://ntfy.{}.svc.cluster.local/rust-web-app?auth={ntfy_default_auth_param}",
namespace.clone()
)
.as_str(),
)
.unwrap(),
),
let ns = self.application.name();
let mut service_monitor = ServiceMonitor::default();
service_monitor.name = ns.clone();
service_monitor.namespace = ns.clone();
service_monitor.namespace_selector = Some(NamespaceSelector {
any: true,
match_names: vec![ns.clone()],
});
let alerting_score = ApplicationPrometheusMonitoringScore {
namespace: ns,
receivers: self.alert_receivers.clone(),
rules: self.alert_rules.clone(),
service_monitors: vec![service_monitor],
};
alerting_score.receivers.push(Box::new(ntfy_receiver));
alerting_score
.interpret(&Inventory::empty(), topology)
.create_interpret()
.execute(&Inventory::empty(), topology)
.await
.map_err(|e| e.to_string())?;
.unwrap();
Ok(())
}
fn name(&self) -> String {

View File

@@ -5,28 +5,19 @@ mod rust;
use std::sync::Arc;
pub use feature::*;
use log::debug;
use log::info;
pub use oci::*;
pub use rust::*;
use async_trait::async_trait;
use serde::Serialize;
use crate::{
data::{Id, Version},
instrumentation::{self, HarmonyEvent},
interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
inventory::Inventory,
topology::Topology,
};
#[derive(Clone, Debug)]
pub enum ApplicationFeatureStatus {
Installing,
Installed,
Failed { details: String },
}
pub trait Application: std::fmt::Debug + Send + Sync {
fn name(&self) -> String;
}
@@ -45,7 +36,7 @@ impl<A: Application, T: Topology + std::fmt::Debug> Interpret<T> for Application
topology: &T,
) -> Result<Outcome, InterpretError> {
let app_name = self.application.name();
debug!(
info!(
"Preparing {} features [{}] for application {app_name}",
self.features.len(),
self.features
@@ -55,41 +46,22 @@ impl<A: Application, T: Topology + std::fmt::Debug> Interpret<T> for Application
.join(", ")
);
for feature in self.features.iter() {
instrumentation::instrument(HarmonyEvent::ApplicationFeatureStateChanged {
topology: topology.name().into(),
application: self.application.name(),
feature: feature.name(),
status: ApplicationFeatureStatus::Installing,
})
.unwrap();
info!(
"Installing feature {} for application {app_name}",
feature.name()
);
let _ = match feature.ensure_installed(topology).await {
Ok(()) => {
instrumentation::instrument(HarmonyEvent::ApplicationFeatureStateChanged {
topology: topology.name().into(),
application: self.application.name(),
feature: feature.name(),
status: ApplicationFeatureStatus::Installed,
})
.unwrap();
}
Ok(()) => (),
Err(msg) => {
instrumentation::instrument(HarmonyEvent::ApplicationFeatureStateChanged {
topology: topology.name().into(),
application: self.application.name(),
feature: feature.name(),
status: ApplicationFeatureStatus::Failed {
details: msg.clone(),
},
})
.unwrap();
return Err(InterpretError::new(format!(
"Application Interpret failed to install feature : {msg}"
)));
}
};
}
Ok(Outcome::success("Application created".to_string()))
todo!(
"Do I need to do anything more than this here?? I feel like the Application trait itself should expose something like ensure_ready but its becoming redundant. We'll see as this evolves."
)
}
fn get_name(&self) -> InterpretName {
@@ -108,12 +80,3 @@ impl<A: Application, T: Topology + std::fmt::Debug> Interpret<T> for Application
todo!()
}
}
impl Serialize for dyn Application {
fn serialize<S>(&self, _serializer: S) -> Result<S::Ok, S::Error>
where
S: serde::Serializer,
{
todo!()
}
}
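The blanket impl above panics via `todo!()` as soon as a `dyn Application` is serialized. A hedged alternative sketch that degrades gracefully instead, using the one datum every `Application` exposes:

```rust
impl Serialize for dyn Application {
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: serde::Serializer,
    {
        // Fall back to the application name rather than panicking.
        serializer.serialize_str(&self.name())
    }
}
```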

View File

@@ -1,18 +1,14 @@
use std::fs;
use std::path::{Path, PathBuf};
use std::path::PathBuf;
use std::process;
use std::sync::Arc;
use async_trait::async_trait;
use bollard::query_parameters::PushImageOptionsBuilder;
use bollard::{Docker, body_full};
use dockerfile_builder::Dockerfile;
use dockerfile_builder::instruction::{CMD, COPY, ENV, EXPOSE, FROM, RUN, USER, WORKDIR};
use dockerfile_builder::instruction_builder::CopyBuilder;
use futures_util::StreamExt;
use log::{debug, info, log_enabled};
use log::{debug, error, info};
use serde::Serialize;
use tar::Archive;
use crate::config::{REGISTRY_PROJECT, REGISTRY_URL};
use crate::{
@@ -46,7 +42,7 @@ where
}
fn name(&self) -> String {
format!("{} [ApplicationScore]", self.application.name())
format!("Application: {}", self.application.name())
}
}
@@ -109,20 +105,22 @@ impl OCICompliant for RustWebapp {
// It's async to match the trait definition, though the underlying docker commands are blocking.
info!("Starting OCI image build and push for '{}'", self.name);
// 1. Build the image by calling the synchronous helper function.
let image_tag = self.image_name();
self.build_docker_image(&image_tag)
.await
// 1. Build the local image by calling the synchronous helper function.
let local_image_name = self.local_image_name();
self.build_docker_image(&local_image_name)
.map_err(|e| format!("Failed to build Docker image: {}", e))?;
info!("Successfully built Docker image: {}", image_tag);
info!(
"Successfully built local Docker image: {}",
local_image_name
);
let remote_image_name = self.image_name();
// 2. Push the image to the registry.
self.push_docker_image(&image_tag)
.await
self.push_docker_image(&local_image_name, &remote_image_name)
.map_err(|e| format!("Failed to push Docker image: {}", e))?;
info!("Successfully pushed Docker image to: {}", image_tag);
info!("Successfully pushed Docker image to: {}", remote_image_name);
Ok(image_tag)
Ok(remote_image_name)
}
fn local_image_name(&self) -> String {
@@ -155,74 +153,68 @@ impl RustWebapp {
}
/// Builds the Docker image using the generated Dockerfile.
pub async fn build_docker_image(
pub fn build_docker_image(
&self,
image_name: &str,
) -> Result<String, Box<dyn std::error::Error>> {
debug!("Generating Dockerfile for '{}'", self.name);
let _dockerfile_path = self.build_dockerfile()?;
info!("Generating Dockerfile for '{}'", self.name);
let dockerfile_path = self.build_dockerfile()?;
let docker = Docker::connect_with_socket_defaults().unwrap();
let quiet = !log_enabled!(log::Level::Debug);
let build_image_options = bollard::query_parameters::BuildImageOptionsBuilder::default()
.dockerfile("Dockerfile.harmony")
.t(image_name)
.q(quiet)
.version(bollard::query_parameters::BuilderVersion::BuilderV1)
.platform("linux/x86_64");
let mut temp_tar_builder = tar::Builder::new(Vec::new());
temp_tar_builder
.append_dir_all("", self.project_root.clone())
.unwrap();
let archive = temp_tar_builder
.into_inner()
.expect("couldn't finish creating tar");
let archived_files = Archive::new(archive.as_slice())
.entries()
.unwrap()
.map(|entry| entry.unwrap().path().unwrap().into_owned())
.collect::<Vec<_>>();
debug!("files in docker tar: {:#?}", archived_files);
let mut image_build_stream = docker.build_image(
build_image_options.build(),
None,
Some(body_full(archive.into())),
info!(
"Building Docker image with file {} from root {}",
dockerfile_path.to_string_lossy(),
self.project_root.to_string_lossy()
);
let output = process::Command::new("docker")
.args([
"build",
"--file",
dockerfile_path.to_str().unwrap(),
"-t",
&image_name,
self.project_root.to_str().unwrap(),
])
.spawn()?
.wait_with_output()?;
while let Some(msg) = image_build_stream.next().await {
debug!("Message: {msg:?}");
}
self.check_output(&output, "Failed to build Docker image")?;
Ok(image_name.to_string())
}
/// Tags and pushes a Docker image to the configured remote registry.
async fn push_docker_image(
fn push_docker_image(
&self,
image_tag: &str,
image_name: &str,
full_tag: &str,
) -> Result<String, Box<dyn std::error::Error>> {
debug!("Pushing docker image {image_tag}");
info!("Pushing docker image {full_tag}");
let docker = Docker::connect_with_socket_defaults().unwrap();
// let push_options = PushImageOptionsBuilder::new().tag(tag);
let mut push_image_stream = docker.push_image(
image_tag,
Some(PushImageOptionsBuilder::new().build()),
None,
// Tag the image for the remote registry.
let output = process::Command::new("docker")
.args(["tag", image_name, &full_tag])
.spawn()?
.wait_with_output()?;
self.check_output(&output, "Tagging docker image failed")?;
debug!(
"docker tag output: stdout: {}, stderr: {}",
String::from_utf8_lossy(&output.stdout),
String::from_utf8_lossy(&output.stderr)
);
while let Some(msg) = push_image_stream.next().await {
debug!("Message: {msg:?}");
}
// Push the image.
let output = process::Command::new("docker")
.args(["push", &full_tag])
.spawn()?
.wait_with_output()?;
self.check_output(&output, "Pushing docker image failed")?;
debug!(
"docker push output: stdout: {}, stderr: {}",
String::from_utf8_lossy(&output.stdout),
String::from_utf8_lossy(&output.stderr)
);
Ok(image_tag.to_string())
Ok(full_tag.to_string())
}
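On the bollard side of this hunk, the push is a stream of progress messages and the code only logs them. A hedged sketch with the same `push_image` call shape that also surfaces registry errors from the stream:

```rust
use bollard::Docker;
use bollard::query_parameters::PushImageOptionsBuilder;
use futures_util::StreamExt;

async fn push_image(tag: &str) -> Result<(), bollard::errors::Error> {
    let docker = Docker::connect_with_socket_defaults()?;
    let mut stream = docker.push_image(tag, Some(PushImageOptionsBuilder::new().build()), None);
    while let Some(msg) = stream.next().await {
        log::debug!("push progress: {:?}", msg?); // propagate registry errors
    }
    Ok(())
}
```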
/// Checks the output of a process command for success.
@@ -288,8 +280,9 @@ impl RustWebapp {
.unwrap(),
);
// Copy the compiled binary from the builder stage.
// TODO: Should not be using score name here, instead should use name from Cargo.toml
// https://git.nationtech.io/NationTech/harmony/issues/105
error!(
"FIXME Should not be using score name here, instead should use name from Cargo.toml"
);
let binary_path_in_builder = format!("/app/target/release/{}", self.name);
let binary_path_in_final = format!("/home/appuser/{}", self.name);
dockerfile.push(
@@ -327,8 +320,9 @@ impl RustWebapp {
));
// Copy only the compiled binary from the builder stage.
// TODO: Should not be using score name here, instead should use name from Cargo.toml
// https://git.nationtech.io/NationTech/harmony/issues/105
error!(
"FIXME Should not be using score name here, instead should use name from Cargo.toml"
);
let binary_path_in_builder = format!("/app/target/release/{}", self.name);
let binary_path_in_final = format!("/usr/local/bin/{}", self.name);
dockerfile.push(
@@ -355,11 +349,7 @@ impl RustWebapp {
image_url: &str,
) -> Result<PathBuf, Box<dyn std::error::Error>> {
let chart_name = format!("{}-chart", self.name);
let chart_dir = self
.project_root
.join(".harmony_generated")
.join("helm")
.join(&chart_name);
let chart_dir = self.project_root.join("helm").join(&chart_name);
let templates_dir = chart_dir.join("templates");
fs::create_dir_all(&templates_dir)?;
@@ -426,7 +416,7 @@ ingress:
Expand the name of the chart.
*/}}
{{- define "chart.name" -}}
{{- default .Chart.Name $.Values.nameOverride | trunc 63 | trimSuffix "-" }}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
@@ -434,7 +424,7 @@ Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
*/}}
{{- define "chart.fullname" -}}
{{- $name := default .Chart.Name $.Values.nameOverride }}
{{- $name := default .Chart.Name .Values.nameOverride }}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
{{- end }}
"#;
@@ -447,12 +437,12 @@ kind: Service
metadata:
name: {{ include "chart.fullname" . }}
spec:
type: {{ $.Values.service.type }}
type: {{ .Values.service.type }}
ports:
- name: main
port: {{ $.Values.service.port | default 3000 }}
targetPort: {{ $.Values.service.port | default 3000 }}
- port: {{ .Values.service.port }}
targetPort: 3000
protocol: TCP
name: http
selector:
app: {{ include "chart.name" . }}
"#;
@@ -465,7 +455,7 @@ kind: Deployment
metadata:
name: {{ include "chart.fullname" . }}
spec:
replicas: {{ $.Values.replicaCount }}
replicas: {{ .Values.replicaCount }}
selector:
matchLabels:
app: {{ include "chart.name" . }}
@@ -476,28 +466,28 @@ spec:
spec:
containers:
- name: {{ .Chart.Name }}
image: "{{ $.Values.image.repository }}:{{ $.Values.image.tag | default .Chart.AppVersion }}"
imagePullPolicy: {{ $.Values.image.pullPolicy }}
image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
imagePullPolicy: {{ .Values.image.pullPolicy }}
ports:
- name: main
containerPort: {{ $.Values.service.port | default 3000 }}
- name: http
containerPort: 3000
protocol: TCP
"#;
fs::write(templates_dir.join("deployment.yaml"), deployment_yaml)?;
// Create templates/ingress.yaml
let ingress_yaml = r#"
{{- if $.Values.ingress.enabled -}}
{{- if .Values.ingress.enabled -}}
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: {{ include "chart.fullname" . }}
annotations:
{{- toYaml $.Values.ingress.annotations | nindent 4 }}
{{- toYaml .Values.ingress.annotations | nindent 4 }}
spec:
{{- if $.Values.ingress.tls }}
{{- if .Values.ingress.tls }}
tls:
{{- range $.Values.ingress.tls }}
{{- range .Values.ingress.tls }}
- hosts:
{{- range .hosts }}
- {{ . | quote }}
@@ -506,7 +496,7 @@ spec:
{{- end }}
{{- end }}
rules:
{{- range $.Values.ingress.hosts }}
{{- range .Values.ingress.hosts }}
- host: {{ .host | quote }}
http:
paths:
@@ -517,7 +507,7 @@ spec:
service:
name: {{ include "chart.fullname" $ }}
port:
number: {{ $.Values.service.port | default 3000 }}
number: 3000
{{- end }}
{{- end }}
{{- end }}
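A note on the `$` prefixes in this hunk: inside Helm's `{{- range }}` and `{{- with }}` blocks the dot rebinds to the current element, so bare `.Values` only resolves where the dot still points at the template root. `$` always refers to the root context, which is why the `$.Values.ingress.hosts` spelling and the `include "chart.fullname" $` call are the scope-safe variants.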
@@ -528,26 +518,26 @@ spec:
}
/// Packages a Helm chart directory into a .tgz file.
fn package_helm_chart(&self, chart_dir: &Path) -> Result<PathBuf, Box<dyn std::error::Error>> {
fn package_helm_chart(
&self,
chart_dir: &PathBuf,
) -> Result<PathBuf, Box<dyn std::error::Error>> {
let chart_dirname = chart_dir.file_name().expect("Should find a chart dirname");
debug!(
info!(
"Launching `helm package {}` cli with CWD {}",
chart_dirname.to_string_lossy(),
&self
.project_root
.join(".harmony_generated")
.join("helm")
.to_string_lossy()
&self.project_root.join("helm").to_string_lossy()
);
let output = process::Command::new("helm")
.args(["package", chart_dirname.to_str().unwrap()])
.current_dir(self.project_root.join(".harmony_generated").join("helm")) // Run package from the parent dir
.current_dir(&self.project_root.join("helm")) // Run package from the parent dir
.output()?;
self.check_output(&output, "Failed to package Helm chart")?;
// Helm prints the path of the created chart to stdout.
let tgz_name = String::from_utf8(output.stdout)?
.trim()
.split_whitespace()
.last()
.unwrap_or_default()
@@ -557,24 +547,20 @@ spec:
}
// The output from helm is relative, so we join it with the execution directory.
Ok(self
.project_root
.join(".harmony_generated")
.join("helm")
.join(tgz_name))
Ok(self.project_root.join("helm").join(tgz_name))
}
/// Pushes a packaged Helm chart to an OCI registry.
fn push_helm_chart(
&self,
packaged_chart_path: &Path,
packaged_chart_path: &PathBuf,
) -> Result<String, Box<dyn std::error::Error>> {
// The chart name is the file stem of the .tgz file
let chart_file_name = packaged_chart_path.file_stem().unwrap().to_str().unwrap();
let oci_push_url = format!("oci://{}/{}", *REGISTRY_URL, *REGISTRY_PROJECT);
let oci_pull_url = format!("{oci_push_url}/{}-chart", self.name);
debug!(
info!(
"Pushing Helm chart {} to {}",
packaged_chart_path.to_string_lossy(),
oci_push_url

View File

@@ -41,6 +41,6 @@ impl<T: Topology + HelmCommand> Score<T> for CertManagerHelmScore {
}
fn name(&self) -> String {
"CertManagerHelmScore".to_string()
format!("CertManagerHelmScore")
}
}

View File

@@ -111,7 +111,7 @@ impl DhcpInterpret {
let boot_filename_outcome = match &self.score.boot_filename {
Some(boot_filename) => {
dhcp_server.set_boot_filename(boot_filename).await?;
dhcp_server.set_boot_filename(&boot_filename).await?;
Outcome::new(
InterpretStatus::SUCCESS,
format!("Dhcp Interpret Set boot filename to {boot_filename}"),
@@ -122,7 +122,7 @@ impl DhcpInterpret {
let filename_outcome = match &self.score.filename {
Some(filename) => {
dhcp_server.set_filename(filename).await?;
dhcp_server.set_filename(&filename).await?;
Outcome::new(
InterpretStatus::SUCCESS,
format!("Dhcp Interpret Set filename to {filename}"),
@@ -133,7 +133,7 @@ impl DhcpInterpret {
let filename64_outcome = match &self.score.filename64 {
Some(filename64) => {
dhcp_server.set_filename64(filename64).await?;
dhcp_server.set_filename64(&filename64).await?;
Outcome::new(
InterpretStatus::SUCCESS,
format!("Dhcp Interpret Set filename64 to {filename64}"),
@@ -144,7 +144,7 @@ impl DhcpInterpret {
let filenameipxe_outcome = match &self.score.filenameipxe {
Some(filenameipxe) => {
dhcp_server.set_filenameipxe(filenameipxe).await?;
dhcp_server.set_filenameipxe(&filenameipxe).await?;
Outcome::new(
InterpretStatus::SUCCESS,
format!("Dhcp Interpret Set filenameipxe to {filenameipxe}"),
@@ -209,7 +209,7 @@ impl<T: DhcpServer> Interpret<T> for DhcpInterpret {
Ok(Outcome::new(
InterpretStatus::SUCCESS,
"Dhcp Interpret execution successful".to_string(),
format!("Dhcp Interpret execution successful"),
))
}
}

View File

@@ -112,7 +112,7 @@ impl<T: Topology + DnsServer> Interpret<T> for DnsInterpret {
Ok(Outcome::new(
InterpretStatus::SUCCESS,
"Dns Interpret execution successful".to_string(),
format!("Dns Interpret execution successful"),
))
}
}

View File

@@ -55,7 +55,7 @@ impl<T: Topology + HelmCommand> Score<T> for HelmChartScore {
}
fn name(&self) -> String {
format!("{} [HelmChartScore]", self.release_name)
format!("{} {} HelmChartScore", self.release_name, self.chart_name)
}
}
@@ -90,10 +90,14 @@ impl HelmChartInterpret {
);
match add_output.status.success() {
true => Ok(()),
false => Err(InterpretError::new(format!(
"Failed to add helm repository!\n{full_output}"
))),
true => {
return Ok(());
}
false => {
return Err(InterpretError::new(format!(
"Failed to add helm repository!\n{full_output}"
)));
}
}
}
}
@@ -208,7 +212,7 @@ impl<T: Topology + HelmCommand> Interpret<T> for HelmChartInterpret {
}
let res = helm_executor.install_or_upgrade(
ns,
&ns,
&self.score.release_name,
&self.score.chart_name,
self.score.chart_version.as_ref(),
@@ -216,7 +220,6 @@ impl<T: Topology + HelmCommand> Interpret<T> for HelmChartInterpret {
yaml_path,
Some(&helm_options),
);
let status = match res {
Ok(status) => status,
Err(err) => return Err(InterpretError::new(err.to_string())),
@@ -225,27 +228,24 @@ impl<T: Topology + HelmCommand> Interpret<T> for HelmChartInterpret {
match status {
helm_wrapper_rs::HelmDeployStatus::Deployed => Ok(Outcome::new(
InterpretStatus::SUCCESS,
format!("Helm Chart {} deployed", self.score.release_name),
"Helm Chart deployed".to_string(),
)),
helm_wrapper_rs::HelmDeployStatus::PendingInstall => Ok(Outcome::new(
InterpretStatus::RUNNING,
format!("Helm Chart {} pending install...", self.score.release_name),
"Helm Chart Pending install".to_string(),
)),
helm_wrapper_rs::HelmDeployStatus::PendingUpgrade => Ok(Outcome::new(
InterpretStatus::RUNNING,
format!("Helm Chart {} pending upgrade...", self.score.release_name),
"Helm Chart pending upgrade".to_string(),
)),
helm_wrapper_rs::HelmDeployStatus::Failed => Err(InterpretError::new(
"Failed to install helm chart".to_string(),
)),
helm_wrapper_rs::HelmDeployStatus::Failed => Err(InterpretError::new(format!(
"Helm Chart {} installation failed",
self.score.release_name
))),
}
}
fn get_name(&self) -> InterpretName {
InterpretName::HelmChart
todo!()
}
fn get_version(&self) -> Version {
todo!()
}

View File

@@ -77,11 +77,14 @@ impl HelmCommandExecutor {
)?;
}
let out = self.clone().run_command(
let out = match self.clone().run_command(
self.chart
.clone()
.helm_args(self.globals.chart_home.clone().unwrap()),
)?;
) {
Ok(out) => out,
Err(e) => return Err(e),
};
// TODO: don't use unwrap here
let s = String::from_utf8(out.stdout).unwrap();
@@ -95,11 +98,14 @@ impl HelmCommandExecutor {
}
pub fn version(self) -> Result<String, std::io::Error> {
let out = self.run_command(vec![
let out = match self.run_command(vec![
"version".to_string(),
"-c".to_string(),
"--short".to_string(),
])?;
]) {
Ok(out) => out,
Err(e) => return Err(e),
};
// TODO: don't use unwrap
Ok(String::from_utf8(out.stdout).unwrap())
@@ -123,11 +129,15 @@ impl HelmCommandExecutor {
None => PathBuf::from(TempDir::new()?.path()),
};
if let Some(yaml_str) = self.chart.values_inline {
let tf: TempFile = temp_file::with_contents(yaml_str.as_bytes());
self.chart
.additional_values_files
.push(PathBuf::from(tf.path()));
match self.chart.values_inline {
Some(yaml_str) => {
let tf: TempFile;
tf = temp_file::with_contents(yaml_str.as_bytes());
self.chart
.additional_values_files
.push(PathBuf::from(tf.path()));
}
None => (),
};
self.env.insert(
@@ -170,9 +180,9 @@ impl HelmChart {
match self.repo {
Some(r) => {
if r.starts_with("oci://") {
args.push(
args.push(String::from(
r.trim_end_matches("/").to_string() + "/" + self.name.clone().as_str(),
);
));
} else {
args.push("--repo".to_string());
args.push(r.to_string());
@@ -183,9 +193,12 @@ impl HelmChart {
None => args.push(self.name),
};
if let Some(v) = self.version {
args.push("--version".to_string());
args.push(v.to_string());
match self.version {
Some(v) => {
args.push("--version".to_string());
args.push(v.to_string());
}
None => (),
}
args
@@ -349,7 +362,7 @@ impl<T: Topology + K8sclient + HelmCommand> Interpret<T> for HelmChartInterpretV
}
fn get_name(&self) -> InterpretName {
InterpretName::HelmCommand
todo!()
}
fn get_version(&self) -> Version {
todo!()

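The helm_args change above is easier to read with concrete inputs: OCI charts are addressed directly as oci://host/path/<chart>, while classic repositories go through --repo. A self-contained sketch of the same rule, simplified to plain strings instead of the crate's HelmChart struct:

fn chart_args(repo: Option<&str>, name: &str, version: Option<&str>) -> Vec<String> {
    let mut args = Vec::new();
    match repo {
        Some(r) if r.starts_with("oci://") => {
            // OCI: the chart name is appended to the registry path.
            args.push(format!("{}/{}", r.trim_end_matches('/'), name));
        }
        Some(r) => {
            // Classic repo: URL via --repo, chart name separately.
            args.push("--repo".to_string());
            args.push(r.to_string());
            args.push(name.to_string());
        }
        None => args.push(name.to_string()),
    }
    if let Some(v) = version {
        args.push("--version".to_string());
        args.push(v.to_string());
    }
    args
}

fn main() {
    assert_eq!(
        chart_args(Some("oci://ghcr.io/grafana/helm-charts/"), "grafana", None),
        vec!["oci://ghcr.io/grafana/helm-charts/grafana"]
    );
    assert_eq!(
        chart_args(Some("https://charts.sarabsingh.com"), "ntfy", Some("0.1.7")),
        vec!["--repo", "https://charts.sarabsingh.com", "ntfy", "--version", "0.1.7"]
    );
}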
View File

@@ -10,25 +10,14 @@ use crate::{
topology::{HttpServer, Topology, Url},
};
/// Configure an HTTP server that is provided by the Topology
///
/// This Score will let you easily specify a file path to be served by the HTTP server
///
/// For example, if you have a folder of assets at `/var/www/assets` simply do :
///
/// ```rust,ignore
/// StaticFilesHttpScore {
/// files_to_serve: url!("file:///var/www/assets"),
/// }
/// ```
#[derive(Debug, new, Clone, Serialize)]
pub struct StaticFilesHttpScore {
pub struct HttpScore {
files_to_serve: Url,
}
impl<T: Topology + HttpServer> Score<T> for StaticFilesHttpScore {
impl<T: Topology + HttpServer> Score<T> for HttpScore {
fn create_interpret(&self) -> Box<dyn Interpret<T>> {
Box::new(StaticFilesHttpInterpret::new(self.clone()))
Box::new(HttpInterpret::new(self.clone()))
}
fn name(&self) -> String {
@@ -37,12 +26,12 @@ impl<T: Topology + HttpServer> Score<T> for StaticFilesHttpScore {
}
#[derive(Debug, new, Clone)]
pub struct StaticFilesHttpInterpret {
score: StaticFilesHttpScore,
pub struct HttpInterpret {
score: HttpScore,
}
#[async_trait]
impl<T: Topology + HttpServer> Interpret<T> for StaticFilesHttpInterpret {
impl<T: Topology + HttpServer> Interpret<T> for HttpInterpret {
async fn execute(
&self,
_inventory: &Inventory,

View File

@@ -1,7 +1,7 @@
use std::path::PathBuf;
use async_trait::async_trait;
use log::debug;
use log::info;
use serde::Serialize;
use crate::{
@@ -29,14 +29,14 @@ impl Default for K3DInstallationScore {
}
impl<T: Topology> Score<T> for K3DInstallationScore {
fn create_interpret(&self) -> Box<dyn Interpret<T>> {
fn create_interpret(&self) -> Box<dyn crate::interpret::Interpret<T>> {
Box::new(K3dInstallationInterpret {
score: self.clone(),
})
}
fn name(&self) -> String {
"K3dInstallationScore".into()
todo!()
}
}
@@ -56,15 +56,14 @@ impl<T: Topology> Interpret<T> for K3dInstallationInterpret {
self.score.installation_path.clone(),
Some(self.score.cluster_name.clone()),
);
match k3d.ensure_installed().await {
Ok(_client) => {
let msg = format!("k3d cluster '{}' installed ", self.score.cluster_name);
debug!("{msg}");
let msg = format!("k3d cluster {} is installed ", self.score.cluster_name);
info!("{msg}");
Ok(Outcome::success(msg))
}
Err(msg) => Err(InterpretError::new(format!(
"failed to ensure k3d is installed : {msg}"
"K3dInstallationInterpret failed to ensure k3d is installed : {msg}"
))),
}
}

View File

@@ -89,7 +89,7 @@ where
))
}
fn get_name(&self) -> InterpretName {
InterpretName::K8sResource
todo!()
}
fn get_version(&self) -> Version {
todo!()

View File

@@ -128,12 +128,13 @@ impl<T: Topology + K8sclient + HelmCommand> Interpret<T> for LAMPInterpret {
info!("Deploying score {deployment_score:#?}");
deployment_score.interpret(inventory, topology).await?;
deployment_score
.create_interpret()
.execute(inventory, topology)
.await?;
info!("LAMP deployment_score {deployment_score:?}");
let ingress_path = ingress_path!("/");
let lamp_ingress = K8sIngressScore {
name: fqdn!("lamp-ingress"),
host: fqdn!("test"),
@@ -143,14 +144,17 @@ impl<T: Topology + K8sclient + HelmCommand> Interpret<T> for LAMPInterpret {
.as_str()
),
port: 8080,
path: Some(ingress_path),
path: Some(ingress_path!("/")),
path_type: None,
namespace: self
.get_namespace()
.map(|nbs| fqdn!(nbs.to_string().as_str())),
};
lamp_ingress.interpret(inventory, topology).await?;
lamp_ingress
.create_interpret()
.execute(inventory, topology)
.await?;
info!("LAMP lamp_ingress {lamp_ingress:?}");
@@ -160,7 +164,7 @@ impl<T: Topology + K8sclient + HelmCommand> Interpret<T> for LAMPInterpret {
}
fn get_name(&self) -> InterpretName {
InterpretName::Lamp
todo!()
}
fn get_version(&self) -> Version {
@@ -209,7 +213,7 @@ impl LAMPInterpret {
repository: None,
};
score.interpret(inventory, topology).await
score.create_interpret().execute(inventory, topology).await
}
fn build_dockerfile(&self, score: &LAMPScore) -> Result<PathBuf, Box<dyn std::error::Error>> {
let mut dockerfile = Dockerfile::new();

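Several hunks in this diff trade `score.interpret(inventory, topology)` for the longer `score.create_interpret().execute(inventory, topology)`. The short form is presumably a convenience helper on Score; a sketch of what such a helper would look like (assumed, not confirmed by this diff):

#[async_trait]
trait ScoreInterpretExt<T: Topology>: Score<T> {
    // Equivalent to create_interpret().execute(...), just quieter at call sites.
    async fn interpret(&self, inventory: &Inventory, topology: &T) -> Result<Outcome, InterpretError> {
        self.create_interpret().execute(inventory, topology).await
    }
}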
View File

@@ -14,6 +14,5 @@ pub mod monitoring;
pub mod okd;
pub mod opnsense;
pub mod prometheus;
pub mod storage;
pub mod tenant;
pub mod tftp;

View File

@@ -1,16 +1,9 @@
use std::any::Any;
use std::collections::BTreeMap;
use async_trait::async_trait;
use k8s_openapi::api::core::v1::Secret;
use kube::api::ObjectMeta;
use serde::Serialize;
use serde_json::json;
use serde_yaml::{Mapping, Value};
use crate::modules::monitoring::kube_prometheus::crd::crd_alertmanager_config::{
AlertmanagerConfig, AlertmanagerConfigSpec, CRDPrometheus,
};
use crate::{
interpret::{InterpretError, Outcome},
modules::monitoring::{
@@ -20,7 +13,10 @@ use crate::{
},
prometheus::prometheus::{Prometheus, PrometheusReceiver},
},
topology::{Url, oberservability::monitoring::AlertReceiver},
topology::{
Url,
oberservability::monitoring::{AlertReceiver, AlertSender},
},
};
#[derive(Debug, Clone, Serialize)]
@@ -29,98 +25,14 @@ pub struct DiscordWebhook {
pub url: Url,
}
#[async_trait]
impl AlertReceiver<CRDPrometheus> for DiscordWebhook {
async fn install(&self, sender: &CRDPrometheus) -> Result<Outcome, InterpretError> {
let ns = sender.namespace.clone();
let secret_name = format!("{}-secret", self.name.clone());
let webhook_key = format!("{}", self.url.clone());
let mut string_data = BTreeMap::new();
string_data.insert("webhook-url".to_string(), webhook_key.clone());
let secret = Secret {
metadata: kube::core::ObjectMeta {
name: Some(secret_name.clone()),
..Default::default()
},
string_data: Some(string_data),
type_: Some("Opaque".to_string()),
..Default::default()
};
let _ = sender.client.apply(&secret, Some(&ns)).await;
let spec = AlertmanagerConfigSpec {
data: json!({
"route": {
"receiver": self.name,
},
"receivers": [
{
"name": self.name,
"discordConfigs": [
{
"apiURL": {
"name": secret_name,
"key": "webhook-url",
},
"title": "{{ template \"discord.default.title\" . }}",
"message": "{{ template \"discord.default.message\" . }}"
}
]
}
]
}),
};
let alertmanager_configs = AlertmanagerConfig {
metadata: ObjectMeta {
name: Some(self.name.clone()),
labels: Some(std::collections::BTreeMap::from([(
"alertmanagerConfig".to_string(),
"enabled".to_string(),
)])),
namespace: Some(ns),
..Default::default()
},
spec,
};
sender
.client
.apply(&alertmanager_configs, Some(&sender.namespace))
.await?;
Ok(Outcome::success(format!(
"installed crd-alertmanagerconfigs for {}",
self.name
)))
}
fn name(&self) -> String {
"discord-webhook".to_string()
}
fn clone_box(&self) -> Box<dyn AlertReceiver<CRDPrometheus>> {
Box::new(self.clone())
}
fn as_any(&self) -> &dyn Any {
self
}
}
#[async_trait]
impl AlertReceiver<Prometheus> for DiscordWebhook {
async fn install(&self, sender: &Prometheus) -> Result<Outcome, InterpretError> {
sender.install_receiver(self).await
}
fn name(&self) -> String {
"discord-webhook".to_string()
}
fn clone_box(&self) -> Box<dyn AlertReceiver<Prometheus>> {
Box::new(self.clone())
}
fn as_any(&self) -> &dyn Any {
self
}
}
#[async_trait]
@@ -141,12 +53,6 @@ impl AlertReceiver<KubePrometheus> for DiscordWebhook {
fn clone_box(&self) -> Box<dyn AlertReceiver<KubePrometheus>> {
Box::new(self.clone())
}
fn name(&self) -> String {
"discord-webhook".to_string()
}
fn as_any(&self) -> &dyn Any {
self
}
}
#[async_trait]

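The removed install above does two things: it applies an Opaque Secret named `<receiver>-secret` holding the webhook URL under the key `webhook-url`, then applies an AlertmanagerConfig (labeled `alertmanagerConfig: enabled`) whose discordConfigs entry references that Secret. A hedged call-site sketch, where `sender` (a CRDPrometheus) and `webhook_url` (a crate Url) are assumed to be built elsewhere:

let receiver = DiscordWebhook {
    name: "team-alerts".to_string(), // hypothetical receiver name
    url: webhook_url,
};
// Provisions the Secret and the AlertmanagerConfig in sender.namespace.
receiver.install(&sender).await?;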
View File

@@ -1,19 +1,11 @@
use std::any::Any;
use async_trait::async_trait;
use kube::api::ObjectMeta;
use log::debug;
use serde::Serialize;
use serde_json::json;
use serde_yaml::{Mapping, Value};
use crate::{
interpret::{InterpretError, Outcome},
modules::monitoring::{
kube_prometheus::{
crd::crd_alertmanager_config::{
AlertmanagerConfig, AlertmanagerConfigSpec, CRDPrometheus,
},
prometheus::{KubePrometheus, KubePrometheusReceiver},
types::{AlertChannelConfig, AlertManagerChannelConfig},
},
@@ -28,81 +20,14 @@ pub struct WebhookReceiver {
pub url: Url,
}
#[async_trait]
impl AlertReceiver<CRDPrometheus> for WebhookReceiver {
async fn install(&self, sender: &CRDPrometheus) -> Result<Outcome, InterpretError> {
let spec = AlertmanagerConfigSpec {
data: json!({
"route": {
"receiver": self.name,
},
"receivers": [
{
"name": self.name,
"webhookConfigs": [
{
"url": self.url,
}
]
}
]
}),
};
let alertmanager_configs = AlertmanagerConfig {
metadata: ObjectMeta {
name: Some(self.name.clone()),
labels: Some(std::collections::BTreeMap::from([(
"alertmanagerConfig".to_string(),
"enabled".to_string(),
)])),
namespace: Some(sender.namespace.clone()),
..Default::default()
},
spec,
};
debug!(
"alert manager configs: \n{:#?}",
alertmanager_configs.clone()
);
sender
.client
.apply(&alertmanager_configs, Some(&sender.namespace))
.await?;
Ok(Outcome::success(format!(
"installed crd-alertmanagerconfigs for {}",
self.name
)))
}
fn name(&self) -> String {
"webhook-receiver".to_string()
}
fn clone_box(&self) -> Box<dyn AlertReceiver<CRDPrometheus>> {
Box::new(self.clone())
}
fn as_any(&self) -> &dyn Any {
self
}
}
#[async_trait]
impl AlertReceiver<Prometheus> for WebhookReceiver {
async fn install(&self, sender: &Prometheus) -> Result<Outcome, InterpretError> {
sender.install_receiver(self).await
}
fn name(&self) -> String {
"webhook-receiver".to_string()
}
fn clone_box(&self) -> Box<dyn AlertReceiver<Prometheus>> {
Box::new(self.clone())
}
fn as_any(&self) -> &dyn Any {
self
}
}
#[async_trait]
@@ -119,15 +44,9 @@ impl AlertReceiver<KubePrometheus> for WebhookReceiver {
async fn install(&self, sender: &KubePrometheus) -> Result<Outcome, InterpretError> {
sender.install_receiver(self).await
}
fn name(&self) -> String {
"webhook-receiver".to_string()
}
fn clone_box(&self) -> Box<dyn AlertReceiver<KubePrometheus>> {
Box::new(self.clone())
}
fn as_any(&self) -> &dyn Any {
self
}
}
#[async_trait]

View File

@@ -18,7 +18,7 @@ use crate::{
#[async_trait]
impl AlertRule<KubePrometheus> for AlertManagerRuleGroup {
async fn install(&self, sender: &KubePrometheus) -> Result<Outcome, InterpretError> {
sender.install_rule(self).await
sender.install_rule(&self).await
}
fn clone_box(&self) -> Box<dyn AlertRule<KubePrometheus>> {
Box::new(self.clone())
@@ -28,7 +28,7 @@ impl AlertRule<KubePrometheus> for AlertManagerRuleGroup {
#[async_trait]
impl AlertRule<Prometheus> for AlertManagerRuleGroup {
async fn install(&self, sender: &Prometheus) -> Result<Outcome, InterpretError> {
sender.install_rule(self).await
sender.install_rule(&self).await
}
fn clone_box(&self) -> Box<dyn AlertRule<Prometheus>> {
Box::new(self.clone())

View File

@@ -1,91 +0,0 @@
use std::sync::Arc;
use async_trait::async_trait;
use serde::Serialize;
use crate::{
data::{Id, Version},
interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
inventory::Inventory,
modules::{
application::Application,
monitoring::kube_prometheus::crd::crd_alertmanager_config::CRDPrometheus,
prometheus::prometheus::PrometheusApplicationMonitoring,
},
score::Score,
topology::{PreparationOutcome, Topology, oberservability::monitoring::AlertReceiver},
};
#[derive(Debug, Clone, Serialize)]
pub struct ApplicationMonitoringScore {
pub sender: CRDPrometheus,
pub application: Arc<dyn Application>,
pub receivers: Vec<Box<dyn AlertReceiver<CRDPrometheus>>>,
}
impl<T: Topology + PrometheusApplicationMonitoring<CRDPrometheus>> Score<T>
for ApplicationMonitoringScore
{
fn create_interpret(&self) -> Box<dyn Interpret<T>> {
Box::new(ApplicationMonitoringInterpret {
score: self.clone(),
})
}
fn name(&self) -> String {
format!(
"{} monitoring [ApplicationMonitoringScore]",
self.application.name()
)
}
}
#[derive(Debug)]
pub struct ApplicationMonitoringInterpret {
score: ApplicationMonitoringScore,
}
#[async_trait]
impl<T: Topology + PrometheusApplicationMonitoring<CRDPrometheus>> Interpret<T>
for ApplicationMonitoringInterpret
{
async fn execute(
&self,
inventory: &Inventory,
topology: &T,
) -> Result<Outcome, InterpretError> {
let result = topology
.install_prometheus(
&self.score.sender,
inventory,
Some(self.score.receivers.clone()),
)
.await;
match result {
Ok(outcome) => match outcome {
PreparationOutcome::Success { details: _ } => {
Ok(Outcome::success("Prometheus installed".into()))
}
PreparationOutcome::Noop => Ok(Outcome::noop()),
},
Err(err) => Err(InterpretError::from(err)),
}
}
fn get_name(&self) -> InterpretName {
InterpretName::ApplicationMonitoring
}
fn get_version(&self) -> Version {
todo!()
}
fn get_status(&self) -> InterpretStatus {
todo!()
}
fn get_children(&self) -> Vec<Id> {
todo!()
}
}

View File

@@ -0,0 +1,45 @@
use std::sync::{Arc, Mutex};
use log::debug;
use serde::Serialize;
use crate::{
modules::monitoring::{
kube_prometheus::types::ServiceMonitor,
prometheus::{prometheus::Prometheus, prometheus_config::HelmPrometheusConfig},
},
score::Score,
topology::{
HelmCommand, K8sclient, Topology,
oberservability::monitoring::{AlertReceiver, AlertRule, AlertingInterpret},
tenant::TenantManager,
},
};
#[derive(Clone, Debug, Serialize)]
pub struct ApplicationPrometheusMonitoringScore {
pub namespace: String,
pub receivers: Vec<Box<dyn AlertReceiver<Prometheus>>>,
pub rules: Vec<Box<dyn AlertRule<Prometheus>>>,
pub service_monitors: Vec<ServiceMonitor>,
}
impl<T: Topology + HelmCommand + TenantManager> Score<T> for ApplicationPrometheusMonitoringScore {
fn create_interpret(&self) -> Box<dyn crate::interpret::Interpret<T>> {
let config = Arc::new(Mutex::new(HelmPrometheusConfig::new()));
config
.try_lock()
.expect("couldn't lock config")
.additional_service_monitors = self.service_monitors.clone();
let ns = self.namespace.clone();
config.try_lock().expect("couldn't lock config").namespace = Some(ns.clone());
debug!("set namespace to {}", ns);
Box::new(AlertingInterpret {
sender: Prometheus { config },
receivers: self.receivers.clone(),
rules: self.rules.clone(),
})
}
fn name(&self) -> String {
"ApplicationPrometheusMonitoringScore".to_string()
}
}

View File

@@ -1 +1 @@
pub mod application_monitoring_score;
pub mod k8s_application_monitoring_score;

View File

@@ -1,17 +1,53 @@
use non_blank_string_rs::NonBlankString;
use std::str::FromStr;
use non_blank_string_rs::NonBlankString;
use crate::modules::helm::chart::HelmChartScore;
pub fn grafana_helm_chart_score(ns: &str) -> HelmChartScore {
let values = r#"
let values = format!(
r#"
rbac:
namespaced: true
sidecar:
dashboards:
enabled: true
"#
.to_string();
datasources:
datasources.yaml:
apiVersion: 1
datasources:
- name: Prometheus
type: prometheus
access: proxy
url: http://prometheus-server.{ns}.svc.cluster.local
isDefault: true
dashboardProviders:
dashboardproviders.yaml:
apiVersion: 1
providers:
- name: 'default'
orgId: 1
folder: ''
type: file
disableDeletion: false
updateIntervalSeconds: 10
allowUiUpdates: true
editable: true
options:
path: /var/lib/grafana/dashboards/default
dashboards:
default:
compute-usage:
url: https://grafana.com/api/dashboards/315/revisions/1/download
pod-health:
url: https://grafana.com/api/dashboards/15758/revisions/1/download
namespace-resources:
url: https://grafana.com/api/dashboards/9809/revisions/1/download
namespace-resources-vs-quotas:
url: https://grafana.com/api/dashboards/17044/revisions/1/download
persistent-volume-usage:
url: https://grafana.com/api/dashboards/7685/revisions/1/download
"#,
ns = ns
);
HelmChartScore {
namespace: Some(NonBlankString::from_str(ns).unwrap()),
@@ -19,9 +55,10 @@ sidecar:
chart_name: NonBlankString::from_str("oci://ghcr.io/grafana/helm-charts/grafana").unwrap(),
chart_version: None,
values_overrides: None,
values_yaml: Some(values.to_string()),
values_yaml: Some(values),
create_namespace: true,
install_only: true,
install_only: false,
repository: None,
}
}

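Note that the generated datasource URL only resolves if the Prometheus server release lives in the same namespace (a Service reachable at prometheus-server.<ns>.svc.cluster.local). A hedged usage sketch, with a placeholder namespace:

// Deploys Grafana into "my-tenant" with the Prometheus datasource and the
// grafana.com dashboards listed above pre-provisioned.
let grafana = grafana_helm_chart_score("my-tenant");
grafana.interpret(&inventory, &topology).await?;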
View File

@@ -1,50 +0,0 @@
use std::sync::Arc;
use kube::CustomResource;
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
use crate::topology::{
k8s::K8sClient,
oberservability::monitoring::{AlertReceiver, AlertSender},
};
#[derive(CustomResource, Serialize, Deserialize, Debug, Clone, JsonSchema)]
#[kube(
group = "monitoring.coreos.com",
version = "v1alpha1",
kind = "AlertmanagerConfig",
plural = "alertmanagerconfigs",
namespaced
)]
pub struct AlertmanagerConfigSpec {
#[serde(flatten)]
pub data: serde_json::Value,
}
#[derive(Debug, Clone, Serialize)]
pub struct CRDPrometheus {
pub namespace: String,
pub client: Arc<K8sClient>,
}
impl AlertSender for CRDPrometheus {
fn name(&self) -> String {
"CRDAlertManager".to_string()
}
}
impl Clone for Box<dyn AlertReceiver<CRDPrometheus>> {
fn clone(&self) -> Self {
self.clone_box()
}
}
impl Serialize for Box<dyn AlertReceiver<CRDPrometheus>> {
fn serialize<S>(&self, _serializer: S) -> Result<S::Ok, S::Error>
where
S: serde::Serializer,
{
todo!()
}
}

View File

@@ -1,52 +0,0 @@
use kube::CustomResource;
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
use super::crd_prometheuses::LabelSelector;
/// Rust CRD for `Alertmanager` from Prometheus Operator
#[derive(CustomResource, Serialize, Deserialize, Debug, Clone, JsonSchema)]
#[kube(
group = "monitoring.coreos.com",
version = "v1",
kind = "Alertmanager",
plural = "alertmanagers",
namespaced
)]
#[serde(rename_all = "camelCase")]
pub struct AlertmanagerSpec {
/// Number of replicas for HA
pub replicas: i32,
/// Selectors for AlertmanagerConfig CRDs
#[serde(default, skip_serializing_if = "Option::is_none")]
pub alertmanager_config_selector: Option<LabelSelector>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub alertmanager_config_namespace_selector: Option<LabelSelector>,
/// Optional pod template metadata (annotations, labels)
#[serde(default, skip_serializing_if = "Option::is_none")]
pub pod_metadata: Option<LabelSelector>,
/// Optional topology spread settings
#[serde(default, skip_serializing_if = "Option::is_none")]
pub version: Option<String>,
}
impl Default for AlertmanagerSpec {
fn default() -> Self {
AlertmanagerSpec {
replicas: 1,
// Match all AlertmanagerConfigs in the same namespace
alertmanager_config_namespace_selector: None,
// Empty selector matches all AlertmanagerConfigs in that namespace
alertmanager_config_selector: Some(LabelSelector::default()),
pod_metadata: None,
version: None,
}
}
}

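Since this deleted struct derived kube's CustomResource, a generated Alertmanager::new(name, spec) constructor existed alongside it. A sketch of what the defaults imply (my reading of the selectors, not verbatim crate usage):

// namespace selector = None keeps the operator in the CR's own namespace;
// the empty alertmanager_config_selector then matches every
// AlertmanagerConfig in that namespace.
let am = Alertmanager::new("alertmanager", AlertmanagerSpec::default());
// Applied via the same K8sClient used elsewhere in this diff:
// client.apply(&am, Some("my-tenant")).await?;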
View File

@@ -1,25 +0,0 @@
use crate::modules::prometheus::alerts::k8s::{
deployment::alert_deployment_unavailable,
pod::{alert_container_restarting, alert_pod_not_ready, pod_failed},
pvc::high_pvc_fill_rate_over_two_days,
service::alert_service_down,
};
use super::crd_prometheus_rules::Rule;
pub fn build_default_application_rules() -> Vec<Rule> {
let pod_failed: Rule = pod_failed().into();
let container_restarting: Rule = alert_container_restarting().into();
let pod_not_ready: Rule = alert_pod_not_ready().into();
let service_down: Rule = alert_service_down().into();
let deployment_unavailable: Rule = alert_deployment_unavailable().into();
let high_pvc_fill_rate: Rule = high_pvc_fill_rate_over_two_days().into();
vec![
pod_failed,
container_restarting,
pod_not_ready,
service_down,
deployment_unavailable,
high_pvc_fill_rate,
]
}

View File

@@ -1,153 +0,0 @@
use std::collections::BTreeMap;
use kube::CustomResource;
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
use super::crd_prometheuses::LabelSelector;
#[derive(CustomResource, Serialize, Deserialize, Debug, Clone, JsonSchema)]
#[kube(
group = "grafana.integreatly.org",
version = "v1beta1",
kind = "Grafana",
plural = "grafanas",
namespaced
)]
#[serde(rename_all = "camelCase")]
pub struct GrafanaSpec {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub config: Option<GrafanaConfig>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub admin_user: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub admin_password: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub ingress: Option<GrafanaIngress>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub persistence: Option<GrafanaPersistence>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub resources: Option<ResourceRequirements>,
}
#[derive(Serialize, Deserialize, Debug, Clone, JsonSchema)]
#[serde(rename_all = "camelCase")]
pub struct GrafanaConfig {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub log: Option<GrafanaLogConfig>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub security: Option<GrafanaSecurityConfig>,
}
#[derive(Serialize, Deserialize, Debug, Clone, JsonSchema)]
#[serde(rename_all = "camelCase")]
pub struct GrafanaLogConfig {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub mode: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub level: Option<String>,
}
#[derive(Serialize, Deserialize, Debug, Clone, JsonSchema)]
#[serde(rename_all = "camelCase")]
pub struct GrafanaSecurityConfig {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub admin_user: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub admin_password: Option<String>,
}
#[derive(Serialize, Deserialize, Debug, Clone, JsonSchema)]
#[serde(rename_all = "camelCase")]
pub struct GrafanaIngress {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub enabled: Option<bool>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub hosts: Option<Vec<String>>,
}
#[derive(Serialize, Deserialize, Debug, Clone, JsonSchema)]
#[serde(rename_all = "camelCase")]
pub struct GrafanaPersistence {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub enabled: Option<bool>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub storage_class_name: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub size: Option<String>,
}
// ------------------------------------------------------------------------------------------------
#[derive(CustomResource, Serialize, Deserialize, Debug, Clone, JsonSchema)]
#[kube(
group = "grafana.integreatly.org",
version = "v1beta1",
kind = "GrafanaDashboard",
plural = "grafanadashboards",
namespaced
)]
#[serde(rename_all = "camelCase")]
pub struct GrafanaDashboardSpec {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub resync_period: Option<String>,
pub instance_selector: LabelSelector,
pub json: String,
}
// ------------------------------------------------------------------------------------------------
#[derive(CustomResource, Serialize, Deserialize, Debug, Clone, JsonSchema)]
#[kube(
group = "grafana.integreatly.org",
version = "v1beta1",
kind = "GrafanaDatasource",
plural = "grafanadatasources",
namespaced
)]
#[serde(rename_all = "camelCase")]
pub struct GrafanaDatasourceSpec {
pub instance_selector: LabelSelector,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub allow_cross_namespace_import: Option<bool>,
pub datasource: GrafanaDatasourceConfig,
}
#[derive(Serialize, Deserialize, Debug, Clone, JsonSchema)]
#[serde(rename_all = "camelCase")]
pub struct GrafanaDatasourceConfig {
pub access: String,
pub database: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub json_data: Option<BTreeMap<String, String>>,
pub name: String,
pub r#type: String,
pub url: String,
}
// ------------------------------------------------------------------------------------------------
#[derive(Serialize, Deserialize, Debug, Clone, JsonSchema, Default)]
#[serde(rename_all = "camelCase")]
pub struct ResourceRequirements {
#[serde(default, skip_serializing_if = "BTreeMap::is_empty")]
pub limits: BTreeMap<String, String>,
#[serde(default, skip_serializing_if = "BTreeMap::is_empty")]
pub requests: BTreeMap<String, String>,
}

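As with the other CRDs in this diff, kube's derive generates constructors for these Grafana kinds. A hedged sketch pointing a GrafanaDatasource at an in-namespace Prometheus (URL and namespace are placeholders; instance_selector must match labels on the Grafana CR):

let ds = GrafanaDatasource::new(
    "prometheus",
    GrafanaDatasourceSpec {
        instance_selector: LabelSelector::default(),
        allow_cross_namespace_import: Some(false),
        datasource: GrafanaDatasourceConfig {
            access: "proxy".into(),
            database: None,
            json_data: None,
            name: "Prometheus".into(),
            r#type: "prometheus".into(),
            url: "http://prometheus-server.my-tenant.svc.cluster.local".into(),
        },
    },
);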
View File

@@ -1,57 +0,0 @@
use std::collections::BTreeMap;
use kube::CustomResource;
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
use crate::modules::monitoring::alert_rule::prometheus_alert_rule::PrometheusAlertRule;
#[derive(CustomResource, Debug, Serialize, Deserialize, Clone, JsonSchema)]
#[kube(
group = "monitoring.coreos.com",
version = "v1",
kind = "PrometheusRule",
plural = "prometheusrules",
namespaced
)]
#[serde(rename_all = "camelCase")]
pub struct PrometheusRuleSpec {
pub groups: Vec<RuleGroup>,
}
#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)]
pub struct RuleGroup {
pub name: String,
pub rules: Vec<Rule>,
}
#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)]
#[serde(rename_all = "camelCase")]
pub struct Rule {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub alert: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub expr: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub for_: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub labels: Option<std::collections::BTreeMap<String, String>>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub annotations: Option<std::collections::BTreeMap<String, String>>,
}
impl From<PrometheusAlertRule> for Rule {
fn from(value: PrometheusAlertRule) -> Self {
Rule {
alert: Some(value.alert),
expr: Some(value.expr),
for_: value.r#for,
labels: Some(value.labels.into_iter().collect::<BTreeMap<_, _>>()),
annotations: Some(value.annotations.into_iter().collect::<BTreeMap<_, _>>()),
}
}
}

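This From impl is what let the deleted crd_default_rules.rs write `pod_failed().into()`: each PrometheusAlertRule (whose fields alert, expr, r#for, labels, and annotations are visible in the impl) becomes a CRD Rule with every field wrapped in Some. A sketch of the equivalent manual construction, with hypothetical values:

let rule = Rule {
    alert: Some("PodFailed".into()),
    expr: Some(r#"kube_pod_status_phase{phase="Failed"} > 0"#.into()),
    for_: Some("5m".into()),
    labels: Some(BTreeMap::from([("severity".into(), "critical".into())])),
    annotations: None,
};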
View File

@@ -1,118 +0,0 @@
use std::collections::BTreeMap;
use kube::CustomResource;
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
use crate::modules::monitoring::kube_prometheus::types::Operator;
#[derive(CustomResource, Serialize, Deserialize, Debug, Clone, JsonSchema)]
#[kube(
group = "monitoring.coreos.com",
version = "v1",
kind = "Prometheus",
plural = "prometheuses",
namespaced
)]
#[serde(rename_all = "camelCase")]
pub struct PrometheusSpec {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub alerting: Option<PrometheusSpecAlerting>,
pub service_account_name: String,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub service_monitor_namespace_selector: Option<LabelSelector>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub service_monitor_selector: Option<LabelSelector>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub service_discovery_role: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub pod_monitor_selector: Option<LabelSelector>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub rule_selector: Option<LabelSelector>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub rule_namespace_selector: Option<LabelSelector>,
}
#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema, Default)]
#[serde(rename_all = "camelCase")]
pub struct NamespaceSelector {
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub match_names: Vec<String>,
}
/// Contains alerting configuration, specifically Alertmanager endpoints.
#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema, Default)]
pub struct PrometheusSpecAlerting {
#[serde(skip_serializing_if = "Option::is_none")]
pub alertmanagers: Option<Vec<AlertmanagerEndpoints>>,
}
/// Represents an Alertmanager endpoint configuration used by Prometheus.
#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema, Default)]
pub struct AlertmanagerEndpoints {
/// Name of the Alertmanager Service.
#[serde(skip_serializing_if = "Option::is_none")]
pub name: Option<String>,
/// Namespace of the Alertmanager Service.
#[serde(skip_serializing_if = "Option::is_none")]
pub namespace: Option<String>,
/// Port to access on the Alertmanager Service (e.g. "web").
#[serde(skip_serializing_if = "Option::is_none")]
pub port: Option<String>,
/// Scheme to use for connecting (e.g. "http").
#[serde(skip_serializing_if = "Option::is_none")]
pub scheme: Option<String>,
// Other fields like `tls_config`, `path_prefix`, etc., can be added if needed.
}
#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema, Default)]
#[serde(rename_all = "camelCase")]
pub struct LabelSelector {
#[serde(default, skip_serializing_if = "BTreeMap::is_empty")]
pub match_labels: BTreeMap<String, String>,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub match_expressions: Vec<LabelSelectorRequirement>,
}
#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)]
#[serde(rename_all = "camelCase")]
pub struct LabelSelectorRequirement {
pub key: String,
pub operator: Operator,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub values: Vec<String>,
}
impl Default for PrometheusSpec {
fn default() -> Self {
PrometheusSpec {
alerting: None,
service_account_name: "prometheus".into(),
// null means "only my namespace"
service_monitor_namespace_selector: None,
// empty selector means match all ServiceMonitors in that namespace
service_monitor_selector: Some(LabelSelector::default()),
service_discovery_role: Some("Endpoints".into()),
pod_monitor_selector: None,
rule_selector: None,
rule_namespace_selector: Some(LabelSelector::default()),
}
}
}

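For orientation, the Default above encodes the tenant-scoped behavior called out in the comments: None namespace selectors keep discovery inside the CR's own namespace, while the empty LabelSelector matches everything within it. A sketch using the kube-generated constructor for this CRD (the variable name is illustrative):

// Only discovers ServiceMonitors/PrometheusRules in its own namespace,
// but matches all of them there.
let prom = Prometheus::new("prometheus", PrometheusSpec::default());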
View File

@@ -1,203 +0,0 @@
pub fn build_default_dashboard(namespace: &str) -> String {
let dashboard = format!(
r#"{{
"annotations": {{
"list": []
}},
"editable": true,
"gnetId": null,
"graphTooltip": 0,
"id": null,
"iteration": 171105,
"panels": [
{{
"datasource": "$datasource",
"fieldConfig": {{
"defaults": {{
"unit": "short"
}},
"overrides": []
}},
"gridPos": {{
"h": 6,
"w": 6,
"x": 0,
"y": 0
}},
"id": 1,
"options": {{
"reduceOptions": {{
"calcs": ["lastNotNull"],
"fields": "",
"values": false
}}
}},
"pluginVersion": "9.0.0",
"targets": [
{{
"expr": "sum(kube_pod_status_phase{{namespace=\"{namespace}\", phase=\"Running\"}})",
"legendFormat": "",
"refId": "A"
}}
],
"title": "Pods in Namespace",
"type": "stat"
}},
{{
"datasource": "$datasource",
"fieldConfig": {{
"defaults": {{
"unit": "short"
}},
"overrides": []
}},
"gridPos": {{
"h": 6,
"w": 6,
"x": 6,
"y": 0
}},
"id": 2,
"options": {{
"reduceOptions": {{
"calcs": ["lastNotNull"],
"fields": "",
"values": false
}}
}},
"pluginVersion": "9.0.0",
"targets": [
{{
"expr": "sum(kube_pod_status_phase{{phase=\"Failed\", namespace=\"{namespace}\"}})",
"legendFormat": "",
"refId": "A"
}}
],
"title": "Pods in Failed State",
"type": "stat"
}},
{{
"datasource": "$datasource",
"fieldConfig": {{
"defaults": {{
"unit": "percentunit"
}},
"overrides": []
}},
"gridPos": {{
"h": 6,
"w": 12,
"x": 0,
"y": 6
}},
"id": 3,
"options": {{
"reduceOptions": {{
"calcs": ["lastNotNull"],
"fields": "",
"values": false
}}
}},
"pluginVersion": "9.0.0",
"targets": [
{{
"expr": "sum(kube_deployment_status_replicas_available{{namespace=\"{namespace}\"}}) / sum(kube_deployment_spec_replicas{{namespace=\"{namespace}\"}})",
"legendFormat": "",
"refId": "A"
}}
],
"title": "Deployment Health (Available / Desired)",
"type": "stat"
}},
{{
"datasource": "$datasource",
"fieldConfig": {{
"defaults": {{
"unit": "short"
}},
"overrides": []
}},
"gridPos": {{
"h": 6,
"w": 12,
"x": 0,
"y": 12
}},
"id": 4,
"options": {{
"reduceOptions": {{
"calcs": ["lastNotNull"],
"fields": "",
"values": false
}}
}},
"pluginVersion": "9.0.0",
"targets": [
{{
"expr": "sum by(pod) (rate(kube_pod_container_status_restarts_total{{namespace=\"{namespace}\"}}[5m]))",
"legendFormat": "{{{{pod}}}}",
"refId": "A"
}}
],
"title": "Container Restarts (per pod)",
"type": "timeseries"
}},
{{
"datasource": "$datasource",
"fieldConfig": {{
"defaults": {{
"unit": "short"
}},
"overrides": []
}},
"gridPos": {{
"h": 6,
"w": 12,
"x": 0,
"y": 18
}},
"id": 5,
"options": {{
"reduceOptions": {{
"calcs": ["lastNotNull"],
"fields": "",
"values": false
}}
}},
"pluginVersion": "9.0.0",
"targets": [
{{
"expr": "sum(ALERTS{{alertstate=\"firing\", namespace=\"{namespace}\"}}) or vector(0)",
"legendFormat": "",
"refId": "A"
}}
],
"title": "Firing Alerts in Namespace",
"type": "stat"
}}
],
"schemaVersion": 36,
"templating": {{
"list": [
{{
"name": "datasource",
"type": "datasource",
"pluginId": "prometheus",
"label": "Prometheus",
"query": "prometheus",
"refresh": 1,
"hide": 0,
"current": {{
"selected": true,
"text": "Prometheus",
"value": "Prometheus"
}}
}}
]
}},
"title": "Tenant Namespace Overview",
"version": 1
}}"#
);
dashboard
}

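Because the whole dashboard is one format! template, every structural JSON brace is doubled ({{ and }}) while single-brace placeholders like {namespace} interpolate, and the PromQL matchers additionally escape their quotes for JSON. A tiny self-contained illustration of the same escaping rules:

fn main() {
    let namespace = "my-tenant";
    // {{ / }} emit literal braces; {namespace} interpolates.
    let json = format!(r#"{{ "query": "kube_pod_info{{namespace=\"{namespace}\"}}" }}"#);
    assert_eq!(json, r#"{ "query": "kube_pod_info{namespace=\"my-tenant\"}" }"#);
}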
View File

@@ -1,20 +0,0 @@
use std::str::FromStr;
use non_blank_string_rs::NonBlankString;
use crate::modules::helm::chart::HelmChartScore;
pub fn grafana_operator_helm_chart_score(ns: String) -> HelmChartScore {
HelmChartScore {
namespace: Some(NonBlankString::from_str(&ns).unwrap()),
release_name: NonBlankString::from_str("grafana_operator").unwrap(),
chart_name: NonBlankString::from_str("oci://ghcr.io/grafana/helm-charts/grafana-operator")
.unwrap(),
chart_version: None,
values_overrides: None,
values_yaml: None,
create_namespace: true,
install_only: true,
repository: None,
}
}

View File

@@ -1,11 +0,0 @@
pub mod crd_alertmanager_config;
pub mod crd_alertmanagers;
pub mod crd_default_rules;
pub mod crd_grafana;
pub mod crd_prometheus_rules;
pub mod crd_prometheuses;
pub mod grafana_default_dashboard;
pub mod grafana_operator;
pub mod prometheus_operator;
pub mod role;
pub mod service_monitor;

View File

@@ -1,22 +0,0 @@
use std::str::FromStr;
use non_blank_string_rs::NonBlankString;
use crate::modules::helm::chart::HelmChartScore;
pub fn prometheus_operator_helm_chart_score(ns: String) -> HelmChartScore {
HelmChartScore {
namespace: Some(NonBlankString::from_str(&ns).unwrap()),
release_name: NonBlankString::from_str("prometheus-operator").unwrap(),
chart_name: NonBlankString::from_str(
"oci://hub.nationtech.io/harmony/nt-prometheus-operator",
)
.unwrap(),
chart_version: None,
values_overrides: None,
values_yaml: None,
create_namespace: true,
install_only: true,
repository: None,
}
}

View File

@@ -1,62 +0,0 @@
use k8s_openapi::api::{
core::v1::ServiceAccount,
rbac::v1::{PolicyRule, Role, RoleBinding, RoleRef, Subject},
};
use kube::api::ObjectMeta;
pub fn build_prom_role(role_name: String, namespace: String) -> Role {
Role {
metadata: ObjectMeta {
name: Some(role_name),
namespace: Some(namespace),
..Default::default()
},
rules: Some(vec![PolicyRule {
api_groups: Some(vec!["".into()]), // core API group
resources: Some(vec!["services".into(), "endpoints".into(), "pods".into()]),
verbs: vec!["get".into(), "list".into(), "watch".into()],
..Default::default()
}]),
}
}
pub fn build_prom_rolebinding(
role_name: String,
namespace: String,
service_account_name: String,
) -> RoleBinding {
RoleBinding {
metadata: ObjectMeta {
name: Some(format!("{}-rolebinding", role_name)),
namespace: Some(namespace.clone()),
..Default::default()
},
role_ref: RoleRef {
api_group: "rbac.authorization.k8s.io".into(),
kind: "Role".into(),
name: role_name,
},
subjects: Some(vec![Subject {
kind: "ServiceAccount".into(),
name: service_account_name,
namespace: Some(namespace.clone()),
..Default::default()
}]),
}
}
pub fn build_prom_service_account(
service_account_name: String,
namespace: String,
) -> ServiceAccount {
ServiceAccount {
automount_service_account_token: None,
image_pull_secrets: None,
metadata: ObjectMeta {
name: Some(service_account_name),
namespace: Some(namespace),
..Default::default()
},
secrets: None,
}
}

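These three builders were meant to be applied together so a namespace-scoped Prometheus service account can discover scrape targets without cluster-wide RBAC. A hedged wiring sketch (names are placeholders; apply() as used elsewhere in this diff):

let ns = "my-tenant".to_string();
let sa = build_prom_service_account("prometheus".to_string(), ns.clone());
let role = build_prom_role("prometheus-role".to_string(), ns.clone());
let binding = build_prom_rolebinding("prometheus-role".to_string(), ns.clone(), "prometheus".to_string());
// client.apply(&sa, Some(&ns)).await?;
// client.apply(&role, Some(&ns)).await?;
// client.apply(&binding, Some(&ns)).await?;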
View File

@@ -1,87 +0,0 @@
use std::collections::HashMap;
use kube::CustomResource;
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
use crate::modules::monitoring::kube_prometheus::types::{
HTTPScheme, MatchExpression, NamespaceSelector, Operator, Selector,
ServiceMonitor as KubeServiceMonitor, ServiceMonitorEndpoint,
};
/// This is the top-level struct for the ServiceMonitor Custom Resource.
/// The `#[derive(CustomResource)]` macro handles all the boilerplate for you,
/// including the `impl Resource`.
#[derive(CustomResource, Serialize, Deserialize, Debug, Clone, JsonSchema)]
#[kube(
group = "monitoring.coreos.com",
version = "v1",
kind = "ServiceMonitor",
plural = "servicemonitors",
namespaced
)]
#[serde(rename_all = "camelCase")]
pub struct ServiceMonitorSpec {
/// A label selector to select services to monitor.
pub selector: Selector,
/// A list of endpoints on the selected services to be monitored.
pub endpoints: Vec<ServiceMonitorEndpoint>,
/// Selector to select which namespaces the Kubernetes Endpoints objects
/// are discovered from.
#[serde(default, skip_serializing_if = "Option::is_none")]
pub namespace_selector: Option<NamespaceSelector>,
/// The label to use to retrieve the job name from.
#[serde(default, skip_serializing_if = "Option::is_none")]
pub job_label: Option<String>,
/// Pod-based target labels to transfer from the Kubernetes Pod onto the target.
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub pod_target_labels: Vec<String>,
/// TargetLabels transfers labels on the Kubernetes Service object to the target.
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub target_labels: Vec<String>,
}
impl Default for ServiceMonitorSpec {
fn default() -> Self {
let labels = HashMap::new();
Self {
selector: Selector {
match_labels: { labels },
match_expressions: vec![MatchExpression {
key: "app.kubernetes.io/name".into(),
operator: Operator::Exists,
values: vec![],
}],
},
endpoints: vec![ServiceMonitorEndpoint {
port: Some("http".to_string()),
path: Some("/metrics".into()),
interval: Some("30s".into()),
scheme: Some(HTTPScheme::HTTP),
..Default::default()
}],
namespace_selector: None, // only the same namespace
job_label: Some("app".into()),
pod_target_labels: vec![],
target_labels: vec![],
}
}
}
impl From<KubeServiceMonitor> for ServiceMonitorSpec {
fn from(value: KubeServiceMonitor) -> Self {
Self {
selector: value.selector,
endpoints: value.endpoints,
namespace_selector: value.namespace_selector,
job_label: value.job_label,
pod_target_labels: value.pod_target_labels,
target_labels: value.target_labels,
}
}
}

View File

@@ -27,12 +27,6 @@ pub struct KubePrometheusConfig {
pub alert_rules: Vec<AlertManagerAdditionalPromRules>,
pub additional_service_monitors: Vec<ServiceMonitor>,
}
impl Default for KubePrometheusConfig {
fn default() -> Self {
Self::new()
}
}
impl KubePrometheusConfig {
pub fn new() -> Self {
Self {
@@ -41,18 +35,18 @@ impl KubePrometheusConfig {
windows_monitoring: false,
alert_manager: true,
grafana: true,
node_exporter: true,
node_exporter: false,
prometheus: true,
kubernetes_service_monitors: true,
kubernetes_api_server: true,
kubernetes_api_server: false,
kubelet: true,
kube_controller_manager: true,
kube_etcd: true,
kube_proxy: true,
kube_controller_manager: false,
kube_etcd: false,
kube_proxy: false,
kube_state_metrics: true,
prometheus_operator: true,
core_dns: true,
kube_scheduler: true,
core_dns: false,
kube_scheduler: false,
alert_receiver_configs: vec![],
alert_rules: vec![],
additional_service_monitors: vec![],

View File

@@ -12,8 +12,8 @@ use crate::modules::{
helm::chart::HelmChartScore,
monitoring::kube_prometheus::types::{
AlertGroup, AlertManager, AlertManagerAdditionalPromRules, AlertManagerConfig,
AlertManagerConfigSelector, AlertManagerRoute, AlertManagerSpec, AlertManagerValues,
ConfigReloader, Limits, PrometheusConfig, Requests, Resources,
AlertManagerRoute, AlertManagerSpec, AlertManagerValues, ConfigReloader, Limits,
PrometheusConfig, Requests, Resources,
},
};
@@ -35,7 +35,7 @@ pub fn kube_prometheus_helm_chart_score(
let kube_proxy = config.kube_proxy.to_string();
let kube_state_metrics = config.kube_state_metrics.to_string();
let node_exporter = config.node_exporter.to_string();
let _prometheus_operator = config.prometheus_operator.to_string();
let prometheus_operator = config.prometheus_operator.to_string();
let prometheus = config.prometheus.to_string();
let resource_limit = Resources {
limits: Limits {
@@ -64,18 +64,18 @@ pub fn kube_prometheus_helm_chart_score(
indent_lines(&yaml, indent_level + 2)
)
}
let _resource_section = resource_block(&resource_limit, 2);
let resource_section = resource_block(&resource_limit, 2);
let mut values = format!(
r#"
global:
rbac:
create: true
create: false
prometheus:
enabled: {prometheus}
prometheusSpec:
resources:
requests:
requests:
cpu: 100m
memory: 500Mi
limits:
@@ -121,7 +121,7 @@ defaultRules:
windowsMonitoring:
enabled: {windows_monitoring}
resources:
requests:
requests:
cpu: 100m
memory: 150Mi
limits:
@@ -130,13 +130,13 @@ windowsMonitoring:
grafana:
enabled: {grafana}
resources:
requests:
requests:
cpu: 100m
memory: 150Mi
limits:
cpu: 200m
memory: 250Mi
initChownData:
initChownData:
resources:
requests:
cpu: 10m
@@ -157,7 +157,7 @@ kubernetesServiceMonitors:
kubeApiServer:
enabled: {kubernetes_api_server}
resources:
requests:
requests:
cpu: 100m
memory: 150Mi
limits:
@@ -166,7 +166,7 @@ kubeApiServer:
kubelet:
enabled: {kubelet}
resources:
requests:
requests:
cpu: 100m
memory: 150Mi
limits:
@@ -175,7 +175,7 @@ kubelet:
kubeControllerManager:
enabled: {kube_controller_manager}
resources:
requests:
requests:
cpu: 100m
memory: 150Mi
limits:
@@ -184,7 +184,7 @@ kubeControllerManager:
coreDns:
enabled: {core_dns}
resources:
requests:
requests:
cpu: 100m
memory: 150Mi
limits:
@@ -193,7 +193,7 @@ coreDns:
kubeEtcd:
enabled: {kube_etcd}
resources:
requests:
requests:
cpu: 100m
memory: 150Mi
limits:
@@ -202,7 +202,7 @@ kubeEtcd:
kubeScheduler:
enabled: {kube_scheduler}
resources:
requests:
requests:
cpu: 100m
memory: 150Mi
limits:
@@ -211,7 +211,7 @@ kubeScheduler:
kubeProxy:
enabled: {kube_proxy}
resources:
requests:
requests:
cpu: 100m
memory: 150Mi
limits:
@@ -221,7 +221,7 @@ kubeStateMetrics:
enabled: {kube_state_metrics}
kube-state-metrics:
resources:
requests:
requests:
cpu: 100m
memory: 150Mi
limits:
@@ -230,7 +230,7 @@ kube-state-metrics:
nodeExporter:
enabled: {node_exporter}
resources:
requests:
requests:
cpu: 100m
memory: 150Mi
limits:
@@ -238,16 +238,16 @@ nodeExporter:
memory: 250Mi
prometheus-node-exporter:
resources:
requests:
requests:
cpu: 100m
memory: 150Mi
limits:
cpu: 200m
memory: 250Mi
prometheusOperator:
enabled: true
enabled: false
resources:
requests:
requests:
cpu: 100m
memory: 150Mi
limits:
@@ -255,7 +255,7 @@ prometheusOperator:
memory: 200Mi
prometheusConfigReloader:
resources:
requests:
requests:
cpu: 100m
memory: 150Mi
limits:
@@ -267,7 +267,7 @@ prometheusOperator:
limits:
cpu: 10m
memory: 100Mi
requests:
requests:
cpu: 10m
memory: 100Mi
patch:
@@ -275,7 +275,7 @@ prometheusOperator:
limits:
cpu: 10m
memory: 100Mi
requests:
requests:
cpu: 10m
memory: 100Mi
"#,
@@ -332,11 +332,6 @@ prometheusOperator:
.push(receiver.channel_receiver.clone());
}
let mut labels = BTreeMap::new();
labels.insert("alertmanagerConfig".to_string(), "enabled".to_string());
let alert_manager_config_selector = AlertManagerConfigSelector {
match_labels: labels,
};
let alert_manager_values = AlertManagerValues {
alertmanager: AlertManager {
enabled: config.alert_manager,
@@ -352,8 +347,6 @@ prometheusOperator:
cpu: "100m".to_string(),
},
},
alert_manager_config_selector,
replicas: 2,
},
init_config_reloader: ConfigReloader {
resources: Resources {

View File

@@ -28,7 +28,7 @@ impl<T: Topology + HelmCommand + TenantManager> Score<T> for HelmPrometheusAlert
.expect("couldn't lock config")
.additional_service_monitors = self.service_monitors.clone();
Box::new(AlertingInterpret {
sender: KubePrometheus { config },
sender: KubePrometheus::new(),
receivers: self.receivers.clone(),
rules: self.rules.clone(),
})

View File

@@ -1,4 +1,3 @@
pub mod crd;
pub mod helm;
pub mod helm_prometheus_alert_score;
pub mod prometheus;

View File

@@ -55,12 +55,6 @@ pub struct KubePrometheus {
pub config: Arc<Mutex<KubePrometheusConfig>>,
}
impl Default for KubePrometheus {
fn default() -> Self {
Self::new()
}
}
impl KubePrometheus {
pub fn new() -> Self {
Self {
@@ -119,7 +113,8 @@ impl KubePrometheus {
topology: &T,
) -> Result<Outcome, InterpretError> {
kube_prometheus_helm_chart_score(self.config.clone())
.interpret(inventory, topology)
.create_interpret()
.execute(inventory, topology)
.await
}
}

View File

@@ -1,8 +1,7 @@
use std::collections::{BTreeMap, HashMap};
use async_trait::async_trait;
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
use serde::Serialize;
use serde_yaml::{Mapping, Sequence, Value};
use crate::modules::monitoring::alert_rule::prometheus_alert_rule::AlertManagerRuleGroup;
@@ -56,14 +55,6 @@ pub struct AlertManagerChannelConfig {
#[serde(rename_all = "camelCase")]
pub struct AlertManagerSpec {
pub(crate) resources: Resources,
pub replicas: u32,
pub alert_manager_config_selector: AlertManagerConfigSelector,
}
#[derive(Debug, Clone, Serialize)]
#[serde(rename_all = "camelCase")]
pub struct AlertManagerConfigSelector {
pub match_labels: BTreeMap<String, String>,
}
#[derive(Debug, Clone, Serialize)]
@@ -95,7 +86,7 @@ pub struct AlertGroup {
pub groups: Vec<AlertManagerRuleGroup>,
}
#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)]
#[derive(Debug, Clone, Serialize)]
pub enum HTTPScheme {
#[serde(rename = "http")]
HTTP,
@@ -103,7 +94,7 @@ pub enum HTTPScheme {
HTTPS,
}
#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)]
#[derive(Debug, Clone, Serialize)]
pub enum Operator {
In,
NotIn,
@@ -148,87 +139,80 @@ pub struct ServiceMonitorTLSConfig {
pub server_name: Option<String>,
}
#[derive(Serialize, Deserialize, Debug, Clone, JsonSchema, Default)]
#[derive(Debug, Clone, Serialize)]
#[serde(rename_all = "camelCase")]
pub struct ServiceMonitorEndpoint {
/// Name of the service port this endpoint refers to.
// ## Name of the endpoint's service port
// ## Mutually exclusive with targetPort
pub port: Option<String>,
/// Interval at which metrics should be scraped.
#[serde(default, skip_serializing_if = "Option::is_none")]
// ## Name or number of the endpoint's target port
// ## Mutually exclusive with port
pub target_port: Option<String>,
// ## File containing bearer token to be used when scraping targets
// ##
pub bearer_token_file: Option<String>,
// ## Interval at which metrics should be scraped
// ##
pub interval: Option<String>,
/// The HTTP path to scrape for metrics.
#[serde(default, skip_serializing_if = "Option::is_none")]
pub path: Option<String>,
// ## HTTP path to scrape for metrics
// ##
pub path: String,
/// HTTP scheme to use for scraping.
#[serde(default, skip_serializing_if = "Option::is_none")]
pub scheme: Option<HTTPScheme>,
// ## HTTP scheme to use for scraping
// ##
pub scheme: HTTPScheme,
/// Relabelings to apply to samples before scraping.
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub relabelings: Vec<RelabelConfig>,
// ## TLS configuration to use when scraping the endpoint
// ##
pub tls_config: Option<ServiceMonitorTLSConfig>,
/// MetricRelabelings to apply to samples after scraping, but before ingestion.
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub metric_relabelings: Vec<RelabelConfig>,
// ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion.
// ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#relabelconfig
// ##
// # - action: keep
// # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
// # sourceLabels: [__name__]
pub metric_relabelings: Vec<Mapping>,
// ## RelabelConfigs to apply to samples before scraping
// ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#relabelconfig
// ##
// # - sourceLabels: [__meta_kubernetes_pod_node_name]
// # separator: ;
// # regex: ^(.*)$
// # targetLabel: nodename
// # replacement: $1
// # action: replace
pub relabelings: Vec<Mapping>,
}
#[derive(Serialize, Deserialize, Debug, Clone, JsonSchema)]
#[serde(rename_all = "camelCase")]
pub struct RelabelConfig {
/// The action to perform based on the regex matching.
#[serde(default, skip_serializing_if = "Option::is_none")]
pub action: Option<String>,
/// A list of labels from which to extract values.
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub source_labels: Vec<String>,
/// Separator to be used when concatenating source_labels.
#[serde(default, skip_serializing_if = "Option::is_none")]
pub separator: Option<String>,
/// The label to which the resulting value is written.
#[serde(default, skip_serializing_if = "Option::is_none")]
pub target_label: Option<String>,
/// A regular expression to match against the concatenated source label values.
#[serde(default, skip_serializing_if = "Option::is_none")]
pub regex: Option<String>,
/// The replacement value to use.
#[serde(default, skip_serializing_if = "Option::is_none")]
pub replacement: Option<String>,
}
#[derive(Serialize, Deserialize, Debug, Clone, JsonSchema)]
#[derive(Debug, Clone, Serialize)]
#[serde(rename_all = "camelCase")]
pub struct MatchExpression {
pub key: String,
pub operator: Operator, // "In", "NotIn", "Exists", "DoesNotExist"
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub operator: Operator,
pub values: Vec<String>,
}
#[derive(Serialize, Deserialize, Debug, Clone, JsonSchema, Default)]
#[derive(Debug, Clone, Serialize)]
#[serde(rename_all = "camelCase")]
pub struct Selector {
/// A map of key-value pairs to match.
#[serde(default, skip_serializing_if = "HashMap::is_empty")]
// # label selector for services
pub match_labels: HashMap<String, String>,
/// A list of label selector requirements.
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub match_expressions: Vec<MatchExpression>,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
#[derive(Debug, Clone, Serialize)]
#[serde(rename_all = "camelCase")]
pub struct ServiceMonitor {
pub name: String,
pub namespace: String,
// # Additional labels to set used for the ServiceMonitorSelector. Together with standard labels from the chart
pub additional_labels: Option<HashMap<String, String>>,
@@ -268,15 +252,10 @@ pub struct ServiceMonitor {
pub fallback_scrape_protocol: Option<String>,
}
#[derive(Serialize, Deserialize, Debug, Clone, JsonSchema, Default)]
#[derive(Debug, Serialize, Clone)]
#[serde(rename_all = "camelCase")]
pub struct NamespaceSelector {
/// Select all namespaces.
#[serde(default, skip_serializing_if = "std::ops::Not::not")]
pub any: bool,
/// List of namespace names to select from.
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub match_names: Vec<String>,
}
@@ -284,6 +263,7 @@ impl Default for ServiceMonitor {
fn default() -> Self {
Self {
name: Default::default(),
namespace: Default::default(),
additional_labels: Default::default(),
job_label: Default::default(),
target_labels: Default::default(),
@@ -298,3 +278,19 @@ impl Default for ServiceMonitor {
}
}
}
impl Default for ServiceMonitorEndpoint {
fn default() -> Self {
Self {
port: Some("80".to_string()),
target_port: Default::default(),
bearer_token_file: Default::default(),
interval: Default::default(),
path: "/metrics".to_string(),
scheme: HTTPScheme::HTTP,
tls_config: Default::default(),
metric_relabelings: Default::default(),
relabelings: Default::default(),
}
}
}

View File

@@ -1,25 +1,9 @@
use non_blank_string_rs::NonBlankString;
use std::str::FromStr;
use crate::{modules::helm::chart::HelmChartScore, topology::DeploymentTarget};
pub fn ntfy_helm_chart_score(
namespace: String,
host: String,
target: DeploymentTarget,
) -> HelmChartScore {
// TODO: not actually the correct logic; this should be fixed by using an Ingress, which is the
// correct k8s standard.
//
// Another option is to delegate to the topology the ingress technology it wants to use: Route,
// Ingress, or other.
let route_enabled = match target {
DeploymentTarget::LocalDev => false,
DeploymentTarget::Staging => true,
DeploymentTarget::Production => true,
};
let ingress_enabled = !route_enabled;
use crate::modules::helm::chart::{HelmChartScore, HelmRepository};
pub fn ntfy_helm_chart_score(namespace: String) -> HelmChartScore {
let values = format!(
r#"
replicaCount: 1
@@ -41,14 +25,23 @@ serviceAccount:
service:
type: ClusterIP
port: 8080
port: 80
ingress:
enabled: {ingress_enabled}
enabled: false
# annotations:
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
hosts:
- host: ntfy.host.com
paths:
- path: /
pathType: ImplementationSpecific
tls: []
# - secretName: chart-example-tls
# hosts:
# - chart-example.local
route:
enabled: {route_enabled}
host: {host}
autoscaling:
enabled: false
@@ -56,7 +49,7 @@ autoscaling:
config:
enabled: true
data:
base-url: "https://{host}"
# base-url: "https://ntfy.something.com"
auth-file: "/var/cache/ntfy/user.db"
auth-default-access: "deny-all"
cache-file: "/var/cache/ntfy/cache.db"
@@ -65,8 +58,6 @@ config:
# web-root: "disable"
enable-signup: false
enable-login: "true"
enable-metrics: "true"
listen-http: ":8080"
persistence:
enabled: true
@@ -77,12 +68,16 @@ persistence:
HelmChartScore {
namespace: Some(NonBlankString::from_str(&namespace).unwrap()),
release_name: NonBlankString::from_str("ntfy").unwrap(),
chart_name: NonBlankString::from_str("oci://hub.nationtech.io/harmony/ntfy").unwrap(),
chart_version: Some(NonBlankString::from_str("0.1.7-nationtech.1").unwrap()),
chart_name: NonBlankString::from_str("sarab97/ntfy").unwrap(),
chart_version: Some(NonBlankString::from_str("0.1.7").unwrap()),
values_overrides: None,
values_yaml: Some(values.to_string()),
create_namespace: true,
install_only: false,
repository: None,
repository: Some(HelmRepository::new(
"sarab97".to_string(),
url::Url::parse("https://charts.sarabsingh.com").unwrap(),
true,
)),
}
}

View File

@@ -1,3 +1,2 @@
pub mod helm;
#[allow(clippy::module_inception)]
pub mod ntfy;

View File

@@ -1,7 +1,7 @@
use std::sync::Arc;
use async_trait::async_trait;
use log::info;
use log::debug;
use serde::Serialize;
use strum::{Display, EnumString};
@@ -11,16 +11,15 @@ use crate::{
inventory::Inventory,
modules::monitoring::ntfy::helm::ntfy_helm_chart::ntfy_helm_chart_score,
score::Score,
topology::{HelmCommand, K8sclient, MultiTargetTopology, Topology, k8s::K8sClient},
topology::{HelmCommand, K8sclient, Topology, k8s::K8sClient},
};
#[derive(Debug, Clone, Serialize)]
pub struct NtfyScore {
pub namespace: String,
pub host: String,
}
impl<T: Topology + HelmCommand + K8sclient + MultiTargetTopology> Score<T> for NtfyScore {
impl<T: Topology + HelmCommand + K8sclient> Score<T> for NtfyScore {
fn create_interpret(&self) -> Box<dyn Interpret<T>> {
Box::new(NtfyInterpret {
score: self.clone(),
@@ -28,7 +27,7 @@ impl<T: Topology + HelmCommand + K8sclient + MultiTargetTopology> Score<T> for N
}
fn name(&self) -> String {
"alert receiver [NtfyScore]".into()
format!("Ntfy")
}
}
@@ -39,21 +38,31 @@ pub struct NtfyInterpret {
#[derive(Debug, EnumString, Display)]
enum NtfyAccessMode {
#[strum(serialize = "read-write", serialize = "rw")]
#[strum(serialize = "read-write", serialize = "rw", to_string = "read-write")]
ReadWrite,
#[strum(serialize = "read-only", serialize = "ro", serialize = "read")]
#[strum(
serialize = "read-only",
serialize = "ro",
serialize = "read",
to_string = "read-only"
)]
ReadOnly,
#[strum(serialize = "write-only", serialize = "wo", serialize = "write")]
#[strum(
serialize = "write-only",
serialize = "wo",
serialize = "write",
to_string = "write-only"
)]
WriteOnly,
#[strum(serialize = "deny", serialize = "none")]
#[strum(serialize = "none", to_string = "deny")]
Deny,
}
#[derive(Debug, EnumString, Display)]
enum NtfyRole {
#[strum(serialize = "user")]
#[strum(serialize = "user", to_string = "user")]
User,
#[strum(serialize = "admin")]
#[strum(serialize = "admin", to_string = "admin")]
Admin,
}
@@ -77,7 +86,7 @@ impl NtfyInterpret {
vec![
"sh",
"-c",
format!("NTFY_PASSWORD={password} ntfy user add --role={role} --ignore-exists {username}")
format!("NTFY_PASSWORD={password} ntfy user add --role={role} {username}")
.as_str(),
],
)
@@ -85,52 +94,69 @@ impl NtfyInterpret {
Ok(())
}
async fn set_access(
&self,
k8s_client: Arc<K8sClient>,
username: &str,
topic: &str,
mode: NtfyAccessMode,
) -> Result<(), String> {
k8s_client
.exec_app(
"ntfy".to_string(),
Some(&self.score.namespace),
vec![
"sh",
"-c",
format!("ntfy access {username} {topic} {mode}").as_str(),
],
)
.await?;
Ok(())
}
}
/// We need an Ntfy interpret to wrap the HelmChartScore in order to run the score and then bootstrap the configuration inside ntfy.
#[async_trait]
impl<T: Topology + HelmCommand + K8sclient + MultiTargetTopology> Interpret<T> for NtfyInterpret {
impl<T: Topology + HelmCommand + K8sclient> Interpret<T> for NtfyInterpret {
async fn execute(
&self,
inventory: &Inventory,
topology: &T,
) -> Result<Outcome, InterpretError> {
ntfy_helm_chart_score(
self.score.namespace.clone(),
self.score.host.clone(),
topology.current_target(),
)
.interpret(inventory, topology)
.await?;
ntfy_helm_chart_score(self.score.namespace.clone())
.create_interpret()
.execute(inventory, topology)
.await?;
info!("installed ntfy helm chart");
debug!("installed ntfy helm chart");
let client = topology
.k8s_client()
.await
.expect("couldn't get k8s client");
info!("deploying ntfy...");
client
.wait_until_deployment_ready(
"ntfy".to_string(),
Some(self.score.namespace.as_str()),
Some(&self.score.namespace.as_str()),
None,
)
.await?;
info!("ntfy deployed");
debug!("created k8s client");
info!("adding user harmony");
self.add_user(client, "harmony", "harmony", Some(NtfyRole::Admin))
.await?;
info!("user added");
Ok(Outcome::success("Ntfy installed".to_string()))
debug!("exec into pod done");
Ok(Outcome::success("installed ntfy".to_string()))
}
fn get_name(&self) -> InterpretName {
InterpretName::Ntfy
todo!()
}
fn get_version(&self) -> Version {
todo!()
}
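The wrapper pattern the doc comment above describes — an interpret that first delegates to an inner Helm score, then bootstraps state inside the deployed app — reduces to roughly this shape. The trait and type names below are simplified stand-ins, not Harmony's actual API; the sketch assumes the async-trait crate (already a workspace dependency) and a tokio runtime:
use async_trait::async_trait;

#[async_trait]
trait SimpleInterpret {
    async fn execute(&self) -> Result<(), String>;
}

struct HelmInstall; // stand-in for the wrapped chart score

#[async_trait]
impl SimpleInterpret for HelmInstall {
    async fn execute(&self) -> Result<(), String> {
        Ok(()) // the chart install would run here
    }
}

struct Wrapper {
    chart: HelmInstall,
}

#[async_trait]
impl SimpleInterpret for Wrapper {
    async fn execute(&self) -> Result<(), String> {
        // 1. Run the wrapped score (the Helm install).
        self.chart.execute().await?;
        // 2. Bootstrap in-app config (users, ACLs) once the workload is up.
        Ok(())
    }
}

#[tokio::main]
async fn main() {
    Wrapper { chart: HelmInstall }.execute().await.unwrap();
}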

View File

@@ -1 +1,2 @@
pub mod prometheus_helm;
pub mod types;

View File

@@ -1,37 +1,145 @@
use std::collections::BTreeMap;
use std::str::FromStr;
use std::sync::{Arc, Mutex};
use log::debug;
use non_blank_string_rs::NonBlankString;
use serde_yaml::{Mapping, Value};
use crate::modules::{
helm::chart::HelmChartScore, monitoring::prometheus::prometheus_config::PrometheusConfig,
use crate::modules::helm::chart::HelmChartScore;
use crate::modules::monitoring::kube_prometheus::types::{
AlertGroup, AlertManager, AlertManagerConfig, AlertManagerRoute, AlertManagerSpec,
ConfigReloader, Limits, Requests, Resources,
};
use crate::modules::monitoring::prometheus::helm::types::{
AlertFile, EnabledConfig, KsmRbacConfig, KubeStateMetricsConfig, LabelSelector, Monitor,
Prometheus, PrometheusHelmValues, RbacConfig, ServerConfig, ServerRbacConfig,
};
use crate::modules::monitoring::prometheus::prometheus_config::HelmPrometheusConfig;
pub fn prometheus_helm_chart_score(config: Arc<Mutex<PrometheusConfig>>) -> HelmChartScore {
pub fn prometheus_helm_chart_score(config: Arc<Mutex<HelmPrometheusConfig>>) -> HelmChartScore {
let config = config.lock().unwrap();
let ns = config.namespace.clone().unwrap();
let values = format!(
r#"
rbac:
create: true
kube-state-metrics:
enabled: false
nodeExporter:
enabled: false
alertmanager:
enabled: false
pushgateway:
enabled: false
server:
serviceAccount:
create: false
rbac:
create: true
fullnameOverride: prometheus-{ns}
"#
let rbac_config = RbacConfig { create: false };
let ksm_config = KubeStateMetricsConfig {
enabled: true,
rbac: KsmRbacConfig {
use_cluster_role: false,
},
prometheus: Prometheus {
monitor: Monitor { enabled: true },
},
};
let mut selector_labels = BTreeMap::new();
selector_labels.insert("kubernetes.io/metadata.name".to_string(), ns.clone());
let mut kube_state_metrics_labels = BTreeMap::new();
kube_state_metrics_labels.insert(
"app.kubernetes.io/name".to_string(),
"kube-state-metrics".to_string(),
);
// Built but not yet consumed below; the leading underscore silences the unused-variable lint.
let _selector = LabelSelector {
match_labels: selector_labels,
};
let server_config = ServerConfig {
namespaces: vec![ns.clone()],
use_existing_cluster_role_name: false,
};
let mut null_receiver = Mapping::new();
null_receiver.insert(
Value::String("receiver".to_string()),
Value::String("default-receiver".to_string()),
);
null_receiver.insert(
Value::String("matchers".to_string()),
Value::Sequence(vec![Value::String("alertname!=Watchdog".to_string())]),
);
null_receiver.insert(Value::String("continue".to_string()), Value::Bool(true));
let mut alert_manager_channel_config = AlertManagerConfig {
global: Mapping::new(),
route: AlertManagerRoute {
routes: vec![Value::Mapping(null_receiver)],
},
receivers: vec![serde_yaml::from_str("name: 'default-receiver'").unwrap()],
};
for receiver in config.alert_receiver_configs.iter() {
if let Some(global) = receiver.channel_global_config.clone() {
alert_manager_channel_config
.global
.insert(global.0, global.1);
}
alert_manager_channel_config
.route
.routes
.push(receiver.channel_route.clone());
alert_manager_channel_config
.receivers
.push(receiver.channel_receiver.clone());
}
let alert_manager_values = AlertManager {
enabled: config.alert_manager,
config: alert_manager_channel_config,
alertmanager_spec: AlertManagerSpec {
resources: Resources {
limits: Limits {
memory: "100Mi".to_string(),
cpu: "100m".to_string(),
},
requests: Requests {
memory: "100Mi".to_string(),
cpu: "100m".to_string(),
},
},
},
init_config_reloader: ConfigReloader {
resources: Resources {
limits: Limits {
memory: "100Mi".to_string(),
cpu: "100m".to_string(),
},
requests: Requests {
memory: "100Mi".to_string(),
cpu: "100m".to_string(),
},
},
},
};
let mut result: BTreeMap<String, AlertFile> = BTreeMap::new();
for rule in config.alert_rules.iter() {
for (_name, group) in &rule.rules {
result
.entry("alerting_rules.yml".to_string())
.and_modify(|e| e.groups.extend(group.groups.clone()))
.or_insert(AlertFile {
groups: group.groups.clone(),
});
}
}
let final_values = PrometheusHelmValues {
rbac: rbac_config,
kube_state_metrics: ksm_config,
server: server_config,
alertmanager: alert_manager_values,
server_files: result,
additional_service_monitors: config.additional_service_monitors.clone(),
prometheus_node_exporter: EnabledConfig { enabled: false },
prometheus_pushgateway: EnabledConfig { enabled: false },
};
let values_yaml =
serde_yaml::to_string(&final_values).expect("Failed to serialize final Helm values");
debug!("full values.yaml: \n{}", values_yaml);
HelmChartScore {
namespace: Some(NonBlankString::from_str(&config.namespace.clone().unwrap()).unwrap()),
namespace: Some(NonBlankString::from_str(&ns).unwrap()),
release_name: NonBlankString::from_str("prometheus").unwrap(),
chart_name: NonBlankString::from_str(
"oci://ghcr.io/prometheus-community/charts/prometheus",
@@ -39,7 +147,7 @@ fullnameOverride: prometheus-{ns}
.unwrap(),
chart_version: None,
values_overrides: None,
values_yaml: Some(values.to_string()),
values_yaml: Some(values_yaml),
create_namespace: true,
install_only: true,
repository: None,
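A quick sanity check of the approach this refactor relies on: serde's rename attributes yield the exact Helm keys the chart expects, which is what makes the typed values safer than the old format! template. A self-contained sketch using only serde and serde_yaml, both already dependencies of this file:
use serde::Serialize;
use std::collections::BTreeMap;

#[derive(Serialize)]
#[serde(rename_all = "camelCase")]
struct Values {
    #[serde(rename = "kube-state-metrics")]
    kube_state_metrics: Toggle,
    server_files: BTreeMap<String, Vec<String>>,
}

#[derive(Serialize)]
struct Toggle {
    enabled: bool,
}

fn main() {
    let mut server_files = BTreeMap::new();
    server_files.insert("alerting_rules.yml".to_string(), Vec::new());
    let values = Values {
        kube_state_metrics: Toggle { enabled: false },
        server_files,
    };
    // Prints:
    //   kube-state-metrics:
    //     enabled: false
    //   serverFiles:
    //     alerting_rules.yml: []
    println!("{}", serde_yaml::to_string(&values).unwrap());
}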

View File

@@ -0,0 +1,94 @@
use std::collections::BTreeMap;
use serde::Serialize;
use crate::modules::monitoring::{
alert_rule::prometheus_alert_rule::AlertManagerRuleGroup,
kube_prometheus::types::{
AlertGroup, AlertManager, AlertManagerAdditionalPromRules, AlertManagerValues,
ServiceMonitor,
},
};
#[derive(Debug, Clone, Serialize)]
pub struct RuleFilesConfig {
#[serde(rename = "ruleFiles")]
pub files: BTreeMap<String, AlertGroup>,
}
#[derive(Serialize, Debug)]
#[serde(rename_all = "camelCase")]
pub struct PrometheusHelmValues {
pub rbac: RbacConfig,
#[serde(rename = "kube-state-metrics")]
pub kube_state_metrics: KubeStateMetricsConfig,
pub server: ServerConfig,
pub alertmanager: AlertManager, // Alertmanager settings assembled in prometheus_helm.rs
#[serde(rename = "serverFiles")]
pub server_files: BTreeMap<String, AlertFile>, // Rule files keyed by file name (e.g. alerting_rules.yml)
pub additional_service_monitors: Vec<ServiceMonitor>, // Extra ServiceMonitors taken from the config
#[serde(rename = "prometheus-node-exporter")]
pub prometheus_node_exporter: EnabledConfig,
#[serde(rename = "prometheus-pushgateway")]
pub prometheus_pushgateway: EnabledConfig,
}
#[derive(Serialize, Debug, Clone)]
pub struct AlertFile {
pub groups: Vec<AlertManagerRuleGroup>,
}
#[derive(Serialize, Debug)]
#[serde(rename_all = "camelCase")]
pub struct RbacConfig {
pub create: bool,
}
#[derive(Serialize, Debug)]
#[serde(rename_all = "camelCase")]
pub struct KubeStateMetricsConfig {
pub enabled: bool,
pub rbac: KsmRbacConfig,
pub prometheus: Prometheus,
}
#[derive(Serialize, Debug)]
#[serde(rename_all = "camelCase")]
pub struct Prometheus {
pub monitor: Monitor,
}
#[derive(Serialize, Debug)]
#[serde(rename_all = "camelCase")]
pub struct Monitor {
pub enabled: bool,
}
#[derive(Serialize, Debug)]
#[serde(rename_all = "camelCase")]
pub struct KsmRbacConfig {
pub use_cluster_role: bool,
}
#[derive(Serialize, Debug)]
#[serde(rename_all = "camelCase")]
pub struct ServerConfig {
pub namespaces: Vec<String>,
pub use_existing_cluster_role_name: bool,
}
#[derive(Serialize, Debug)]
#[serde(rename_all = "camelCase")]
pub struct ServerRbacConfig {
pub create: bool,
pub use_cluster_role: bool,
pub namespaced: bool,
}
#[derive(Serialize, Debug, Clone)]
#[serde(rename_all = "camelCase")]
pub struct LabelSelector {
#[serde(rename = "matchLabels")]
pub match_labels: BTreeMap<String, String>,
}
#[derive(Serialize, Debug)]
pub struct EnabledConfig {
pub enabled: bool,
}
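In prometheus_helm.rs above, `server_files` is populated by folding every rule group into one `alerting_rules.yml` entry through the BTreeMap entry API. The same merge pattern in isolation (the struct and names here are illustrative):
use std::collections::BTreeMap;

struct AlertFile {
    groups: Vec<String>,
}

fn main() {
    let mut files: BTreeMap<String, AlertFile> = BTreeMap::new();
    for group in ["cpu_alerts", "memory_alerts"] {
        files
            .entry("alerting_rules.yml".to_string())
            // Extend the existing file's groups...
            .and_modify(|f| f.groups.push(group.to_string()))
            // ...or create the file with the first group.
            .or_insert_with(|| AlertFile { groups: vec![group.to_string()] });
    }
    assert_eq!(files["alerting_rules.yml"].groups.len(), 2);
}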

View File

@@ -1,4 +1,3 @@
pub mod helm;
#[allow(clippy::module_inception)]
pub mod prometheus;
pub mod prometheus_config;

View File

@@ -14,7 +14,7 @@ use crate::{
},
score::Score,
topology::{
HelmCommand, Topology,
HelmCommand, K8sclient, Topology,
installable::Installable,
oberservability::monitoring::{AlertReceiver, AlertRule, AlertSender},
tenant::TenantManager,
@@ -22,12 +22,12 @@ use crate::{
};
use super::{
helm::prometheus_helm::prometheus_helm_chart_score, prometheus_config::PrometheusConfig,
helm::prometheus_helm::prometheus_helm_chart_score, prometheus_config::HelmPrometheusConfig,
};
#[derive(Debug)]
pub struct Prometheus {
pub config: Arc<Mutex<PrometheusConfig>>,
pub config: Arc<Mutex<HelmPrometheusConfig>>,
}
#[async_trait]
@@ -37,27 +37,20 @@ impl AlertSender for Prometheus {
}
}
impl Default for Prometheus {
fn default() -> Self {
Self::new()
}
}
impl Prometheus {
pub fn new() -> Self {
Self {
config: Arc::new(Mutex::new(PrometheusConfig::new())),
config: Arc::new(Mutex::new(HelmPrometheusConfig::new())),
}
}
pub async fn configure_with_topology<T: TenantManager>(&self, topology: &T) {
let ns = topology
.get_tenant_config()
.await
.map(|cfg| cfg.name.clone())
.unwrap_or_else(|| "monitoring".to_string());
if let Some(cfg) = topology.get_tenant_config().await {
debug!("Overriding namespace with tenant config: {}", cfg.name);
self.config.lock().unwrap().namespace = Some(cfg.name.clone());
} else {
debug!("No tenant config found; keeping existing namespace.");
}
error!("This must be refactored, see comments in pr #74");
debug!("NS: {}", ns);
self.config.lock().unwrap().namespace = Some(ns);
}
pub async fn install_receiver(
@@ -100,7 +93,8 @@ impl Prometheus {
topology: &T,
) -> Result<Outcome, InterpretError> {
prometheus_helm_chart_score(self.config.clone())
.interpret(inventory, topology)
.create_interpret()
.execute(inventory, topology)
.await
}
pub async fn install_grafana<T: Topology + HelmCommand + Send + Sync>(
@@ -115,12 +109,13 @@ impl Prometheus {
if let Some(ns) = namespace.as_deref() {
grafana_helm_chart_score(ns)
.interpret(inventory, topology)
.create_interpret()
.execute(inventory, topology)
.await
} else {
Err(InterpretError::new(
"could not install grafana, missing namespace".to_string(),
))
Err(InterpretError::new(format!(
"could not install grafana, missing namespace",
)))
}
}
}

Some files were not shown because too many files have changed in this diff