Compare commits

..

7 Commits

96 changed files with 771 additions and 1529 deletions

12
Cargo.lock generated
View File

@ -429,6 +429,15 @@ dependencies = [
"wait-timeout",
]
[[package]]
name = "assertor"
version = "0.0.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4ff24d87260733dc86d38a11c60d9400ce4a74a05d0dafa2a6f5ab249cd857cb"
dependencies = [
"num-traits",
]
[[package]]
name = "async-broadcast"
version = "0.7.2"
@ -1881,6 +1890,8 @@ dependencies = [
"env_logger",
"harmony",
"harmony_macros",
"harmony_secret",
"harmony_secret_derive",
"harmony_tui",
"harmony_types",
"log",
@ -3878,6 +3889,7 @@ checksum = "d05e27ee213611ffe7d6348b942e8f942b37114c00cc03cec254295a4a17852e"
name = "opnsense-config"
version = "0.1.0"
dependencies = [
"assertor",
"async-trait",
"chrono",
"env_logger",

View File

@ -14,7 +14,8 @@ members = [
"harmony_composer",
"harmony_inventory_agent",
"harmony_secret_derive",
"harmony_secret", "adr/agent_discovery/mdns",
"harmony_secret",
"adr/agent_discovery/mdns",
]
[workspace.package]
@ -66,5 +67,12 @@ thiserror = "2.0.14"
serde = { version = "1.0.209", features = ["derive", "rc"] }
serde_json = "1.0.127"
askama = "0.14"
sqlx = { version = "0.8", features = ["runtime-tokio", "sqlite" ] }
reqwest = { version = "0.12", features = ["blocking", "stream", "rustls-tls", "http2", "json"], default-features = false }
sqlx = { version = "0.8", features = ["runtime-tokio", "sqlite"] }
reqwest = { version = "0.12", features = [
"blocking",
"stream",
"rustls-tls",
"http2",
"json",
], default-features = false }
assertor = "0.0.4"

View File

@ -36,59 +36,48 @@ These principles surface as simple, ergonomic Rust APIs that let teams focus on
## 2 · Quick Start
The snippet below spins up a complete **production-grade Rust + Leptos Webapp** with monitoring. Swap it for your own scores to deploy anything from microservices to machine-learning pipelines.
The snippet below spins up a complete **production-grade LAMP stack** with monitoring. Swap it for your own scores to deploy anything from microservices to machine-learning pipelines.
```rust
use harmony::{
data::Version,
inventory::Inventory,
maestro::Maestro,
modules::{
application::{
ApplicationScore, RustWebFramework, RustWebapp,
features::{PackagingDeployment, rhob_monitoring::Monitoring},
},
monitoring::alert_channel::discord_alert_channel::DiscordWebhook,
lamp::{LAMPConfig, LAMPScore},
monitoring::monitoring_alerting::MonitoringAlertingStackScore,
},
topology::K8sAnywhereTopology,
topology::{K8sAnywhereTopology, Url},
};
use harmony_macros::hurl;
use std::{path::PathBuf, sync::Arc};
#[tokio::main]
async fn main() {
let application = Arc::new(RustWebapp {
name: "harmony-example-leptos".to_string(),
project_root: PathBuf::from(".."), // <== Your project root, usually .. if you use the standard `/harmony` folder
framework: Some(RustWebFramework::Leptos),
service_port: 8080,
});
// Define your Application deployment and the features you want
let app = ApplicationScore {
features: vec![
Box::new(PackagingDeployment {
application: application.clone(),
}),
Box::new(Monitoring {
application: application.clone(),
alert_receiver: vec![
Box::new(DiscordWebhook {
name: "test-discord".to_string(),
url: hurl!("https://discord.doesnt.exist.com"), // <== Get your discord webhook url
}),
],
}),
],
application,
// 1. Describe what you want
let lamp_stack = LAMPScore {
name: "harmony-lamp-demo".into(),
domain: Url::Url(url::Url::parse("https://lampdemo.example.com").unwrap()),
php_version: Version::from("8.3.0").unwrap(),
config: LAMPConfig {
project_root: "./php".into(),
database_size: "4Gi".into(),
..Default::default()
},
};
// 2. Enhance with extra scores (monitoring, CI/CD, …)
let mut monitoring = MonitoringAlertingStackScore::new();
monitoring.namespace = Some(lamp_stack.config.namespace.clone());
// 3. Run your scores on the desired topology & inventory
harmony_cli::run(
Inventory::autoload(),
K8sAnywhereTopology::from_env(), // <== Deploy to local automatically provisioned local k3d by default or connect to any kubernetes cluster
vec![Box::new(app)],
None,
)
.await
.unwrap();
Inventory::autoload(), // auto-detect hardware / kube-config
K8sAnywhereTopology::from_env(), // local k3d, CI, staging, prod…
vec![
Box::new(lamp_stack),
Box::new(monitoring)
],
None
).await.unwrap();
}
```
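
A score is simply a type that produces an interpret for the target topology. As a rough sketch of what "your own scores" can look like (`HelloScore` and `HelloInterpret` are hypothetical names; the module paths and trait signatures mirror implementations found elsewhere in this repository, and the real traits may carry additional bounds):

```rust
use async_trait::async_trait;
use harmony::{
    data::Version,
    interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
    inventory::Inventory,
    score::Score,
    topology::Topology,
};
use harmony_types::id::Id;

#[derive(Debug, Clone)]
struct HelloScore;

impl<T: Topology> Score<T> for HelloScore {
    // A score's only job is to hand back the interpret that does the work.
    fn create_interpret(&self) -> Box<dyn Interpret<T>> {
        Box::new(HelloInterpret)
    }

    fn name(&self) -> String {
        "HelloScore".to_string()
    }
}

#[derive(Debug)]
struct HelloInterpret;

#[async_trait]
impl<T: Topology> Interpret<T> for HelloInterpret {
    async fn execute(
        &self,
        _inventory: &Inventory,
        _topology: &T,
    ) -> Result<Outcome, InterpretError> {
        // Idempotent provisioning logic would live here.
        Ok(Outcome::success("Hello from a custom score".to_string()))
    }

    fn get_name(&self) -> InterpretName {
        InterpretName::Custom("Hello")
    }

    fn get_version(&self) -> Version {
        Version::from("0.0.1").unwrap()
    }

    fn get_status(&self) -> InterpretStatus {
        InterpretStatus::SUCCESS
    }

    fn get_children(&self) -> Vec<Id> {
        vec![]
    }
}
```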

View File

@ -1,3 +0,0 @@
.terraform
*.tfstate
venv

7 binary image files removed: 72 KiB, 38 KiB, 38 KiB, 52 KiB, 62 KiB, 64 KiB, 100 KiB.

View File

@ -1,5 +0,0 @@
To build:
```bash
npx @marp-team/marp-cli@latest -w slides.md
```

Binary image file removed: 11 KiB.

View File

@ -1,9 +0,0 @@
To run this:
```bash
virtualenv venv
source venv/bin/activate
pip install ansible ansible-dev-tools
ansible-lint download.yml
ansible-playbook -i localhost download.yml
```

View File

@ -1,8 +0,0 @@
- name: Test Ansible URL Validation
hosts: localhost
tasks:
- name: Download a file
ansible.builtin.get_url:
url: "http:/wikipedia.org/"
dest: "/tmp/ansible-test/wikipedia.html"
mode: '0900'

5 binary image files removed: 22 KiB, 275 KiB, 212 KiB, 384 KiB, 8.3 KiB.

File diff suppressed because one or more lines are too long

View File

@ -1,241 +0,0 @@
---
theme: uncover
---
# Here is the story of Petit Poisson
---
<img src="./Happy_swimmer.jpg" width="600"/>
---
<img src="./happy_landscape_swimmer.jpg" width="1000"/>
---
<img src="./Happy_swimmer.jpg" width="200"/>
<img src="./tryrust.org.png" width="600"/>
[https://tryrust.org](https://tryrust.org)
---
<img src="./texto_deploy_prod_1.png" width="600"/>
---
<img src="./texto_deploy_prod_2.png" width="600"/>
---
<img src="./texto_deploy_prod_3.png" width="600"/>
---
<img src="./texto_deploy_prod_4.png" width="600"/>
---
## Demo time
---
<img src="./Happy_swimmer_sunglasses.jpg" width="1000"/>
---
<img src="./texto_download_wikipedia.png" width="600"/>
---
<img src="./ansible.jpg" width="200"/>
## Ansible❓
---
<img src="./Happy_swimmer.jpg" width="200"/>
```yaml
- name: Download wikipedia
hosts: localhost
tasks:
- name: Download a file
ansible.builtin.get_url:
url: "https:/wikipedia.org/"
dest: "/tmp/ansible-test/wikipedia.html"
mode: '0900'
```
---
<img src="./Happy_swimmer.jpg" width="200"/>
```
ansible-lint download.yml
Passed: 0 failure(s), 0 warning(s) on 1 files. Last profile that met the validation criteria was 'production'.
```
---
```
git push
```
---
<img src="./75_years_later.jpg" width="1100"/>
---
<img src="./texto_download_wikipedia_fail.png" width="600"/>
---
<img src="./Happy_swimmer_reversed.jpg" width="600"/>
---
<img src="./ansible_output_fail.jpg" width="1100"/>
---
<img src="./Happy_swimmer_reversed_1hit.jpg" width="600"/>
---
<img src="./ansible_crossed_out.jpg" width="400"/>
---
<img src="./terraform.jpg" width="400"/>
## Terraform❓❗
---
<img src="./Happy_swimmer_reversed_1hit.jpg" width="200"/>
<img src="./terraform.jpg" width="200"/>
```tf
provider "docker" {}
resource "docker_network" "invalid_network" {
name = "my-invalid-network"
ipam_config {
subnet = "172.17.0.0/33"
}
}
```
---
<img src="./Happy_swimmer_reversed_1hit.jpg" width="100"/>
<img src="./terraform.jpg" width="200"/>
```
terraform plan
Terraform used the selected providers to generate the following execution plan.
Resource actions are indicated with the following symbols:
+ create
Terraform will perform the following actions:
# docker_network.invalid_network will be created
+ resource "docker_network" "invalid_network" {
+ driver = (known after apply)
+ id = (known after apply)
+ internal = (known after apply)
+ ipam_driver = "default"
+ name = "my-invalid-network"
+ options = (known after apply)
+ scope = (known after apply)
+ ipam_config {
+ subnet = "172.17.0.0/33"
# (2 unchanged attributes hidden)
}
}
Plan: 1 to add, 0 to change, 0 to destroy.
```
---
---
```
terraform apply
```
---
```
Plan: 1 to add, 0 to change, 0 to destroy.
Do you want to perform these actions?
Terraform will perform the actions described above.
Only 'yes' will be accepted to approve.
Enter a value: yes
```
---
```
docker_network.invalid_network: Creating...
│ Error: Unable to create network: Error response from daemon: invalid network config:
│ invalid subnet 172.17.0.0/33: invalid CIDR block notation
│ with docker_network.invalid_network,
│ on main.tf line 11, in resource "docker_network" "invalid_network":
│ 11: resource "docker_network" "invalid_network" {
```
---
<img src="./Happy_swimmer_reversed_fullhit.jpg" width="1100"/>
---
<img src="./ansible_crossed_out.jpg" width="300"/>
<img src="./terraform_crossed_out.jpg" width="400"/>
<img src="./Happy_swimmer_reversed_fullhit.jpg" width="300"/>
---
## Harmony❓❗
---
Demo time
---
<img src="./Happy_swimmer.jpg" width="300"/>
---
# 🎼
Harmony: [https://git.nationtech.io/nationtech/harmony](https://git.nationtech.io/nationtech/harmony)
<img src="./qrcode_gitea_nationtech.png" width="120"/>
LinkedIn: [https://www.linkedin.com/in/jean-gabriel-gill-couture/](https://www.linkedin.com/in/jean-gabriel-gill-couture/)
Email: [jg@nationtech.io](mailto:jg@nationtech.io)

Binary image file removed: 11 KiB.

View File

@ -1,40 +0,0 @@
# This file is maintained automatically by "terraform init".
# Manual edits may be lost in future updates.
provider "registry.terraform.io/hashicorp/http" {
version = "3.5.0"
hashes = [
"h1:8bUoPwS4hahOvzCBj6b04ObLVFXCEmEN8T/5eOHmWOM=",
"zh:047c5b4920751b13425efe0d011b3a23a3be97d02d9c0e3c60985521c9c456b7",
"zh:157866f700470207561f6d032d344916b82268ecd0cf8174fb11c0674c8d0736",
"zh:1973eb9383b0d83dd4fd5e662f0f16de837d072b64a6b7cd703410d730499476",
"zh:212f833a4e6d020840672f6f88273d62a564f44acb0c857b5961cdb3bbc14c90",
"zh:2c8034bc039fffaa1d4965ca02a8c6d57301e5fa9fff4773e684b46e3f78e76a",
"zh:5df353fc5b2dd31577def9cc1a4ebf0c9a9c2699d223c6b02087a3089c74a1c6",
"zh:672083810d4185076c81b16ad13d1224b9e6ea7f4850951d2ab8d30fa6e41f08",
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
"zh:7b4200f18abdbe39904b03537e1a78f21ebafe60f1c861a44387d314fda69da6",
"zh:843feacacd86baed820f81a6c9f7bd32cf302db3d7a0f39e87976ebc7a7cc2ee",
"zh:a9ea5096ab91aab260b22e4251c05f08dad2ed77e43e5e4fadcdfd87f2c78926",
"zh:d02b288922811739059e90184c7f76d45d07d3a77cc48d0b15fd3db14e928623",
]
}
provider "registry.terraform.io/hashicorp/local" {
version = "2.5.3"
hashes = [
"h1:1Nkh16jQJMp0EuDmvP/96f5Unnir0z12WyDuoR6HjMo=",
"zh:284d4b5b572eacd456e605e94372f740f6de27b71b4e1fd49b63745d8ecd4927",
"zh:40d9dfc9c549e406b5aab73c023aa485633c1b6b730c933d7bcc2fa67fd1ae6e",
"zh:6243509bb208656eb9dc17d3c525c89acdd27f08def427a0dce22d5db90a4c8b",
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
"zh:885d85869f927853b6fe330e235cd03c337ac3b933b0d9ae827ec32fa1fdcdbf",
"zh:bab66af51039bdfcccf85b25fe562cbba2f54f6b3812202f4873ade834ec201d",
"zh:c505ff1bf9442a889ac7dca3ac05a8ee6f852e0118dd9a61796a2f6ff4837f09",
"zh:d36c0b5770841ddb6eaf0499ba3de48e5d4fc99f4829b6ab66b0fab59b1aaf4f",
"zh:ddb6a407c7f3ec63efb4dad5f948b54f7f4434ee1a2607a49680d494b1776fe1",
"zh:e0dafdd4500bec23d3ff221e3a9b60621c5273e5df867bc59ef6b7e41f5c91f6",
"zh:ece8742fd2882a8fc9d6efd20e2590010d43db386b920b2a9c220cfecc18de47",
"zh:f4c6b3eb8f39105004cf720e202f04f57e3578441cfb76ca27611139bc116a82",
]
}

View File

@ -1,10 +0,0 @@
provider "http" {}
data "http" "remote_file" {
url = "http:/example.com/file.txt"
}
resource "local_file" "downloaded_file" {
content = data.http.remote_file.body
filename = "${path.module}/downloaded_file.txt"
}

View File

@ -1,24 +0,0 @@
# This file is maintained automatically by "terraform init".
# Manual edits may be lost in future updates.
provider "registry.terraform.io/kreuzwerker/docker" {
version = "3.0.2"
constraints = "~> 3.0.1"
hashes = [
"h1:cT2ccWOtlfKYBUE60/v2/4Q6Stk1KYTNnhxSck+VPlU=",
"zh:15b0a2b2b563d8d40f62f83057d91acb02cd0096f207488d8b4298a59203d64f",
"zh:23d919de139f7cd5ebfd2ff1b94e6d9913f0977fcfc2ca02e1573be53e269f95",
"zh:38081b3fe317c7e9555b2aaad325ad3fa516a886d2dfa8605ae6a809c1072138",
"zh:4a9c5065b178082f79ad8160243369c185214d874ff5048556d48d3edd03c4da",
"zh:5438ef6afe057945f28bce43d76c4401254073de01a774760169ac1058830ac2",
"zh:60b7fadc287166e5c9873dfe53a7976d98244979e0ab66428ea0dea1ebf33e06",
"zh:61c5ec1cb94e4c4a4fb1e4a24576d5f39a955f09afb17dab982de62b70a9bdd1",
"zh:a38fe9016ace5f911ab00c88e64b156ebbbbfb72a51a44da3c13d442cd214710",
"zh:c2c4d2b1fd9ebb291c57f524b3bf9d0994ff3e815c0cd9c9bcb87166dc687005",
"zh:d567bb8ce483ab2cf0602e07eae57027a1a53994aba470fa76095912a505533d",
"zh:e83bf05ab6a19dd8c43547ce9a8a511f8c331a124d11ac64687c764ab9d5a792",
"zh:e90c934b5cd65516fbcc454c89a150bfa726e7cf1fe749790c7480bbeb19d387",
"zh:f05f167d2eaf913045d8e7b88c13757e3cf595dd5cd333057fdafc7c4b7fed62",
"zh:fcc9c1cea5ce85e8bcb593862e699a881bd36dffd29e2e367f82d15368659c3d",
]
}

View File

@ -1,17 +0,0 @@
terraform {
required_providers {
docker = {
source = "kreuzwerker/docker"
version = "~> 3.0.1" # Adjust version as needed
}
}
}
provider "docker" {}
resource "docker_network" "invalid_network" {
name = "my-invalid-network"
ipam_config {
subnet = "172.17.0.0/33"
}
}

10 binary image files removed: 14 KiB, 144 KiB, 58 KiB, 56 KiB, 71 KiB, 81 KiB, 87 KiB, 88 KiB, 48 KiB, 325 KiB.

View File

@ -27,6 +27,7 @@ async fn main() {
};
let application = Arc::new(RustWebapp {
name: "example-monitoring".to_string(),
domain: Url::Url(url::Url::parse("https://rustapp.harmony.example.com").unwrap()),
project_root: PathBuf::from("./examples/rust/webapp"),
framework: Some(RustWebFramework::Leptos),
service_port: 3000,

View File

@ -4,7 +4,8 @@ use harmony::{
inventory::Inventory,
modules::{
application::{
ApplicationScore, RustWebFramework, RustWebapp, features::rhob_monitoring::Monitoring,
ApplicationScore, RustWebFramework, RustWebapp,
features::rhob_monitoring::RHOBMonitoring,
},
monitoring::alert_channel::discord_alert_channel::DiscordWebhook,
},
@ -16,6 +17,7 @@ use harmony_types::net::Url;
async fn main() {
let application = Arc::new(RustWebapp {
name: "test-rhob-monitoring".to_string(),
domain: Url::Url(url::Url::parse("htps://some-fake-url").unwrap()),
project_root: PathBuf::from("./webapp"), // Relative from 'harmony-path' param
framework: Some(RustWebFramework::Leptos),
service_port: 3000,
@ -28,7 +30,7 @@ async fn main() {
let app = ApplicationScore {
features: vec![
Box::new(Monitoring {
Box::new(RHOBMonitoring {
application: application.clone(),
alert_receiver: vec![Box::new(discord_receiver)],
}),

View File

@ -5,7 +5,7 @@ use harmony::{
modules::{
application::{
ApplicationScore, RustWebFramework, RustWebapp,
features::{Monitoring, PackagingDeployment},
features::{ContinuousDelivery, Monitoring},
},
monitoring::alert_channel::{
discord_alert_channel::DiscordWebhook, webhook_receiver::WebhookReceiver,
@ -19,6 +19,7 @@ use harmony_macros::hurl;
async fn main() {
let application = Arc::new(RustWebapp {
name: "harmony-example-rust-webapp".to_string(),
domain: hurl!("https://rustapp.harmony.example.com"),
project_root: PathBuf::from("./webapp"),
framework: Some(RustWebFramework::Leptos),
service_port: 3000,
@ -36,7 +37,7 @@ async fn main() {
let app = ApplicationScore {
features: vec![
Box::new(PackagingDeployment {
Box::new(ContinuousDelivery {
application: application.clone(),
}),
Box::new(Monitoring {

View File

@ -1 +0,0 @@
harmony

View File

@ -1,20 +0,0 @@
[package]
name = "harmony-tryrust"
edition = "2024"
version = "0.1.0"
[dependencies]
harmony = { path = "../../../nationtech/harmony/harmony" }
harmony_cli = { path = "../../../nationtech/harmony/harmony_cli" }
harmony_types = { path = "../../../nationtech/harmony/harmony_types" }
harmony_macros = { path = "../../../nationtech/harmony/harmony_macros" }
tokio = { version = "1.40", features = [
"io-std",
"fs",
"macros",
"rt-multi-thread",
] }
log = { version = "0.4", features = ["kv"] }
env_logger = "0.11"
url = "2.5"
base64 = "0.22.1"

View File

@ -1,50 +0,0 @@
use harmony::{
inventory::Inventory,
modules::{
application::{
ApplicationScore, RustWebFramework, RustWebapp,
features::{PackagingDeployment, rhob_monitoring::Monitoring},
},
monitoring::alert_channel::discord_alert_channel::DiscordWebhook,
},
topology::K8sAnywhereTopology,
};
use harmony_macros::hurl;
use std::{path::PathBuf, sync::Arc};
#[tokio::main]
async fn main() {
let application = Arc::new(RustWebapp {
name: "tryrust".to_string(),
project_root: PathBuf::from(".."),
framework: Some(RustWebFramework::Leptos),
service_port: 8080,
});
let discord_webhook = DiscordWebhook {
name: "harmony_demo".to_string(),
url: hurl!("http://not_a_url.com"),
};
let app = ApplicationScore {
features: vec![
Box::new(PackagingDeployment {
application: application.clone(),
}),
Box::new(Monitoring {
application: application.clone(),
alert_receiver: vec![Box::new(discord_webhook)],
}),
],
application,
};
harmony_cli::run(
Inventory::autoload(),
K8sAnywhereTopology::from_env(),
vec![Box::new(app)],
None,
)
.await
.unwrap();
}

View File

@ -1,39 +1,41 @@
use std::{path::PathBuf, sync::Arc};
use harmony::{
inventory::Inventory,
modules::{
application::{
ApplicationScore, RustWebFramework, RustWebapp,
features::{PackagingDeployment, rhob_monitoring::Monitoring},
features::{ContinuousDelivery, Monitoring},
},
monitoring::alert_channel::discord_alert_channel::DiscordWebhook,
},
topology::K8sAnywhereTopology,
};
use harmony_macros::hurl;
use std::{path::PathBuf, sync::Arc};
use harmony_types::net::Url;
#[tokio::main]
async fn main() {
let application = Arc::new(RustWebapp {
name: "harmony-example-tryrust".to_string(),
project_root: PathBuf::from("./tryrust.org"), // <== Project root, in this case it is a
// submodule
domain: Url::Url(url::Url::parse("https://tryrust.harmony.example.com").unwrap()),
project_root: PathBuf::from("./tryrust.org"),
framework: Some(RustWebFramework::Leptos),
service_port: 8080,
});
// Define your Application deployment and the features you want
let discord_receiver = DiscordWebhook {
name: "test-discord".to_string(),
url: Url::Url(url::Url::parse("https://discord.doesnt.exist.com").unwrap()),
};
let app = ApplicationScore {
features: vec![
Box::new(PackagingDeployment {
Box::new(ContinuousDelivery {
application: application.clone(),
}),
Box::new(Monitoring {
application: application.clone(),
alert_receiver: vec![Box::new(DiscordWebhook {
name: "test-discord".to_string(),
url: hurl!("https://discord.doesnt.exist.com"),
})],
alert_receiver: vec![Box::new(discord_receiver)],
}),
],
application,
@ -41,7 +43,7 @@ async fn main() {
harmony_cli::run(
Inventory::autoload(),
K8sAnywhereTopology::from_env(), // <== Deploy to local automatically provisioned k3d by default or connect to any kubernetes cluster
K8sAnywhereTopology::from_env(),
vec![Box::new(app)],
None,
)

View File

@ -10,11 +10,7 @@ testing = []
[dependencies]
hex = "0.4"
reqwest = { version = "0.11", features = [
"blocking",
"json",
"rustls-tls",
], default-features = false }
reqwest = { version = "0.11", features = ["blocking", "json", "rustls-tls"], default-features = false }
russh = "0.45.0"
rust-ipmi = "0.1.1"
semver = "1.0.23"

View File

@ -34,7 +34,6 @@ pub enum InterpretName {
CephClusterHealth,
Custom(&'static str),
RHOBAlerting,
K8sIngress,
}
impl std::fmt::Display for InterpretName {
@ -65,7 +64,6 @@ impl std::fmt::Display for InterpretName {
InterpretName::CephClusterHealth => f.write_str("CephClusterHealth"),
InterpretName::Custom(name) => f.write_str(name),
InterpretName::RHOBAlerting => f.write_str("RHOBAlerting"),
InterpretName::K8sIngress => f.write_str("K8sIngress"),
}
}
}
@ -84,15 +82,13 @@ pub trait Interpret<T>: std::fmt::Debug + Send {
pub struct Outcome {
pub status: InterpretStatus,
pub message: String,
pub details: Vec<String>,
}
impl Outcome {
pub fn noop(message: String) -> Self {
pub fn noop() -> Self {
Self {
status: InterpretStatus::NOOP,
message,
details: vec![],
message: String::new(),
}
}
@ -100,23 +96,6 @@ impl Outcome {
Self {
status: InterpretStatus::SUCCESS,
message,
details: vec![],
}
}
pub fn success_with_details(message: String, details: Vec<String>) -> Self {
Self {
status: InterpretStatus::SUCCESS,
message,
details,
}
}
pub fn running(message: String) -> Self {
Self {
status: InterpretStatus::RUNNING,
message,
details: vec![],
}
}
}
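With `details` and the extra constructors gone, call sites fall back to `Outcome::success`, `Outcome::noop`, and `Outcome::new`. A small sketch of the resulting usage, mirroring call sites later in this diff:

```rust
// Success with a human-readable message.
let ok = Outcome::success("Dhcp Interpret execution successful".to_string());

// No-op: the message now defaults to an empty string.
let skipped = Outcome::noop();

// Any other status is built explicitly through Outcome::new.
let running = Outcome::new(
    InterpretStatus::RUNNING,
    "Helm Chart pending install...".to_string(),
);
```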

View File

@ -1,7 +0,0 @@
use crate::topology::PreparationError;
use async_trait::async_trait;
#[async_trait]
pub trait Ingress {
async fn get_domain(&self, service: &str) -> Result<String, PreparationError>;
}

View File

@ -1,7 +1,6 @@
use std::{process::Command, sync::Arc};
use async_trait::async_trait;
use kube::api::GroupVersionKind;
use log::{debug, info, warn};
use serde::Serialize;
use tokio::sync::OnceCell;
@ -23,7 +22,6 @@ use crate::{
},
},
score::Score,
topology::ingress::Ingress,
};
use super::{
@ -200,26 +198,6 @@ impl K8sAnywhereTopology {
}
}
async fn openshift_ingress_operator_available(&self) -> Result<(), PreparationError> {
let client = self.k8s_client().await?;
let gvk = GroupVersionKind {
group: "operator.openshift.io".into(),
version: "v1".into(),
kind: "IngressController".into(),
};
let ic = client
.get_resource_json_value("default", Some("openshift-ingress-operator"), &gvk)
.await?;
let ready_replicas = ic.data["status"]["availableReplicas"].as_i64().unwrap_or(0);
if ready_replicas >= 1 {
return Ok(());
} else {
return Err(PreparationError::new(
"openshift-ingress-operator not available".to_string(),
));
}
}
fn is_helm_available(&self) -> Result<(), String> {
let version_result = Command::new("helm")
.arg("version")
@ -372,10 +350,6 @@ impl K8sAnywhereTopology {
if let Some(Some(k8s_state)) = self.k8s_state.get() {
match k8s_state.source {
K8sSource::LocalK3d => {
warn!(
"Installing observability operator is not supported on LocalK3d source"
);
return Ok(PreparationOutcome::Noop);
debug!("installing cluster observability operator");
todo!();
let op_score =
@ -554,7 +528,7 @@ impl MultiTargetTopology for K8sAnywhereTopology {
match self.config.harmony_profile.to_lowercase().as_str() {
"staging" => DeploymentTarget::Staging,
"production" => DeploymentTarget::Production,
_ => todo!("HARMONY_PROFILE must be set when use_local_k3d is false"),
_ => todo!("HARMONY_PROFILE must be set when use_local_k3d is not set"),
}
}
}
@ -576,45 +550,3 @@ impl TenantManager for K8sAnywhereTopology {
.await
}
}
#[async_trait]
impl Ingress for K8sAnywhereTopology {
//TODO this is specifically for openshift/okd which violates the k8sanywhere idea
async fn get_domain(&self, service: &str) -> Result<String, PreparationError> {
let client = self.k8s_client().await?;
if let Some(Some(k8s_state)) = self.k8s_state.get() {
match k8s_state.source {
K8sSource::LocalK3d => Ok(format!("{service}.local.k3d")),
K8sSource::Kubeconfig => {
self.openshift_ingress_operator_available().await?;
let gvk = GroupVersionKind {
group: "operator.openshift.io".into(),
version: "v1".into(),
kind: "IngressController".into(),
};
let ic = client
.get_resource_json_value(
"default",
Some("openshift-ingress-operator"),
&gvk,
)
.await
.map_err(|_| {
PreparationError::new("Failed to fetch IngressController".to_string())
})?;
match ic.data["status"]["domain"].as_str() {
Some(domain) => Ok(format!("{service}.{domain}")),
None => Err(PreparationError::new("Could not find domain".to_string())),
}
}
}
} else {
Err(PreparationError::new(
"Cannot get domain: unable to detect K8s state".to_string(),
))
}
}
}

View File

@ -28,13 +28,7 @@ pub trait LoadBalancer: Send + Sync {
&self,
service: &LoadBalancerService,
) -> Result<(), ExecutorError> {
debug!(
"Listing LoadBalancer services {:?}",
self.list_services().await
);
if !self.list_services().await.contains(service) {
self.add_service(service).await?;
}
self.add_service(service).await?;
Ok(())
}
}

View File

@ -1,10 +1,9 @@
use async_trait::async_trait;
use derive_new::new;
use serde::{Deserialize, Serialize};
use super::{HelmCommand, PreparationError, PreparationOutcome, Topology};
#[derive(new, Clone, Debug, Serialize, Deserialize)]
#[derive(new)]
pub struct LocalhostTopology;
#[async_trait]

View File

@ -1,5 +1,4 @@
mod ha_cluster;
pub mod ingress;
use harmony_types::net::IpAddress;
mod host_binding;
mod http;

View File

@ -26,19 +26,13 @@ impl LoadBalancer for OPNSenseFirewall {
}
async fn add_service(&self, service: &LoadBalancerService) -> Result<(), ExecutorError> {
warn!(
"TODO : the current implementation does not check / cleanup / merge with existing haproxy services properly. Make sure to manually verify that the configuration is correct after executing any operation here"
);
let mut config = self.opnsense_config.write().await;
let mut load_balancer = config.load_balancer();
let (frontend, backend, servers, healthcheck) =
harmony_load_balancer_service_to_haproxy_xml(service);
let mut load_balancer = config.load_balancer();
load_balancer.add_backend(backend);
load_balancer.add_frontend(frontend);
load_balancer.add_servers(servers);
if let Some(healthcheck) = healthcheck {
load_balancer.add_healthcheck(healthcheck);
}
load_balancer.configure_service(frontend, backend, servers, healthcheck);
Ok(())
}
@ -106,7 +100,7 @@ pub(crate) fn haproxy_xml_config_to_harmony_loadbalancer(
.backends
.backends
.iter()
.find(|b| b.uuid == frontend.default_backend);
.find(|b| Some(b.uuid.clone()) == frontend.default_backend);
let mut health_check = None;
match matching_backend {
@ -116,8 +110,7 @@ pub(crate) fn haproxy_xml_config_to_harmony_loadbalancer(
}
None => {
warn!(
"HAProxy config could not find a matching backend for frontend {:?}",
frontend
"HAProxy config could not find a matching backend for frontend {frontend:?}"
);
}
}
@ -152,11 +145,11 @@ pub(crate) fn get_servers_for_backend(
.servers
.iter()
.filter_map(|server| {
let address = server.address.clone()?;
let port = server.port?;
if backend_servers.contains(&server.uuid.as_str()) {
return Some(BackendServer {
address: server.address.clone(),
port: server.port,
});
return Some(BackendServer { address, port });
}
None
})
@ -347,7 +340,7 @@ pub(crate) fn harmony_load_balancer_service_to_haproxy_xml(
name: format!("frontend_{}", service.listening_port),
bind: service.listening_port.to_string(),
mode: "tcp".to_string(), // TODO do not depend on health check here
default_backend: backend.uuid.clone(),
default_backend: Some(backend.uuid.clone()),
..Default::default()
};
info!("HAPRoxy frontend and backend mode currently hardcoded to tcp");
@ -361,8 +354,8 @@ fn server_to_haproxy_server(server: &BackendServer) -> HAProxyServer {
uuid: Uuid::new_v4().to_string(),
name: format!("{}_{}", &server.address, &server.port),
enabled: 1,
address: server.address.clone(),
port: server.port,
address: Some(server.address.clone()),
port: Some(server.port),
mode: "active".to_string(),
server_type: "static".to_string(),
..Default::default()
@ -385,8 +378,8 @@ mod tests {
let mut haproxy = HAProxy::default();
let server = HAProxyServer {
uuid: "server1".to_string(),
address: "192.168.1.1".to_string(),
port: 80,
address: Some("192.168.1.1".to_string()),
port: Some(80),
..Default::default()
};
haproxy.servers.servers.push(server);
@ -411,8 +404,8 @@ mod tests {
let mut haproxy = HAProxy::default();
let server = HAProxyServer {
uuid: "server1".to_string(),
address: "192.168.1.1".to_string(),
port: 80,
address: Some("192.168.1.1".to_string()),
port: Some(80),
..Default::default()
};
haproxy.servers.servers.push(server);
@ -431,8 +424,8 @@ mod tests {
let mut haproxy = HAProxy::default();
let server = HAProxyServer {
uuid: "server1".to_string(),
address: "192.168.1.1".to_string(),
port: 80,
address: Some("192.168.1.1".to_string()),
port: Some(80),
..Default::default()
};
haproxy.servers.servers.push(server);
@ -453,16 +446,16 @@ mod tests {
let mut haproxy = HAProxy::default();
let server = HAProxyServer {
uuid: "server1".to_string(),
address: "some-hostname.test.mcd".to_string(),
port: 80,
address: Some("some-hostname.test.mcd".to_string()),
port: Some(80),
..Default::default()
};
haproxy.servers.servers.push(server);
let server = HAProxyServer {
uuid: "server2".to_string(),
address: "192.168.1.2".to_string(),
port: 8080,
address: Some("192.168.1.2".to_string()),
port: Some(8080),
..Default::default()
};
haproxy.servers.servers.push(server);

View File

@ -1,10 +1,7 @@
use std::error::Error;
use async_trait::async_trait;
use derive_new::new;
use serde::Serialize;
use crate::{executors::ExecutorError, topology::Topology};
use crate::topology::Topology;
/// An ApplicationFeature provided by harmony, such as Backups, Monitoring, MultisiteAvailability,
/// ContinuousIntegration, ContinuousDelivery
@ -12,10 +9,7 @@ use crate::{executors::ExecutorError, topology::Topology};
pub trait ApplicationFeature<T: Topology>:
std::fmt::Debug + Send + Sync + ApplicationFeatureClone<T>
{
async fn ensure_installed(
&self,
topology: &T,
) -> Result<InstallationOutcome, InstallationError>;
async fn ensure_installed(&self, topology: &T) -> Result<(), String>;
fn name(&self) -> String;
}
@ -46,60 +40,3 @@ impl<T: Topology> Clone for Box<dyn ApplicationFeature<T>> {
self.clone_box()
}
}
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum InstallationOutcome {
Success { details: Vec<String> },
Noop,
}
impl InstallationOutcome {
pub fn success() -> Self {
Self::Success { details: vec![] }
}
pub fn success_with_details(details: Vec<String>) -> Self {
Self::Success { details }
}
pub fn noop() -> Self {
Self::Noop
}
}
#[derive(Debug, Clone, new)]
pub struct InstallationError {
msg: String,
}
impl std::fmt::Display for InstallationError {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.write_str(&self.msg)
}
}
impl Error for InstallationError {}
impl From<ExecutorError> for InstallationError {
fn from(value: ExecutorError) -> Self {
Self {
msg: format!("InstallationError : {value}"),
}
}
}
impl From<kube::Error> for InstallationError {
fn from(value: kube::Error) -> Self {
Self {
msg: format!("InstallationError : {value}"),
}
}
}
impl From<String> for InstallationError {
fn from(value: String) -> Self {
Self {
msg: format!("PreparationError : {value}"),
}
}
}
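With `InstallationOutcome` and `InstallationError` removed, a feature now reports a plain `Result<(), String>`. A hedged sketch of a minimal implementation under the new signature (`NoopFeature` is hypothetical; it assumes the blanket `ApplicationFeatureClone` impl for `Clone` types implied by `clone_box` above):

```rust
#[derive(Debug, Clone)]
struct NoopFeature;

#[async_trait]
impl<T: Topology> ApplicationFeature<T> for NoopFeature {
    async fn ensure_installed(&self, _topology: &T) -> Result<(), String> {
        // Nothing to install; any failure would be reported as Err(String).
        Ok(())
    }

    fn name(&self) -> String {
        "NoopFeature".to_string()
    }
}
```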

View File

@ -10,13 +10,11 @@ use crate::{
data::Version,
inventory::Inventory,
modules::application::{
ApplicationFeature, HelmPackage, InstallationError, InstallationOutcome, OCICompliant,
ApplicationFeature, HelmPackage, OCICompliant,
features::{ArgoApplication, ArgoHelmScore},
},
score::Score,
topology::{
DeploymentTarget, HelmCommand, K8sclient, MultiTargetTopology, Topology, ingress::Ingress,
},
topology::{DeploymentTarget, HelmCommand, K8sclient, MultiTargetTopology, Topology},
};
/// ContinuousDelivery in Harmony provides this functionality :
@ -47,11 +45,11 @@ use crate::{
/// - ArgoCD to install/upgrade/rollback/inspect k8s resources
/// - Kubernetes for runtime orchestration
#[derive(Debug, Default, Clone)]
pub struct PackagingDeployment<A: OCICompliant + HelmPackage> {
pub struct ContinuousDelivery<A: OCICompliant + HelmPackage> {
pub application: Arc<A>,
}
impl<A: OCICompliant + HelmPackage> PackagingDeployment<A> {
impl<A: OCICompliant + HelmPackage> ContinuousDelivery<A> {
async fn deploy_to_local_k3d(
&self,
app_name: String,
@ -138,28 +136,18 @@ impl<A: OCICompliant + HelmPackage> PackagingDeployment<A> {
#[async_trait]
impl<
A: OCICompliant + HelmPackage + Clone + 'static,
T: Topology + HelmCommand + MultiTargetTopology + K8sclient + Ingress + 'static,
> ApplicationFeature<T> for PackagingDeployment<A>
T: Topology + HelmCommand + MultiTargetTopology + K8sclient + 'static,
> ApplicationFeature<T> for ContinuousDelivery<A>
{
async fn ensure_installed(
&self,
topology: &T,
) -> Result<InstallationOutcome, InstallationError> {
async fn ensure_installed(&self, topology: &T) -> Result<(), String> {
let image = self.application.image_name();
let domain = topology
.get_domain(&self.application.name())
.await
.map_err(|e| e.to_string())?;
// TODO Write CI/CD workflow files
// we can auto-detect the CI type using the remote url (default to github action for github
// url, etc..)
// Or ask for it when unknown
let helm_chart = self
.application
.build_push_helm_package(&image, &domain)
.await?;
let helm_chart = self.application.build_push_helm_package(&image).await?;
// TODO: Make building image configurable/skippable if image already exists (prompt)")
// https://git.nationtech.io/NationTech/harmony/issues/104
@ -208,11 +196,7 @@ impl<
.unwrap();
}
};
Ok(InstallationOutcome::success_with_details(vec![format!(
"{}: http://{domain}",
self.application.name()
)]))
Ok(())
}
fn name(&self) -> String {
"ContinuousDelivery".to_string()

View File

@ -2,7 +2,7 @@ use async_trait::async_trait;
use log::info;
use crate::{
modules::application::{ApplicationFeature, InstallationError, InstallationOutcome},
modules::application::ApplicationFeature,
topology::{K8sclient, Topology},
};
@ -29,10 +29,7 @@ impl Default for PublicEndpoint {
/// For now we only support K8s ingress, but we will support more stuff at some point
#[async_trait]
impl<T: Topology + K8sclient + 'static> ApplicationFeature<T> for PublicEndpoint {
async fn ensure_installed(
&self,
_topology: &T,
) -> Result<InstallationOutcome, InstallationError> {
async fn ensure_installed(&self, _topology: &T) -> Result<(), String> {
info!(
"Making sure public endpoint is installed for port {}",
self.application_port

View File

@ -13,8 +13,7 @@ use crate::{
modules::helm::chart::{HelmChartScore, HelmRepository},
score::Score,
topology::{
HelmCommand, K8sclient, PreparationError, PreparationOutcome, Topology, ingress::Ingress,
k8s::K8sClient,
HelmCommand, K8sclient, PreparationError, PreparationOutcome, Topology, k8s::K8sClient,
},
};
use harmony_types::id::Id;
@ -28,7 +27,7 @@ pub struct ArgoHelmScore {
pub argo_apps: Vec<ArgoApplication>,
}
impl<T: Topology + HelmCommand + K8sclient + Ingress> Score<T> for ArgoHelmScore {
impl<T: Topology + HelmCommand + K8sclient> Score<T> for ArgoHelmScore {
fn create_interpret(&self) -> Box<dyn crate::interpret::Interpret<T>> {
Box::new(ArgoInterpret {
score: self.clone(),
@ -48,15 +47,17 @@ pub struct ArgoInterpret {
}
#[async_trait]
impl<T: Topology + K8sclient + HelmCommand + Ingress> Interpret<T> for ArgoInterpret {
impl<T: Topology + K8sclient + HelmCommand> Interpret<T> for ArgoInterpret {
async fn execute(
&self,
inventory: &Inventory,
topology: &T,
) -> Result<Outcome, InterpretError> {
let k8s_client = topology.k8s_client().await?;
let svc = format!("argo-{}", self.score.namespace.clone());
let domain = topology.get_domain(&svc).await?;
let domain = self
.get_host_domain(k8s_client.clone(), self.score.openshift)
.await?;
let domain = format!("argo.{domain}");
let helm_score =
argo_helm_chart_score(&self.score.namespace, self.score.openshift, &domain);
@ -67,17 +68,14 @@ impl<T: Topology + K8sclient + HelmCommand + Ingress> Interpret<T> for ArgoInter
.await
.unwrap();
Ok(Outcome::success_with_details(
format!(
"ArgoCD {} {}",
self.argo_apps.len(),
match self.argo_apps.len() {
1 => "application",
_ => "applications",
}
),
vec![format!("argo application: http://{}", domain)],
))
Ok(Outcome::success(format!(
"ArgoCD installed with {} {}",
self.argo_apps.len(),
match self.argo_apps.len() {
1 => "application",
_ => "applications",
}
)))
}
fn get_name(&self) -> InterpretName {

View File

@ -5,8 +5,8 @@ pub use endpoint::*;
mod monitoring;
pub use monitoring::*;
mod packaging_deployment;
pub use packaging_deployment::*;
mod continuous_delivery;
pub use continuous_delivery::*;
mod helm_argocd_score;
pub use helm_argocd_score::*;

View File

@ -1,10 +1,10 @@
use crate::modules::application::{
Application, ApplicationFeature, InstallationError, InstallationOutcome,
};
use std::sync::Arc;
use crate::modules::application::{Application, ApplicationFeature};
use crate::modules::monitoring::application_monitoring::application_monitoring_score::ApplicationMonitoringScore;
use crate::modules::monitoring::kube_prometheus::crd::crd_alertmanager_config::CRDPrometheus;
use crate::topology::MultiTargetTopology;
use crate::topology::ingress::Ingress;
use crate::{
inventory::Inventory,
modules::monitoring::{
@ -19,12 +19,8 @@ use crate::{
};
use async_trait::async_trait;
use base64::{Engine as _, engine::general_purpose};
use harmony_secret::SecretManager;
use harmony_secret_derive::Secret;
use harmony_types::net::Url;
use log::{debug, info};
use serde::{Deserialize, Serialize};
use std::sync::Arc;
#[derive(Debug, Clone)]
pub struct Monitoring {
@ -40,22 +36,17 @@ impl<
+ TenantManager
+ K8sclient
+ MultiTargetTopology
+ PrometheusApplicationMonitoring<CRDPrometheus>
+ Ingress
+ std::fmt::Debug,
+ std::fmt::Debug
+ PrometheusApplicationMonitoring<CRDPrometheus>,
> ApplicationFeature<T> for Monitoring
{
async fn ensure_installed(
&self,
topology: &T,
) -> Result<InstallationOutcome, InstallationError> {
async fn ensure_installed(&self, topology: &T) -> Result<(), String> {
info!("Ensuring monitoring is available for application");
let namespace = topology
.get_tenant_config()
.await
.map(|ns| ns.name.clone())
.unwrap_or_else(|| self.application.name());
let domain = topology.get_domain("ntfy").await.unwrap();
let mut alerting_score = ApplicationMonitoringScore {
sender: CRDPrometheus {
@ -67,17 +58,19 @@ impl<
};
let ntfy = NtfyScore {
namespace: namespace.clone(),
host: domain,
host: "ntfy.harmonydemo.apps.ncd0.harmony.mcd".to_string(),
};
ntfy.interpret(&Inventory::empty(), topology)
.await
.map_err(|e| e.to_string())?;
let config = SecretManager::get_or_prompt::<NtfyAuth>().await.unwrap();
let ntfy_default_auth_username = "harmony";
let ntfy_default_auth_password = "harmony";
let ntfy_default_auth_header = format!(
"Basic {}",
general_purpose::STANDARD.encode(format!("{}:{}", config.username, config.password))
general_purpose::STANDARD.encode(format!(
"{ntfy_default_auth_username}:{ntfy_default_auth_password}"
))
);
debug!("ntfy_default_auth_header: {ntfy_default_auth_header}");
@ -107,17 +100,9 @@ impl<
.interpret(&Inventory::empty(), topology)
.await
.map_err(|e| e.to_string())?;
Ok(InstallationOutcome::success())
Ok(())
}
fn name(&self) -> String {
"Monitoring".to_string()
}
}
#[derive(Secret, Serialize, Deserialize, Clone, Debug)]
struct NtfyAuth {
username: String,
password: String,
}

View File

@ -1,14 +1,11 @@
use std::sync::Arc;
use crate::modules::application::{
Application, ApplicationFeature, InstallationError, InstallationOutcome,
};
use crate::modules::application::{Application, ApplicationFeature};
use crate::modules::monitoring::application_monitoring::application_monitoring_score::ApplicationMonitoringScore;
use crate::modules::monitoring::application_monitoring::rhobs_application_monitoring_score::ApplicationRHOBMonitoringScore;
use crate::modules::monitoring::kube_prometheus::crd::rhob_alertmanager_config::RHOBObservability;
use crate::topology::MultiTargetTopology;
use crate::topology::ingress::Ingress;
use crate::{
inventory::Inventory,
modules::monitoring::{
@ -27,7 +24,7 @@ use harmony_types::net::Url;
use log::{debug, info};
#[derive(Debug, Clone)]
pub struct Monitoring {
pub struct RHOBMonitoring {
pub application: Arc<dyn Application>,
pub alert_receiver: Vec<Box<dyn AlertReceiver<RHOBObservability>>>,
}
@ -40,15 +37,11 @@ impl<
+ TenantManager
+ K8sclient
+ MultiTargetTopology
+ Ingress
+ std::fmt::Debug
+ PrometheusApplicationMonitoring<RHOBObservability>,
> ApplicationFeature<T> for Monitoring
> ApplicationFeature<T> for RHOBMonitoring
{
async fn ensure_installed(
&self,
topology: &T,
) -> Result<InstallationOutcome, InstallationError> {
async fn ensure_installed(&self, topology: &T) -> Result<(), String> {
info!("Ensuring monitoring is available for application");
let namespace = topology
.get_tenant_config()
@ -64,13 +57,9 @@ impl<
application: self.application.clone(),
receivers: self.alert_receiver.clone(),
};
let domain = topology
.get_domain("ntfy")
.await
.map_err(|e| format!("could not get domain {e}"))?;
let ntfy = NtfyScore {
namespace: namespace.clone(),
host: domain.clone(),
host: "ntfy.harmonydemo.apps.ncd0.harmony.mcd".to_string(),
};
ntfy.interpret(&Inventory::empty(), topology)
.await
@ -92,33 +81,27 @@ impl<
.replace("=", "");
debug!("ntfy_default_auth_param: {ntfy_default_auth_param}");
let ntfy_receiver = WebhookReceiver {
name: "ntfy-webhook".to_string(),
url: Url::Url(
url::Url::parse(
format!(
"http://{domain}/{}?auth={ntfy_default_auth_param}",
self.application.name()
"http://ntfy.{}.svc.cluster.local/rust-web-app?auth={ntfy_default_auth_param}",
namespace.clone()
)
.as_str(),
)
.unwrap(),
),
};
debug!(
"ntfy webhook receiver \n{:#?}\nntfy topic: {}",
ntfy_receiver.clone(),
self.application.name()
);
alerting_score.receivers.push(Box::new(ntfy_receiver));
alerting_score
.interpret(&Inventory::empty(), topology)
.await
.map_err(|e| e.to_string())?;
Ok(InstallationOutcome::success_with_details(vec![format!(
"ntfy topic: {}",
self.application.name()
)]))
Ok(())
}
fn name(&self) -> String {
"Monitoring".to_string()

View File

@ -24,8 +24,8 @@ use harmony_types::id::Id;
#[derive(Clone, Debug)]
pub enum ApplicationFeatureStatus {
Installing,
Installed { details: Vec<String> },
Failed { message: String },
Installed,
Failed { details: String },
}
pub trait Application: std::fmt::Debug + Send + Sync {
@ -65,32 +65,27 @@ impl<A: Application, T: Topology + std::fmt::Debug> Interpret<T> for Application
.unwrap();
let _ = match feature.ensure_installed(topology).await {
Ok(outcome) => {
Ok(()) => {
instrumentation::instrument(HarmonyEvent::ApplicationFeatureStateChanged {
topology: topology.name().into(),
application: self.application.name(),
feature: feature.name(),
status: ApplicationFeatureStatus::Installed {
details: match outcome {
InstallationOutcome::Success { details } => details,
InstallationOutcome::Noop => vec![],
},
},
status: ApplicationFeatureStatus::Installed,
})
.unwrap();
}
Err(error) => {
Err(msg) => {
instrumentation::instrument(HarmonyEvent::ApplicationFeatureStateChanged {
topology: topology.name().into(),
application: self.application.name(),
feature: feature.name(),
status: ApplicationFeatureStatus::Failed {
message: error.to_string(),
details: msg.clone(),
},
})
.unwrap();
return Err(InterpretError::new(format!(
"Application Interpret failed to install feature : {error}"
"Application Interpret failed to install feature : {msg}"
)));
}
};

View File

@ -1,6 +1,7 @@
use super::Application;
use async_trait::async_trait;
use super::Application;
#[async_trait]
pub trait OCICompliant: Application {
async fn build_push_oci_image(&self) -> Result<String, String>; // TODO consider using oci-spec and friends crates here
@ -16,10 +17,5 @@ pub trait HelmPackage: Application {
///
/// # Arguments
/// * `image_url` - The full URL of the OCI container image to be used in the Deployment.
/// * `domain` - The domain where the application is hosted.
async fn build_push_helm_package(
&self,
image_url: &str,
domain: &str,
) -> Result<String, String>;
async fn build_push_helm_package(&self, image_url: &str) -> Result<String, String>;
}

View File

@ -1,4 +1,5 @@
use std::fs::{self};
use std::fs::{self, File};
use std::io::Read;
use std::path::{Path, PathBuf};
use std::process;
use std::sync::Arc;
@ -10,13 +11,14 @@ use dockerfile_builder::Dockerfile;
use dockerfile_builder::instruction::{CMD, COPY, ENV, EXPOSE, FROM, RUN, USER, WORKDIR};
use dockerfile_builder::instruction_builder::CopyBuilder;
use futures_util::StreamExt;
use log::{debug, error, info, log_enabled, trace, warn};
use log::{debug, info, log_enabled};
use serde::Serialize;
use tar::{Builder, Header};
use tar::{Archive, Builder, Header};
use walkdir::WalkDir;
use crate::config::{REGISTRY_PROJECT, REGISTRY_URL};
use crate::{score::Score, topology::Topology};
use harmony_types::net::Url;
use super::{Application, ApplicationFeature, ApplicationInterpret, HelmPackage, OCICompliant};
@ -56,6 +58,7 @@ pub enum RustWebFramework {
#[derive(Debug, Clone, Serialize)]
pub struct RustWebapp {
pub name: String,
pub domain: Url,
/// The path to the root of the Rust project to be containerized.
pub project_root: PathBuf,
pub service_port: u32,
@ -70,17 +73,12 @@ impl Application for RustWebapp {
#[async_trait]
impl HelmPackage for RustWebapp {
async fn build_push_helm_package(
&self,
image_url: &str,
domain: &str,
) -> Result<String, String> {
async fn build_push_helm_package(&self, image_url: &str) -> Result<String, String> {
info!("Starting Helm chart build and push for '{}'", self.name);
// 1. Create the Helm chart files on disk.
let chart_dir = self
.create_helm_chart_files(image_url, domain)
.await
.create_helm_chart_files(image_url)
.map_err(|e| format!("Failed to create Helm chart files: {}", e))?;
info!("Successfully created Helm chart files in {:?}", chart_dir);
@ -162,7 +160,7 @@ impl RustWebapp {
&self,
image_name: &str,
) -> Result<String, Box<dyn std::error::Error>> {
info!("Generating Dockerfile for '{}'", self.name);
debug!("Generating Dockerfile for '{}'", self.name);
let dockerfile = self.get_or_build_dockerfile();
let quiet = !log_enabled!(log::Level::Debug);
match dockerfile
@ -194,41 +192,8 @@ impl RustWebapp {
Some(body_full(tar_data.into())),
);
while let Some(mut msg) = image_build_stream.next().await {
trace!("Got bollard msg {msg:?}");
match msg {
Ok(mut msg) => {
if let Some(progress) = msg.progress_detail {
info!(
"Build progress {}/{}",
progress.current.unwrap_or(0),
progress.total.unwrap_or(0)
);
}
if let Some(mut log) = msg.stream {
if log.ends_with('\n') {
log.pop();
if log.ends_with('\r') {
log.pop();
}
}
info!("{log}");
}
if let Some(error) = msg.error {
warn!("Build error : {error:?}");
}
if let Some(error) = msg.error_detail {
warn!("Build error : {error:?}");
}
}
Err(e) => {
error!("Build failed : {e}");
return Err(format!("Build failed : {e}").into());
}
}
while let Some(msg) = image_build_stream.next().await {
debug!("Message: {msg:?}");
}
Ok(image_name.to_string())
@ -255,9 +220,7 @@ impl RustWebapp {
".git",
".github",
".harmony_generated",
"harmony",
"node_modules",
"Dockerfile.harmony",
];
let mut entries: Vec<_> = WalkDir::new(project_root)
.into_iter()
@ -302,6 +265,8 @@ impl RustWebapp {
let docker = Docker::connect_with_socket_defaults().unwrap();
// let push_options = PushImageOptionsBuilder::new().tag(tag);
let mut push_image_stream = docker.push_image(
image_tag,
Some(PushImageOptionsBuilder::new().build()),
@ -309,8 +274,6 @@ impl RustWebapp {
);
while let Some(msg) = push_image_stream.next().await {
// let msg = msg?;
// TODO this fails silently, for some reason bollard cannot push to hub.nationtech.io
debug!("Message: {msg:?}");
}
@ -445,10 +408,9 @@ impl RustWebapp {
}
/// Creates all necessary files for a basic Helm chart.
async fn create_helm_chart_files(
fn create_helm_chart_files(
&self,
image_url: &str,
domain: &str,
) -> Result<PathBuf, Box<dyn std::error::Error>> {
let chart_name = format!("{}-chart", self.name);
let chart_dir = self
@ -498,15 +460,21 @@ ingress:
enabled: true
# Annotations for cert-manager to handle SSL.
annotations:
cert-manager.io/cluster-issuer: "letsencrypt-prod"
# Add other annotations like nginx ingress class if needed
# kubernetes.io/ingress.class: nginx
hosts:
- host: {}
- host: chart-example.local
paths:
- path: /
pathType: ImplementationSpecific
tls:
- secretName: {}-tls
hosts:
- chart-example.local
"#,
chart_name, image_repo, image_tag, self.service_port, domain,
chart_name, image_repo, image_tag, self.service_port, self.name
);
fs::write(chart_dir.join("values.yaml"), values_yaml)?;

View File

@ -69,14 +69,17 @@ impl DhcpInterpret {
dhcp_server.set_pxe_options(pxe_options).await?;
Ok(Outcome::success(format!(
"Dhcp Interpret Set next boot to [{:?}], boot_filename to [{:?}], filename to [{:?}], filename64 to [{:?}], filenameipxe to [:{:?}]",
self.score.boot_filename,
self.score.boot_filename,
self.score.filename,
self.score.filename64,
self.score.filenameipxe
)))
Ok(Outcome::new(
InterpretStatus::SUCCESS,
format!(
"Dhcp Interpret Set next boot to [{:?}], boot_filename to [{:?}], filename to [{:?}], filename64 to [{:?}], filenameipxe to [:{:?}]",
self.score.boot_filename,
self.score.boot_filename,
self.score.filename,
self.score.filename64,
self.score.filenameipxe
),
))
}
}
@ -119,7 +122,8 @@ impl<T: Topology + DhcpServer> Interpret<T> for DhcpInterpret {
topology.commit_config().await?;
Ok(Outcome::success(
Ok(Outcome::new(
InterpretStatus::SUCCESS,
"Dhcp Interpret execution successful".to_string(),
))
}
@ -193,10 +197,10 @@ impl DhcpHostBindingInterpret {
}
}
Ok(Outcome::success(format!(
"Dhcp Interpret registered {} entries",
number_new_entries
)))
Ok(Outcome::new(
InterpretStatus::SUCCESS,
format!("Dhcp Interpret registered {} entries", number_new_entries),
))
}
}
@ -232,9 +236,12 @@ impl<T: DhcpServer> Interpret<T> for DhcpHostBindingInterpret {
topology.commit_config().await?;
Ok(Outcome::success(format!(
"Dhcp Host Binding Interpret execution successful on {} hosts",
self.score.host_binding.len()
)))
Ok(Outcome::new(
InterpretStatus::SUCCESS,
format!(
"Dhcp Host Binding Interpret execution successful on {} hosts",
self.score.host_binding.len()
),
))
}
}

View File

@ -55,7 +55,8 @@ impl DnsInterpret {
dns.register_dhcp_leases(register).await?;
}
Ok(Outcome::success(
Ok(Outcome::new(
InterpretStatus::SUCCESS,
"DNS Interpret execution successfull".to_string(),
))
}
@ -67,10 +68,13 @@ impl DnsInterpret {
let entries = &self.score.dns_entries;
dns_server.ensure_hosts_registered(entries.clone()).await?;
Ok(Outcome::success(format!(
"DnsInterpret registered {} hosts successfully",
entries.len()
)))
Ok(Outcome::new(
InterpretStatus::SUCCESS,
format!(
"DnsInterpret registered {} hosts successfully",
entries.len()
),
))
}
}
@ -107,7 +111,8 @@ impl<T: Topology + DnsServer> Interpret<T> for DnsInterpret {
topology.commit_config().await?;
Ok(Outcome::success(
Ok(Outcome::new(
InterpretStatus::SUCCESS,
"Dns Interpret execution successful".to_string(),
))
}

View File

@ -153,10 +153,6 @@ impl<T: Topology + HelmCommand> Interpret<T> for HelmChartInterpret {
let yaml_path: Option<&Path> = match self.score.values_yaml.as_ref() {
Some(yaml_str) => {
tf = temp_file::with_contents(yaml_str.as_bytes());
debug!(
"values yaml string for chart {} :\n {yaml_str}",
self.score.chart_name
);
Some(tf.path())
}
None => None,
@ -197,10 +193,13 @@ impl<T: Topology + HelmCommand> Interpret<T> for HelmChartInterpret {
self.score.release_name, ns
);
return Ok(Outcome::success(format!(
"Helm Chart '{}' already installed to namespace {ns} and install_only=true",
self.score.release_name
)));
return Ok(Outcome::new(
InterpretStatus::SUCCESS,
format!(
"Helm Chart '{}' already installed to namespace {ns} and install_only=true",
self.score.release_name
),
));
} else {
info!(
"Release '{}' not found in namespace '{}'. Proceeding with installation.",
@ -225,18 +224,18 @@ impl<T: Topology + HelmCommand> Interpret<T> for HelmChartInterpret {
};
match status {
helm_wrapper_rs::HelmDeployStatus::Deployed => Ok(Outcome::success(format!(
"Helm Chart {} deployed",
self.score.release_name
))),
helm_wrapper_rs::HelmDeployStatus::PendingInstall => Ok(Outcome::running(format!(
"Helm Chart {} pending install...",
self.score.release_name
))),
helm_wrapper_rs::HelmDeployStatus::PendingUpgrade => Ok(Outcome::running(format!(
"Helm Chart {} pending upgrade...",
self.score.release_name
))),
helm_wrapper_rs::HelmDeployStatus::Deployed => Ok(Outcome::new(
InterpretStatus::SUCCESS,
format!("Helm Chart {} deployed", self.score.release_name),
)),
helm_wrapper_rs::HelmDeployStatus::PendingInstall => Ok(Outcome::new(
InterpretStatus::RUNNING,
format!("Helm Chart {} pending install...", self.score.release_name),
)),
helm_wrapper_rs::HelmDeployStatus::PendingUpgrade => Ok(Outcome::new(
InterpretStatus::RUNNING,
format!("Helm Chart {} pending upgrade...", self.score.release_name),
)),
helm_wrapper_rs::HelmDeployStatus::Failed => Err(InterpretError::new(format!(
"Helm Chart {} installation failed",
self.score.release_name

View File

@ -133,9 +133,10 @@ impl<T: Topology> Interpret<T> for DiscoverInventoryAgentInterpret {
},
)
.await;
Ok(Outcome::success(
"Discovery process completed successfully".to_string(),
))
Ok(Outcome {
status: InterpretStatus::SUCCESS,
message: "Discovery process completed successfully".to_string(),
})
}
fn get_name(&self) -> InterpretName {

View File

@ -1,15 +1,11 @@
use async_trait::async_trait;
use harmony_macros::ingress_path;
use harmony_types::id::Id;
use k8s_openapi::api::networking::v1::Ingress;
use log::{debug, trace};
use serde::Serialize;
use serde_json::json;
use crate::{
data::Version,
interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
inventory::Inventory,
interpret::Interpret,
score::Score,
topology::{K8sclient, Topology},
};
@ -44,7 +40,6 @@ pub struct K8sIngressScore {
pub path: Option<IngressPath>,
pub path_type: Option<PathType>,
pub namespace: Option<fqdn::FQDN>,
pub ingress_class_name: Option<String>,
}
impl<T: Topology + K8sclient> Score<T> for K8sIngressScore {
@ -59,18 +54,12 @@ impl<T: Topology + K8sclient> Score<T> for K8sIngressScore {
None => PathType::Prefix,
};
let ingress_class = match self.ingress_class_name.clone() {
Some(ingress_class_name) => ingress_class_name,
None => "\"default\"".to_string(),
};
let ingress = json!(
{
"metadata": {
"name": self.name.to_string(),
},
"spec": {
"ingressClassName": ingress_class.as_str(),
"rules": [
{ "host": self.host.to_string(),
"http": {
@ -101,12 +90,11 @@ impl<T: Topology + K8sclient> Score<T> for K8sIngressScore {
"Successfully built Ingress for host {:?}",
ingress.metadata.name
);
Box::new(K8sIngressInterpret {
ingress,
service: self.name.to_string(),
namespace: self.namespace.clone().map(|f| f.to_string()),
host: self.host.clone(),
Box::new(K8sResourceInterpret {
score: K8sResourceScore::single(
ingress.clone(),
self.namespace.clone().map(|f| f.to_string()),
),
})
}
@ -114,62 +102,3 @@ impl<T: Topology + K8sclient> Score<T> for K8sIngressScore {
format!("{} K8sIngressScore", self.name)
}
}
#[derive(std::fmt::Debug)]
struct K8sIngressInterpret {
ingress: Ingress,
service: String,
namespace: Option<String>,
host: fqdn::FQDN,
}
#[async_trait]
impl<T: Topology + K8sclient> Interpret<T> for K8sIngressInterpret {
async fn execute(
&self,
inventory: &Inventory,
topology: &T,
) -> Result<Outcome, InterpretError> {
let result = K8sResourceInterpret {
score: K8sResourceScore::single(self.ingress.clone(), self.namespace.clone()),
}
.execute(inventory, topology)
.await;
match result {
Ok(outcome) => match outcome.status {
InterpretStatus::SUCCESS => {
let details = match &self.namespace {
Some(namespace) => {
vec![format!(
"{} ({namespace}): http://{}",
self.service, self.host
)]
}
None => vec![format!("{}: {}", self.service, self.host)],
};
Ok(Outcome::success_with_details(outcome.message, details))
}
_ => Ok(outcome),
},
Err(e) => Err(e),
}
}
fn get_name(&self) -> InterpretName {
InterpretName::K8sIngress
}
fn get_version(&self) -> Version {
Version::from("0.0.1").unwrap()
}
fn get_status(&self) -> InterpretStatus {
todo!()
}
fn get_children(&self) -> Vec<Id> {
vec![]
}
}

View File

@ -147,7 +147,6 @@ impl<T: Topology + K8sclient + HelmCommand> Interpret<T> for LAMPInterpret {
port: 8080,
path: Some(ingress_path),
path_type: None,
ingress_class_name: None,
namespace: self
.get_namespace()
.map(|nbs| fqdn!(nbs.to_string().as_str())),

View File

@ -35,24 +35,6 @@ pub struct DiscordWebhook {
#[async_trait]
impl AlertReceiver<RHOBObservability> for DiscordWebhook {
async fn install(&self, sender: &RHOBObservability) -> Result<Outcome, InterpretError> {
let ns = sender.namespace.clone();
let secret_name = format!("{}-secret", self.name.clone());
let webhook_key = format!("{}", self.url.clone());
let mut string_data = BTreeMap::new();
string_data.insert("webhook-url".to_string(), webhook_key.clone());
let secret = Secret {
metadata: kube::core::ObjectMeta {
name: Some(secret_name.clone()),
..Default::default()
},
string_data: Some(string_data),
type_: Some("Opaque".to_string()),
..Default::default()
};
let _ = sender.client.apply(&secret, Some(&ns)).await;
let spec = crate::modules::monitoring::kube_prometheus::crd::rhob_alertmanager_config::AlertmanagerConfigSpec {
data: json!({
"route": {
@ -61,14 +43,9 @@ impl AlertReceiver<RHOBObservability> for DiscordWebhook {
"receivers": [
{
"name": self.name,
"discordConfigs": [
"webhookConfigs": [
{
"apiURL": {
"name": secret_name,
"key": "webhook-url",
},
"title": "{{ template \"discord.default.title\" . }}",
"message": "{{ template \"discord.default.message\" . }}"
"url": self.url,
}
]
}
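The receiver thus drops the Kubernetes `Secret` indirection entirely: instead of `discordConfigs` pointing at a secret key, it emits a plain Alertmanager `webhookConfigs` entry with the URL inline. Roughly, the rendered block looks like this (a sketch with placeholder values):

```rust
use serde_json::json;

// Sketch of the receiver section produced above; the URL is a placeholder.
let receiver = json!({
    "name": "test-discord",
    "webhookConfigs": [
        { "url": "https://discord.example.com/api/webhooks/123/abc" }
    ]
});
```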

View File

@ -43,11 +43,6 @@ impl AlertReceiver<RHOBObservability> for WebhookReceiver {
"webhookConfigs": [
{
"url": self.url,
"httpConfig": {
"tlsConfig": {
"insecureSkipVerify": true
}
}
}
]
}

View File

@ -68,9 +68,7 @@ impl<T: Topology + PrometheusApplicationMonitoring<CRDPrometheus>> Interpret<T>
PreparationOutcome::Success { details: _ } => {
Ok(Outcome::success("Prometheus installed".into()))
}
PreparationOutcome::Noop => {
Ok(Outcome::noop("Prometheus installation skipped".into()))
}
PreparationOutcome::Noop => Ok(Outcome::noop()),
},
Err(err) => Err(InterpretError::from(err)),
}

View File

@ -70,9 +70,7 @@ impl<T: Topology + PrometheusApplicationMonitoring<RHOBObservability>> Interpret
PreparationOutcome::Success { details: _ } => {
Ok(Outcome::success("Prometheus installed".into()))
}
PreparationOutcome::Noop => {
Ok(Outcome::noop("Prometheus installation skipped".into()))
}
PreparationOutcome::Noop => Ok(Outcome::noop()),
},
Err(err) => Err(InterpretError::from(err)),
}

View File

@ -4,9 +4,7 @@ use kube::CustomResource;
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
use crate::modules::monitoring::kube_prometheus::crd::rhob_prometheuses::{
LabelSelector, PrometheusSpec,
};
use crate::modules::monitoring::kube_prometheus::crd::rhob_prometheuses::LabelSelector;
/// MonitoringStack CRD for monitoring.rhobs/v1alpha1
#[derive(CustomResource, Serialize, Deserialize, Debug, Clone, JsonSchema)]

View File

@ -45,12 +45,6 @@ service:
ingress:
enabled: {ingress_enabled}
hosts:
- host: {host}
paths:
- path: /
pathType: ImplementationSpecific
route:
enabled: {route_enabled}

View File

@ -113,13 +113,7 @@ impl<T: Topology + HelmCommand + K8sclient + MultiTargetTopology> Interpret<T> f
.await?;
info!("user added");
Ok(Outcome::success_with_details(
"Ntfy installed".to_string(),
vec![format!(
"Ntfy ({}): http://{}",
self.score.namespace, self.score.host
)],
))
Ok(Outcome::success("Ntfy installed".to_string()))
}
fn get_name(&self) -> InterpretName {

View File

@ -1,19 +1,19 @@
use async_trait::async_trait;
use derive_new::new;
use harmony_types::id::Id;
use log::{error, info, warn};
use serde::Serialize;
use crate::{
data::Version,
hardware::PhysicalHost,
infra::inventory::InventoryRepositoryFactory,
interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
inventory::{HostRole, Inventory},
modules::inventory::DiscoverHostForRoleScore,
modules::inventory::{DiscoverHostForRoleScore, LaunchDiscoverInventoryAgentScore},
score::Score,
topology::HAClusterTopology,
};
use async_trait::async_trait;
use derive_new::new;
use harmony_types::id::Id;
use log::info;
use serde::Serialize;
// -------------------------------------------------------------------------------------------------
// Step 01: Inventory (default PXE + Kickstart in RAM + Rust agent)
// - This score exposes/ensures the default inventory assets and waits for discoveries.
@ -109,9 +109,12 @@ When you can dig them, confirm to continue.
.await?;
}
Ok(Outcome::success(format!(
"Found and assigned bootstrap node: {}",
bootstrap_host.unwrap().summary()
)))
Ok(Outcome::new(
InterpretStatus::SUCCESS,
format!(
"Found and assigned bootstrap node: {}",
bootstrap_host.unwrap().summary()
),
))
}
}
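This same trade of the `success` helper for the explicit constructor recurs in steps 02 through 06 below. The two forms are interchangeable in shape; a sketch, assuming `Outcome::new(status, message)` as used in this diff:

```rust
// Sketch: both build a plain success outcome with no extra details.
let terse = Outcome::success("Found and assigned bootstrap node: cp0".to_string());
let explicit = Outcome::new(
    InterpretStatus::SUCCESS,
    "Found and assigned bootstrap node: cp0".to_string(),
);
```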

View File

@ -1,13 +1,25 @@
use std::{fmt::Write, path::PathBuf};
use async_trait::async_trait;
use derive_new::new;
use harmony_secret::SecretManager;
use harmony_types::id::Id;
use log::{debug, error, info, warn};
use serde::{Deserialize, Serialize};
use tokio::{fs::File, io::AsyncWriteExt, process::Command};
use crate::{
config::secret::{RedhatSecret, SshKeyPair},
data::{FileContent, FilePath, Version},
hardware::PhysicalHost,
infra::inventory::InventoryRepositoryFactory,
instrumentation::{HarmonyEvent, instrument},
interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
inventory::{HostRole, Inventory},
modules::{
dhcp::DhcpHostBindingScore,
http::{IPxeMacBootFileScore, StaticFilesHttpScore},
inventory::LaunchDiscoverInventoryAgentScore,
okd::{
bootstrap_load_balancer::OKDBootstrapLoadBalancerScore,
templates::{BootstrapIpxeTpl, InstallConfigYaml},
@ -16,15 +28,6 @@ use crate::{
score::Score,
topology::{HAClusterTopology, HostBinding},
};
use async_trait::async_trait;
use derive_new::new;
use harmony_secret::SecretManager;
use harmony_types::id::Id;
use log::{debug, info};
use serde::Serialize;
use std::path::PathBuf;
use tokio::{fs::File, io::AsyncWriteExt, process::Command};
// -------------------------------------------------------------------------------------------------
// Step 02: Bootstrap
// - Select bootstrap node (from discovered set).
@ -310,7 +313,7 @@ impl OKDSetup02BootstrapInterpret {
info!("[Bootstrap] Rebooting bootstrap node via SSH");
// TODO: reboot programmatically; there are some logical checks and refactoring to do first, such as
// accessing the bootstrap node config (IP address) from the inventory
let _ = inquire::Confirm::new(
let confirmation = inquire::Confirm::new(
"Now reboot the bootstrap node so it picks up its pxe boot file. Press enter when ready.",
)
.prompt()
@ -376,6 +379,9 @@ impl Interpret<HAClusterTopology> for OKDSetup02BootstrapInterpret {
self.reboot_target().await?;
self.wait_for_bootstrap_complete().await?;
Ok(Outcome::success("Bootstrap phase complete".into()))
Ok(Outcome::new(
InterpretStatus::SUCCESS,
"Bootstrap phase complete".into(),
))
}
}

View File

@ -1,3 +1,11 @@
use std::{fmt::Write, path::PathBuf};
use async_trait::async_trait;
use derive_new::new;
use harmony_types::id::Id;
use log::{debug, info};
use serde::Serialize;
use crate::{
data::Version,
hardware::PhysicalHost,
@ -11,12 +19,6 @@ use crate::{
score::Score,
topology::{HAClusterTopology, HostBinding},
};
use async_trait::async_trait;
use derive_new::new;
use harmony_types::id::Id;
use log::{debug, info};
use serde::Serialize;
// -------------------------------------------------------------------------------------------------
// Step 03: Control Plane
// - Render per-MAC PXE & ignition for cp0/cp1/cp2.
@ -267,7 +269,8 @@ impl Interpret<HAClusterTopology> for OKDSetup03ControlPlaneInterpret {
// the `wait-for bootstrap-complete` command.
info!("[ControlPlane] Provisioning initiated. Monitor the cluster convergence manually.");
Ok(Outcome::success(
Ok(Outcome::new(
InterpretStatus::SUCCESS,
"Control plane provisioning has been successfully initiated.".into(),
))
}

View File

@ -1,17 +1,33 @@
use std::{fmt::Write, path::PathBuf};
use async_trait::async_trait;
use derive_new::new;
use harmony_secret::SecretManager;
use harmony_types::id::Id;
use log::info;
use serde::Serialize;
use log::{debug, error, info, warn};
use serde::{Deserialize, Serialize};
use tokio::{fs::File, io::AsyncWriteExt, process::Command};
use crate::{
data::Version,
config::secret::{RedhatSecret, SshKeyPair},
data::{FileContent, FilePath, Version},
hardware::PhysicalHost,
infra::inventory::InventoryRepositoryFactory,
instrumentation::{HarmonyEvent, instrument},
interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
inventory::Inventory,
inventory::{HostRole, Inventory},
modules::{
dhcp::DhcpHostBindingScore,
http::{IPxeMacBootFileScore, StaticFilesHttpScore},
inventory::LaunchDiscoverInventoryAgentScore,
okd::{
bootstrap_load_balancer::OKDBootstrapLoadBalancerScore,
templates::{BootstrapIpxeTpl, InstallConfigYaml},
},
},
score::Score,
topology::HAClusterTopology,
topology::{HAClusterTopology, HostBinding},
};
// -------------------------------------------------------------------------------------------------
// Step 04: Workers
// - Render per-MAC PXE & ignition for workers; join nodes.
@ -78,6 +94,9 @@ impl Interpret<HAClusterTopology> for OKDSetup04WorkersInterpret {
_topology: &HAClusterTopology,
) -> Result<Outcome, InterpretError> {
self.render_and_reboot().await?;
Ok(Outcome::success("Workers provisioned".into()))
Ok(Outcome::new(
InterpretStatus::SUCCESS,
"Workers provisioned".into(),
))
}
}

View File

@ -1,16 +1,33 @@
use crate::{
data::Version,
interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
inventory::Inventory,
score::Score,
topology::HAClusterTopology,
};
use std::{fmt::Write, path::PathBuf};
use async_trait::async_trait;
use derive_new::new;
use harmony_secret::SecretManager;
use harmony_types::id::Id;
use log::info;
use serde::Serialize;
use log::{debug, error, info, warn};
use serde::{Deserialize, Serialize};
use tokio::{fs::File, io::AsyncWriteExt, process::Command};
use crate::{
config::secret::{RedhatSecret, SshKeyPair},
data::{FileContent, FilePath, Version},
hardware::PhysicalHost,
infra::inventory::InventoryRepositoryFactory,
instrumentation::{HarmonyEvent, instrument},
interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
inventory::{HostRole, Inventory},
modules::{
dhcp::DhcpHostBindingScore,
http::{IPxeMacBootFileScore, StaticFilesHttpScore},
inventory::LaunchDiscoverInventoryAgentScore,
okd::{
bootstrap_load_balancer::OKDBootstrapLoadBalancerScore,
templates::{BootstrapIpxeTpl, InstallConfigYaml},
},
},
score::Score,
topology::{HAClusterTopology, HostBinding},
};
// -------------------------------------------------------------------------------------------------
// Step 05: Sanity Check
// - Validate API reachability, ClusterOperators, ingress, and SDN status.
@ -76,6 +93,9 @@ impl Interpret<HAClusterTopology> for OKDSetup05SanityCheckInterpret {
_topology: &HAClusterTopology,
) -> Result<Outcome, InterpretError> {
self.run_checks().await?;
Ok(Outcome::success("Sanity checks passed".into()))
Ok(Outcome::new(
InterpretStatus::SUCCESS,
"Sanity checks passed".into(),
))
}
}

View File

@ -1,15 +1,32 @@
// -------------------------------------------------------------------------------------------------
use async_trait::async_trait;
use derive_new::new;
use harmony_secret::SecretManager;
use harmony_types::id::Id;
use log::info;
use serde::Serialize;
use log::{debug, error, info, warn};
use serde::{Deserialize, Serialize};
use std::{fmt::Write, path::PathBuf};
use tokio::{fs::File, io::AsyncWriteExt, process::Command};
use crate::{
data::Version,
config::secret::{RedhatSecret, SshKeyPair},
data::{FileContent, FilePath, Version},
hardware::PhysicalHost,
infra::inventory::InventoryRepositoryFactory,
instrumentation::{HarmonyEvent, instrument},
interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
inventory::Inventory,
inventory::{HostRole, Inventory},
modules::{
dhcp::DhcpHostBindingScore,
http::{IPxeMacBootFileScore, StaticFilesHttpScore},
inventory::LaunchDiscoverInventoryAgentScore,
okd::{
bootstrap_load_balancer::OKDBootstrapLoadBalancerScore,
templates::{BootstrapIpxeTpl, InstallConfigYaml},
},
},
score::Score,
topology::HAClusterTopology,
topology::{HAClusterTopology, HostBinding},
};
// Step 06: Installation Report
@ -76,6 +93,9 @@ impl Interpret<HAClusterTopology> for OKDSetup06InstallationReportInterpret {
_topology: &HAClusterTopology,
) -> Result<Outcome, InterpretError> {
self.generate().await?;
Ok(Outcome::success("Installation report generated".into()))
Ok(Outcome::new(
InterpretStatus::SUCCESS,
"Installation report generated".into(),
))
}
}

View File

@ -77,6 +77,8 @@ impl OKDBootstrapLoadBalancerScore {
address: topology.bootstrap_host.ip.to_string(),
port,
});
backend.dedup();
backend
}
}
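One subtlety worth flagging: `Vec::dedup` only collapses *adjacent* equal elements, so this assumes duplicate backend entries end up next to each other (which holds when they are pushed in a fixed host order). A quick illustration:

```rust
// Vec::dedup removes consecutive duplicates only.
let mut backends = vec!["10.0.0.1:6443", "10.0.0.1:6443", "10.0.0.2:6443"];
backends.dedup();
assert_eq!(backends, vec!["10.0.0.1:6443", "10.0.0.2:6443"]);

// Non-adjacent duplicates survive:
let mut v = vec![1, 2, 1];
v.dedup();
assert_eq!(v, vec![1, 2, 1]);
```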

View File

@ -21,8 +21,8 @@ pub fn pod_failed() -> PrometheusAlertRule {
pub fn alert_container_restarting() -> PrometheusAlertRule {
PrometheusAlertRule {
alert: "ContainerRestarting".into(),
expr: "increase(kube_pod_container_status_restarts_total[30s]) > 3".into(),
r#for: Some("30s".into()),
expr: "increase(kube_pod_container_status_restarts_total[5m]) > 3".into(),
r#for: Some("5m".into()),
labels: HashMap::from([("severity".into(), "warning".into())]),
annotations: HashMap::from([
(
@ -42,7 +42,7 @@ pub fn alert_pod_not_ready() -> PrometheusAlertRule {
PrometheusAlertRule {
alert: "PodNotReady".into(),
expr: "kube_pod_status_ready{condition=\"true\"} == 0".into(),
r#for: Some("30s".into()),
r#for: Some("2m".into()),
labels: HashMap::from([("severity".into(), "warning".into())]),
annotations: HashMap::from([
("summary".into(), "Pod is not ready".into()),

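The widened windows make both rules far less noisy: `increase(kube_pod_container_status_restarts_total[5m]) > 3` requires more than three restarts within five minutes, and the `for` clause demands the condition hold continuously before the alert fires. As a sketch (struct fields as shown above, annotations elided):

```rust
use std::collections::HashMap;

// Sketch: the restart alert as now configured above.
let rule = PrometheusAlertRule {
    alert: "ContainerRestarting".into(),
    expr: "increase(kube_pod_container_status_restarts_total[5m]) > 3".into(),
    r#for: Some("5m".into()), // must hold for a full 5 minutes before firing
    labels: HashMap::from([("severity".into(), "warning".into())]),
    annotations: HashMap::new(), // summary/description elided in this sketch
};
```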
View File

@ -1,4 +1,3 @@
use fqdn::fqdn;
use std::fs;
use std::{collections::BTreeMap, sync::Arc};
use tempfile::tempdir;
@ -9,7 +8,6 @@ use log::{debug, info};
use serde::Serialize;
use std::process::Command;
use crate::modules::k8s::ingress::{K8sIngressScore, PathType};
use crate::modules::monitoring::kube_prometheus::crd::grafana_default_dashboard::build_default_dashboard;
use crate::modules::monitoring::kube_prometheus::crd::rhob_alertmanager_config::RHOBObservability;
use crate::modules::monitoring::kube_prometheus::crd::rhob_alertmanagers::{
@ -25,18 +23,12 @@ use crate::modules::monitoring::kube_prometheus::crd::rhob_monitoring_stack::{
use crate::modules::monitoring::kube_prometheus::crd::rhob_prometheus_rules::{
PrometheusRule, PrometheusRuleSpec, RuleGroup,
};
use crate::modules::monitoring::kube_prometheus::crd::rhob_prometheuses::{
AlertmanagerEndpoints, LabelSelector, PrometheusSpec, PrometheusSpecAlerting,
};
use crate::modules::monitoring::kube_prometheus::crd::rhob_prometheuses::LabelSelector;
use crate::modules::monitoring::kube_prometheus::crd::rhob_role::{
build_prom_role, build_prom_rolebinding, build_prom_service_account,
};
use crate::modules::monitoring::kube_prometheus::crd::rhob_service_monitor::{
ServiceMonitor, ServiceMonitorSpec,
};
use crate::score::Score;
use crate::topology::ingress::Ingress;
use crate::topology::oberservability::monitoring::AlertReceiver;
use crate::topology::{K8sclient, Topology, k8s::K8sClient};
use crate::{
@ -56,8 +48,8 @@ pub struct RHOBAlertingScore {
pub prometheus_rules: Vec<RuleGroup>,
}
impl<T: Topology + K8sclient + Ingress + PrometheusApplicationMonitoring<RHOBObservability>>
Score<T> for RHOBAlertingScore
impl<T: Topology + K8sclient + PrometheusApplicationMonitoring<RHOBObservability>> Score<T>
for RHOBAlertingScore
{
fn create_interpret(&self) -> Box<dyn crate::interpret::Interpret<T>> {
Box::new(RHOBAlertingInterpret {
@ -82,20 +74,19 @@ pub struct RHOBAlertingInterpret {
}
#[async_trait]
impl<T: Topology + K8sclient + Ingress + PrometheusApplicationMonitoring<RHOBObservability>>
Interpret<T> for RHOBAlertingInterpret
impl<T: Topology + K8sclient + PrometheusApplicationMonitoring<RHOBObservability>> Interpret<T>
for RHOBAlertingInterpret
{
async fn execute(
&self,
inventory: &Inventory,
_inventory: &Inventory,
topology: &T,
) -> Result<Outcome, InterpretError> {
let client = topology.k8s_client().await.unwrap();
self.ensure_grafana_operator().await?;
self.install_prometheus(inventory, topology, &client)
.await?;
self.install_prometheus(&client).await?;
self.install_client_kube_metrics().await?;
self.install_grafana(inventory, topology, &client).await?;
self.install_grafana(&client).await?;
self.install_receivers(&self.sender, &self.receivers)
.await?;
self.install_rules(&self.prometheus_rules, &client).await?;
@ -221,8 +212,7 @@ impl RHOBAlertingInterpret {
let output = Command::new("helm")
.args([
"upgrade",
"--install",
"install",
"grafana-operator",
"grafana-operator/grafana-operator",
"--namespace",
@ -236,7 +226,7 @@ impl RHOBAlertingInterpret {
if !output.status.success() {
return Err(InterpretError::new(format!(
"helm upgrade --install failed:\nstdout: {}\nstderr: {}",
"helm install failed:\nstdout: {}\nstderr: {}",
String::from_utf8_lossy(&output.stdout),
String::from_utf8_lossy(&output.stderr)
)));
@ -248,31 +238,25 @@ impl RHOBAlertingInterpret {
)))
}
async fn install_prometheus<T: Topology + K8sclient + Ingress>(
&self,
inventory: &Inventory,
topology: &T,
client: &Arc<K8sClient>,
) -> Result<Outcome, InterpretError> {
async fn install_prometheus(&self, client: &Arc<K8sClient>) -> Result<Outcome, InterpretError> {
debug!(
"installing crd-prometheuses in namespace {}",
self.sender.namespace.clone()
);
debug!("building role/rolebinding/serviceaccount for crd-prometheus");
let stack = MonitoringStack {
metadata: ObjectMeta {
name: Some(format!("{}-monitoring", self.sender.namespace.clone()).into()),
namespace: Some(self.sender.namespace.clone()),
labels: Some([("monitoring-stack".into(), "true".into())].into()),
labels: Some([("coo".into(), "example".into())].into()),
..Default::default()
},
spec: MonitoringStackSpec {
log_level: Some("debug".into()),
retention: Some("1d".into()),
resource_selector: Some(LabelSelector {
match_labels: Default::default(),
match_expressions: vec![],
match_labels: [("app".into(), "demo".into())].into(),
..Default::default()
}),
},
};
@ -281,42 +265,6 @@ impl RHOBAlertingInterpret {
.apply(&stack, Some(&self.sender.namespace.clone()))
.await
.map_err(|e| InterpretError::new(e.to_string()))?;
let alert_manager_domain = topology
.get_domain(&format!("alert-manager-{}", self.sender.namespace.clone()))
.await?;
let name = format!("{}-alert-manager", self.sender.namespace.clone());
let backend_service = format!("alertmanager-operated");
let namespace = self.sender.namespace.clone();
let alert_manager_ingress = K8sIngressScore {
name: fqdn!(&name),
host: fqdn!(&alert_manager_domain),
backend_service: fqdn!(&backend_service),
port: 9093,
path: Some("/".to_string()),
path_type: Some(PathType::Prefix),
namespace: Some(fqdn!(&namespace)),
ingress_class_name: Some("openshift-default".to_string()),
};
let prometheus_domain = topology
.get_domain(&format!("prometheus-{}", self.sender.namespace.clone()))
.await?;
let name = format!("{}-prometheus", self.sender.namespace.clone());
let backend_service = format!("prometheus-operated");
let prometheus_ingress = K8sIngressScore {
name: fqdn!(&name),
host: fqdn!(&prometheus_domain),
backend_service: fqdn!(&backend_service),
port: 9090,
path: Some("/".to_string()),
path_type: Some(PathType::Prefix),
namespace: Some(fqdn!(&namespace)),
ingress_class_name: Some("openshift-default".to_string()),
};
alert_manager_ingress.interpret(inventory, topology).await?;
prometheus_ingress.interpret(inventory, topology).await?;
info!("installed rhob monitoring stack",);
Ok(Outcome::success(format!(
"successfully deployed rhob-prometheus {:#?}",
@ -324,6 +272,31 @@ impl RHOBAlertingInterpret {
)))
}
async fn install_alert_manager(
&self,
client: &Arc<K8sClient>,
) -> Result<Outcome, InterpretError> {
let am = Alertmanager {
metadata: ObjectMeta {
name: Some(self.sender.namespace.clone()),
labels: Some(std::collections::BTreeMap::from([(
"alertmanagerConfig".to_string(),
"enabled".to_string(),
)])),
namespace: Some(self.sender.namespace.clone()),
..Default::default()
},
spec: AlertmanagerSpec::default(),
};
client
.apply(&am, Some(&self.sender.namespace.clone()))
.await
.map_err(|e| InterpretError::new(e.to_string()))?;
Ok(Outcome::success(format!(
"successfully deployed service monitor {:#?}",
am.metadata.name
)))
}
async fn install_monitors(
&self,
mut monitors: Vec<ServiceMonitor>,
@ -406,12 +379,7 @@ impl RHOBAlertingInterpret {
)))
}
async fn install_grafana<T: Topology + K8sclient + Ingress>(
&self,
inventory: &Inventory,
topology: &T,
client: &Arc<K8sClient>,
) -> Result<Outcome, InterpretError> {
async fn install_grafana(&self, client: &Arc<K8sClient>) -> Result<Outcome, InterpretError> {
let mut label = BTreeMap::new();
label.insert("dashboards".to_string(), "grafana".to_string());
let labels = LabelSelector {
@ -497,23 +465,6 @@ impl RHOBAlertingInterpret {
.apply(&grafana, Some(&self.sender.namespace.clone()))
.await
.map_err(|e| InterpretError::new(e.to_string()))?;
let domain = topology
.get_domain(&format!("grafana-{}", self.sender.namespace.clone()))
.await?;
let name = format!("{}-grafana", self.sender.namespace.clone());
let backend_service = format!("grafana-{}-service", self.sender.namespace.clone());
let grafana_ingress = K8sIngressScore {
name: fqdn!(&name),
host: fqdn!(&domain),
backend_service: fqdn!(&backend_service),
port: 3000,
path: Some("/".to_string()),
path_type: Some(PathType::Prefix),
namespace: Some(fqdn!(&namespace)),
ingress_class_name: Some("openshift-default".to_string()),
};
grafana_ingress.interpret(inventory, topology).await?;
Ok(Outcome::success(format!(
"successfully deployed grafana instance {:#?}",
grafana.metadata.name

View File

@ -178,10 +178,10 @@ fn handle_events() {
ApplicationFeatureStatus::Installing => {
info!("Installing feature '{feature}' for '{application}'...");
}
ApplicationFeatureStatus::Installed { details: _ } => {
ApplicationFeatureStatus::Installed => {
info!(status = "finished"; "Feature '{feature}' installed");
}
ApplicationFeatureStatus::Failed { message: details } => {
ApplicationFeatureStatus::Failed { details } => {
error!(status = "failed"; "Feature '{feature}' installation failed: {details}");
}
},

View File

@ -1,56 +0,0 @@
use std::sync::Mutex;
use harmony::{
instrumentation::{self, HarmonyEvent},
modules::application::ApplicationFeatureStatus,
};
use crate::theme;
pub fn init() {
let details: Mutex<Vec<String>> = Mutex::new(vec![]);
instrumentation::subscribe("Harmony CLI Reporter", {
move |event| {
let mut details = details.lock().unwrap();
match event {
HarmonyEvent::InterpretExecutionFinished {
execution_id: _,
topology: _,
interpret: _,
score: _,
outcome: Ok(outcome),
} => {
if outcome.status == harmony::interpret::InterpretStatus::SUCCESS {
details.extend(outcome.details.clone());
}
}
HarmonyEvent::ApplicationFeatureStateChanged {
topology: _,
application: _,
feature: _,
status:
ApplicationFeatureStatus::Installed {
details: feature_details,
},
} => {
details.extend(feature_details.clone());
}
HarmonyEvent::HarmonyFinished => {
if !details.is_empty() {
println!(
"\n{} All done! Here's what's next for you:",
theme::EMOJI_SUMMARY
);
for detail in details.iter() {
println!("- {detail}");
}
println!();
}
}
_ => {}
};
}
});
}

View File

@ -8,7 +8,6 @@ use inquire::Confirm;
use log::debug;
pub mod cli_logger; // FIXME: Don't make me pub
mod cli_reporter;
pub mod progress;
pub mod theme;
@ -117,7 +116,6 @@ pub async fn run_cli<T: Topology + Send + Sync + 'static>(
args: Args,
) -> Result<(), Box<dyn std::error::Error>> {
cli_logger::init();
cli_reporter::init();
let mut maestro = Maestro::initialize(inventory, topology).await.unwrap();
maestro.register_all(scores);

View File

@ -9,7 +9,6 @@ pub static EMOJI_ERROR: Emoji<'_, '_> = Emoji("⚠️", "");
pub static EMOJI_DEPLOY: Emoji<'_, '_> = Emoji("🚀", "");
pub static EMOJI_TOPOLOGY: Emoji<'_, '_> = Emoji("📦", "");
pub static EMOJI_SCORE: Emoji<'_, '_> = Emoji("🎶", "");
pub static EMOJI_SUMMARY: Emoji<'_, '_> = Emoji("🚀", "");
lazy_static! {
pub static ref SECTION_STYLE: ProgressStyle = ProgressStyle::default_spinner()

View File

@ -21,6 +21,7 @@ pub fn handle_events() {
instrumentation::subscribe("Harmony Composer Logger", {
move |event| match event {
HarmonyComposerEvent::HarmonyComposerStarted => {}
HarmonyComposerEvent::ProjectInitializationStarted => {
progress_tracker.add_section(
SETUP_SECTION,

View File

@ -5,6 +5,7 @@ use crate::{HarmonyProfile, HarmonyTarget};
#[derive(Debug, Clone)]
pub enum HarmonyComposerEvent {
HarmonyComposerStarted,
ProjectInitializationStarted,
ProjectInitialized,
ProjectCompilationStarted {

View File

@ -2,8 +2,8 @@ mod downloadable_asset;
use downloadable_asset::*;
use kube::Client;
use log::{debug, info};
use std::{ffi::OsStr, path::PathBuf};
use log::debug;
use std::path::PathBuf;
const K3D_BIN_FILE_NAME: &str = "k3d";
@ -213,19 +213,15 @@ impl K3d {
}
}
let client;
if !self.is_cluster_initialized() {
debug!("Cluster is not initialized, initializing now");
client = self.initialize_cluster().await?;
} else {
self.start_cluster().await?;
debug!("K3d and cluster are already properly set up");
client = self.create_kubernetes_client().await?;
return self.initialize_cluster().await;
}
self.ensure_k3d_config_is_default(self.get_cluster_name()?)?;
Ok(client)
self.start_cluster().await?;
debug!("K3d and cluster are already properly set up");
self.create_kubernetes_client().await
}
// Private helper methods
@ -306,16 +302,7 @@ impl K3d {
S: AsRef<std::ffi::OsStr>,
{
let binary_path = self.get_k3d_binary()?;
self.run_command(binary_path, args)
}
pub fn run_command<I, S, C>(&self, cmd: C, args: I) -> Result<std::process::Output, String>
where
I: IntoIterator<Item = S>,
S: AsRef<std::ffi::OsStr>,
C: AsRef<OsStr>,
{
let output = std::process::Command::new(cmd).args(args).output();
let output = std::process::Command::new(binary_path).args(args).output();
match output {
Ok(output) => {
let stderr = String::from_utf8_lossy(&output.stderr);
@ -324,7 +311,7 @@ impl K3d {
debug!("stdout : {}", stdout);
Ok(output)
}
Err(e) => Err(format!("Failed to execute command: {}", e)),
Err(e) => Err(format!("Failed to execute k3d command: {}", e)),
}
}
@ -336,38 +323,12 @@ impl K3d {
return Err(format!("Failed to create cluster: {}", stderr));
}
info!("Successfully created k3d cluster '{}'", cluster_name);
Ok(())
}
fn ensure_k3d_config_is_default(&self, cluster_name: &str) -> Result<(), String> {
let output = self.run_k3d_command(["kubeconfig", "merge", "-d", cluster_name])?;
if !output.status.success() {
let stderr = String::from_utf8_lossy(&output.stderr);
return Err(format!("Failed to setup k3d kubeconfig : {}", stderr));
}
let output = self.run_command(
"kubectl",
["config", "use-context", &format!("k3d-{cluster_name}")],
)?;
if !output.status.success() {
let stderr = String::from_utf8_lossy(&output.stderr);
return Err(format!(
"Failed to switch kubectl context to k3d : {}",
stderr
));
}
info!(
"kubectl is now using 'k3d-{}' as default context",
cluster_name
);
debug!("Successfully created k3d cluster '{}'", cluster_name);
Ok(())
}
async fn create_kubernetes_client(&self) -> Result<Client, String> {
// TODO: Connect the client to the right k3d cluster (see https://git.nationtech.io/NationTech/harmony/issues/92)
Client::try_default()
.await
.map_err(|e| format!("Failed to create Kubernetes client: {}", e))

View File

@ -77,7 +77,7 @@ impl YaSerializeTrait for HAProxyId {
}
}
#[derive(PartialEq, Debug)]
#[derive(PartialEq, Debug, Clone)]
pub struct HAProxyId(String);
impl Default for HAProxyId {
@ -297,7 +297,7 @@ pub struct HAProxyFrontends {
pub frontend: Vec<Frontend>,
}
#[derive(Default, PartialEq, Debug, YaSerialize, YaDeserialize)]
#[derive(Clone, Default, PartialEq, Debug, YaSerialize, YaDeserialize)]
pub struct Frontend {
#[yaserde(attribute = true)]
pub uuid: String,
@ -310,7 +310,7 @@ pub struct Frontend {
pub bind_options: MaybeString,
pub mode: String,
#[yaserde(rename = "defaultBackend")]
pub default_backend: String,
pub default_backend: Option<String>,
pub ssl_enabled: i32,
pub ssl_certificates: MaybeString,
pub ssl_default_certificate: MaybeString,
@ -416,7 +416,7 @@ pub struct HAProxyBackends {
pub backends: Vec<HAProxyBackend>,
}
#[derive(Default, PartialEq, Debug, YaSerialize, YaDeserialize)]
#[derive(Clone, Default, PartialEq, Debug, YaSerialize, YaDeserialize)]
pub struct HAProxyBackend {
#[yaserde(attribute = true, rename = "uuid")]
pub uuid: String,
@ -535,7 +535,7 @@ pub struct HAProxyServers {
pub servers: Vec<HAProxyServer>,
}
#[derive(Default, PartialEq, Debug, YaSerialize, YaDeserialize)]
#[derive(Clone, Default, PartialEq, Debug, YaSerialize, YaDeserialize)]
pub struct HAProxyServer {
#[yaserde(attribute = true, rename = "uuid")]
pub uuid: String,
@ -543,8 +543,8 @@ pub struct HAProxyServer {
pub enabled: u8,
pub name: String,
pub description: MaybeString,
pub address: String,
pub port: u16,
pub address: Option<String>,
pub port: Option<u16>,
pub checkport: MaybeString,
pub mode: String,
pub multiplexer_protocol: MaybeString,
@ -589,7 +589,7 @@ pub struct HAProxyHealthChecks {
pub healthchecks: Vec<HAProxyHealthCheck>,
}
#[derive(Default, PartialEq, Debug, YaSerialize, YaDeserialize)]
#[derive(Clone, Default, PartialEq, Debug, YaSerialize, YaDeserialize)]
pub struct HAProxyHealthCheck {
#[yaserde(attribute = true)]
pub uuid: String,

View File

@ -25,6 +25,7 @@ sha2 = "0.10.9"
[dev-dependencies]
pretty_assertions.workspace = true
assertor.workspace = true
[lints.rust]
unexpected_cfgs = { level = "warn", check-cfg = ['cfg(e2e_test)'] }

View File

@ -30,8 +30,7 @@ impl SshConfigManager {
self.opnsense_shell
.exec(&format!(
"cp /conf/config.xml /conf/backup/{}",
backup_filename
"cp /conf/config.xml /conf/backup/{backup_filename}"
))
.await
}

View File

@ -1,9 +1,7 @@
mod ssh;
pub use ssh::*;
use async_trait::async_trait;
use crate::Error;
use async_trait::async_trait;
pub use ssh::*;
#[async_trait]
pub trait OPNsenseShell: std::fmt::Debug + Send + Sync {

View File

@ -1,11 +1,8 @@
use std::sync::Arc;
use log::warn;
use crate::{config::OPNsenseShell, Error};
use opnsense_config_xml::{
Frontend, HAProxy, HAProxyBackend, HAProxyHealthCheck, HAProxyServer, OPNsense,
};
use crate::{config::OPNsenseShell, Error};
use std::{collections::HashSet, sync::Arc};
pub struct LoadBalancerConfig<'a> {
opnsense: &'a mut OPNsense,
@ -31,7 +28,7 @@ impl<'a> LoadBalancerConfig<'a> {
match &mut self.opnsense.opnsense.haproxy.as_mut() {
Some(haproxy) => f(haproxy),
None => unimplemented!(
"Adding a backend is not supported when haproxy config does not exist yet"
"Cannot configure load balancer when haproxy config does not exist yet"
),
}
}
@ -40,21 +37,67 @@ impl<'a> LoadBalancerConfig<'a> {
self.with_haproxy(|haproxy| haproxy.general.enabled = enabled as i32);
}
pub fn add_backend(&mut self, backend: HAProxyBackend) {
warn!("TODO make sure this new backend does not refer non-existing entities like servers or health checks");
self.with_haproxy(|haproxy| haproxy.backends.backends.push(backend));
/// Configures a service by removing any existing service bound to the same
/// address and then adding the new definition. This makes the operation idempotent.
pub fn configure_service(
&mut self,
frontend: Frontend,
backend: HAProxyBackend,
servers: Vec<HAProxyServer>,
healthcheck: Option<HAProxyHealthCheck>,
) {
self.remove_service_by_bind_address(&frontend.bind);
self.remove_servers(&servers);
self.add_new_service(frontend, backend, servers, healthcheck);
}
pub fn add_frontend(&mut self, frontend: Frontend) {
self.with_haproxy(|haproxy| haproxy.frontends.frontend.push(frontend));
// Remove any existing real servers with matching names so re-adding them does not create duplicates.
fn remove_servers(&mut self, servers: &[HAProxyServer]) {
let server_names: HashSet<_> = servers.iter().map(|s| s.name.clone()).collect();
self.with_haproxy(|haproxy| {
haproxy
.servers
.servers
.retain(|s| !server_names.contains(&s.name));
});
}
pub fn add_healthcheck(&mut self, healthcheck: HAProxyHealthCheck) {
self.with_haproxy(|haproxy| haproxy.healthchecks.healthchecks.push(healthcheck));
/// Removes a service and its dependent components based on the frontend's bind address.
/// This performs a cascading delete of the frontend, backend, servers, and health check.
fn remove_service_by_bind_address(&mut self, bind_address: &str) {
self.with_haproxy(|haproxy| {
let Some(old_frontend) = remove_frontend_by_bind_address(haproxy, bind_address) else {
return;
};
let Some(old_backend) = remove_backend(haproxy, old_frontend) else {
return;
};
remove_healthcheck(haproxy, &old_backend);
remove_linked_servers(haproxy, &old_backend);
});
}
pub fn add_servers(&mut self, mut servers: Vec<HAProxyServer>) {
self.with_haproxy(|haproxy| haproxy.servers.servers.append(&mut servers));
/// Adds the components of a new service to the HAProxy configuration.
/// This function de-duplicates servers by name to prevent configuration errors.
fn add_new_service(
&mut self,
frontend: Frontend,
backend: HAProxyBackend,
servers: Vec<HAProxyServer>,
healthcheck: Option<HAProxyHealthCheck>,
) {
self.with_haproxy(|haproxy| {
if let Some(check) = healthcheck {
haproxy.healthchecks.healthchecks.push(check);
}
haproxy.servers.servers.extend(servers);
haproxy.backends.backends.push(backend);
haproxy.frontends.frontend.push(frontend);
});
}
pub async fn reload_restart(&self) -> Result<(), Error> {
@ -82,3 +125,262 @@ impl<'a> LoadBalancerConfig<'a> {
Ok(())
}
}
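The net contract: configuring a service with an already-used bind address replaces the old service wholesale, while different bind addresses coexist. A condensed usage sketch, leaning on the test helpers defined below:

```rust
// Sketch; the given_* helpers are defined in the tests below and ensure the
// haproxy section exists before the load balancer touches it.
let mut opnsense = given_opnsense();
let mut lb = given_load_balancer(&mut opnsense);

let (check, servers, backend, frontend) = given_service("192.168.1.1:80", "1.1.1.1:80");
lb.configure_service(frontend.clone(), backend.clone(), servers.clone(), Some(check.clone()));

// Same bind address: the service is replaced, not duplicated.
lb.configure_service(frontend, backend, servers, Some(check));
```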
fn remove_frontend_by_bind_address(haproxy: &mut HAProxy, bind_address: &str) -> Option<Frontend> {
let pos = haproxy
.frontends
.frontend
.iter()
.position(|f| f.bind == bind_address);
pos.map(|pos| haproxy.frontends.frontend.remove(pos))
}
fn remove_backend(haproxy: &mut HAProxy, old_frontend: Frontend) -> Option<HAProxyBackend> {
let default_backend = old_frontend.default_backend?;
let pos = haproxy
.backends
.backends
.iter()
.position(|b| b.uuid == default_backend);
// A miss here would mean an orphaned frontend, which shouldn't happen.
pos.map(|pos| haproxy.backends.backends.remove(pos))
}
fn remove_healthcheck(haproxy: &mut HAProxy, backend: &HAProxyBackend) {
if let Some(uuid) = &backend.health_check.content {
haproxy
.healthchecks
.healthchecks
.retain(|h| h.uuid != *uuid);
}
}
/// Remove the backend's servers. This assumes servers are not shared between services.
fn remove_linked_servers(haproxy: &mut HAProxy, backend: &HAProxyBackend) {
if let Some(server_uuids_str) = &backend.linked_servers.content {
let server_uuids_to_remove: HashSet<_> = server_uuids_str.split(',').collect();
haproxy
.servers
.servers
.retain(|s| !server_uuids_to_remove.contains(s.uuid.as_str()));
}
}
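`linked_servers` holds a single comma-separated UUID string in the OPNsense XML, which is why the removal splits on `,` before filtering. A quick sketch of that shape:

```rust
use std::collections::HashSet;

// linked_servers content as stored in config.xml: "uuid-a,uuid-b"
let linked = "server-uuid-1,server-uuid-2";
let to_remove: HashSet<&str> = linked.split(',').collect();
assert!(to_remove.contains("server-uuid-1"));
assert!(!to_remove.contains("server-uuid-3"));
```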
#[cfg(test)]
mod tests {
use crate::config::DummyOPNSenseShell;
use assertor::*;
use opnsense_config_xml::{
Frontend, HAProxy, HAProxyBackend, HAProxyBackends, HAProxyFrontends, HAProxyHealthCheck,
HAProxyHealthChecks, HAProxyId, HAProxyServer, HAProxyServers, MaybeString, OPNsense,
};
use std::sync::Arc;
use super::LoadBalancerConfig;
static SERVICE_BIND_ADDRESS: &str = "192.168.1.1:80";
static OTHER_SERVICE_BIND_ADDRESS: &str = "192.168.1.1:443";
static SERVER_ADDRESS: &str = "1.1.1.1:80";
static OTHER_SERVER_ADDRESS: &str = "1.1.1.1:443";
#[test]
fn configure_service_should_add_all_service_components_to_haproxy() {
let mut opnsense = given_opnsense();
let mut load_balancer = given_load_balancer(&mut opnsense);
let (healthcheck, servers, backend, frontend) =
given_service(SERVICE_BIND_ADDRESS, SERVER_ADDRESS);
load_balancer.configure_service(
frontend.clone(),
backend.clone(),
servers.clone(),
Some(healthcheck.clone()),
);
assert_haproxy_configured_with(
opnsense,
vec![frontend],
vec![backend],
servers,
vec![healthcheck],
);
}
#[test]
fn configure_service_should_replace_service_on_same_bind_address() {
let (healthcheck, servers, backend, frontend) =
given_service(SERVICE_BIND_ADDRESS, SERVER_ADDRESS);
let mut opnsense = given_opnsense_with(given_haproxy(
vec![frontend.clone()],
vec![backend.clone()],
servers.clone(),
vec![healthcheck.clone()],
));
let mut load_balancer = given_load_balancer(&mut opnsense);
let (updated_healthcheck, updated_servers, updated_backend, updated_frontend) =
given_service(SERVICE_BIND_ADDRESS, OTHER_SERVER_ADDRESS);
load_balancer.configure_service(
updated_frontend.clone(),
updated_backend.clone(),
updated_servers.clone(),
Some(updated_healthcheck.clone()),
);
assert_haproxy_configured_with(
opnsense,
vec![updated_frontend],
vec![updated_backend],
updated_servers,
vec![updated_healthcheck],
);
}
#[test]
fn configure_service_should_keep_existing_service_on_different_bind_addresses() {
let (healthcheck, servers, backend, frontend) =
given_service(SERVICE_BIND_ADDRESS, SERVER_ADDRESS);
let (other_healthcheck, other_servers, other_backend, other_frontend) =
given_service(OTHER_SERVICE_BIND_ADDRESS, OTHER_SERVER_ADDRESS);
let mut opnsense = given_opnsense_with(given_haproxy(
vec![frontend.clone()],
vec![backend.clone()],
servers.clone(),
vec![healthcheck.clone()],
));
let mut load_balancer = given_load_balancer(&mut opnsense);
load_balancer.configure_service(
other_frontend.clone(),
other_backend.clone(),
other_servers.clone(),
Some(other_healthcheck.clone()),
);
assert_haproxy_configured_with(
opnsense,
vec![frontend, other_frontend],
vec![backend, other_backend],
[servers, other_servers].concat(),
vec![healthcheck, other_healthcheck],
);
}
fn assert_haproxy_configured_with(
opnsense: OPNsense,
frontends: Vec<Frontend>,
backends: Vec<HAProxyBackend>,
servers: Vec<HAProxyServer>,
healthchecks: Vec<HAProxyHealthCheck>,
) {
let haproxy = opnsense.opnsense.haproxy.as_ref().unwrap();
assert_that!(haproxy.frontends.frontend).contains_exactly(frontends);
assert_that!(haproxy.backends.backends).contains_exactly(backends);
assert_that!(haproxy.servers.servers).is_equal_to(servers);
assert_that!(haproxy.healthchecks.healthchecks).contains_exactly(healthchecks);
}
fn given_opnsense() -> OPNsense {
OPNsense::default()
}
fn given_opnsense_with(haproxy: HAProxy) -> OPNsense {
let mut opnsense = OPNsense::default();
opnsense.opnsense.haproxy = Some(haproxy);
opnsense
}
fn given_load_balancer<'a>(opnsense: &'a mut OPNsense) -> LoadBalancerConfig<'a> {
let opnsense_shell = Arc::new(DummyOPNSenseShell {});
if opnsense.opnsense.haproxy.is_none() {
opnsense.opnsense.haproxy = Some(HAProxy::default());
}
LoadBalancerConfig::new(opnsense, opnsense_shell)
}
fn given_service(
bind_address: &str,
server_address: &str,
) -> (
HAProxyHealthCheck,
Vec<HAProxyServer>,
HAProxyBackend,
Frontend,
) {
let healthcheck = given_healthcheck();
let servers = vec![given_server(server_address)];
let backend = given_backend();
let frontend = given_frontend(bind_address);
(healthcheck, servers, backend, frontend)
}
fn given_haproxy(
frontends: Vec<Frontend>,
backends: Vec<HAProxyBackend>,
servers: Vec<HAProxyServer>,
healthchecks: Vec<HAProxyHealthCheck>,
) -> HAProxy {
HAProxy {
frontends: HAProxyFrontends {
frontend: frontends,
},
backends: HAProxyBackends { backends },
servers: HAProxyServers { servers },
healthchecks: HAProxyHealthChecks { healthchecks },
..Default::default()
}
}
fn given_frontend(bind_address: &str) -> Frontend {
Frontend {
uuid: "uuid".into(),
id: HAProxyId::default(),
enabled: 1,
name: format!("frontend_{bind_address}"),
bind: bind_address.into(),
default_backend: Some("backend-uuid".into()),
..Default::default()
}
}
fn given_backend() -> HAProxyBackend {
HAProxyBackend {
uuid: "backend-uuid".into(),
id: HAProxyId::default(),
enabled: 1,
name: "backend_192.168.1.1:80".into(),
linked_servers: MaybeString::from("server-uuid"),
health_check_enabled: 1,
health_check: MaybeString::from("healthcheck-uuid"),
..Default::default()
}
}
fn given_server(address: &str) -> HAProxyServer {
HAProxyServer {
uuid: "server-uuid".into(),
id: HAProxyId::default(),
name: address.into(),
address: Some(address.into()),
..Default::default()
}
}
fn given_healthcheck() -> HAProxyHealthCheck {
HAProxyHealthCheck {
uuid: "healthcheck-uuid".into(),
name: "healthcheck".into(),
..Default::default()
}
}
}