Compare commits
31 commits · secrets-pr...79e406f126
| SHA1 |
|---|
| 79e406f126 |
| 0700e30299 |
| 528ee8a696 |
| 69a159711a |
| b0ad7bb4c4 |
| 5f78300d78 |
| 2d3c32469c |
| 1cec398d4d |
| cbbaae2ac8 |
| f073b7e5fb |
| c84b2413ed |
| f83fd09f11 |
| c15bd53331 |
| 6e6f57e38c |
| 6f55f79281 |
| 19f87fdaf7 |
| 49370af176 |
| cf0b8326dc |
| 1e2563f7d1 |
| 7f50c36f11 |
| 4df451bc41 |
| 49dad343ad |
| 9961e8b79d |
| 9b889f71da |
| 7514ebfb5c |
| b3ae4e6611 |
| 8424778871 |
| 7bc083701e |
| 4fa2b8deb6 |
| f3639c604c |
| ceafabf430 |
README.md (69 lines changed)
@@ -36,48 +36,59 @@ These principles surface as simple, ergonomic Rust APIs that let teams focus on
 ## 2 · Quick Start
 
-The snippet below spins up a complete **production-grade LAMP stack** with monitoring. Swap it for your own scores to deploy anything from microservices to machine-learning pipelines.
+The snippet below spins up a complete **production-grade Rust + Leptos Webapp** with monitoring. Swap it for your own scores to deploy anything from microservices to machine-learning pipelines.
 
 ```rust
 use harmony::{
-    data::Version,
     inventory::Inventory,
-    maestro::Maestro,
     modules::{
-        lamp::{LAMPConfig, LAMPScore},
-        monitoring::monitoring_alerting::MonitoringAlertingStackScore,
+        application::{
+            ApplicationScore, RustWebFramework, RustWebapp,
+            features::{PackagingDeployment, rhob_monitoring::Monitoring},
+        },
+        monitoring::alert_channel::discord_alert_channel::DiscordWebhook,
     },
-    topology::{K8sAnywhereTopology, Url},
+    topology::K8sAnywhereTopology,
 };
+use harmony_macros::hurl;
+use std::{path::PathBuf, sync::Arc};
 
 #[tokio::main]
 async fn main() {
-    // 1. Describe what you want
-    let lamp_stack = LAMPScore {
-        name: "harmony-lamp-demo".into(),
-        domain: Url::Url(url::Url::parse("https://lampdemo.example.com").unwrap()),
-        php_version: Version::from("8.3.0").unwrap(),
-        config: LAMPConfig {
-            project_root: "./php".into(),
-            database_size: "4Gi".into(),
-            ..Default::default()
-        },
+    let application = Arc::new(RustWebapp {
+        name: "harmony-example-leptos".to_string(),
+        project_root: PathBuf::from(".."), // <== Your project root, usually .. if you use the standard `/harmony` folder
+        framework: Some(RustWebFramework::Leptos),
+        service_port: 8080,
+    });
+
+    // Define your Application deployment and the features you want
+    let app = ApplicationScore {
+        features: vec![
+            Box::new(PackagingDeployment {
+                application: application.clone(),
+            }),
+            Box::new(Monitoring {
+                application: application.clone(),
+                alert_receiver: vec![
+                    Box::new(DiscordWebhook {
+                        name: "test-discord".to_string(),
+                        url: hurl!("https://discord.doesnt.exist.com"), // <== Get your discord webhook url
+                    }),
+                ],
+            }),
+        ],
+        application,
+    };
 
-    // 2. Enhance with extra scores (monitoring, CI/CD, …)
-    let mut monitoring = MonitoringAlertingStackScore::new();
-    monitoring.namespace = Some(lamp_stack.config.namespace.clone());
-
-    // 3. Run your scores on the desired topology & inventory
     harmony_cli::run(
-        Inventory::autoload(), // auto-detect hardware / kube-config
-        K8sAnywhereTopology::from_env(), // local k3d, CI, staging, prod…
-        vec![
-            Box::new(lamp_stack),
-            Box::new(monitoring)
-        ],
-        None
-    ).await.unwrap();
+        Inventory::autoload(),
+        K8sAnywhereTopology::from_env(), // <== Deploy to local automatically provisioned local k3d by default or connect to any kubernetes cluster
+        vec![Box::new(app)],
+        None,
+    )
+    .await
+    .unwrap();
 }
 ```
demos/cncf-k8s-quebec-meetup-september-2025/.gitignore (new file, vendored, 3 lines)
@@ -0,0 +1,3 @@
.terraform
*.tfstate
venv
BIN demos/cncf-k8s-quebec-meetup-september-2025/75_years_later.jpg (new file, 72 KiB)
BIN demos/cncf-k8s-quebec-meetup-september-2025/Happy_swimmer.jpg (new file, 38 KiB)
BIN five more new image files (38 KiB, 52 KiB, 62 KiB, 64 KiB, 100 KiB); file names not captured in this view
demos/cncf-k8s-quebec-meetup-september-2025/README.md (new file, 5 lines)
@@ -0,0 +1,5 @@
To build:

```bash
npx @marp-team/marp-cli@latest -w slides.md
```
BIN demos/cncf-k8s-quebec-meetup-september-2025/ansible.jpg (new file, 11 KiB)
@@ -0,0 +1,9 @@
To run this:

```bash
virtualenv venv
source venv/bin/activate
pip install ansible ansible-dev-tools
ansible-lint download.yml
ansible-playbook -i localhost download.yml
```
@@ -0,0 +1,8 @@
- name: Test Ansible URL Validation
  hosts: localhost
  tasks:
    - name: Download a file
      ansible.builtin.get_url:
        url: "http:/wikipedia.org/"
        dest: "/tmp/ansible-test/wikipedia.html"
        mode: '0900'
BIN new image file (22 KiB); file name not captured in this view
BIN demos/cncf-k8s-quebec-meetup-september-2025/ansible_fail.jpg (new file, 275 KiB)
BIN three more new image files (212 KiB, 384 KiB, 8.3 KiB); file names not captured in this view
demos/cncf-k8s-quebec-meetup-september-2025/slides.html (new file, 195 lines)
demos/cncf-k8s-quebec-meetup-september-2025/slides.md (new file, 241 lines)
@@ -0,0 +1,241 @@
---
theme: uncover
---

# Here is the story of Petit Poisson

---

<img src="./Happy_swimmer.jpg" width="600"/>

---

<img src="./happy_landscape_swimmer.jpg" width="1000"/>

---

<img src="./Happy_swimmer.jpg" width="200"/>

<img src="./tryrust.org.png" width="600"/>

[https://tryrust.org](https://tryrust.org)

---

<img src="./texto_deploy_prod_1.png" width="600"/>

---

<img src="./texto_deploy_prod_2.png" width="600"/>

---

<img src="./texto_deploy_prod_3.png" width="600"/>

---

<img src="./texto_deploy_prod_4.png" width="600"/>

---

## Demo time

---

<img src="./Happy_swimmer_sunglasses.jpg" width="1000"/>

---

<img src="./texto_download_wikipedia.png" width="600"/>

---

<img src="./ansible.jpg" width="200"/>

## Ansible❓

---

<img src="./Happy_swimmer.jpg" width="200"/>

```yaml
- name: Download wikipedia
  hosts: localhost
  tasks:
    - name: Download a file
      ansible.builtin.get_url:
        url: "https:/wikipedia.org/"
        dest: "/tmp/ansible-test/wikipedia.html"
        mode: '0900'
```

---

<img src="./Happy_swimmer.jpg" width="200"/>

```
ansible-lint download.yml

Passed: 0 failure(s), 0 warning(s) on 1 files. Last profile that met the validation criteria was 'production'.
```

---

```
git push
```

---

<img src="./75_years_later.jpg" width="1100"/>

---

<img src="./texto_download_wikipedia_fail.png" width="600"/>

---

<img src="./Happy_swimmer_reversed.jpg" width="600"/>

---

<img src="./ansible_output_fail.jpg" width="1100"/>

---

<img src="./Happy_swimmer_reversed_1hit.jpg" width="600"/>

---

<img src="./ansible_crossed_out.jpg" width="400"/>

---

<img src="./terraform.jpg" width="400"/>

## Terraform❓❗

---

<img src="./Happy_swimmer_reversed_1hit.jpg" width="200"/>
<img src="./terraform.jpg" width="200"/>

```tf
provider "docker" {}

resource "docker_network" "invalid_network" {
  name = "my-invalid-network"

  ipam_config {
    subnet = "172.17.0.0/33"
  }
}
```

---

<img src="./Happy_swimmer_reversed_1hit.jpg" width="100"/>
<img src="./terraform.jpg" width="200"/>

```
terraform plan

Terraform used the selected providers to generate the following execution plan.
Resource actions are indicated with the following symbols:
  + create

Terraform will perform the following actions:

  # docker_network.invalid_network will be created
  + resource "docker_network" "invalid_network" {
      + driver      = (known after apply)
      + id          = (known after apply)
      + internal    = (known after apply)
      + ipam_driver = "default"
      + name        = "my-invalid-network"
      + options     = (known after apply)
      + scope       = (known after apply)

      + ipam_config {
          + subnet = "172.17.0.0/33"
            # (2 unchanged attributes hidden)
        }
    }

Plan: 1 to add, 0 to change, 0 to destroy.
```

---

✅

---

```
terraform apply
```

---

```
Plan: 1 to add, 0 to change, 0 to destroy.

Do you want to perform these actions?
  Terraform will perform the actions described above.
  Only 'yes' will be accepted to approve.

  Enter a value: yes
```

---

```
docker_network.invalid_network: Creating...
╷
│ Error: Unable to create network: Error response from daemon: invalid network config:
│ invalid subnet 172.17.0.0/33: invalid CIDR block notation
│
│ with docker_network.invalid_network,
│ on main.tf line 11, in resource "docker_network" "invalid_network":
│ 11: resource "docker_network" "invalid_network" {
│
╵
```

---

<img src="./Happy_swimmer_reversed_fullhit.jpg" width="1100"/>

---

<img src="./ansible_crossed_out.jpg" width="300"/>
<img src="./terraform_crossed_out.jpg" width="400"/>
<img src="./Happy_swimmer_reversed_fullhit.jpg" width="300"/>

---

## Harmony❓❗

---

Demo time

---

<img src="./Happy_swimmer.jpg" width="300"/>

---

# 🎼

Harmony: [https://git.nationtech.io/nationtech/harmony](https://git.nationtech.io/nationtech/harmony)

<img src="./qrcode_gitea_nationtech.png" width="120"/>

LinkedIn: [https://www.linkedin.com/in/jean-gabriel-gill-couture/](https://www.linkedin.com/in/jean-gabriel-gill-couture/)

Email: [jg@nationtech.io](mailto:jg@nationtech.io)
BIN demos/cncf-k8s-quebec-meetup-september-2025/terraform.jpg (new file, 14 KiB)
demos/cncf-k8s-quebec-meetup-september-2025/terraform/.terraform.lock.hcl (generated, new file, 40 lines)
@@ -0,0 +1,40 @@
# This file is maintained automatically by "terraform init".
# Manual edits may be lost in future updates.

provider "registry.terraform.io/hashicorp/http" {
  version = "3.5.0"
  hashes = [
    "h1:8bUoPwS4hahOvzCBj6b04ObLVFXCEmEN8T/5eOHmWOM=",
    "zh:047c5b4920751b13425efe0d011b3a23a3be97d02d9c0e3c60985521c9c456b7",
    "zh:157866f700470207561f6d032d344916b82268ecd0cf8174fb11c0674c8d0736",
    "zh:1973eb9383b0d83dd4fd5e662f0f16de837d072b64a6b7cd703410d730499476",
    "zh:212f833a4e6d020840672f6f88273d62a564f44acb0c857b5961cdb3bbc14c90",
    "zh:2c8034bc039fffaa1d4965ca02a8c6d57301e5fa9fff4773e684b46e3f78e76a",
    "zh:5df353fc5b2dd31577def9cc1a4ebf0c9a9c2699d223c6b02087a3089c74a1c6",
    "zh:672083810d4185076c81b16ad13d1224b9e6ea7f4850951d2ab8d30fa6e41f08",
    "zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
    "zh:7b4200f18abdbe39904b03537e1a78f21ebafe60f1c861a44387d314fda69da6",
    "zh:843feacacd86baed820f81a6c9f7bd32cf302db3d7a0f39e87976ebc7a7cc2ee",
    "zh:a9ea5096ab91aab260b22e4251c05f08dad2ed77e43e5e4fadcdfd87f2c78926",
    "zh:d02b288922811739059e90184c7f76d45d07d3a77cc48d0b15fd3db14e928623",
  ]
}

provider "registry.terraform.io/hashicorp/local" {
  version = "2.5.3"
  hashes = [
    "h1:1Nkh16jQJMp0EuDmvP/96f5Unnir0z12WyDuoR6HjMo=",
    "zh:284d4b5b572eacd456e605e94372f740f6de27b71b4e1fd49b63745d8ecd4927",
    "zh:40d9dfc9c549e406b5aab73c023aa485633c1b6b730c933d7bcc2fa67fd1ae6e",
    "zh:6243509bb208656eb9dc17d3c525c89acdd27f08def427a0dce22d5db90a4c8b",
    "zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
    "zh:885d85869f927853b6fe330e235cd03c337ac3b933b0d9ae827ec32fa1fdcdbf",
    "zh:bab66af51039bdfcccf85b25fe562cbba2f54f6b3812202f4873ade834ec201d",
    "zh:c505ff1bf9442a889ac7dca3ac05a8ee6f852e0118dd9a61796a2f6ff4837f09",
    "zh:d36c0b5770841ddb6eaf0499ba3de48e5d4fc99f4829b6ab66b0fab59b1aaf4f",
    "zh:ddb6a407c7f3ec63efb4dad5f948b54f7f4434ee1a2607a49680d494b1776fe1",
    "zh:e0dafdd4500bec23d3ff221e3a9b60621c5273e5df867bc59ef6b7e41f5c91f6",
    "zh:ece8742fd2882a8fc9d6efd20e2590010d43db386b920b2a9c220cfecc18de47",
    "zh:f4c6b3eb8f39105004cf720e202f04f57e3578441cfb76ca27611139bc116a82",
  ]
}
@@ -0,0 +1,10 @@
provider "http" {}

data "http" "remote_file" {
  url = "http:/example.com/file.txt"
}

resource "local_file" "downloaded_file" {
  content  = data.http.remote_file.body
  filename = "${path.module}/downloaded_file.txt"
}
demos/cncf-k8s-quebec-meetup-september-2025/terraform_2/.terraform.lock.hcl (generated, new file, 24 lines)
@@ -0,0 +1,24 @@
# This file is maintained automatically by "terraform init".
# Manual edits may be lost in future updates.

provider "registry.terraform.io/kreuzwerker/docker" {
  version     = "3.0.2"
  constraints = "~> 3.0.1"
  hashes = [
    "h1:cT2ccWOtlfKYBUE60/v2/4Q6Stk1KYTNnhxSck+VPlU=",
    "zh:15b0a2b2b563d8d40f62f83057d91acb02cd0096f207488d8b4298a59203d64f",
    "zh:23d919de139f7cd5ebfd2ff1b94e6d9913f0977fcfc2ca02e1573be53e269f95",
    "zh:38081b3fe317c7e9555b2aaad325ad3fa516a886d2dfa8605ae6a809c1072138",
    "zh:4a9c5065b178082f79ad8160243369c185214d874ff5048556d48d3edd03c4da",
    "zh:5438ef6afe057945f28bce43d76c4401254073de01a774760169ac1058830ac2",
    "zh:60b7fadc287166e5c9873dfe53a7976d98244979e0ab66428ea0dea1ebf33e06",
    "zh:61c5ec1cb94e4c4a4fb1e4a24576d5f39a955f09afb17dab982de62b70a9bdd1",
    "zh:a38fe9016ace5f911ab00c88e64b156ebbbbfb72a51a44da3c13d442cd214710",
    "zh:c2c4d2b1fd9ebb291c57f524b3bf9d0994ff3e815c0cd9c9bcb87166dc687005",
    "zh:d567bb8ce483ab2cf0602e07eae57027a1a53994aba470fa76095912a505533d",
    "zh:e83bf05ab6a19dd8c43547ce9a8a511f8c331a124d11ac64687c764ab9d5a792",
    "zh:e90c934b5cd65516fbcc454c89a150bfa726e7cf1fe749790c7480bbeb19d387",
    "zh:f05f167d2eaf913045d8e7b88c13757e3cf595dd5cd333057fdafc7c4b7fed62",
    "zh:fcc9c1cea5ce85e8bcb593862e699a881bd36dffd29e2e367f82d15368659c3d",
  ]
}
@@ -0,0 +1,17 @@
terraform {
  required_providers {
    docker = {
      source  = "kreuzwerker/docker"
      version = "~> 3.0.1" # Adjust version as needed
    }
  }
}
provider "docker" {}

resource "docker_network" "invalid_network" {
  name = "my-invalid-network"

  ipam_config {
    subnet = "172.17.0.0/33"
  }
}
BIN new image file (14 KiB); file name not captured in this view
BIN demos/cncf-k8s-quebec-meetup-september-2025/terraform_fail.jpg (new file, 144 KiB)
BIN seven more new image files (58 KiB, 56 KiB, 71 KiB, 81 KiB, 87 KiB, 88 KiB, 48 KiB); file names not captured in this view
BIN demos/cncf-k8s-quebec-meetup-september-2025/tryrust.org.png (new file, 325 KiB)
@@ -27,6 +27,7 @@ async fn main() {
     };
     let application = Arc::new(RustWebapp {
         name: "example-monitoring".to_string(),
+        dns: "example-monitoring.harmony.mcd".to_string(),
         project_root: PathBuf::from("./examples/rust/webapp"),
         framework: Some(RustWebFramework::Leptos),
         service_port: 3000,
@@ -4,8 +4,7 @@ use harmony::{
     inventory::Inventory,
     modules::{
         application::{
-            ApplicationScore, RustWebFramework, RustWebapp,
-            features::rhob_monitoring::RHOBMonitoring,
+            ApplicationScore, RustWebFramework, RustWebapp, features::rhob_monitoring::Monitoring,
         },
         monitoring::alert_channel::discord_alert_channel::DiscordWebhook,
     },
@@ -17,6 +16,7 @@ use harmony_types::net::Url;
 async fn main() {
     let application = Arc::new(RustWebapp {
         name: "test-rhob-monitoring".to_string(),
+        dns: "test-rhob-monitoring.harmony.mcd".to_string(),
         project_root: PathBuf::from("./webapp"), // Relative from 'harmony-path' param
         framework: Some(RustWebFramework::Leptos),
         service_port: 3000,
@@ -29,7 +29,7 @@ async fn main() {
 
     let app = ApplicationScore {
         features: vec![
-            Box::new(RHOBMonitoring {
+            Box::new(Monitoring {
                 application: application.clone(),
                 alert_receiver: vec![Box::new(discord_receiver)],
             }),
@@ -5,7 +5,7 @@ use harmony::{
     modules::{
         application::{
             ApplicationScore, RustWebFramework, RustWebapp,
-            features::{ContinuousDelivery, Monitoring},
+            features::{Monitoring, PackagingDeployment},
         },
         monitoring::alert_channel::{
             discord_alert_channel::DiscordWebhook, webhook_receiver::WebhookReceiver,
@@ -19,6 +19,7 @@ use harmony_macros::hurl;
 async fn main() {
     let application = Arc::new(RustWebapp {
         name: "harmony-example-rust-webapp".to_string(),
+        dns: "harmony-example-rust-webapp.harmony.mcd".to_string(),
         project_root: PathBuf::from("./webapp"),
         framework: Some(RustWebFramework::Leptos),
         service_port: 3000,
@@ -36,7 +37,7 @@ async fn main() {
 
     let app = ApplicationScore {
         features: vec![
-            Box::new(ContinuousDelivery {
+            Box::new(PackagingDeployment {
                 application: application.clone(),
             }),
             Box::new(Monitoring {
examples/try_rust_webapp/files_to_add/.dockerignore (new file, 1 line)
@@ -0,0 +1 @@
harmony
examples/try_rust_webapp/files_to_add/Cargo.toml.to_add (new file, 20 lines)
@@ -0,0 +1,20 @@
[package]
name = "harmony-tryrust"
edition = "2024"
version = "0.1.0"

[dependencies]
harmony = { path = "../../../nationtech/harmony/harmony" }
harmony_cli = { path = "../../../nationtech/harmony/harmony_cli" }
harmony_types = { path = "../../../nationtech/harmony/harmony_types" }
harmony_macros = { path = "../../../nationtech/harmony/harmony_macros" }
tokio = { version = "1.40", features = [
  "io-std",
  "fs",
  "macros",
  "rt-multi-thread",
] }
log = { version = "0.4", features = ["kv"] }
env_logger = "0.11"
url = "2.5"
base64 = "0.22.1"
examples/try_rust_webapp/files_to_add/main.rs (new file, 49 lines)
@@ -0,0 +1,49 @@
use harmony::{
    inventory::Inventory,
    modules::{
        application::{
            features::{rhob_monitoring::Monitoring, PackagingDeployment}, ApplicationScore, RustWebFramework, RustWebapp
        },
        monitoring::alert_channel::discord_alert_channel::DiscordWebhook,
    },
    topology::{K8sAnywhereTopology, LocalhostTopology},
};
use harmony_macros::hurl;
use std::{path::PathBuf, sync::Arc};

#[tokio::main]
async fn main() {
    let application = Arc::new(RustWebapp {
        name: "tryrust".to_string(),
        project_root: PathBuf::from(".."),
        framework: Some(RustWebFramework::Leptos),
        service_port: 8080,
    });

    let discord_webhook = DiscordWebhook {
        name: "harmony-demo".to_string(),
        url: hurl!("https://discord.com/api/webhooks/1415391405681021050/V6KzV41vQ7yvbn7BchejRu9C8OANxy0i2ESZOz2nvCxG8xAY3-2i3s5MS38k568JKTzH"),
    };

    let app = ApplicationScore {
        features: vec![
            Box::new(PackagingDeployment {
                application: application.clone(),
            }),
            Box::new(Monitoring {
                application: application.clone(),
                alert_receiver: vec![Box::new(discord_webhook)],
            }),
        ],
        application,
    };

    harmony_cli::run(
        Inventory::autoload(),
        K8sAnywhereTopology::from_env(),
        vec![Box::new(app)],
        None,
    )
    .await
    .unwrap();
}
@@ -3,7 +3,7 @@ use harmony::{
     modules::{
         application::{
             ApplicationScore, RustWebFramework, RustWebapp,
-            features::{ContinuousDelivery, Monitoring, rhob_monitoring::RHOBMonitoring},
+            features::{PackagingDeployment, rhob_monitoring::Monitoring},
         },
         monitoring::alert_channel::discord_alert_channel::DiscordWebhook,
     },
@@ -16,24 +16,25 @@ use std::{path::PathBuf, sync::Arc};
 async fn main() {
     let application = Arc::new(RustWebapp {
         name: "harmony-example-tryrust".to_string(),
-        project_root: PathBuf::from("./tryrust.org"),
+        dns: "tryrust.example.harmony.mcd".to_string(),
+        project_root: PathBuf::from("./tryrust.org"), // <== Project root, in this case it is a
+                                                      // submodule
         framework: Some(RustWebFramework::Leptos),
         service_port: 8080,
     });
 
-    let discord_receiver = DiscordWebhook {
-        name: "test-discord".to_string(),
-        url: hurl!("https://discord.doesnt.exist.com"),
-    };
-
     // Define your Application deployment and the features you want
     let app = ApplicationScore {
         features: vec![
-            Box::new(ContinuousDelivery {
+            Box::new(PackagingDeployment {
                 application: application.clone(),
             }),
-            Box::new(RHOBMonitoring {
+            Box::new(Monitoring {
                 application: application.clone(),
-                alert_receiver: vec![Box::new(discord_receiver)],
+                alert_receiver: vec![Box::new(DiscordWebhook {
+                    name: "test-discord".to_string(),
+                    url: hurl!("https://discord.doesnt.exist.com"),
+                })],
             }),
         ],
         application,
@@ -41,7 +42,7 @@ async fn main() {
 
     harmony_cli::run(
         Inventory::autoload(),
-        K8sAnywhereTopology::from_env(),
+        K8sAnywhereTopology::from_env(), // <== Deploy to local automatically provisioned k3d by default or connect to any kubernetes cluster
         vec![Box::new(app)],
         None,
     )
@@ -34,6 +34,7 @@ pub enum InterpretName {
     CephClusterHealth,
     Custom(&'static str),
     RHOBAlerting,
+    K8sIngress,
 }
 
 impl std::fmt::Display for InterpretName {
@@ -64,6 +65,7 @@ impl std::fmt::Display for InterpretName {
             InterpretName::CephClusterHealth => f.write_str("CephClusterHealth"),
             InterpretName::Custom(name) => f.write_str(name),
             InterpretName::RHOBAlerting => f.write_str("RHOBAlerting"),
+            InterpretName::K8sIngress => f.write_str("K8sIngress"),
         }
     }
 }
@@ -82,13 +84,15 @@ pub trait Interpret<T>: std::fmt::Debug + Send {
 pub struct Outcome {
     pub status: InterpretStatus,
     pub message: String,
+    pub details: Vec<String>,
 }
 
 impl Outcome {
-    pub fn noop() -> Self {
+    pub fn noop(message: String) -> Self {
         Self {
             status: InterpretStatus::NOOP,
-            message: String::new(),
+            message,
+            details: vec![],
         }
     }
 
@@ -96,6 +100,23 @@ impl Outcome {
         Self {
             status: InterpretStatus::SUCCESS,
             message,
+            details: vec![],
         }
     }
 
+    pub fn success_with_details(message: String, details: Vec<String>) -> Self {
+        Self {
+            status: InterpretStatus::SUCCESS,
+            message,
+            details,
+        }
+    }
+
+    pub fn running(message: String) -> Self {
+        Self {
+            status: InterpretStatus::RUNNING,
+            message,
+            details: vec![],
+        }
+    }
 }
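For orientation, a minimal sketch of how the reworked constructors above might be called; the signatures are taken from this diff, while the module path is assumed from the crate's `interpret` module and the messages are illustrative:

```rust
use harmony::interpret::Outcome;

fn main() {
    // `noop` now requires an explanatory message instead of defaulting to "".
    let skipped = Outcome::noop("ArgoCD already installed, nothing to do".to_string());

    // `success_with_details` carries extra lines a CLI can print under the status.
    let installed = Outcome::success_with_details(
        "ArgoCD installed".to_string(),
        vec!["control_namespace=argocd".to_string()],
    );

    println!("{} / {}", skipped.message, installed.message);
}
```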
@@ -1,13 +1,18 @@
+use std::{collections::HashMap, time::Duration};
+
 use derive_new::new;
 use k8s_openapi::{
     ClusterResourceScope, NamespaceResourceScope,
     api::{apps::v1::Deployment, core::v1::Pod},
     apiextensions_apiserver::pkg::apis::apiextensions::v1::CustomResourceDefinition,
+    apimachinery::pkg::version::Info,
 };
 use kube::{
-    Client, Config, Error, Resource,
+    Client, Config, Discovery, Error, Resource,
     api::{Api, AttachParams, DeleteParams, ListParams, Patch, PatchParams, ResourceExt},
     config::{KubeConfigOptions, Kubeconfig},
     core::ErrorResponse,
+    error::DiscoveryError,
     runtime::reflector::Lookup,
 };
 use kube::{api::DynamicObject, runtime::conditions};
@@ -17,9 +22,9 @@ use kube::{
 };
 use log::{debug, error, trace};
 use serde::{Serialize, de::DeserializeOwned};
-use serde_json::{Value, json};
+use serde_json::{json, Value};
 use similar::TextDiff;
-use tokio::io::AsyncReadExt;
+use tokio::{io::AsyncReadExt, time::sleep};
 
 #[derive(new, Clone)]
 pub struct K8sClient {
@@ -53,6 +58,159 @@ impl K8sClient {
         })
     }
 
+    // Returns true if any deployment in the given namespace matching the label selector
+    // has status.availableReplicas > 0 (or condition Available=True).
+    pub async fn has_healthy_deployment_with_label(
+        &self,
+        namespace: &str,
+        label_selector: &str,
+    ) -> Result<bool, Error> {
+        let api: Api<Deployment> = Api::namespaced(self.client.clone(), namespace);
+        let lp = ListParams::default().labels(label_selector);
+        let list = api.list(&lp).await?;
+        for d in list.items {
+            // Check AvailableReplicas > 0 or Available condition
+            let available = d
+                .status
+                .as_ref()
+                .and_then(|s| s.available_replicas)
+                .unwrap_or(0);
+            if available > 0 {
+                return Ok(true);
+            }
+            // Fallback: scan conditions
+            if let Some(conds) = d.status.as_ref().and_then(|s| s.conditions.as_ref()) {
+                if conds.iter().any(|c| c.type_ == "Available" && c.status == "True") {
+                    return Ok(true);
+                }
+            }
+        }
+        Ok(false)
+    }
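A small usage sketch for the helper above; the client handle and label selector are illustrative, not taken from this PR:

```rust
use std::sync::Arc;
use harmony::topology::k8s::K8sClient;

// Hypothetical gate: check one namespace for a healthy ArgoCD deployment.
// Assumes a K8sClient obtained from the topology, e.g. `topology.k8s_client().await?`.
async fn argocd_is_healthy(k8s: Arc<K8sClient>) -> Result<bool, kube::Error> {
    k8s.has_healthy_deployment_with_label("argocd", "app.kubernetes.io/part-of=argocd")
        .await
}
```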
+    // Cluster-wide: returns namespaces that have at least one healthy deployment
+    // matching the label selector (equivalent to kubectl -A -l ...).
+    pub async fn list_namespaces_with_healthy_deployments(
+        &self,
+        label_selector: &str,
+    ) -> Result<Vec<String>, Error> {
+        let api: Api<Deployment> = Api::all(self.client.clone());
+        let lp = ListParams::default().labels(label_selector);
+        let list = api.list(&lp).await?;
+
+        let mut healthy_ns: HashMap<String, bool> = HashMap::new();
+        for d in list.items {
+            let ns = match d.metadata.namespace.clone() {
+                Some(n) => n,
+                None => continue,
+            };
+            let available = d
+                .status
+                .as_ref()
+                .and_then(|s| s.available_replicas)
+                .unwrap_or(0);
+            let is_healthy = if available > 0 {
+                true
+            } else {
+                d.status
+                    .as_ref()
+                    .and_then(|s| s.conditions.as_ref())
+                    .map(|conds| conds.iter().any(|c| c.type_ == "Available" && c.status == "True"))
+                    .unwrap_or(false)
+            };
+            if is_healthy {
+                healthy_ns.insert(ns, true);
+            }
+        }
+
+        Ok(healthy_ns.into_keys().collect())
+    }
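And a sketch of the cluster-wide variant, again with an illustrative selector:

```rust
use std::sync::Arc;
use harmony::topology::k8s::K8sClient;

// Hypothetical: list every namespace running a healthy ArgoCD controller.
async fn argocd_namespaces(k8s: Arc<K8sClient>) -> Result<Vec<String>, kube::Error> {
    k8s.list_namespaces_with_healthy_deployments(
        "app.kubernetes.io/name=argocd-application-controller", // illustrative label
    )
    .await
}
```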
+    // Get the application-controller ServiceAccount name (fallback to default)
+    pub async fn get_argocd_controller_sa_name(&self, ns: &str) -> Result<String, Error> {
+        let api: Api<Deployment> = Api::namespaced(self.client.clone(), ns);
+        let lp = ListParams::default().labels("app.kubernetes.io/component=controller");
+        let list = api.list(&lp).await?;
+        if let Some(dep) = list.items.get(0) {
+            if let Some(sa) = dep
+                .spec
+                .as_ref()
+                .and_then(|ds| ds.template.spec.as_ref())
+                .and_then(|ps| ps.service_account_name.clone())
+            {
+                return Ok(sa);
+            }
+        }
+        Ok("argocd-application-controller".to_string())
+    }
+
+    // List ClusterRoleBindings dynamically and return as JSON values
+    pub async fn list_clusterrolebindings_json(&self) -> Result<Vec<Value>, Error> {
+        let gvk = kube::api::GroupVersionKind::gvk(
+            "rbac.authorization.k8s.io",
+            "v1",
+            "ClusterRoleBinding",
+        );
+        let ar = kube::api::ApiResource::from_gvk(&gvk);
+        let api: Api<kube::api::DynamicObject> = Api::all_with(self.client.clone(), &ar);
+        let crbs = api.list(&ListParams::default()).await?;
+        let mut out = Vec::new();
+        for o in crbs {
+            let v = serde_json::to_value(&o).unwrap_or(Value::Null);
+            out.push(v);
+        }
+        Ok(out)
+    }
+    // Determine if Argo controller in ns has cluster-wide permissions via CRBs
+    // TODO This does not belong in the generic k8s client, should be refactored at some point
+    pub async fn is_argocd_cluster_wide(&self, ns: &str) -> Result<bool, Error> {
+        let sa = self.get_argocd_controller_sa_name(ns).await?;
+        let crbs = self.list_clusterrolebindings_json().await?;
+        let sa_user = format!("system:serviceaccount:{}:{}", ns, sa);
+        for crb in crbs {
+            if let Some(subjects) = crb.get("subjects").and_then(|s| s.as_array()) {
+                for subj in subjects {
+                    let kind = subj.get("kind").and_then(|v| v.as_str()).unwrap_or("");
+                    let name = subj.get("name").and_then(|v| v.as_str()).unwrap_or("");
+                    let subj_ns = subj.get("namespace").and_then(|v| v.as_str()).unwrap_or("");
+                    if (kind == "ServiceAccount" && name == sa && subj_ns == ns)
+                        || (kind == "User" && name == sa_user)
+                    {
+                        return Ok(true);
+                    }
+                }
+            }
+        }
+        Ok(false)
+    }
+
+    pub async fn has_crd(&self, name: &str) -> Result<bool, Error> {
+        let api: Api<CustomResourceDefinition> = Api::all(self.client.clone());
+        let lp = ListParams::default().fields(&format!("metadata.name={}", name));
+        let crds = api.list(&lp).await?;
+        Ok(!crds.items.is_empty())
+    }
+
+    pub async fn get_apiserver_version(&self) -> Result<Info, Error> {
+        let client: Client = self.client.clone();
+        let version_info: Info = client.apiserver_version().await?;
+        Ok(version_info)
+    }
+
+    pub async fn discovery(&self) -> Result<Discovery, Error> {
+        let discovery: Discovery = Discovery::new(self.client.clone()).run().await?;
+        Ok(discovery)
+    }
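A sketch of gating on `has_crd` before creating custom resources; the CRD name here is an example, not taken from this PR:

```rust
use std::sync::Arc;
use harmony::topology::k8s::K8sClient;

// Hypothetical check: only create PrometheusRule objects when the CRD exists.
async fn can_use_prometheus_rules(k8s: Arc<K8sClient>) -> Result<bool, kube::Error> {
    k8s.has_crd("prometheusrules.monitoring.coreos.com").await
}
```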
     pub async fn get_resource_json_value(
         &self,
         name: &str,
 
@@ -153,6 +311,41 @@ impl K8sClient {
         }
     }
+    pub async fn wait_for_pod_ready(
+        &self,
+        pod_name: &str,
+        namespace: Option<&str>,
+    ) -> Result<(), Error> {
+        let mut elapsed = 0;
+        let interval = 5; // seconds between checks
+        let timeout_secs = 120;
+        loop {
+            let pod = self.get_pod(pod_name, namespace).await?;
+
+            if let Some(p) = pod {
+                if let Some(status) = p.status {
+                    if let Some(phase) = status.phase {
+                        if phase.to_lowercase() == "running" {
+                            return Ok(());
+                        }
+                    }
+                }
+            }
+
+            if elapsed >= timeout_secs {
+                return Err(Error::Discovery(DiscoveryError::MissingResource(format!(
+                    "'{}' in ns '{}' did not become ready within {}s",
+                    pod_name,
+                    namespace.unwrap(),
+                    timeout_secs
+                ))));
+            }
+
+            sleep(Duration::from_secs(interval)).await;
+            elapsed += interval;
+        }
+    }
+
     /// Will execute a command in the first pod found that matches the specified label
     /// '{label}={name}'
     pub async fn exec_app_capture_output(
 
@@ -419,9 +612,12 @@ impl K8sClient {
             .as_str()
             .expect("couldn't get kind as str");
 
-        let split: Vec<&str> = api_version.splitn(2, "/").collect();
-        let g = split[0];
-        let v = split[1];
+        let mut it = api_version.splitn(2, '/');
+        let first = it.next().unwrap();
+        let (g, v) = match it.next() {
+            Some(second) => (first, second),
+            None => ("", first),
+        };
 
         let gvk = GroupVersionKind::gvk(g, v, kind);
         let api_resource = ApiResource::from_gvk(&gvk);
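The rewritten split above matters for core-group resources, whose `apiVersion` is just `v1` with no slash: the old two-element indexing would panic there. A standalone sketch of the same logic:

```rust
// Self-contained copy of the split logic, to make the fallback concrete.
fn split_api_version(api_version: &str) -> (&str, &str) {
    let mut it = api_version.splitn(2, '/');
    let first = it.next().unwrap();
    match it.next() {
        Some(second) => (first, second),
        None => ("", first), // core group: "v1" -> (group "", version "v1")
    }
}

fn main() {
    assert_eq!(split_api_version("apps/v1"), ("apps", "v1"));
    assert_eq!(split_api_version("v1"), ("", "v1")); // old code would panic here
}
```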
@@ -2,7 +2,7 @@ use std::{process::Command, sync::Arc};
 
 use async_trait::async_trait;
 use kube::api::GroupVersionKind;
-use log::{debug, info, warn};
+use log::{debug, info, trace, warn};
 use serde::Serialize;
 use tokio::sync::OnceCell;
@@ -47,6 +47,13 @@ struct K8sState {
     message: String,
 }
 
+#[derive(Debug, Clone)]
+pub enum KubernetesDistribution {
+    OpenshiftFamily,
+    K3sFamily,
+    Default,
+}
+
 #[derive(Debug, Clone)]
 enum K8sSource {
     LocalK3d,
@@ -57,12 +64,14 @@ enum K8sSource {
 pub struct K8sAnywhereTopology {
     k8s_state: Arc<OnceCell<Option<K8sState>>>,
     tenant_manager: Arc<OnceCell<K8sTenantManager>>,
+    flavour: Arc<OnceCell<KubernetesDistribution>>,
     config: Arc<K8sAnywhereConfig>,
 }
 
 #[async_trait]
 impl K8sclient for K8sAnywhereTopology {
     async fn k8s_client(&self) -> Result<Arc<K8sClient>, String> {
+        trace!("getting k8s client");
         let state = match self.k8s_state.get() {
             Some(state) => state,
             None => return Err("K8s state not initialized yet".to_string()),
@@ -162,6 +171,7 @@ impl K8sAnywhereTopology {
         Self {
             k8s_state: Arc::new(OnceCell::new()),
             tenant_manager: Arc::new(OnceCell::new()),
+            flavour: Arc::new(OnceCell::new()),
             config: Arc::new(K8sAnywhereConfig::from_env()),
         }
     }
@@ -170,10 +180,42 @@ impl K8sAnywhereTopology {
         Self {
             k8s_state: Arc::new(OnceCell::new()),
             tenant_manager: Arc::new(OnceCell::new()),
+            flavour: Arc::new(OnceCell::new()),
             config: Arc::new(config),
         }
     }
 
+    pub async fn get_k8s_distribution(&self) -> Result<&KubernetesDistribution, PreparationError> {
+        self.flavour
+            .get_or_try_init(async || {
+                let client = self.k8s_client().await.unwrap();
+
+                let discovery = client.discovery().await.map_err(|e| {
+                    PreparationError::new(format!("Could not discover API groups: {}", e))
+                })?;
+
+                let version = client.get_apiserver_version().await.map_err(|e| {
+                    PreparationError::new(format!("Could not get server version: {}", e))
+                })?;
+
+                // OpenShift / OKD
+                if discovery
+                    .groups()
+                    .any(|g| g.name() == "project.openshift.io")
+                {
+                    return Ok(KubernetesDistribution::OpenshiftFamily);
+                }
+
+                // K3d / K3s
+                if version.git_version.contains("k3s") {
+                    return Ok(KubernetesDistribution::K3sFamily);
+                }
+
+                return Ok(KubernetesDistribution::Default);
+            })
+            .await
+    }
+
     async fn get_cluster_observability_operator_prometheus_application_score(
         &self,
         sender: RHOBObservability,
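A sketch of how a caller might branch on the detected distribution; the enum and `get_k8s_distribution` come from this diff, while the surrounding function is illustrative:

```rust
use harmony::topology::{K8sAnywhereTopology, KubernetesDistribution, PreparationError};

// Hypothetical caller mapping the detected flavour to a label.
async fn describe(topology: &K8sAnywhereTopology) -> Result<&'static str, PreparationError> {
    Ok(match topology.get_k8s_distribution().await? {
        KubernetesDistribution::OpenshiftFamily => "OpenShift/OKD family",
        KubernetesDistribution::K3sFamily => "k3s/k3d family",
        KubernetesDistribution::Default => "vanilla Kubernetes",
    })
}
```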
@@ -372,7 +414,9 @@ impl K8sAnywhereTopology {
         if let Some(Some(k8s_state)) = self.k8s_state.get() {
             match k8s_state.source {
                 K8sSource::LocalK3d => {
-                    warn!("Installing observability operator is not supported on LocalK3d source");
+                    warn!(
+                        "Installing observability operator is not supported on LocalK3d source"
+                    );
                     return Ok(PreparationOutcome::Noop);
                     debug!("installing cluster observability operator");
                     todo!();
@@ -577,36 +621,56 @@ impl TenantManager for K8sAnywhereTopology {
 
 #[async_trait]
 impl Ingress for K8sAnywhereTopology {
     //TODO this is specifically for openshift/okd which violates the k8sanywhere idea
     async fn get_domain(&self, service: &str) -> Result<String, PreparationError> {
+        use log::{trace, debug, warn};
+
         let client = self.k8s_client().await?;
+
         if let Some(Some(k8s_state)) = self.k8s_state.get() {
             match k8s_state.source {
-                K8sSource::LocalK3d => Ok(format!("{service}.local.k3d")),
+                K8sSource::LocalK3d => {
+                    // Local developer UX
+                    return Ok(format!("{service}.local.k3d"));
+                }
                 K8sSource::Kubeconfig => {
-                    self.openshift_ingress_operator_available().await?;
-
-                    let gvk = GroupVersionKind {
-                        group: "operator.openshift.io".into(),
-                        version: "v1".into(),
-                        kind: "IngressController".into(),
-                    };
-                    let ic = client
-                        .get_resource_json_value(
-                            "default",
-                            Some("openshift-ingress-operator"),
-                            &gvk,
-                        )
-                        .await
-                        .map_err(|_| {
-                            PreparationError::new("Failed to fetch IngressController".to_string())
-                        })?;
-
-                    match ic.data["status"]["domain"].as_str() {
-                        Some(domain) => Ok(format!("{service}.{domain}")),
-                        None => Err(PreparationError::new("Could not find domain".to_string())),
+                    trace!("K8sSource is kubeconfig; attempting to detect domain");
+
+                    // 1) Try OpenShift IngressController domain (backward compatible)
+                    if self.openshift_ingress_operator_available().await.is_ok() {
+                        trace!("OpenShift ingress operator detected; using IngressController");
+                        let gvk = GroupVersionKind {
+                            group: "operator.openshift.io".into(),
+                            version: "v1".into(),
+                            kind: "IngressController".into(),
+                        };
+                        let ic = client
+                            .get_resource_json_value("default", Some("openshift-ingress-operator"), &gvk)
+                            .await
+                            .map_err(|_| PreparationError::new("Failed to fetch IngressController".to_string()))?;
+
+                        if let Some(domain) = ic.data["status"]["domain"].as_str() {
+                            return Ok(format!("{service}.{domain}"));
+                        } else {
+                            warn!("OpenShift IngressController present but no status.domain set");
+                        }
+                    } else {
+                        trace!("OpenShift ingress operator not detected; trying generic Kubernetes");
                     }
+
+                    // 2) Try NGINX Ingress Controller common setups
+                    //    2.a) Well-known namespace/name for the controller Service
+                    //         - upstream default: namespace "ingress-nginx", service "ingress-nginx-controller"
+                    //         - some distros: "ingress-nginx-controller" svc in "ingress-nginx" ns
+                    //    If found with LoadBalancer ingress hostname, use its base domain.
+                    if let Some(domain) = try_nginx_lb_domain(&client).await? {
+                        return Ok(format!("{service}.{domain}"));
+                    }
+
+                    // 3) Fallback: internal cluster DNS suffix (service.namespace.svc.cluster.local)
+                    //    We don't have tenant namespace here, so we fallback to 'default' with a warning.
+                    warn!("Could not determine external ingress domain; falling back to internal-only DNS");
+                    let internal = format!("{service}.default.svc.cluster.local");
+                    Ok(internal)
                 }
             }
         } else {
@@ -616,3 +680,57 @@ impl Ingress for K8sAnywhereTopology {
         }
     }
 }
+async fn try_nginx_lb_domain(client: &K8sClient) -> Result<Option<String>, PreparationError> {
+    use log::{trace, debug};
+
+    // Try common service path: svc/ingress-nginx-controller in ns/ingress-nginx
+    let svc_gvk = GroupVersionKind {
+        group: "".into(), // core
+        version: "v1".into(),
+        kind: "Service".into(),
+    };
+
+    let candidates = [
+        ("ingress-nginx", "ingress-nginx-controller"),
+        ("ingress-nginx", "ingress-nginx-controller-internal"),
+        ("ingress-nginx", "ingress-nginx"), // some charts name the svc like this
+        ("kube-system", "ingress-nginx-controller"), // less common but seen
+    ];
+
+    for (ns, name) in candidates {
+        trace!("Checking NGINX Service {ns}/{name} for LoadBalancer hostname");
+        if let Ok(svc) = client.get_resource_json_value(ns, Some(name), &svc_gvk).await {
+            let lb_hosts = svc.data["status"]["loadBalancer"]["ingress"]
+                .as_array()
+                .cloned()
+                .unwrap_or_default();
+            for entry in lb_hosts {
+                if let Some(host) = entry.get("hostname").and_then(|v| v.as_str()) {
+                    debug!("Found NGINX LB hostname: {host}");
+                    if let Some(domain) = extract_base_domain(host) {
+                        return Ok(Some(domain.to_string()));
+                    } else {
+                        return Ok(Some(host.to_string())); // already a domain
+                    }
+                }
+                if let Some(ip) = entry.get("ip").and_then(|v| v.as_str()) {
+                    // If only an IP is exposed, we can't create a hostname; return None to keep searching
+                    debug!("NGINX LB exposes IP {ip} (no hostname); skipping");
+                }
+            }
+        }
+    }
+
+    Ok(None)
+}
+
+fn extract_base_domain(host: &str) -> Option<String> {
+    // For a host like a1b2c3d4e5f6abcdef.elb.amazonaws.com -> base domain elb.amazonaws.com
+    // For a managed DNS like xyz.example.com -> base domain example.com (keep 2+ labels)
+    // Heuristic: keep last 2 labels by default; special-case known multi-label TLDs if needed.
+    let parts: Vec<&str> = host.split('.').collect();
+    if parts.len() >= 2 {
+        // Very conservative: last 2 labels
+        Some(parts[parts.len() - 2..].join("."))
+    } else {
+        None
+    }
+}
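A standalone sketch exercising the base-domain heuristic above. Note that keeping only the last two labels means the AWS ELB case from the code comment actually resolves to `amazonaws.com` rather than `elb.amazonaws.com`:

```rust
// Self-contained copy of extract_base_domain for a quick behavioral check.
fn extract_base_domain(host: &str) -> Option<String> {
    let parts: Vec<&str> = host.split('.').collect();
    if parts.len() >= 2 {
        Some(parts[parts.len() - 2..].join("."))
    } else {
        None
    }
}

fn main() {
    assert_eq!(
        extract_base_domain("a1b2c3.elb.amazonaws.com").as_deref(),
        Some("amazonaws.com")
    );
    assert_eq!(
        extract_base_domain("xyz.example.com").as_deref(),
        Some("example.com")
    );
    assert_eq!(extract_base_domain("localhost"), None);
}
```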
@@ -1,9 +1,10 @@
 use async_trait::async_trait;
 use derive_new::new;
+use serde::{Deserialize, Serialize};
 
 use super::{HelmCommand, PreparationError, PreparationOutcome, Topology};
 
-#[derive(new)]
+#[derive(new, Clone, Debug, Serialize, Deserialize)]
 pub struct LocalhostTopology;
 
 #[async_trait]
@@ -186,7 +186,7 @@ impl TopologyState {
     }
 }
 
-#[derive(Debug)]
+#[derive(Debug, PartialEq)]
 pub enum DeploymentTarget {
     LocalDev,
     Staging,
@@ -10,7 +10,7 @@ use super::OPNSenseFirewall;
 
 #[async_trait]
 impl DnsServer for OPNSenseFirewall {
-    async fn register_hosts(&self, hosts: Vec<DnsRecord>) -> Result<(), ExecutorError> {
+    async fn register_hosts(&self, _hosts: Vec<DnsRecord>) -> Result<(), ExecutorError> {
         todo!("Refactor this to use dnsmasq")
         // let mut writable_opnsense = self.opnsense_config.write().await;
         // let mut dns = writable_opnsense.dns();
 
@@ -68,7 +68,7 @@ impl DnsServer for OPNSenseFirewall {
         self.host.clone()
     }
 
-    async fn register_dhcp_leases(&self, register: bool) -> Result<(), ExecutorError> {
+    async fn register_dhcp_leases(&self, _register: bool) -> Result<(), ExecutorError> {
         todo!("Refactor this to use dnsmasq")
         // let mut writable_opnsense = self.opnsense_config.write().await;
         // let mut dns = writable_opnsense.dns();
@@ -1,7 +1,10 @@
+use std::error::Error;
+
 use async_trait::async_trait;
+use derive_new::new;
 use serde::Serialize;
 
-use crate::topology::Topology;
+use crate::{executors::ExecutorError, topology::Topology};
 
 /// An ApplicationFeature provided by harmony, such as Backups, Monitoring, MultisiteAvailability,
 /// ContinuousIntegration, ContinuousDelivery
@@ -9,7 +12,10 @@ use crate::topology::Topology;
 pub trait ApplicationFeature<T: Topology>:
     std::fmt::Debug + Send + Sync + ApplicationFeatureClone<T>
 {
-    async fn ensure_installed(&self, topology: &T) -> Result<(), String>;
+    async fn ensure_installed(
+        &self,
+        topology: &T,
+    ) -> Result<InstallationOutcome, InstallationError>;
     fn name(&self) -> String;
 }
@@ -40,3 +46,60 @@ impl<T: Topology> Clone for Box<dyn ApplicationFeature<T>> {
         self.clone_box()
     }
 }
 
+#[derive(Debug, Clone, PartialEq, Eq)]
+pub enum InstallationOutcome {
+    Success { details: Vec<String> },
+    Noop,
+}
+
+impl InstallationOutcome {
+    pub fn success() -> Self {
+        Self::Success { details: vec![] }
+    }
+
+    pub fn success_with_details(details: Vec<String>) -> Self {
+        Self::Success { details }
+    }
+
+    pub fn noop() -> Self {
+        Self::Noop
+    }
+}
+
+#[derive(Debug, Clone, new)]
+pub struct InstallationError {
+    msg: String,
+}
+
+impl std::fmt::Display for InstallationError {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        f.write_str(&self.msg)
+    }
+}
+
+impl Error for InstallationError {}
+
+impl From<ExecutorError> for InstallationError {
+    fn from(value: ExecutorError) -> Self {
+        Self {
+            msg: format!("InstallationError : {value}"),
+        }
+    }
+}
+
+impl From<kube::Error> for InstallationError {
+    fn from(value: kube::Error) -> Self {
+        Self {
+            msg: format!("InstallationError : {value}"),
+        }
+    }
+}
+
+impl From<String> for InstallationError {
+    fn from(value: String) -> Self {
+        Self {
+            msg: format!("PreparationError : {value}"),
+        }
+    }
+}
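A minimal sketch of a feature written against the new contract; `NoopFeature` is illustrative and assumes the crate's blanket `ApplicationFeatureClone` impl covers any `Clone` feature, as is usual with this clone-box pattern:

```rust
use async_trait::async_trait;
use harmony::modules::application::{
    ApplicationFeature, InstallationError, InstallationOutcome,
};
use harmony::topology::Topology;

// Illustrative feature that installs nothing; it exists only to show the
// new Result<InstallationOutcome, InstallationError> return type.
#[derive(Debug, Clone)]
struct NoopFeature;

#[async_trait]
impl<T: Topology> ApplicationFeature<T> for NoopFeature {
    async fn ensure_installed(
        &self,
        _topology: &T,
    ) -> Result<InstallationOutcome, InstallationError> {
        Ok(InstallationOutcome::noop())
    }

    fn name(&self) -> String {
        "noop".to_string()
    }
}
```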
@@ -21,7 +21,7 @@ pub struct Helm {
     pub skip_schema_validation: Option<bool>,
     pub version: Option<String>,
     pub kube_version: Option<String>,
-    pub api_versions: Vec<String>,
+    // pub api_versions: Vec<String>,
     pub namespace: Option<String>,
 }
@@ -105,7 +105,7 @@ impl Default for ArgoApplication {
             skip_schema_validation: None,
             version: None,
             kube_version: None,
-            api_versions: vec![],
+            // api_versions: vec![],
             namespace: None,
         },
         path: "".to_string(),
@@ -155,7 +155,7 @@ impl From<CDApplicationConfig> for ArgoApplication {
             skip_schema_validation: None,
             version: None,
             kube_version: None,
-            api_versions: vec![],
+            // api_versions: vec![],
             namespace: None,
         },
     },
@@ -181,13 +181,11 @@ impl From<CDApplicationConfig> for ArgoApplication {
 }
 
 impl ArgoApplication {
-    pub fn to_yaml(&self) -> serde_yaml::Value {
+    pub fn to_yaml(&self, target_namespace: Option<&str>) -> serde_yaml::Value {
         let name = &self.name;
-        let namespace = if let Some(ns) = self.namespace.as_ref() {
-            ns
-        } else {
-            "argocd"
-        };
+        let default_ns = "argocd".to_string();
+        let namespace: &str =
+            target_namespace.unwrap_or(self.namespace.as_ref().unwrap_or(&default_ns));
         let project = &self.project;
 
         let yaml_str = format!(
@@ -285,7 +283,7 @@ mod tests {
             skip_schema_validation: None,
             version: None,
             kube_version: None,
-            api_versions: vec![],
+            // api_versions: vec![],
             namespace: None,
         },
         path: "".to_string(),
@@ -345,7 +343,7 @@ spec:
 
         assert_eq!(
             expected_yaml_output.trim(),
-            serde_yaml::to_string(&app.clone().to_yaml())
+            serde_yaml::to_string(&app.clone().to_yaml(None))
                 .unwrap()
                 .trim()
         );
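A sketch of the new namespace precedence in `to_yaml`: an explicit `target_namespace` wins, then the application's own namespace, then the `argocd` default. Both calls below are illustrative:

```rust
use harmony::modules::application::features::ArgoApplication;

// Hypothetical helper rendering the same app into two control namespaces.
fn render(app: &ArgoApplication) -> (serde_yaml::Value, serde_yaml::Value) {
    let into_default = app.clone().to_yaml(None); // falls back to "argocd"
    let into_tenant = app.clone().to_yaml(Some("my-team")); // override wins
    (into_default, into_tenant)
}
```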
@@ -2,7 +2,7 @@ use async_trait::async_trait;
 use log::info;
 
 use crate::{
-    modules::application::ApplicationFeature,
+    modules::application::{ApplicationFeature, InstallationError, InstallationOutcome},
     topology::{K8sclient, Topology},
 };
 
@@ -29,7 +29,10 @@ impl Default for PublicEndpoint {
 /// For now we only support K8s ingress, but we will support more stuff at some point
 #[async_trait]
 impl<T: Topology + K8sclient + 'static> ApplicationFeature<T> for PublicEndpoint {
-    async fn ensure_installed(&self, _topology: &T) -> Result<(), String> {
+    async fn ensure_installed(
+        &self,
+        _topology: &T,
+    ) -> Result<InstallationOutcome, InstallationError> {
         info!(
             "Making sure public endpoint is installed for port {}",
             self.application_port
@@ -1,21 +1,19 @@
 use async_trait::async_trait;
 use kube::{Api, api::GroupVersionKind};
-use log::{debug, warn};
+use log::{debug, info, trace, warn};
 use non_blank_string_rs::NonBlankString;
 use serde::Serialize;
 use serde::de::DeserializeOwned;
-use std::{process::Command, str::FromStr, sync::Arc};
+use std::{str::FromStr, sync::Arc};
 
 use crate::{
     data::Version,
     interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
     inventory::Inventory,
-    modules::helm::chart::{HelmChartScore, HelmRepository},
-    score::Score,
-    topology::{
-        HelmCommand, K8sclient, PreparationError, PreparationOutcome, Topology, ingress::Ingress,
-        k8s::K8sClient,
-    },
+    modules::{
+        argocd::{ArgoDeploymentType, detect_argo_deployment_type},
+        helm::chart::{HelmChartScore, HelmRepository},
+    },
+    score::Score,
+    topology::{HelmCommand, K8sclient, Topology, ingress::Ingress, k8s::K8sClient},
 };
 use harmony_types::id::Id;
@@ -24,6 +22,7 @@ use super::ArgoApplication;
 #[derive(Debug, Serialize, Clone)]
 pub struct ArgoHelmScore {
     pub namespace: String,
+    // TODO: remove and rely on topology (it now knows the flavor)
     pub openshift: bool,
     pub argo_apps: Vec<ArgoApplication>,
 }
@@ -54,26 +53,102 @@ impl<T: Topology + K8sclient + HelmCommand + Ingress> Interpret<T> for ArgoInterpret {
         inventory: &Inventory,
         topology: &T,
     ) -> Result<Outcome, InterpretError> {
-        let k8s_client = topology.k8s_client().await?;
-        let domain = topology.get_domain("argo").await?;
-        let helm_score =
-            argo_helm_chart_score(&self.score.namespace, self.score.openshift, &domain);
+        trace!("Starting ArgoInterpret execution {self:?}");
+        let k8s_client: Arc<K8sClient> = topology.k8s_client().await?;
+        trace!("Got k8s client");
+        let desired_ns = self.score.namespace.clone();
 
-        helm_score.interpret(inventory, topology).await?;
+        debug!("ArgoInterpret detecting cluster configuration");
+        let svc = format!("argo-{}", desired_ns);
+        let domain = topology.get_domain(&svc).await?;
+        debug!("Resolved Argo service domain for '{}': {}", svc, domain);
 
-        k8s_client
-            .apply_yaml_many(&self.argo_apps.iter().map(|a| a.to_yaml()).collect(), None)
-            .await
-            .unwrap();
+        // Detect current Argo deployment type
+        let current = detect_argo_deployment_type(&k8s_client, &desired_ns).await?;
+        info!("Detected Argo deployment type: {:?}", current);
 
-        Ok(Outcome::success(format!(
-            "ArgoCD installed with {} {}",
-            self.argo_apps.len(),
-            match self.argo_apps.len() {
-                1 => "application",
-                _ => "applications",
-            }
-        )))
+        // Decide control namespace and whether we must install
+        let (control_ns, must_install) = match current.clone() {
+            ArgoDeploymentType::NotInstalled => {
+                info!(
+                    "Argo CD not installed. Will install via Helm into namespace '{}'.",
+                    desired_ns
+                );
+                (desired_ns.clone(), true)
+            }
+            ArgoDeploymentType::AvailableInDesiredNamespace(ns) => {
+                info!(
+                    "Argo CD already installed by Harmony in '{}'. Skipping install.",
+                    ns
+                );
+                (ns, false)
+            }
+            ArgoDeploymentType::InstalledClusterWide(ns) => {
+                info!("Argo CD installed cluster-wide in namespace '{}'.", ns);
+                (ns, false)
+            }
+            ArgoDeploymentType::InstalledNamespaceScoped(ns) => {
+                // TODO we could support this use case by installing a new argo instance. But that
+                // means handling a few cases that are out of scope for now :
+                // - Whether argo operator is installed
+                // - Managing CRD versions compatibility
+                // - Potentially handling the various k8s flavors and setups we might encounter
+                //
+                // There is a possibility that the helm chart already handles most or even all of these use cases but they are out of scope for now.
+                let msg = format!(
+                    "Argo CD found in '{}' but it is namespace-scoped and not supported for attachment yet.",
+                    ns
+                );
+                warn!("{}", msg);
+                return Err(InterpretError::new(msg));
+            }
+        };
 
+        info!("ArgoCD will be installed : {must_install} . Current argocd status : {current:?} ");
+
+        if must_install {
+            let helm_score = argo_helm_chart_score(&desired_ns, self.score.openshift, &domain);
+            info!(
+                "Installing Argo CD via Helm into namespace '{}' ...",
+                desired_ns
+            );
+            helm_score.interpret(inventory, topology).await?;
+            info!("Argo CD install complete in '{}'.", desired_ns);
+        }
+
+        let yamls: Vec<serde_yaml::Value> = self
+            .argo_apps
+            .iter()
+            .map(|a| a.to_yaml(Some(&control_ns)))
+            .collect();
+        info!(
+            "Applying {} Argo application object(s) into control namespace '{}'.",
+            yamls.len(),
+            control_ns
+        );
+        k8s_client
+            .apply_yaml_many(&yamls, Some(control_ns.as_str()))
+            .await
+            .map_err(|e| InterpretError::new(format!("Failed applying Argo CRs: {e}")))?;
+
+        Ok(Outcome::success_with_details(
+            format!(
+                "ArgoCD {} {}",
+                self.argo_apps.len(),
+                if self.argo_apps.len() == 1 {
+                    "application"
+                } else {
+                    "applications"
+                }
+            ),
+            vec![
+                format!("control_namespace={}", control_ns),
+                format!("argo ui: http://{}", domain),
+            ],
+        ))
     }
     fn get_name(&self) -> InterpretName {
@@ -81,7 +156,7 @@ impl<T: Topology + K8sclient + HelmCommand + Ingress> Interpret<T> for ArgoInterpret {
     }
 
     fn get_version(&self) -> Version {
-        todo!()
+        Version::from("0.1.0").unwrap()
     }
 
     fn get_status(&self) -> InterpretStatus {
@@ -89,39 +164,7 @@ impl<T: Topology + K8sclient + HelmCommand + Ingress> Interpret<T> for ArgoInterpret {
     }
 
     fn get_children(&self) -> Vec<Id> {
-        todo!()
+        vec![]
     }
 }
-
-impl ArgoInterpret {
-    pub async fn get_host_domain(
-        &self,
-        client: Arc<K8sClient>,
-        openshift: bool,
-    ) -> Result<String, InterpretError> {
-        //This should be the job of the topology to determine if we are in
-        //openshift, potentially we need on openshift topology the same way we create a
-        //localhosttopology
-        match openshift {
-            true => {
-                let gvk = GroupVersionKind {
-                    group: "operator.openshift.io".into(),
-                    version: "v1".into(),
-                    kind: "IngressController".into(),
-                };
-                let ic = client
-                    .get_resource_json_value("default", Some("openshift-ingress-operator"), &gvk)
-                    .await?;
-
-                match ic.data["status"]["domain"].as_str() {
-                    Some(domain) => return Ok(domain.to_string()),
-                    None => return Err(InterpretError::new("Could not find domain".to_string())),
-                }
-            }
-            false => {
-                todo!()
-            }
-        };
-        vec![]
-    }
-}
@@ -156,6 +199,9 @@ global:
  ## Used for ingresses, certificates, SSO, notifications, etc.
  domain: {domain}

  securityContext:
    runAsUser: null

  # -- Runtime class name for all components
  runtimeClassName: ""

@@ -467,6 +513,13 @@ redis:
  # -- Redis name
  name: redis

  serviceAccount:
    create: true

  securityContext:
    runAsUser: null


  ## Redis image
  image:
    # -- Redis repository
@@ -5,8 +5,8 @@ pub use endpoint::*;
mod monitoring;
pub use monitoring::*;

mod continuous_delivery;
pub use continuous_delivery::*;
mod packaging_deployment;
pub use packaging_deployment::*;

mod helm_argocd_score;
pub use helm_argocd_score::*;
@@ -1,4 +1,6 @@
use crate::modules::application::{Application, ApplicationFeature};
use crate::modules::application::{
    Application, ApplicationFeature, InstallationError, InstallationOutcome,
};
use crate::modules::monitoring::application_monitoring::application_monitoring_score::ApplicationMonitoringScore;
use crate::modules::monitoring::kube_prometheus::crd::crd_alertmanager_config::CRDPrometheus;
use crate::topology::MultiTargetTopology;
@@ -43,7 +45,10 @@ impl<
        + std::fmt::Debug,
    > ApplicationFeature<T> for Monitoring
{
    async fn ensure_installed(&self, topology: &T) -> Result<(), String> {
    async fn ensure_installed(
        &self,
        topology: &T,
    ) -> Result<InstallationOutcome, InstallationError> {
        info!("Ensuring monitoring is available for application");
        let namespace = topology
            .get_tenant_config()
@@ -103,7 +108,7 @@ impl<
            .await
            .map_err(|e| e.to_string())?;

        Ok(())
        Ok(InstallationOutcome::success())
    }

    fn name(&self) -> String {
@@ -10,12 +10,11 @@ use crate::{
    data::Version,
    inventory::Inventory,
    modules::application::{
        ApplicationFeature, HelmPackage, OCICompliant,
        features::{ArgoApplication, ArgoHelmScore},
        features::{ArgoApplication, ArgoHelmScore}, webapp::Webapp, ApplicationFeature, HelmPackage, InstallationError, InstallationOutcome, OCICompliant
    },
    score::Score,
    topology::{
        DeploymentTarget, HelmCommand, K8sclient, MultiTargetTopology, Topology, ingress::Ingress,
        ingress::Ingress, DeploymentTarget, HelmCommand, K8sclient, MultiTargetTopology, Topology
    },
};

@@ -47,11 +46,11 @@ use crate::{
/// - ArgoCD to install/upgrade/rollback/inspect k8s resources
/// - Kubernetes for runtime orchestration
#[derive(Debug, Default, Clone)]
pub struct ContinuousDelivery<A: OCICompliant + HelmPackage> {
pub struct PackagingDeployment<A: OCICompliant + HelmPackage + Webapp> {
    pub application: Arc<A>,
}

impl<A: OCICompliant + HelmPackage> ContinuousDelivery<A> {
impl<A: OCICompliant + HelmPackage + Webapp> PackagingDeployment<A> {
    async fn deploy_to_local_k3d(
        &self,
        app_name: String,
@@ -137,16 +136,24 @@ impl<A: OCICompliant + HelmPackage> ContinuousDelivery<A> {

#[async_trait]
impl<
    A: OCICompliant + HelmPackage + Clone + 'static,
    A: OCICompliant + HelmPackage + Webapp + Clone + 'static,
    T: Topology + HelmCommand + MultiTargetTopology + K8sclient + Ingress + 'static,
> ApplicationFeature<T> for ContinuousDelivery<A>
> ApplicationFeature<T> for PackagingDeployment<A>
{
    async fn ensure_installed(&self, topology: &T) -> Result<(), String> {
    async fn ensure_installed(
        &self,
        topology: &T,
    ) -> Result<InstallationOutcome, InstallationError> {
        let image = self.application.image_name();
        let domain = topology

        let domain = if topology.current_target() == DeploymentTarget::Production {
            self.application.dns()
        } else {
            topology
                .get_domain(&self.application.name())
                .await
                .map_err(|e| e.to_string())?;
                .map_err(|e| e.to_string())?
        };

        // TODO Write CI/CD workflow files
        // we can autodetect the CI type using the remote url (default to github action for github
@@ -191,7 +198,7 @@ impl<
            openshift: true,
            argo_apps: vec![ArgoApplication::from(CDApplicationConfig {
                // helm pull oci://hub.nationtech.io/harmony/harmony-example-rust-webapp-chart --version 0.1.0
                version: Version::from("0.1.0").unwrap(),
                version: Version::from("0.2.1").unwrap(),
                helm_chart_repo_url: "hub.nationtech.io/harmony".to_string(),
                helm_chart_name: format!("{}-chart", self.application.name()),
                values_overrides: None,
@@ -205,7 +212,11 @@ impl<
            .unwrap();
        }
        };
        Ok(())

        Ok(InstallationOutcome::success_with_details(vec![format!(
            "{}: http://{domain}",
            self.application.name()
        )]))
    }
    fn name(&self) -> String {
        "ContinuousDelivery".to_string()
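The domain-selection change above is the heart of this hunk: on a `Production` target the application's declared `dns()` wins, while every other target gets a topology-generated domain. A minimal sketch of that rule in isolation, assuming simplified stand-ins for the topology types used in the diff (the real `get_domain` is async and fallible):

```rust
// Simplified stand-ins; only the shape of the decision matters here.
#[derive(PartialEq)]
enum DeploymentTarget {
    Production,
    Staging,
}

struct Topo {
    target: DeploymentTarget,
}

impl Topo {
    fn current_target(&self) -> &DeploymentTarget {
        &self.target
    }

    // In Harmony this is async and returns a Result; a plain String keeps the sketch short.
    fn get_domain(&self, app: &str) -> String {
        format!("{app}.dev.example.local")
    }
}

fn resolve_domain(topo: &Topo, app_name: &str, production_dns: &str) -> String {
    // Production traffic must hit the app's public DNS name;
    // every other target gets a generated, environment-local domain.
    if *topo.current_target() == DeploymentTarget::Production {
        production_dns.to_string()
    } else {
        topo.get_domain(app_name)
    }
}
```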
@@ -1,7 +1,8 @@
use std::sync::Arc;

use crate::modules::application::{Application, ApplicationFeature};
use crate::modules::monitoring::application_monitoring::application_monitoring_score::ApplicationMonitoringScore;
use crate::modules::application::{
    Application, ApplicationFeature, InstallationError, InstallationOutcome,
};
use crate::modules::monitoring::application_monitoring::rhobs_application_monitoring_score::ApplicationRHOBMonitoringScore;

use crate::modules::monitoring::kube_prometheus::crd::rhob_alertmanager_config::RHOBObservability;
@@ -25,7 +26,7 @@ use harmony_types::net::Url;
use log::{debug, info};

#[derive(Debug, Clone)]
pub struct RHOBMonitoring {
pub struct Monitoring {
    pub application: Arc<dyn Application>,
    pub alert_receiver: Vec<Box<dyn AlertReceiver<RHOBObservability>>>,
}
@@ -41,9 +42,12 @@ impl<
        + Ingress
        + std::fmt::Debug
        + PrometheusApplicationMonitoring<RHOBObservability>,
    > ApplicationFeature<T> for RHOBMonitoring
    > ApplicationFeature<T> for Monitoring
{
    async fn ensure_installed(&self, topology: &T) -> Result<(), String> {
    async fn ensure_installed(
        &self,
        topology: &T,
    ) -> Result<InstallationOutcome, InstallationError> {
        info!("Ensuring monitoring is available for application");
        let namespace = topology
            .get_tenant_config()
@@ -59,12 +63,13 @@ impl<
            application: self.application.clone(),
            receivers: self.alert_receiver.clone(),
        };
        let domain = topology
            .get_domain("ntfy")
            .await
            .map_err(|e| format!("could not get domain {e}"))?;
        let ntfy = NtfyScore {
            namespace: namespace.clone(),
            host: topology
                .get_domain("ntfy")
                .await
                .map_err(|e| format!("Could not get domain {e}"))?,
            host: domain.clone(),
        };
        ntfy.interpret(&Inventory::empty(), topology)
            .await
@@ -86,27 +91,33 @@ impl<
            .replace("=", "");

        debug!("ntfy_default_auth_param: {ntfy_default_auth_param}");

        let ntfy_receiver = WebhookReceiver {
            name: "ntfy-webhook".to_string(),
            url: Url::Url(
                url::Url::parse(
                    format!(
                        "http://ntfy.{}.svc.cluster.local/rust-web-app?auth={ntfy_default_auth_param}",
                        namespace.clone()
                        "http://{domain}/{}?auth={ntfy_default_auth_param}",
                        self.application.name()
                    )
                    .as_str(),
                )
                .unwrap(),
            ),
        };

        debug!(
            "ntfy webhook receiver \n{:#?}\nntfy topic: {}",
            ntfy_receiver.clone(),
            self.application.name()
        );
        alerting_score.receivers.push(Box::new(ntfy_receiver));
        alerting_score
            .interpret(&Inventory::empty(), topology)
            .await
            .map_err(|e| e.to_string())?;
        Ok(())
        Ok(InstallationOutcome::success_with_details(vec![format!(
            "ntfy topic: {}",
            self.application.name()
        )]))
    }
    fn name(&self) -> String {
        "Monitoring".to_string()
@@ -2,6 +2,7 @@ mod feature;
pub mod features;
pub mod oci;
mod rust;
mod webapp;
use std::sync::Arc;

pub use feature::*;
@@ -24,8 +25,8 @@ use harmony_types::id::Id;
#[derive(Clone, Debug)]
pub enum ApplicationFeatureStatus {
    Installing,
    Installed,
    Failed { details: String },
    Installed { details: Vec<String> },
    Failed { message: String },
}

pub trait Application: std::fmt::Debug + Send + Sync {
@@ -65,27 +66,32 @@ impl<A: Application, T: Topology + std::fmt::Debug> Interpret<T> for Application
        .unwrap();

        let _ = match feature.ensure_installed(topology).await {
            Ok(()) => {
            Ok(outcome) => {
                instrumentation::instrument(HarmonyEvent::ApplicationFeatureStateChanged {
                    topology: topology.name().into(),
                    application: self.application.name(),
                    feature: feature.name(),
                    status: ApplicationFeatureStatus::Installed,
                    status: ApplicationFeatureStatus::Installed {
                        details: match outcome {
                            InstallationOutcome::Success { details } => details,
                            InstallationOutcome::Noop => vec![],
                        },
                    },
                })
                .unwrap();
            }
            Err(msg) => {
            Err(error) => {
                instrumentation::instrument(HarmonyEvent::ApplicationFeatureStateChanged {
                    topology: topology.name().into(),
                    application: self.application.name(),
                    feature: feature.name(),
                    status: ApplicationFeatureStatus::Failed {
                        details: msg.clone(),
                        message: error.to_string(),
                    },
                })
                .unwrap();
                return Err(InterpretError::new(format!(
                    "Application Interpret failed to install feature : {msg}"
                    "Application Interpret failed to install feature : {error}"
                )));
            }
        };
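The match above destructures an `InstallationOutcome` whose definition is not part of this diff. Judging from the variants and constructors used across the change set (`success()`, `success_with_details(...)`, `Noop`), it is presumably shaped roughly like this hypothetical reconstruction:

```rust
/// Hypothetical reconstruction of the InstallationOutcome type implied by this diff;
/// the committed definition may differ.
#[derive(Clone, Debug)]
pub enum InstallationOutcome {
    /// Installation ran; `details` carries user-facing follow-up lines
    /// (URLs, namespaces, ntfy topics, ...) surfaced by the CLI reporter.
    Success { details: Vec<String> },
    /// Nothing to do; contributes no detail lines.
    Noop,
}

impl InstallationOutcome {
    pub fn success() -> Self {
        Self::Success { details: vec![] }
    }

    pub fn success_with_details(details: Vec<String>) -> Self {
        Self::Success { details }
    }
}
```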
@@ -10,12 +10,13 @@ use dockerfile_builder::Dockerfile;
use dockerfile_builder::instruction::{CMD, COPY, ENV, EXPOSE, FROM, RUN, USER, WORKDIR};
use dockerfile_builder::instruction_builder::CopyBuilder;
use futures_util::StreamExt;
use log::{debug, info, log_enabled};
use log::{debug, error, info, log_enabled, trace, warn};
use serde::Serialize;
use tar::{Builder, Header};
use walkdir::WalkDir;

use crate::config::{REGISTRY_PROJECT, REGISTRY_URL};
use crate::modules::application::webapp::Webapp;
use crate::{score::Score, topology::Topology};

use super::{Application, ApplicationFeature, ApplicationInterpret, HelmPackage, OCICompliant};
@@ -60,6 +61,10 @@ pub struct RustWebapp {
    pub project_root: PathBuf,
    pub service_port: u32,
    pub framework: Option<RustWebFramework>,
    /// Host name that will be used in production environment.
    ///
    /// This is the place to put the public host name if this is a public facing webapp.
    pub dns: String,
}

impl Application for RustWebapp {
@@ -68,6 +73,12 @@ impl Application for RustWebapp {
    }
}

impl Webapp for RustWebapp {
    fn dns(&self) -> String {
        self.dns.clone()
    }
}

#[async_trait]
impl HelmPackage for RustWebapp {
    async fn build_push_helm_package(
@@ -162,7 +173,7 @@ impl RustWebapp {
        &self,
        image_name: &str,
    ) -> Result<String, Box<dyn std::error::Error>> {
        debug!("Generating Dockerfile for '{}'", self.name);
        info!("Generating Dockerfile for '{}'", self.name);
        let dockerfile = self.get_or_build_dockerfile();
        let quiet = !log_enabled!(log::Level::Debug);
        match dockerfile
@@ -195,7 +206,40 @@ impl RustWebapp {
        );

        while let Some(msg) = image_build_stream.next().await {
            debug!("Message: {msg:?}");
            trace!("Got bollard msg {msg:?}");
            match msg {
                Ok(msg) => {
                    if let Some(progress) = msg.progress_detail {
                        info!(
                            "Build progress {}/{}",
                            progress.current.unwrap_or(0),
                            progress.total.unwrap_or(0)
                        );
                    }

                    if let Some(mut log) = msg.stream {
                        if log.ends_with('\n') {
                            log.pop();
                            if log.ends_with('\r') {
                                log.pop();
                            }
                        }
                        info!("{log}");
                    }

                    if let Some(error) = msg.error {
                        warn!("Build error : {error:?}");
                    }

                    if let Some(error) = msg.error_detail {
                        warn!("Build error : {error:?}");
                    }
                }
                Err(e) => {
                    error!("Build failed : {e}");
                    return Err(format!("Build failed : {e}").into());
                }
            }
        }

        Ok(image_name.to_string())
@@ -427,52 +471,53 @@ impl RustWebapp {

        let (image_repo, image_tag) = image_url.rsplit_once(':').unwrap_or((image_url, "latest"));

        let app_name = &self.name;
        let service_port = self.service_port;
        // Create Chart.yaml
        let chart_yaml = format!(
            r#"
apiVersion: v2
name: {}
description: A Helm chart for the {} web application.
name: {chart_name}
description: A Helm chart for the {app_name} web application.
type: application
version: 0.1.0
appVersion: "{}"
version: 0.2.1
appVersion: "{image_tag}"
"#,
            chart_name, self.name, image_tag
        );
        fs::write(chart_dir.join("Chart.yaml"), chart_yaml)?;

        // Create values.yaml
        let values_yaml = format!(
            r#"
# Default values for {}.
# Default values for {chart_name}.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.

replicaCount: 1

image:
  repository: {}
  repository: {image_repo}
  pullPolicy: IfNotPresent
  # Overridden by the chart's appVersion
  tag: "{}"
  tag: "{image_tag}"

service:
  type: ClusterIP
  port: {}
  port: {service_port}

ingress:
  enabled: true
  tls: true
  # Annotations for cert-manager to handle SSL.
  annotations:
    # Add other annotations like nginx ingress class if needed
    # kubernetes.io/ingress.class: nginx
  hosts:
    - host: {}
    - host: {domain}
      paths:
        - path: /
          pathType: ImplementationSpecific
"#,
            chart_name, image_repo, image_tag, self.service_port, domain,
        );
        fs::write(chart_dir.join("values.yaml"), values_yaml)?;

@@ -549,7 +594,11 @@ spec:
        );
        fs::write(templates_dir.join("deployment.yaml"), deployment_yaml)?;

        let service_port = self.service_port;

        // Create templates/ingress.yaml
        // TODO get issuer name and tls config from topology as it may be different from one
        // cluster to another, also from one version to another
        let ingress_yaml = format!(
            r#"
{{{{- if $.Values.ingress.enabled -}}}}
@@ -562,13 +611,11 @@ metadata:
spec:
  {{{{- if $.Values.ingress.tls }}}}
  tls:
    {{{{- range $.Values.ingress.tls }}}}
    - hosts:
        {{{{- range .hosts }}}}
        - {{{{ . | quote }}}}
    - secretName: {{{{ include "chart.fullname" . }}}}-tls
      hosts:
        {{{{- range $.Values.ingress.hosts }}}}
        - {{{{ .host | quote }}}}
        {{{{- end }}}}
      secretName: {{{{ .secretName }}}}
    {{{{- end }}}}
  {{{{- end }}}}
  rules:
    {{{{- range $.Values.ingress.hosts }}}}
@@ -582,12 +629,11 @@ spec:
            service:
              name: {{{{ include "chart.fullname" $ }}}}
              port:
                number: {{{{ $.Values.service.port | default {} }}}}
                number: {{{{ $.Values.service.port | default {service_port} }}}}
    {{{{- end }}}}
{{{{- end }}}}
{{{{- end }}}}
"#,
            self.service_port
        );
        fs::write(templates_dir.join("ingress.yaml"), ingress_yaml)?;

7
harmony/src/modules/application/webapp.rs
Normal file
@@ -0,0 +1,7 @@
use super::Application;
use async_trait::async_trait;

#[async_trait]
pub trait Webapp: Application {
    fn dns(&self) -> String;
}
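The trait is intentionally tiny: any `Application` that also knows its public host name can participate in the production routing shown earlier. A minimal sketch of an implementor (hypothetical type, not part of this change set; the required `Application` impl is assumed and elided):

```rust
// Hypothetical application type used only to illustrate the trait surface.
#[derive(Debug)]
struct StaticSite {
    public_host: String,
}

// `impl Application for StaticSite` is assumed to exist elsewhere.
impl Webapp for StaticSite {
    // Returned verbatim when the deployment target is Production.
    fn dns(&self) -> String {
        self.public_host.clone()
    }
}
```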
203
harmony/src/modules/argocd/mod.rs
Normal file
@@ -0,0 +1,203 @@
use std::sync::Arc;

use log::{debug, info};

use crate::{interpret::InterpretError, topology::k8s::K8sClient};

#[derive(Clone, Debug, PartialEq, Eq)]
pub enum ArgoScope {
    ClusterWide(String),
    NamespaceScoped(String),
}

#[derive(Clone, Debug)]
pub struct DiscoveredArgo {
    pub control_namespace: String,
    pub scope: ArgoScope,
    pub has_crds: bool,
    pub has_applicationset: bool,
}

#[derive(Clone, Debug, PartialEq, Eq)]
pub enum ArgoDeploymentType {
    NotInstalled,
    AvailableInDesiredNamespace(String),
    InstalledClusterWide(String),
    InstalledNamespaceScoped(String),
}

pub async fn discover_argo_all(
    k8s: &Arc<K8sClient>,
) -> Result<Vec<DiscoveredArgo>, InterpretError> {
    use log::{debug, info, trace, warn};

    trace!("Starting Argo discovery");

    // CRDs
    let mut has_crds = true;
    let required_crds = vec!["applications.argoproj.io", "appprojects.argoproj.io"];
    trace!("Checking required Argo CRDs: {:?}", required_crds);

    for crd in required_crds {
        trace!("Verifying CRD presence: {crd}");
        let crd_exists = k8s.has_crd(crd).await.map_err(|e| {
            InterpretError::new(format!("Failed to verify existence of CRD {crd}: {e}"))
        })?;

        debug!("CRD {crd} exists: {crd_exists}");
        if !crd_exists {
            info!(
                "Missing Argo CRD {crd}, looks like Argo CD is not installed (or partially installed)"
            );
            has_crds = false;
            break;
        }
    }

    trace!(
        "Listing namespaces with healthy Argo CD deployments using selector app.kubernetes.io/part-of=argocd"
    );
    let mut candidate_namespaces = k8s
        .list_namespaces_with_healthy_deployments("app.kubernetes.io/part-of=argocd")
        .await
        .map_err(|e| InterpretError::new(format!("List healthy argocd deployments: {e}")))?;
    trace!(
        "Listing namespaces with healthy Argo CD deployments using selector app.kubernetes.io/name=argo-cd"
    );
    candidate_namespaces.append(
        &mut k8s
            .list_namespaces_with_healthy_deployments("app.kubernetes.io/name=argo-cd")
            .await
            .map_err(|e| InterpretError::new(format!("List healthy argocd deployments: {e}")))?,
    );

    debug!(
        "Discovered {} candidate namespace(s) for Argo CD: {:?}",
        candidate_namespaces.len(),
        candidate_namespaces
    );

    let mut found = Vec::new();
    for ns in candidate_namespaces {
        trace!("Evaluating namespace '{ns}' for Argo CD instance");

        // Require the application-controller to be healthy (sanity check)
        trace!(
            "Checking healthy deployment with label app.kubernetes.io/name=argocd-application-controller in namespace '{ns}'"
        );
        let controller_ok = k8s
            .has_healthy_deployment_with_label(
                &ns,
                "app.kubernetes.io/name=argocd-application-controller",
            )
            .await
            .unwrap_or_else(|e| {
                warn!(
                    "Error while checking application-controller health in namespace '{ns}': {e}"
                );
                false
            }) || k8s
            .has_healthy_deployment_with_label(
                &ns,
                "app.kubernetes.io/component=controller",
            )
            .await
            .unwrap_or_else(|e| {
                warn!(
                    "Error while checking application-controller health in namespace '{ns}': {e}"
                );
                false
            });
        debug!("Namespace '{ns}': application-controller healthy = {controller_ok}");

        if !controller_ok {
            trace!("Skipping namespace '{ns}' because application-controller is not healthy");
            continue;
        }

        trace!("Determining Argo CD scope for namespace '{ns}' (cluster-wide vs namespace-scoped)");
        let scope = match k8s.is_argocd_cluster_wide(&ns).await {
            Ok(true) => {
                debug!("Namespace '{ns}' identified as cluster-wide Argo CD control plane");
                ArgoScope::ClusterWide(ns.to_string())
            }
            Ok(false) => {
                debug!("Namespace '{ns}' identified as namespace-scoped Argo CD control plane");
                ArgoScope::NamespaceScoped(ns.to_string())
            }
            Err(e) => {
                warn!(
                    "Failed to determine Argo CD scope for namespace '{ns}': {e}. Assuming namespace-scoped."
                );
                ArgoScope::NamespaceScoped(ns.to_string())
            }
        };

        trace!("Checking optional ApplicationSet CRD (applicationsets.argoproj.io)");
        let has_applicationset = match k8s.has_crd("applicationsets.argoproj.io").await {
            Ok(v) => {
                debug!("applicationsets.argoproj.io present: {v}");
                v
            }
            Err(e) => {
                warn!("Failed to check applicationsets.argoproj.io CRD: {e}. Assuming absent.");
                false
            }
        };

        let argo = DiscoveredArgo {
            control_namespace: ns.clone(),
            scope,
            has_crds,
            has_applicationset,
        };

        debug!("Discovered Argo instance in '{ns}': {argo:?}");
        found.push(argo);
    }

    if found.is_empty() {
        info!("No Argo CD installations discovered");
    } else {
        info!(
            "Argo CD discovery complete: {} instance(s) found",
            found.len()
        );
    }

    Ok(found)
}

pub async fn detect_argo_deployment_type(
    k8s: &Arc<K8sClient>,
    desired_namespace: &str,
) -> Result<ArgoDeploymentType, InterpretError> {
    let discovered = discover_argo_all(k8s).await?;
    debug!("Discovered argo instances {discovered:?}");

    if discovered.is_empty() {
        return Ok(ArgoDeploymentType::NotInstalled);
    }

    if let Some(d) = discovered
        .iter()
        .find(|d| d.control_namespace == desired_namespace)
    {
        return Ok(ArgoDeploymentType::AvailableInDesiredNamespace(
            d.control_namespace.clone(),
        ));
    }

    if let Some(d) = discovered
        .iter()
        .find(|d| matches!(d.scope, ArgoScope::ClusterWide(_)))
    {
        return Ok(ArgoDeploymentType::InstalledClusterWide(
            d.control_namespace.clone(),
        ));
    }

    Ok(ArgoDeploymentType::InstalledNamespaceScoped(
        discovered[0].control_namespace.clone(),
    ))
}
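The precedence encoded in `detect_argo_deployment_type` (desired namespace first, then any cluster-wide install, then the first namespace-scoped one) is what drives the install/attach decision in `ArgoInterpret` earlier in this diff. A caller might branch on it like this sketch (hypothetical call site, mirroring the interpret's behavior):

```rust
use crate::modules::argocd::{detect_argo_deployment_type, ArgoDeploymentType};

/// Returns (control namespace to target, whether a Helm install is needed).
async fn plan_argo_install(
    k8s: &std::sync::Arc<crate::topology::k8s::K8sClient>,
    desired_ns: &str,
) -> Result<(String, bool), crate::interpret::InterpretError> {
    match detect_argo_deployment_type(k8s, desired_ns).await? {
        // Nothing found: install into the namespace we were asked for.
        ArgoDeploymentType::NotInstalled => Ok((desired_ns.to_string(), true)),
        // A usable instance exists: attach to it, skip the Helm install.
        ArgoDeploymentType::AvailableInDesiredNamespace(ns)
        | ArgoDeploymentType::InstalledClusterWide(ns) => Ok((ns, false)),
        // Namespace-scoped attachment is explicitly unsupported for now.
        ArgoDeploymentType::InstalledNamespaceScoped(ns) => {
            Err(crate::interpret::InterpretError::new(format!(
                "Argo CD in '{ns}' is namespace-scoped; attachment is not supported yet"
            )))
        }
    }
}
```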
@@ -69,17 +69,14 @@ impl DhcpInterpret {

        dhcp_server.set_pxe_options(pxe_options).await?;

        Ok(Outcome::new(
            InterpretStatus::SUCCESS,
            format!(
                "Dhcp Interpret Set next boot to [{:?}], boot_filename to [{:?}], filename to [{:?}], filename64 to [{:?}], filenameipxe to [:{:?}]",
                self.score.boot_filename,
                self.score.boot_filename,
                self.score.filename,
                self.score.filename64,
                self.score.filenameipxe
            ),
        ))
        Ok(Outcome::success(format!(
            "Dhcp Interpret Set next boot to [{:?}], boot_filename to [{:?}], filename to [{:?}], filename64 to [{:?}], filenameipxe to [:{:?}]",
            self.score.boot_filename,
            self.score.boot_filename,
            self.score.filename,
            self.score.filename64,
            self.score.filenameipxe
        )))
    }
}

@@ -122,8 +119,7 @@ impl<T: Topology + DhcpServer> Interpret<T> for DhcpInterpret {

        topology.commit_config().await?;

        Ok(Outcome::new(
            InterpretStatus::SUCCESS,
        Ok(Outcome::success(
            "Dhcp Interpret execution successful".to_string(),
        ))
    }
@@ -197,10 +193,10 @@ impl DhcpHostBindingInterpret {
            }
        }

        Ok(Outcome::new(
            InterpretStatus::SUCCESS,
            format!("Dhcp Interpret registered {} entries", number_new_entries),
        ))
        Ok(Outcome::success(format!(
            "Dhcp Interpret registered {} entries",
            number_new_entries
        )))
    }
}

@@ -236,12 +232,9 @@ impl<T: DhcpServer> Interpret<T> for DhcpHostBindingInterpret {

        topology.commit_config().await?;

        Ok(Outcome::new(
            InterpretStatus::SUCCESS,
            format!(
                "Dhcp Host Binding Interpret execution successful on {} hosts",
                self.score.host_binding.len()
            ),
        ))
        Ok(Outcome::success(format!(
            "Dhcp Host Binding Interpret execution successful on {} hosts",
            self.score.host_binding.len()
        )))
    }
}

@@ -55,8 +55,7 @@ impl DnsInterpret {
            dns.register_dhcp_leases(register).await?;
        }

        Ok(Outcome::new(
            InterpretStatus::SUCCESS,
        Ok(Outcome::success(
            "DNS Interpret execution successful".to_string(),
        ))
    }
@@ -68,13 +67,10 @@ impl DnsInterpret {
        let entries = &self.score.dns_entries;
        dns_server.ensure_hosts_registered(entries.clone()).await?;

        Ok(Outcome::new(
            InterpretStatus::SUCCESS,
            format!(
                "DnsInterpret registered {} hosts successfully",
                entries.len()
            ),
        ))
        Ok(Outcome::success(format!(
            "DnsInterpret registered {} hosts successfully",
            entries.len()
        )))
    }
}

@@ -111,8 +107,7 @@ impl<T: Topology + DnsServer> Interpret<T> for DnsInterpret {

        topology.commit_config().await?;

        Ok(Outcome::new(
            InterpretStatus::SUCCESS,
        Ok(Outcome::success(
            "Dns Interpret execution successful".to_string(),
        ))
    }

@@ -197,13 +197,10 @@ impl<T: Topology + HelmCommand> Interpret<T> for HelmChartInterpret {
                self.score.release_name, ns
            );

            return Ok(Outcome::new(
                InterpretStatus::SUCCESS,
                format!(
                    "Helm Chart '{}' already installed to namespace {ns} and install_only=true",
                    self.score.release_name
                ),
            ));
            return Ok(Outcome::success(format!(
                "Helm Chart '{}' already installed to namespace {ns} and install_only=true",
                self.score.release_name
            )));
        } else {
            info!(
                "Release '{}' not found in namespace '{}'. Proceeding with installation.",
@@ -228,18 +225,18 @@ impl<T: Topology + HelmCommand> Interpret<T> for HelmChartInterpret {
        };

        match status {
            helm_wrapper_rs::HelmDeployStatus::Deployed => Ok(Outcome::new(
                InterpretStatus::SUCCESS,
                format!("Helm Chart {} deployed", self.score.release_name),
            )),
            helm_wrapper_rs::HelmDeployStatus::PendingInstall => Ok(Outcome::new(
                InterpretStatus::RUNNING,
                format!("Helm Chart {} pending install...", self.score.release_name),
            )),
            helm_wrapper_rs::HelmDeployStatus::PendingUpgrade => Ok(Outcome::new(
                InterpretStatus::RUNNING,
                format!("Helm Chart {} pending upgrade...", self.score.release_name),
            )),
            helm_wrapper_rs::HelmDeployStatus::Deployed => Ok(Outcome::success(format!(
                "Helm Chart {} deployed",
                self.score.release_name
            ))),
            helm_wrapper_rs::HelmDeployStatus::PendingInstall => Ok(Outcome::running(format!(
                "Helm Chart {} pending install...",
                self.score.release_name
            ))),
            helm_wrapper_rs::HelmDeployStatus::PendingUpgrade => Ok(Outcome::running(format!(
                "Helm Chart {} pending upgrade...",
                self.score.release_name
            ))),
            helm_wrapper_rs::HelmDeployStatus::Failed => Err(InterpretError::new(format!(
                "Helm Chart {} installation failed",
                self.score.release_name

@@ -90,12 +90,12 @@ impl<T: Topology> Interpret<T> for DiscoverInventoryAgentInterpret {
        // refactoring to do it now
        let harmony_inventory_agent::hwinfo::PhysicalHost {
            storage_drives,
            storage_controller,
            storage_controller: _,
            memory_modules,
            cpus,
            chipset,
            chipset: _,
            network_interfaces,
            management_interface,
            management_interface: _,
            host_uuid,
        } = host;

@@ -133,10 +133,9 @@ impl<T: Topology> Interpret<T> for DiscoverInventoryAgentInterpret {
            },
        )
        .await;
        Ok(Outcome {
            status: InterpretStatus::SUCCESS,
            message: "Discovery process completed successfully".to_string(),
        })
        Ok(Outcome::success(
            "Discovery process completed successfully".to_string(),
        ))
    }

    fn get_name(&self) -> InterpretName {
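This commit mechanically replaces `Outcome::new(InterpretStatus::..., msg)` call sites with terser constructors. Those constructors are not shown in the diff; given how they are used here (`success`, `running`, `success_with_details`, and a `noop` that now takes a message), and given that the new CLI reporter reads an `outcome.details` field, they presumably look roughly like this hypothetical sketch:

```rust
// Hypothetical reconstruction of the Outcome helpers this refactor relies on.
// The `details` field and the NOOP status variant are assumptions inferred
// from their usage elsewhere in this change set.
impl Outcome {
    pub fn success(message: String) -> Self {
        Self { status: InterpretStatus::SUCCESS, message, details: vec![] }
    }

    pub fn success_with_details(message: String, details: Vec<String>) -> Self {
        Self { status: InterpretStatus::SUCCESS, message, details }
    }

    pub fn running(message: String) -> Self {
        Self { status: InterpretStatus::RUNNING, message, details: vec![] }
    }

    pub fn noop(message: String) -> Self {
        Self { status: InterpretStatus::NOOP, message, details: vec![] }
    }
}
```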
@@ -1,11 +1,15 @@
use async_trait::async_trait;
use harmony_macros::ingress_path;
use harmony_types::id::Id;
use k8s_openapi::api::networking::v1::Ingress;
use log::{debug, trace};
use serde::Serialize;
use serde_json::json;

use crate::{
    interpret::Interpret,
    data::Version,
    interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
    inventory::Inventory,
    score::Score,
    topology::{K8sclient, Topology},
};
@@ -57,7 +61,7 @@ impl<T: Topology + K8sclient> Score<T> for K8sIngressScore {

        let ingress_class = match self.ingress_class_name.clone() {
            Some(ingress_class_name) => ingress_class_name,
            None => format!("\"default\""),
            None => "\"default\"".to_string(),
        };

        let ingress = json!(
@@ -97,11 +101,12 @@ impl<T: Topology + K8sclient> Score<T> for K8sIngressScore {
            "Successfully built Ingress for host {:?}",
            ingress.metadata.name
        );
        Box::new(K8sResourceInterpret {
            score: K8sResourceScore::single(
                ingress.clone(),
                self.namespace.clone().map(|f| f.to_string()),
            ),

        Box::new(K8sIngressInterpret {
            ingress,
            service: self.name.to_string(),
            namespace: self.namespace.clone().map(|f| f.to_string()),
            host: self.host.clone(),
        })
    }

@@ -109,3 +114,62 @@ impl<T: Topology + K8sclient> Score<T> for K8sIngressScore {
        format!("{} K8sIngressScore", self.name)
    }
}

#[derive(std::fmt::Debug)]
struct K8sIngressInterpret {
    ingress: Ingress,
    service: String,
    namespace: Option<String>,
    host: fqdn::FQDN,
}

#[async_trait]
impl<T: Topology + K8sclient> Interpret<T> for K8sIngressInterpret {
    async fn execute(
        &self,
        inventory: &Inventory,
        topology: &T,
    ) -> Result<Outcome, InterpretError> {
        let result = K8sResourceInterpret {
            score: K8sResourceScore::single(self.ingress.clone(), self.namespace.clone()),
        }
        .execute(inventory, topology)
        .await;

        match result {
            Ok(outcome) => match outcome.status {
                InterpretStatus::SUCCESS => {
                    let details = match &self.namespace {
                        Some(namespace) => {
                            vec![format!(
                                "{} ({namespace}): http://{}",
                                self.service, self.host
                            )]
                        }
                        None => vec![format!("{}: {}", self.service, self.host)],
                    };

                    Ok(Outcome::success_with_details(outcome.message, details))
                }
                _ => Ok(outcome),
            },
            Err(e) => Err(e),
        }
    }

    fn get_name(&self) -> InterpretName {
        InterpretName::K8sIngress
    }

    fn get_version(&self) -> Version {
        Version::from("0.0.1").unwrap()
    }

    fn get_status(&self) -> InterpretStatus {
        todo!()
    }

    fn get_children(&self) -> Vec<Id> {
        vec![]
    }
}

@@ -17,3 +17,4 @@ pub mod prometheus;
pub mod storage;
pub mod tenant;
pub mod tftp;
pub mod argocd;
@@ -35,6 +35,24 @@ pub struct DiscordWebhook {
#[async_trait]
impl AlertReceiver<RHOBObservability> for DiscordWebhook {
    async fn install(&self, sender: &RHOBObservability) -> Result<Outcome, InterpretError> {
        let ns = sender.namespace.clone();
        let secret_name = format!("{}-secret", self.name.clone());
        let webhook_key = format!("{}", self.url.clone());

        let mut string_data = BTreeMap::new();
        string_data.insert("webhook-url".to_string(), webhook_key.clone());

        let secret = Secret {
            metadata: kube::core::ObjectMeta {
                name: Some(secret_name.clone()),
                ..Default::default()
            },
            string_data: Some(string_data),
            type_: Some("Opaque".to_string()),
            ..Default::default()
        };

        let _ = sender.client.apply(&secret, Some(&ns)).await;
        let spec = crate::modules::monitoring::kube_prometheus::crd::rhob_alertmanager_config::AlertmanagerConfigSpec {
            data: json!({
                "route": {
@@ -43,9 +61,14 @@ impl AlertReceiver<RHOBObservability> for DiscordWebhook {
                "receivers": [
                    {
                        "name": self.name,
                        "webhookConfigs": [
                        "discordConfigs": [
                            {
                                "url": self.url,
                                "apiURL": {
                                    "name": secret_name,
                                    "key": "webhook-url",
                                },
                                "title": "{{ template \"discord.default.title\" . }}",
                                "message": "{{ template \"discord.default.message\" . }}"
                            }
                        ]
                    }

@@ -43,6 +43,11 @@ impl AlertReceiver<RHOBObservability> for WebhookReceiver {
                "webhookConfigs": [
                    {
                        "url": self.url,
                        "httpConfig": {
                            "tlsConfig": {
                                "insecureSkipVerify": true
                            }
                        }
                    }
                ]
            }

@@ -68,7 +68,9 @@ impl<T: Topology + PrometheusApplicationMonitoring<CRDPrometheus>> Interpret<T>
                PreparationOutcome::Success { details: _ } => {
                    Ok(Outcome::success("Prometheus installed".into()))
                }
                PreparationOutcome::Noop => Ok(Outcome::noop()),
                PreparationOutcome::Noop => {
                    Ok(Outcome::noop("Prometheus installation skipped".into()))
                }
            },
            Err(err) => Err(InterpretError::from(err)),
        }

@@ -70,7 +70,9 @@ impl<T: Topology + PrometheusApplicationMonitoring<RHOBObservability>> Interpret
                PreparationOutcome::Success { details: _ } => {
                    Ok(Outcome::success("Prometheus installed".into()))
                }
                PreparationOutcome::Noop => Ok(Outcome::noop()),
                PreparationOutcome::Noop => {
                    Ok(Outcome::noop("Prometheus installation skipped".into()))
                }
            },
            Err(err) => Err(InterpretError::from(err)),
        }

@@ -1,12 +1,8 @@
use std::collections::BTreeMap;

use kube::CustomResource;
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};

use crate::modules::monitoring::kube_prometheus::crd::rhob_prometheuses::{
    LabelSelector, PrometheusSpec,
};
use crate::modules::monitoring::kube_prometheus::crd::rhob_prometheuses::LabelSelector;

/// MonitoringStack CRD for monitoring.rhobs/v1alpha1
#[derive(CustomResource, Serialize, Deserialize, Debug, Clone, JsonSchema)]

@@ -4,4 +4,5 @@ pub mod application_monitoring;
pub mod grafana;
pub mod kube_prometheus;
pub mod ntfy;
pub mod okd;
pub mod prometheus;

@@ -113,7 +113,13 @@ impl<T: Topology + HelmCommand + K8sclient + MultiTargetTopology> Interpret<T> f
            .await?;
        info!("user added");

        Ok(Outcome::success("Ntfy installed".to_string()))
        Ok(Outcome::success_with_details(
            "Ntfy installed".to_string(),
            vec![format!(
                "Ntfy ({}): http://{}",
                self.score.namespace, self.score.host
            )],
        ))
    }

    fn get_name(&self) -> InterpretName {
149
harmony/src/modules/monitoring/okd/enable_user_workload.rs
Normal file
@@ -0,0 +1,149 @@
use std::{collections::BTreeMap, sync::Arc};

use crate::{
    data::Version,
    interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
    inventory::Inventory,
    score::Score,
    topology::{K8sclient, Topology, k8s::K8sClient},
};
use async_trait::async_trait;
use harmony_types::id::Id;
use k8s_openapi::api::core::v1::ConfigMap;
use kube::api::ObjectMeta;
use serde::Serialize;

#[derive(Clone, Debug, Serialize)]
pub struct OpenshiftUserWorkloadMonitoring {}

impl<T: Topology + K8sclient> Score<T> for OpenshiftUserWorkloadMonitoring {
    fn name(&self) -> String {
        "OpenshiftUserWorkloadMonitoringScore".to_string()
    }

    fn create_interpret(&self) -> Box<dyn Interpret<T>> {
        Box::new(OpenshiftUserWorkloadMonitoringInterpret {})
    }
}

#[derive(Clone, Debug, Serialize)]
pub struct OpenshiftUserWorkloadMonitoringInterpret {}

#[async_trait]
impl<T: Topology + K8sclient> Interpret<T> for OpenshiftUserWorkloadMonitoringInterpret {
    async fn execute(
        &self,
        _inventory: &Inventory,
        topology: &T,
    ) -> Result<Outcome, InterpretError> {
        let client = topology.k8s_client().await.unwrap();
        self.update_cluster_monitoring_config_cm(&client).await?;
        self.update_user_workload_monitoring_config_cm(&client)
            .await?;
        self.verify_user_workload(&client).await?;
        Ok(Outcome::success(
            "successfully enabled user-workload-monitoring".to_string(),
        ))
    }

    fn get_name(&self) -> InterpretName {
        InterpretName::Custom("OpenshiftUserWorkloadMonitoring")
    }

    fn get_version(&self) -> Version {
        todo!()
    }

    fn get_status(&self) -> InterpretStatus {
        todo!()
    }

    fn get_children(&self) -> Vec<Id> {
        todo!()
    }
}

impl OpenshiftUserWorkloadMonitoringInterpret {
    pub async fn update_cluster_monitoring_config_cm(
        &self,
        client: &Arc<K8sClient>,
    ) -> Result<Outcome, InterpretError> {
        let mut data = BTreeMap::new();
        data.insert(
            "config.yaml".to_string(),
            r#"
enableUserWorkload: true
alertmanagerMain:
  enableUserAlertmanagerConfig: true
"#
            .to_string(),
        );

        let cm = ConfigMap {
            metadata: ObjectMeta {
                name: Some("cluster-monitoring-config".to_string()),
                namespace: Some("openshift-monitoring".to_string()),
                ..Default::default()
            },
            data: Some(data),
            ..Default::default()
        };
        client.apply(&cm, Some("openshift-monitoring")).await?;

        Ok(Outcome::success(
            "updated cluster-monitoring-config-map".to_string(),
        ))
    }

    pub async fn update_user_workload_monitoring_config_cm(
        &self,
        client: &Arc<K8sClient>,
    ) -> Result<Outcome, InterpretError> {
        let mut data = BTreeMap::new();
        data.insert(
            "config.yaml".to_string(),
            r#"
alertmanager:
  enabled: true
  enableAlertmanagerConfig: true
"#
            .to_string(),
        );
        let cm = ConfigMap {
            metadata: ObjectMeta {
                name: Some("user-workload-monitoring-config".to_string()),
                namespace: Some("openshift-user-workload-monitoring".to_string()),
                ..Default::default()
            },
            data: Some(data),
            ..Default::default()
        };
        client
            .apply(&cm, Some("openshift-user-workload-monitoring"))
            .await?;

        Ok(Outcome::success(
            "updated openshift-user-monitoring-config-map".to_string(),
        ))
    }

    pub async fn verify_user_workload(
        &self,
        client: &Arc<K8sClient>,
    ) -> Result<Outcome, InterpretError> {
        let namespace = "openshift-user-workload-monitoring";
        let alertmanager_name = "alertmanager-user-workload-0";
        let prometheus_name = "prometheus-user-workload-0";
        client
            .wait_for_pod_ready(alertmanager_name, Some(namespace))
            .await?;
        client
            .wait_for_pod_ready(prometheus_name, Some(namespace))
            .await?;

        Ok(Outcome::success(format!(
            "pods: {}, {} ready in ns: {}",
            alertmanager_name, prometheus_name, namespace
        )))
    }
}
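This score is self-contained: the interpret just (re)applies the two ConfigMaps and waits for the user-workload pods, so running it is idempotent. A hypothetical driver, shown only to illustrate the wiring with the `Score`/`Interpret` API used above (in a real program the score would be passed to the CLI entry point alongside the other scores):

```rust
// Hypothetical call site; uses only APIs visible in this diff
// (Score::create_interpret, Interpret::execute, Inventory::empty).
async fn enable_uwm<T: Topology + K8sclient>(
    topology: &T,
) -> Result<Outcome, InterpretError> {
    let score = OpenshiftUserWorkloadMonitoring {};
    let interpret = score.create_interpret();
    interpret.execute(&Inventory::empty(), topology).await
}
```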
1
harmony/src/modules/monitoring/okd/mod.rs
Normal file
@@ -0,0 +1 @@
pub mod enable_user_workload;

@@ -1,19 +1,19 @@
use async_trait::async_trait;
use derive_new::new;
use harmony_types::id::Id;
use log::{error, info, warn};
use serde::Serialize;

use crate::{
    data::Version,
    hardware::PhysicalHost,
    infra::inventory::InventoryRepositoryFactory,
    interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
    inventory::{HostRole, Inventory},
    modules::inventory::{DiscoverHostForRoleScore, LaunchDiscoverInventoryAgentScore},
    modules::inventory::DiscoverHostForRoleScore,
    score::Score,
    topology::HAClusterTopology,
};
use async_trait::async_trait;
use derive_new::new;
use harmony_types::id::Id;
use log::info;
use serde::Serialize;

// -------------------------------------------------------------------------------------------------
// Step 01: Inventory (default PXE + Kickstart in RAM + Rust agent)
// - This score exposes/ensures the default inventory assets and waits for discoveries.
@@ -109,12 +109,9 @@ When you can dig them, confirm to continue.
            .await?;
        }

        Ok(Outcome::new(
            InterpretStatus::SUCCESS,
            format!(
                "Found and assigned bootstrap node: {}",
                bootstrap_host.unwrap().summary()
            ),
        ))
        Ok(Outcome::success(format!(
            "Found and assigned bootstrap node: {}",
            bootstrap_host.unwrap().summary()
        )))
    }
}
@@ -1,25 +1,13 @@
use std::{fmt::Write, path::PathBuf};

use async_trait::async_trait;
use derive_new::new;
use harmony_secret::SecretManager;
use harmony_types::id::Id;
use log::{debug, error, info, warn};
use serde::{Deserialize, Serialize};
use tokio::{fs::File, io::AsyncWriteExt, process::Command};

use crate::{
    config::secret::{RedhatSecret, SshKeyPair},
    data::{FileContent, FilePath, Version},
    hardware::PhysicalHost,
    infra::inventory::InventoryRepositoryFactory,
    instrumentation::{HarmonyEvent, instrument},
    interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
    inventory::{HostRole, Inventory},
    modules::{
        dhcp::DhcpHostBindingScore,
        http::{IPxeMacBootFileScore, StaticFilesHttpScore},
        inventory::LaunchDiscoverInventoryAgentScore,
        okd::{
            bootstrap_load_balancer::OKDBootstrapLoadBalancerScore,
            templates::{BootstrapIpxeTpl, InstallConfigYaml},
@@ -28,6 +16,15 @@ use crate::{
    score::Score,
    topology::{HAClusterTopology, HostBinding},
};
use async_trait::async_trait;
use derive_new::new;
use harmony_secret::SecretManager;
use harmony_types::id::Id;
use log::{debug, info};
use serde::Serialize;
use std::path::PathBuf;
use tokio::{fs::File, io::AsyncWriteExt, process::Command};

// -------------------------------------------------------------------------------------------------
// Step 02: Bootstrap
// - Select bootstrap node (from discovered set).
@@ -313,7 +310,7 @@ impl OKDSetup02BootstrapInterpret {
        info!("[Bootstrap] Rebooting bootstrap node via SSH");
        // TODO reboot programmatically, there are some logical checks and refactoring to do such as
        // accessing the bootstrap node config (ip address) from the inventory
        let confirmation = inquire::Confirm::new(
        let _ = inquire::Confirm::new(
            "Now reboot the bootstrap node so it picks up its pxe boot file. Press enter when ready.",
        )
        .prompt()
@@ -379,9 +376,6 @@ impl Interpret<HAClusterTopology> for OKDSetup02BootstrapInterpret {
        self.reboot_target().await?;
        self.wait_for_bootstrap_complete().await?;

        Ok(Outcome::new(
            InterpretStatus::SUCCESS,
            "Bootstrap phase complete".into(),
        ))
        Ok(Outcome::success("Bootstrap phase complete".into()))
    }
}

@@ -1,11 +1,3 @@
use std::{fmt::Write, path::PathBuf};

use async_trait::async_trait;
use derive_new::new;
use harmony_types::id::Id;
use log::{debug, info};
use serde::Serialize;

use crate::{
    data::Version,
    hardware::PhysicalHost,
@@ -19,6 +11,12 @@ use crate::{
    score::Score,
    topology::{HAClusterTopology, HostBinding},
};
use async_trait::async_trait;
use derive_new::new;
use harmony_types::id::Id;
use log::{debug, info};
use serde::Serialize;

// -------------------------------------------------------------------------------------------------
// Step 03: Control Plane
// - Render per-MAC PXE & ignition for cp0/cp1/cp2.
@@ -269,8 +267,7 @@ impl Interpret<HAClusterTopology> for OKDSetup03ControlPlaneInterpret {
        // the `wait-for bootstrap-complete` command.
        info!("[ControlPlane] Provisioning initiated. Monitor the cluster convergence manually.");

        Ok(Outcome::new(
            InterpretStatus::SUCCESS,
        Ok(Outcome::success(
            "Control plane provisioning has been successfully initiated.".into(),
        ))
    }
@@ -1,33 +1,16 @@
use std::{fmt::Write, path::PathBuf};

use crate::{
    data::Version,
    interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
    inventory::Inventory,
    score::Score,
    topology::HAClusterTopology,
};
use async_trait::async_trait;
use derive_new::new;
use harmony_secret::SecretManager;
use harmony_types::id::Id;
use log::{debug, error, info, warn};
use serde::{Deserialize, Serialize};
use tokio::{fs::File, io::AsyncWriteExt, process::Command};
use log::info;
use serde::Serialize;

use crate::{
    config::secret::{RedhatSecret, SshKeyPair},
    data::{FileContent, FilePath, Version},
    hardware::PhysicalHost,
    infra::inventory::InventoryRepositoryFactory,
    instrumentation::{HarmonyEvent, instrument},
    interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
    inventory::{HostRole, Inventory},
    modules::{
        dhcp::DhcpHostBindingScore,
        http::{IPxeMacBootFileScore, StaticFilesHttpScore},
        inventory::LaunchDiscoverInventoryAgentScore,
        okd::{
            bootstrap_load_balancer::OKDBootstrapLoadBalancerScore,
            templates::{BootstrapIpxeTpl, InstallConfigYaml},
        },
    },
    score::Score,
    topology::{HAClusterTopology, HostBinding},
};
// -------------------------------------------------------------------------------------------------
// Step 04: Workers
// - Render per-MAC PXE & ignition for workers; join nodes.
@@ -94,9 +78,6 @@ impl Interpret<HAClusterTopology> for OKDSetup04WorkersInterpret {
        _topology: &HAClusterTopology,
    ) -> Result<Outcome, InterpretError> {
        self.render_and_reboot().await?;
        Ok(Outcome::new(
            InterpretStatus::SUCCESS,
            "Workers provisioned".into(),
        ))
        Ok(Outcome::success("Workers provisioned".into()))
    }
}

@@ -1,33 +1,16 @@
use std::{fmt::Write, path::PathBuf};

use crate::{
    data::Version,
    interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
    inventory::Inventory,
    score::Score,
    topology::HAClusterTopology,
};
use async_trait::async_trait;
use derive_new::new;
use harmony_secret::SecretManager;
use harmony_types::id::Id;
use log::{debug, error, info, warn};
use serde::{Deserialize, Serialize};
use tokio::{fs::File, io::AsyncWriteExt, process::Command};
use log::info;
use serde::Serialize;

use crate::{
    config::secret::{RedhatSecret, SshKeyPair},
    data::{FileContent, FilePath, Version},
    hardware::PhysicalHost,
    infra::inventory::InventoryRepositoryFactory,
    instrumentation::{HarmonyEvent, instrument},
    interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
    inventory::{HostRole, Inventory},
    modules::{
        dhcp::DhcpHostBindingScore,
        http::{IPxeMacBootFileScore, StaticFilesHttpScore},
        inventory::LaunchDiscoverInventoryAgentScore,
        okd::{
            bootstrap_load_balancer::OKDBootstrapLoadBalancerScore,
            templates::{BootstrapIpxeTpl, InstallConfigYaml},
        },
    },
    score::Score,
    topology::{HAClusterTopology, HostBinding},
};
// -------------------------------------------------------------------------------------------------
// Step 05: Sanity Check
// - Validate API reachability, ClusterOperators, ingress, and SDN status.
@@ -93,9 +76,6 @@ impl Interpret<HAClusterTopology> for OKDSetup05SanityCheckInterpret {
        _topology: &HAClusterTopology,
    ) -> Result<Outcome, InterpretError> {
        self.run_checks().await?;
        Ok(Outcome::new(
            InterpretStatus::SUCCESS,
            "Sanity checks passed".into(),
        ))
        Ok(Outcome::success("Sanity checks passed".into()))
    }
}

@@ -1,32 +1,15 @@
// -------------------------------------------------------------------------------------------------
use async_trait::async_trait;
use derive_new::new;
use harmony_secret::SecretManager;
use harmony_types::id::Id;
use log::{debug, error, info, warn};
use serde::{Deserialize, Serialize};
use std::{fmt::Write, path::PathBuf};
use tokio::{fs::File, io::AsyncWriteExt, process::Command};
use log::info;
use serde::Serialize;

use crate::{
    config::secret::{RedhatSecret, SshKeyPair},
    data::{FileContent, FilePath, Version},
    hardware::PhysicalHost,
    infra::inventory::InventoryRepositoryFactory,
    instrumentation::{HarmonyEvent, instrument},
    data::Version,
    interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
    inventory::{HostRole, Inventory},
    modules::{
        dhcp::DhcpHostBindingScore,
        http::{IPxeMacBootFileScore, StaticFilesHttpScore},
        inventory::LaunchDiscoverInventoryAgentScore,
        okd::{
            bootstrap_load_balancer::OKDBootstrapLoadBalancerScore,
            templates::{BootstrapIpxeTpl, InstallConfigYaml},
        },
    },
    inventory::Inventory,
    score::Score,
    topology::{HAClusterTopology, HostBinding},
    topology::HAClusterTopology,
};

// Step 06: Installation Report
@@ -93,9 +76,6 @@ impl Interpret<HAClusterTopology> for OKDSetup06InstallationReportInterpret {
        _topology: &HAClusterTopology,
    ) -> Result<Outcome, InterpretError> {
        self.generate().await?;
        Ok(Outcome::new(
            InterpretStatus::SUCCESS,
            "Installation report generated".into(),
        ))
        Ok(Outcome::success("Installation report generated".into()))
    }
}

@@ -42,7 +42,7 @@ pub fn alert_pod_not_ready() -> PrometheusAlertRule {
    PrometheusAlertRule {
        alert: "PodNotReady".into(),
        expr: "kube_pod_status_ready{condition=\"true\"} == 0".into(),
        r#for: Some("2m".into()),
        r#for: Some("30s".into()),
        labels: HashMap::from([("severity".into(), "warning".into())]),
        annotations: HashMap::from([
            ("summary".into(), "Pod is not ready".into()),

@@ -12,9 +12,6 @@ use std::process::Command;
use crate::modules::k8s::ingress::{K8sIngressScore, PathType};
use crate::modules::monitoring::kube_prometheus::crd::grafana_default_dashboard::build_default_dashboard;
use crate::modules::monitoring::kube_prometheus::crd::rhob_alertmanager_config::RHOBObservability;
use crate::modules::monitoring::kube_prometheus::crd::rhob_alertmanagers::{
    Alertmanager, AlertmanagerSpec,
};
use crate::modules::monitoring::kube_prometheus::crd::rhob_grafana::{
    Grafana, GrafanaDashboard, GrafanaDashboardSpec, GrafanaDatasource, GrafanaDatasourceConfig,
    GrafanaDatasourceSpec, GrafanaSpec,
@@ -25,13 +22,8 @@ use crate::modules::monitoring::kube_prometheus::crd::rhob_monitoring_stack::{
use crate::modules::monitoring::kube_prometheus::crd::rhob_prometheus_rules::{
    PrometheusRule, PrometheusRuleSpec, RuleGroup,
};
use crate::modules::monitoring::kube_prometheus::crd::rhob_prometheuses::{
    AlertmanagerEndpoints, LabelSelector, PrometheusSpec, PrometheusSpecAlerting,
};
use crate::modules::monitoring::kube_prometheus::crd::rhob_prometheuses::LabelSelector;

use crate::modules::monitoring::kube_prometheus::crd::rhob_role::{
    build_prom_role, build_prom_rolebinding, build_prom_service_account,
};
use crate::modules::monitoring::kube_prometheus::crd::rhob_service_monitor::{
    ServiceMonitor, ServiceMonitorSpec,
};

@@ -178,10 +178,10 @@ fn handle_events() {
            ApplicationFeatureStatus::Installing => {
                info!("Installing feature '{feature}' for '{application}'...");
            }
            ApplicationFeatureStatus::Installed => {
            ApplicationFeatureStatus::Installed { details: _ } => {
                info!(status = "finished"; "Feature '{feature}' installed");
            }
            ApplicationFeatureStatus::Failed { details } => {
            ApplicationFeatureStatus::Failed { message: details } => {
                error!(status = "failed"; "Feature '{feature}' installation failed: {details}");
            }
        },
56
harmony_cli/src/cli_reporter.rs
Normal file
@@ -0,0 +1,56 @@
use std::sync::Mutex;

use harmony::{
    instrumentation::{self, HarmonyEvent},
    modules::application::ApplicationFeatureStatus,
};

use crate::theme;

pub fn init() {
    let details: Mutex<Vec<String>> = Mutex::new(vec![]);

    instrumentation::subscribe("Harmony CLI Reporter", {
        move |event| {
            let mut details = details.lock().unwrap();

            match event {
                HarmonyEvent::InterpretExecutionFinished {
                    execution_id: _,
                    topology: _,
                    interpret: _,
                    score: _,
                    outcome: Ok(outcome),
                } => {
                    if outcome.status == harmony::interpret::InterpretStatus::SUCCESS {
                        details.extend(outcome.details.clone());
                    }
                }
                HarmonyEvent::ApplicationFeatureStateChanged {
                    topology: _,
                    application: _,
                    feature: _,
                    status:
                        ApplicationFeatureStatus::Installed {
                            details: feature_details,
                        },
                } => {
                    details.extend(feature_details.clone());
                }
                HarmonyEvent::HarmonyFinished => {
                    if !details.is_empty() {
                        println!(
                            "\n{} All done! Here's what's next for you:",
                            theme::EMOJI_SUMMARY
                        );
                        for detail in details.iter() {
                            println!("- {detail}");
                        }
                        println!();
                    }
                }
                _ => {}
            };
        }
    });
}

@@ -8,6 +8,7 @@ use inquire::Confirm;
use log::debug;

pub mod cli_logger; // FIXME: Don't make me pub
mod cli_reporter;
pub mod progress;
pub mod theme;

@@ -116,6 +117,7 @@ pub async fn run_cli<T: Topology + Send + Sync + 'static>(
    args: Args,
) -> Result<(), Box<dyn std::error::Error>> {
    cli_logger::init();
    cli_reporter::init();

    let mut maestro = Maestro::initialize(inventory, topology).await.unwrap();
    maestro.register_all(scores);

@@ -9,6 +9,7 @@ pub static EMOJI_ERROR: Emoji<'_, '_> = Emoji("⚠️", "");
pub static EMOJI_DEPLOY: Emoji<'_, '_> = Emoji("🚀", "");
pub static EMOJI_TOPOLOGY: Emoji<'_, '_> = Emoji("📦", "");
pub static EMOJI_SCORE: Emoji<'_, '_> = Emoji("🎶", "");
pub static EMOJI_SUMMARY: Emoji<'_, '_> = Emoji("🚀", "");

lazy_static! {
    pub static ref SECTION_STYLE: ProgressStyle = ProgressStyle::default_spinner()

@@ -21,7 +21,6 @@ pub fn handle_events() {

    instrumentation::subscribe("Harmony Composer Logger", {
        move |event| match event {
            HarmonyComposerEvent::HarmonyComposerStarted => {}
            HarmonyComposerEvent::ProjectInitializationStarted => {
                progress_tracker.add_section(
                    SETUP_SECTION,

@@ -5,7 +5,6 @@ use crate::{HarmonyProfile, HarmonyTarget};

#[derive(Debug, Clone)]
pub enum HarmonyComposerEvent {
    HarmonyComposerStarted,
    ProjectInitializationStarted,
    ProjectInitialized,
    ProjectCompilationStarted {