Compare commits

19 Commits

Author SHA1 Message Date
6e53397a58 commit
All checks were successful
Run Check Script / check (pull_request) Successful in 1m22s
2025-10-02 17:11:02 -04:00
8baf75a4fd commit 2025-10-02 16:59:08 -04:00
d53d040bac commit 2025-10-02 16:56:17 -04:00
687f11b261 commit 2025-10-02 16:53:25 -04:00
58e609767b cargo fmt 2025-10-02 16:46:43 -04:00
f75765408d fix: clippy
Some checks failed
Run Check Script / check (pull_request) Failing after 35s
2025-10-02 16:25:07 -04:00
cbbaae2ac8 okd_enable_user_workload_monitoring (#160)
Reviewed-on: #160
Co-authored-by: Willem <wrolleman@nationtech.io>
Co-committed-by: Willem <wrolleman@nationtech.io>
2025-09-29 14:32:38 +00:00
c84b2413ed Merge pull request 'fix: added securityContext.runAsUser:null to argo-cd helm chart so that in okd the user group will be randomly assigned within the uid range for the designated namespace' (#156) from fix/argo-cd-redis into master
All checks were successful
Run Check Script / check (push) Successful in 57s
Compile and package harmony_composer / package_harmony_composer (push) Successful in 6m35s
Reviewed-on: #156
2025-09-12 13:54:02 +00:00
f83fd09f11 fix(monitoring): returned namespaced kube metrics
All checks were successful
Run Check Script / check (pull_request) Successful in 55s
2025-09-12 09:49:20 -04:00
c15bd53331 fix: added securityContext.runAsUser:null to argo-cd helm chart so that in okd the user group will be randomly assigned within the uid range for the designated namespace
All checks were successful
Run Check Script / check (pull_request) Successful in 59s
2025-09-12 09:29:27 -04:00
6e6f57e38c Merge pull request 'fix: added routes to domain name for prometheus, grafana, alertmanager; added argo cd to the reporting after successful build' (#155) from fix/add_routes_to_domain into master
All checks were successful
Run Check Script / check (push) Successful in 59s
Compile and package harmony_composer / package_harmony_composer (push) Successful in 6m27s
Reviewed-on: #155
2025-09-10 19:44:53 +00:00
6f55f79281 feat: Update readme with newer UX/DX Rust Leptos app, update slides and misc stuff
All checks were successful
Run Check Script / check (pull_request) Successful in 58s
2025-09-10 15:40:32 -04:00
19f87fdaf7 fix: added routes to domain name for prometheus, grafana, alertmanager; added argo cd to the reporting after successful build
All checks were successful
Run Check Script / check (pull_request) Successful in 1m1s
2025-09-10 15:08:13 -04:00
49370af176 Merge pull request 'doc: Slides demo 10 sept' (#153) from feat/slides_demo_10sept into master
All checks were successful
Run Check Script / check (push) Successful in 1m4s
Compile and package harmony_composer / package_harmony_composer (push) Successful in 6m55s
Reviewed-on: #153
2025-09-10 17:14:48 +00:00
cf0b8326dc Merge pull request 'fix: properly configured discord alert receiver; corrected domain and topic name for ntfy' (#154) from fix/alertreceivers into master
Some checks failed
Compile and package harmony_composer / package_harmony_composer (push) Waiting to run
Run Check Script / check (push) Has been cancelled
Reviewed-on: #154
2025-09-10 17:13:31 +00:00
1e2563f7d1 fix: added reporting to output ntfy topic
All checks were successful
Run Check Script / check (pull_request) Successful in 1m2s
2025-09-10 13:10:06 -04:00
7f50c36f11 Merge pull request 'fix: Various demo fixes and renames: RHOBMonitoring -> Monitoring, ContinuousDelivery -> PackagingDeployment, fix bollard logs' (#152) from fix/demo into master
All checks were successful
Run Check Script / check (push) Successful in 1m1s
Compile and package harmony_composer / package_harmony_composer (push) Successful in 6m50s
Reviewed-on: #152
2025-09-10 17:01:15 +00:00
49dad343ad fix: properly configured discord alert receiver; corrected domain and topic name for ntfy
All checks were successful
Run Check Script / check (pull_request) Successful in 1m2s
2025-09-10 12:53:43 -04:00
9961e8b79d doc: Slides demo 10 sept 2025-09-10 12:38:25 -04:00
62 changed files with 966 additions and 183 deletions

View File

@@ -36,48 +36,59 @@ These principles surface as simple, ergonomic Rust APIs that let teams focus on
## 2 · Quick Start
The snippet below spins up a complete **production-grade LAMP stack** with monitoring. Swap it for your own scores to deploy anything from microservices to machine-learning pipelines.
The snippet below spins up a complete **production-grade Rust + Leptos Webapp** with monitoring. Swap it for your own scores to deploy anything from microservices to machine-learning pipelines.
```rust
use harmony::{
data::Version,
inventory::Inventory,
maestro::Maestro,
modules::{
lamp::{LAMPConfig, LAMPScore},
monitoring::monitoring_alerting::MonitoringAlertingStackScore,
application::{
ApplicationScore, RustWebFramework, RustWebapp,
features::{PackagingDeployment, rhob_monitoring::Monitoring},
},
monitoring::alert_channel::discord_alert_channel::DiscordWebhook,
},
topology::{K8sAnywhereTopology, Url},
topology::K8sAnywhereTopology,
};
use harmony_macros::hurl;
use std::{path::PathBuf, sync::Arc};
#[tokio::main]
async fn main() {
// 1. Describe what you want
let lamp_stack = LAMPScore {
name: "harmony-lamp-demo".into(),
domain: Url::Url(url::Url::parse("https://lampdemo.example.com").unwrap()),
php_version: Version::from("8.3.0").unwrap(),
config: LAMPConfig {
project_root: "./php".into(),
database_size: "4Gi".into(),
..Default::default()
},
let application = Arc::new(RustWebapp {
name: "harmony-example-leptos".to_string(),
project_root: PathBuf::from(".."), // <== Your project root, usually .. if you use the standard `/harmony` folder
framework: Some(RustWebFramework::Leptos),
service_port: 8080,
});
// Define your Application deployment and the features you want
let app = ApplicationScore {
features: vec![
Box::new(PackagingDeployment {
application: application.clone(),
}),
Box::new(Monitoring {
application: application.clone(),
alert_receiver: vec![
Box::new(DiscordWebhook {
name: "test-discord".to_string(),
url: hurl!("https://discord.doesnt.exist.com"), // <== Get your discord webhook url
}),
],
}),
],
application,
};
// 2. Enhance with extra scores (monitoring, CI/CD, …)
let mut monitoring = MonitoringAlertingStackScore::new();
monitoring.namespace = Some(lamp_stack.config.namespace.clone());
// 3. Run your scores on the desired topology & inventory
harmony_cli::run(
Inventory::autoload(), // auto-detect hardware / kube-config
K8sAnywhereTopology::from_env(), // local k3d, CI, staging, prod…
vec![
Box::new(lamp_stack),
Box::new(monitoring)
],
None
).await.unwrap();
Inventory::autoload(),
K8sAnywhereTopology::from_env(), // <== Deploy to local automatically provisioned k3d by default or connect to any kubernetes cluster
vec![Box::new(app)],
None,
)
.await
.unwrap();
}
```
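Alert channels compose: the `alert_receiver` vector accepts any boxed receiver. Below is a minimal sketch of adding a second channel next to the Discord webhook, assuming the `WebhookReceiver` type that appears later in this changeset (the module path shown is a guess):
```rust
use harmony::modules::monitoring::alert_channel::webhook_receiver::WebhookReceiver; // assumed path
use harmony::topology::Url;

// Hypothetical ntfy-style receiver; substitute your real endpoint and topic.
let ntfy_receiver = WebhookReceiver {
    name: "ntfy-webhook".to_string(),
    url: Url::Url(url::Url::parse("https://ntfy.example.com/my-topic").unwrap()),
};
// Add `Box::new(ntfy_receiver)` to `alert_receiver` in the Monitoring feature above.
```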

View File

@@ -0,0 +1,3 @@
.terraform
*.tfstate
venv

Binary file added (72 KiB)
Binary file added (38 KiB)
Binary file added (38 KiB)
Binary file added (52 KiB)
Binary file added (62 KiB)
Binary file added (64 KiB)
Binary file added (100 KiB)

View File

@@ -0,0 +1,5 @@
To build:
```bash
npx @marp-team/marp-cli@latest -w slides.md
```

Binary file added (11 KiB)

View File

@@ -0,0 +1,9 @@
To run this:
```bash
virtualenv venv
source venv/bin/activate
pip install ansible ansible-dev-tools
ansible-lint download.yml
ansible-playbook -i localhost download.yml
```

View File

@@ -0,0 +1,8 @@
- name: Test Ansible URL Validation
hosts: localhost
tasks:
- name: Download a file
ansible.builtin.get_url:
url: "http:/wikipedia.org/"
dest: "/tmp/ansible-test/wikipedia.html"
mode: '0900'
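# Both issues above are deliberate for the demo: "http:/wikipedia.org/" is missing
# a slash and mode '0900' is not valid octal, yet ansible-lint reports no failures.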

Binary file added (22 KiB)
Binary file added (275 KiB)
Binary file added (212 KiB)
Binary file added (384 KiB)
Binary file added (8.3 KiB)

File diff suppressed because one or more lines are too long

View File

@@ -0,0 +1,241 @@
---
theme: uncover
---
# Here is the story of Petit Poisson
---
<img src="./Happy_swimmer.jpg" width="600"/>
---
<img src="./happy_landscape_swimmer.jpg" width="1000"/>
---
<img src="./Happy_swimmer.jpg" width="200"/>
<img src="./tryrust.org.png" width="600"/>
[https://tryrust.org](https://tryrust.org)
---
<img src="./texto_deploy_prod_1.png" width="600"/>
---
<img src="./texto_deploy_prod_2.png" width="600"/>
---
<img src="./texto_deploy_prod_3.png" width="600"/>
---
<img src="./texto_deploy_prod_4.png" width="600"/>
---
## Demo time
---
<img src="./Happy_swimmer_sunglasses.jpg" width="1000"/>
---
<img src="./texto_download_wikipedia.png" width="600"/>
---
<img src="./ansible.jpg" width="200"/>
## Ansible❓
---
<img src="./Happy_swimmer.jpg" width="200"/>
```yaml
- name: Download wikipedia
hosts: localhost
tasks:
- name: Download a file
ansible.builtin.get_url:
url: "https:/wikipedia.org/"
dest: "/tmp/ansible-test/wikipedia.html"
mode: '0900'
```
---
<img src="./Happy_swimmer.jpg" width="200"/>
```
ansible-lint download.yml
Passed: 0 failure(s), 0 warning(s) on 1 files. Last profile that met the validation criteria was 'production'.
```
---
```
git push
```
---
<img src="./75_years_later.jpg" width="1100"/>
---
<img src="./texto_download_wikipedia_fail.png" width="600"/>
---
<img src="./Happy_swimmer_reversed.jpg" width="600"/>
---
<img src="./ansible_output_fail.jpg" width="1100"/>
---
<img src="./Happy_swimmer_reversed_1hit.jpg" width="600"/>
---
<img src="./ansible_crossed_out.jpg" width="400"/>
---
<img src="./terraform.jpg" width="400"/>
## Terraform❓❗
---
<img src="./Happy_swimmer_reversed_1hit.jpg" width="200"/>
<img src="./terraform.jpg" width="200"/>
```tf
provider "docker" {}
resource "docker_network" "invalid_network" {
name = "my-invalid-network"
ipam_config {
subnet = "172.17.0.0/33"
}
}
```
---
<img src="./Happy_swimmer_reversed_1hit.jpg" width="100"/>
<img src="./terraform.jpg" width="200"/>
```
terraform plan
Terraform used the selected providers to generate the following execution plan.
Resource actions are indicated with the following symbols:
+ create
Terraform will perform the following actions:
# docker_network.invalid_network will be created
+ resource "docker_network" "invalid_network" {
+ driver = (known after apply)
+ id = (known after apply)
+ internal = (known after apply)
+ ipam_driver = "default"
+ name = "my-invalid-network"
+ options = (known after apply)
+ scope = (known after apply)
+ ipam_config {
+ subnet = "172.17.0.0/33"
# (2 unchanged attributes hidden)
}
}
Plan: 1 to add, 0 to change, 0 to destroy.
```
---
```
terraform apply
```
---
```
Plan: 1 to add, 0 to change, 0 to destroy.
Do you want to perform these actions?
Terraform will perform the actions described above.
Only 'yes' will be accepted to approve.
Enter a value: yes
```
---
```
docker_network.invalid_network: Creating...
│ Error: Unable to create network: Error response from daemon: invalid network config:
│ invalid subnet 172.17.0.0/33: invalid CIDR block notation
│ with docker_network.invalid_network,
│ on main.tf line 11, in resource "docker_network" "invalid_network":
│ 11: resource "docker_network" "invalid_network" {
```
---
<img src="./Happy_swimmer_reversed_fullhit.jpg" width="1100"/>
---
<img src="./ansible_crossed_out.jpg" width="300"/>
<img src="./terraform_crossed_out.jpg" width="400"/>
<img src="./Happy_swimmer_reversed_fullhit.jpg" width="300"/>
---
## Harmony❓❗
---
## Demo time
---
<img src="./Happy_swimmer.jpg" width="300"/>
---
# 🎼
Harmony: [https://git.nationtech.io/nationtech/harmony](https://git.nationtech.io/nationtech/harmony)
<img src="./qrcode_gitea_nationtech.png" width="120"/>
LinkedIn: [https://www.linkedin.com/in/jean-gabriel-gill-couture/](https://www.linkedin.com/in/jean-gabriel-gill-couture/)
Email: [jg@nationtech.io](mailto:jg@nationtech.io)

Binary file added (11 KiB)

View File

@@ -0,0 +1,40 @@
# This file is maintained automatically by "terraform init".
# Manual edits may be lost in future updates.
provider "registry.terraform.io/hashicorp/http" {
version = "3.5.0"
hashes = [
"h1:8bUoPwS4hahOvzCBj6b04ObLVFXCEmEN8T/5eOHmWOM=",
"zh:047c5b4920751b13425efe0d011b3a23a3be97d02d9c0e3c60985521c9c456b7",
"zh:157866f700470207561f6d032d344916b82268ecd0cf8174fb11c0674c8d0736",
"zh:1973eb9383b0d83dd4fd5e662f0f16de837d072b64a6b7cd703410d730499476",
"zh:212f833a4e6d020840672f6f88273d62a564f44acb0c857b5961cdb3bbc14c90",
"zh:2c8034bc039fffaa1d4965ca02a8c6d57301e5fa9fff4773e684b46e3f78e76a",
"zh:5df353fc5b2dd31577def9cc1a4ebf0c9a9c2699d223c6b02087a3089c74a1c6",
"zh:672083810d4185076c81b16ad13d1224b9e6ea7f4850951d2ab8d30fa6e41f08",
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
"zh:7b4200f18abdbe39904b03537e1a78f21ebafe60f1c861a44387d314fda69da6",
"zh:843feacacd86baed820f81a6c9f7bd32cf302db3d7a0f39e87976ebc7a7cc2ee",
"zh:a9ea5096ab91aab260b22e4251c05f08dad2ed77e43e5e4fadcdfd87f2c78926",
"zh:d02b288922811739059e90184c7f76d45d07d3a77cc48d0b15fd3db14e928623",
]
}
provider "registry.terraform.io/hashicorp/local" {
version = "2.5.3"
hashes = [
"h1:1Nkh16jQJMp0EuDmvP/96f5Unnir0z12WyDuoR6HjMo=",
"zh:284d4b5b572eacd456e605e94372f740f6de27b71b4e1fd49b63745d8ecd4927",
"zh:40d9dfc9c549e406b5aab73c023aa485633c1b6b730c933d7bcc2fa67fd1ae6e",
"zh:6243509bb208656eb9dc17d3c525c89acdd27f08def427a0dce22d5db90a4c8b",
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
"zh:885d85869f927853b6fe330e235cd03c337ac3b933b0d9ae827ec32fa1fdcdbf",
"zh:bab66af51039bdfcccf85b25fe562cbba2f54f6b3812202f4873ade834ec201d",
"zh:c505ff1bf9442a889ac7dca3ac05a8ee6f852e0118dd9a61796a2f6ff4837f09",
"zh:d36c0b5770841ddb6eaf0499ba3de48e5d4fc99f4829b6ab66b0fab59b1aaf4f",
"zh:ddb6a407c7f3ec63efb4dad5f948b54f7f4434ee1a2607a49680d494b1776fe1",
"zh:e0dafdd4500bec23d3ff221e3a9b60621c5273e5df867bc59ef6b7e41f5c91f6",
"zh:ece8742fd2882a8fc9d6efd20e2590010d43db386b920b2a9c220cfecc18de47",
"zh:f4c6b3eb8f39105004cf720e202f04f57e3578441cfb76ca27611139bc116a82",
]
}

View File

@@ -0,0 +1,10 @@
provider "http" {}
data "http" "remote_file" {
url = "http:/example.com/file.txt"
}
resource "local_file" "downloaded_file" {
content = data.http.remote_file.body
filename = "${path.module}/downloaded_file.txt"
}

View File

@@ -0,0 +1,24 @@
# This file is maintained automatically by "terraform init".
# Manual edits may be lost in future updates.
provider "registry.terraform.io/kreuzwerker/docker" {
version = "3.0.2"
constraints = "~> 3.0.1"
hashes = [
"h1:cT2ccWOtlfKYBUE60/v2/4Q6Stk1KYTNnhxSck+VPlU=",
"zh:15b0a2b2b563d8d40f62f83057d91acb02cd0096f207488d8b4298a59203d64f",
"zh:23d919de139f7cd5ebfd2ff1b94e6d9913f0977fcfc2ca02e1573be53e269f95",
"zh:38081b3fe317c7e9555b2aaad325ad3fa516a886d2dfa8605ae6a809c1072138",
"zh:4a9c5065b178082f79ad8160243369c185214d874ff5048556d48d3edd03c4da",
"zh:5438ef6afe057945f28bce43d76c4401254073de01a774760169ac1058830ac2",
"zh:60b7fadc287166e5c9873dfe53a7976d98244979e0ab66428ea0dea1ebf33e06",
"zh:61c5ec1cb94e4c4a4fb1e4a24576d5f39a955f09afb17dab982de62b70a9bdd1",
"zh:a38fe9016ace5f911ab00c88e64b156ebbbbfb72a51a44da3c13d442cd214710",
"zh:c2c4d2b1fd9ebb291c57f524b3bf9d0994ff3e815c0cd9c9bcb87166dc687005",
"zh:d567bb8ce483ab2cf0602e07eae57027a1a53994aba470fa76095912a505533d",
"zh:e83bf05ab6a19dd8c43547ce9a8a511f8c331a124d11ac64687c764ab9d5a792",
"zh:e90c934b5cd65516fbcc454c89a150bfa726e7cf1fe749790c7480bbeb19d387",
"zh:f05f167d2eaf913045d8e7b88c13757e3cf595dd5cd333057fdafc7c4b7fed62",
"zh:fcc9c1cea5ce85e8bcb593862e699a881bd36dffd29e2e367f82d15368659c3d",
]
}

View File

@@ -0,0 +1,17 @@
terraform {
required_providers {
docker = {
source = "kreuzwerker/docker"
version = "~> 3.0.1" # Adjust version as needed
}
}
}
provider "docker" {}
resource "docker_network" "invalid_network" {
name = "my-invalid-network"
ipam_config {
subnet = "172.17.0.0/33"
}
}

Binary file added (14 KiB)
Binary file added (144 KiB)
Binary file added (58 KiB)
Binary file added (56 KiB)
Binary file added (71 KiB)
Binary file added (81 KiB)
Binary file added (87 KiB)
Binary file added (88 KiB)
Binary file added (48 KiB)
Binary file added (325 KiB)

View File

@@ -16,16 +16,13 @@ use std::{path::PathBuf, sync::Arc};
async fn main() {
let application = Arc::new(RustWebapp {
name: "harmony-example-tryrust".to_string(),
project_root: PathBuf::from("./tryrust.org"),
project_root: PathBuf::from("./tryrust.org"), // <== Project root, in this case it is a
// submodule
framework: Some(RustWebFramework::Leptos),
service_port: 8080,
});
let discord_receiver = DiscordWebhook {
name: "test-discord".to_string(),
url: hurl!("https://discord.doesnt.exist.com"),
};
// Define your Application deployment and the features you want
let app = ApplicationScore {
features: vec![
Box::new(PackagingDeployment {
@@ -33,7 +30,10 @@ async fn main() {
}),
Box::new(Monitoring {
application: application.clone(),
alert_receiver: vec![Box::new(discord_receiver)],
alert_receiver: vec![Box::new(DiscordWebhook {
name: "test-discord".to_string(),
url: hurl!("https://discord.doesnt.exist.com"),
})],
}),
],
application,
@@ -41,7 +41,7 @@ async fn main() {
harmony_cli::run(
Inventory::autoload(),
K8sAnywhereTopology::from_env(),
K8sAnywhereTopology::from_env(), // <== Deploy to local automatically provisioned k3d by default or connect to any kubernetes cluster
vec![Box::new(app)],
None,
)

View File

@@ -1,3 +1,5 @@
use std::time::Duration;
use derive_new::new;
use k8s_openapi::{
ClusterResourceScope, NamespaceResourceScope,
@@ -8,6 +10,7 @@ use kube::{
api::{Api, AttachParams, DeleteParams, ListParams, Patch, PatchParams, ResourceExt},
config::{KubeConfigOptions, Kubeconfig},
core::ErrorResponse,
error::DiscoveryError,
runtime::reflector::Lookup,
};
use kube::{api::DynamicObject, runtime::conditions};
@@ -17,9 +20,9 @@ use kube::{
};
use log::{debug, error, trace};
use serde::{Serialize, de::DeserializeOwned};
use serde_json::{Value, json};
use serde_json::json;
use similar::TextDiff;
use tokio::io::AsyncReadExt;
use tokio::{io::AsyncReadExt, time::sleep};
#[derive(new, Clone)]
pub struct K8sClient {
@@ -65,7 +68,7 @@ impl K8sClient {
} else {
Api::default_namespaced_with(self.client.clone(), &gvk)
};
Ok(resource.get(name).await?)
resource.get(name).await
}
pub async fn get_deployment(
@@ -78,7 +81,7 @@ impl K8sClient {
} else {
Api::default_namespaced(self.client.clone())
};
Ok(deps.get_opt(name).await?)
deps.get_opt(name).await
}
pub async fn get_pod(&self, name: &str, namespace: Option<&str>) -> Result<Option<Pod>, Error> {
@@ -87,7 +90,7 @@ impl K8sClient {
} else {
Api::default_namespaced(self.client.clone())
};
Ok(pods.get_opt(name).await?)
pods.get_opt(name).await
}
pub async fn scale_deployment(
@@ -153,6 +156,39 @@ impl K8sClient {
}
}
pub async fn wait_for_pod_ready(
&self,
pod_name: &str,
namespace: Option<&str>,
) -> Result<(), Error> {
let mut elapsed = 0;
let interval = 5; // seconds between checks
let timeout_secs = 120;
loop {
let pod = self.get_pod(pod_name, namespace).await?;
if let Some(p) = pod
&& let Some(status) = p.status
&& let Some(phase) = status.phase
&& phase.to_lowercase() == "running"
{
return Ok(());
}
if elapsed >= timeout_secs {
return Err(Error::Discovery(DiscoveryError::MissingResource(format!(
"'{}' in ns '{}' did not become ready within {}s",
pod_name,
namespace.unwrap_or("default"), // avoid panicking when no namespace was given
timeout_secs
))));
}
sleep(Duration::from_secs(interval)).await;
elapsed += interval;
}
}
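// Usage sketch: `verify_user_workload` later in this changeset waits on the
// user-workload pods with calls of this shape (hypothetical `client` value):
//   client.wait_for_pod_ready("prometheus-user-workload-0", Some("openshift-user-workload-monitoring")).await?;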
/// Will execute a command in the first pod found that matches the specified label
/// '{label}={name}'
pub async fn exec_app_capture_output(
@@ -199,7 +235,7 @@ impl K8sClient {
if let Some(s) = status.status {
let mut stdout_buf = String::new();
if let Some(mut stdout) = process.stdout().take() {
if let Some(mut stdout) = process.stdout() {
stdout
.read_to_string(&mut stdout_buf)
.await
@@ -419,9 +455,12 @@ impl K8sClient {
.as_str()
.expect("couldn't get kind as str");
let split: Vec<&str> = api_version.splitn(2, "/").collect();
let g = split[0];
let v = split[1];
let mut it = api_version.splitn(2, '/');
let first = it.next().unwrap();
let (g, v) = match it.next() {
Some(second) => (first, second),
None => ("", first),
};
let gvk = GroupVersionKind::gvk(g, v, kind);
let api_resource = ApiResource::from_gvk(&gvk);

View File

@@ -212,11 +212,11 @@ impl K8sAnywhereTopology {
.await?;
let ready_replicas = ic.data["status"]["availableReplicas"].as_i64().unwrap_or(0);
if ready_replicas >= 1 {
return Ok(());
Ok(())
} else {
return Err(PreparationError::new(
Err(PreparationError::new(
"openshift-ingress-operator not available".to_string(),
));
))
}
}

View File

@@ -11,7 +11,7 @@ pub struct InventoryRepositoryFactory;
impl InventoryRepositoryFactory {
pub async fn build() -> Result<Box<dyn InventoryRepository>, RepoError> {
Ok(Box::new(
SqliteInventoryRepository::new(&(*DATABASE_URL)).await?,
SqliteInventoryRepository::new(&DATABASE_URL).await?,
))
}
}

View File

@@ -36,7 +36,7 @@ impl HttpServer for OPNSenseFirewall {
async fn serve_file_content(&self, file: &FileContent) -> Result<(), ExecutorError> {
let path = match &file.path {
crate::data::FilePath::Relative(path) => {
format!("{OPNSENSE_HTTP_ROOT_PATH}/{}", path.to_string())
format!("{OPNSENSE_HTTP_ROOT_PATH}/{}", path)
}
crate::data::FilePath::Absolute(path) => {
return Err(ExecutorError::ConfigurationError(format!(

View File

@@ -182,16 +182,12 @@ pub(crate) fn get_health_check_for_backend(
let uppercase = binding.as_str();
match uppercase {
"TCP" => {
if let Some(checkport) = haproxy_health_check.checkport.content.as_ref() {
if !checkport.is_empty() {
return Some(HealthCheck::TCP(Some(checkport.parse().unwrap_or_else(
|_| {
panic!(
"HAProxy check port should be a valid port number, got {checkport}"
)
},
))));
}
if let Some(checkport) = haproxy_health_check.checkport.content.as_ref()
&& !checkport.is_empty()
{
return Some(HealthCheck::TCP(Some(checkport.parse().unwrap_or_else(
|_| panic!("HAProxy check port should be a valid port number, got {checkport}"),
))));
}
Some(HealthCheck::TCP(None))
}

View File

@@ -8,7 +8,6 @@ mod tftp;
use std::sync::Arc;
pub use management::*;
use opnsense_config_xml::Host;
use tokio::sync::RwLock;
use crate::{executors::ExecutorError, topology::LogicalHost};

View File

@@ -1,10 +1,8 @@
use async_trait::async_trait;
use kube::{Api, api::GroupVersionKind};
use log::{debug, warn};
use kube::api::GroupVersionKind;
use non_blank_string_rs::NonBlankString;
use serde::Serialize;
use serde::de::DeserializeOwned;
use std::{process::Command, str::FromStr, sync::Arc};
use std::{str::FromStr, sync::Arc};
use crate::{
data::Version,
@@ -12,10 +10,7 @@ use crate::{
inventory::Inventory,
modules::helm::chart::{HelmChartScore, HelmRepository},
score::Score,
topology::{
HelmCommand, K8sclient, PreparationError, PreparationOutcome, Topology, ingress::Ingress,
k8s::K8sClient,
},
topology::{HelmCommand, K8sclient, Topology, ingress::Ingress, k8s::K8sClient},
};
use harmony_types::id::Id;
@@ -55,7 +50,8 @@ impl<T: Topology + K8sclient + HelmCommand + Ingress> Interpret<T> for ArgoInter
topology: &T,
) -> Result<Outcome, InterpretError> {
let k8s_client = topology.k8s_client().await?;
let domain = topology.get_domain("argo").await?;
let svc = format!("argo-{}", self.score.namespace.clone());
let domain = topology.get_domain(&svc).await?;
let helm_score =
argo_helm_chart_score(&self.score.namespace, self.score.openshift, &domain);
@@ -66,14 +62,17 @@ impl<T: Topology + K8sclient + HelmCommand + Ingress> Interpret<T> for ArgoInter
.await
.unwrap();
Ok(Outcome::success(format!(
"ArgoCD installed with {} {}",
self.argo_apps.len(),
match self.argo_apps.len() {
1 => "application",
_ => "applications",
}
)))
Ok(Outcome::success_with_details(
format!(
"ArgoCD {} {}",
self.argo_apps.len(),
match self.argo_apps.len() {
1 => "application",
_ => "applications",
}
),
vec![format!("argo application: http://{}", domain)],
))
}
fn get_name(&self) -> InterpretName {
@@ -115,13 +114,13 @@ impl ArgoInterpret {
match ic.data["status"]["domain"].as_str() {
Some(domain) => return Ok(domain.to_string()),
None => return Err(InterpretError::new("Could not find domain".to_string())),
None => Err(InterpretError::new("Could not find domain".to_string())),
}
}
false => {
todo!()
}
};
}
}
}
@@ -156,6 +155,9 @@ global:
## Used for ingresses, certificates, SSO, notifications, etc.
domain: {domain}
securityContext:
runAsUser: null
# -- Runtime class name for all components
runtimeClassName: ""
@@ -467,6 +469,13 @@ redis:
# -- Redis name
name: redis
serviceAccount:
create: true
securityContext:
runAsUser: null
## Redis image
image:
# -- Redis repository

View File

@@ -190,7 +190,7 @@ impl<
info!("Deploying {} to target {target:?}", self.application.name());
let score = ArgoHelmScore {
namespace: format!("{}", self.application.name()),
namespace: self.application.name().to_string(),
openshift: true,
argo_apps: vec![ArgoApplication::from(CDApplicationConfig {
// helm pull oci://hub.nationtech.io/harmony/harmony-example-rust-webapp-chart --version 0.1.0
@@ -198,8 +198,8 @@ impl<
helm_chart_repo_url: "hub.nationtech.io/harmony".to_string(),
helm_chart_name: format!("{}-chart", self.application.name()),
values_overrides: None,
name: format!("{}", self.application.name()),
namespace: format!("{}", self.application.name()),
name: self.application.name().to_string(),
namespace: self.application.name().to_string(),
})],
};
score

View File

@@ -3,7 +3,6 @@ use std::sync::Arc;
use crate::modules::application::{
Application, ApplicationFeature, InstallationError, InstallationOutcome,
};
use crate::modules::monitoring::application_monitoring::application_monitoring_score::ApplicationMonitoringScore;
use crate::modules::monitoring::application_monitoring::rhobs_application_monitoring_score::ApplicationRHOBMonitoringScore;
use crate::modules::monitoring::kube_prometheus::crd::rhob_alertmanager_config::RHOBObservability;
@@ -64,12 +63,13 @@ impl<
application: self.application.clone(),
receivers: self.alert_receiver.clone(),
};
let domain = topology
.get_domain("ntfy")
.await
.map_err(|e| format!("could not get domain {e}"))?;
let ntfy = NtfyScore {
namespace: namespace.clone(),
host: topology
.get_domain("ntfy")
.await
.map_err(|e| format!("Could not get domain {e}"))?,
host: domain.clone(),
};
ntfy.interpret(&Inventory::empty(), topology)
.await
@@ -91,27 +91,33 @@ impl<
.replace("=", "");
debug!("ntfy_default_auth_param: {ntfy_default_auth_param}");
let ntfy_receiver = WebhookReceiver {
name: "ntfy-webhook".to_string(),
url: Url::Url(
url::Url::parse(
format!(
"http://ntfy.{}.svc.cluster.local/rust-web-app?auth={ntfy_default_auth_param}",
namespace.clone()
"http://{domain}/{}?auth={ntfy_default_auth_param}",
self.application.name()
)
.as_str(),
)
.unwrap(),
),
};
debug!(
"ntfy webhook receiver \n{:#?}\nntfy topic: {}",
ntfy_receiver.clone(),
self.application.name()
);
alerting_score.receivers.push(Box::new(ntfy_receiver));
alerting_score
.interpret(&Inventory::empty(), topology)
.await
.map_err(|e| e.to_string())?;
Ok(InstallationOutcome::success())
Ok(InstallationOutcome::success_with_details(vec![format!(
"ntfy topic: {}",
self.application.name()
)]))
}
fn name(&self) -> String {
"Monitoring".to_string()

View File

@@ -194,10 +194,10 @@ impl RustWebapp {
Some(body_full(tar_data.into())),
);
while let Some(mut msg) = image_build_stream.next().await {
while let Some(msg) = image_build_stream.next().await {
trace!("Got bollard msg {msg:?}");
match msg {
Ok(mut msg) => {
Ok(msg) => {
if let Some(progress) = msg.progress_detail {
info!(
"Build progress {}/{}",
@@ -257,6 +257,7 @@ impl RustWebapp {
".harmony_generated",
"harmony",
"node_modules",
"Dockerfile.harmony",
];
let mut entries: Vec<_> = WalkDir::new(project_root)
.into_iter()
@@ -510,25 +511,23 @@ ingress:
fs::write(chart_dir.join("values.yaml"), values_yaml)?;
// Create templates/_helpers.tpl
let helpers_tpl = format!(
r#"
{{{{/*
let helpers_tpl = r#"
{{/*
Expand the name of the chart.
*/}}}}
{{{{- define "chart.name" -}}}}
{{{{- default .Chart.Name $.Values.nameOverride | trunc 63 | trimSuffix "-" }}}}
{{{{- end }}}}
*/}}
{{- define "chart.name" -}}
{{- default .Chart.Name $.Values.nameOverride | trunc 63 | trimSuffix "-" }}
{{- end }}
{{{{/*
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
*/}}}}
{{{{- define "chart.fullname" -}}}}
{{{{- $name := default .Chart.Name $.Values.nameOverride }}}}
{{{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}}}
{{{{- end }}}}
"#
);
*/}}
{{- define "chart.fullname" -}}
{{- $name := default .Chart.Name $.Values.nameOverride }}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
{{- end }}
"#.to_string();
fs::write(templates_dir.join("_helpers.tpl"), helpers_tpl)?;
// Create templates/service.yaml

View File

@@ -66,8 +66,7 @@ impl HelmCommandExecutor {
.is_none()
{
if self.chart.repo.is_none() {
return Err(std::io::Error::new(
ErrorKind::Other,
return Err(std::io::Error::other(
"Chart doesn't exist locally and no repo specified",
));
}
@@ -107,10 +106,10 @@ impl HelmCommandExecutor {
}
pub fn run_command(mut self, mut args: Vec<String>) -> Result<Output, std::io::Error> {
if let Some(d) = self.debug {
if d {
args.push("--debug".to_string());
}
if let Some(d) = self.debug
&& d
{
args.push("--debug".to_string());
}
let path = if let Some(p) = self.path {
@@ -234,28 +233,28 @@ impl HelmChart {
args.push(kv);
}
if let Some(crd) = self.include_crds {
if crd {
args.push("--include-crds".to_string());
}
if let Some(crd) = self.include_crds
&& crd
{
args.push("--include-crds".to_string());
}
if let Some(st) = self.skip_tests {
if st {
args.push("--skip-tests".to_string());
}
if let Some(st) = self.skip_tests
&& st
{
args.push("--skip-tests".to_string());
}
if let Some(sh) = self.skip_hooks {
if sh {
args.push("--no-hooks".to_string());
}
if let Some(sh) = self.skip_hooks
&& sh
{
args.push("--no-hooks".to_string());
}
if let Some(d) = self.debug {
if d {
args.push("--debug".to_string());
}
if let Some(d) = self.debug
&& d
{
args.push("--debug".to_string());
}
args

View File

@@ -63,7 +63,7 @@ impl<T: Topology + HttpServer> Interpret<T> for StaticFilesHttpInterpret {
}
for f in self.score.files.iter() {
http_server.serve_file_content(&f).await?
http_server.serve_file_content(f).await?
}
http_server.commit_config().await?;

View File

@@ -92,7 +92,7 @@ impl<T: Topology> Interpret<T> for DiscoverHostForRoleInterpret {
);
return Err(InterpretError::new(format!(
"Could not select host : {}",
e.to_string()
e
)));
}
}

View File

@@ -141,7 +141,10 @@ impl<T: Topology + K8sclient> Interpret<T> for K8sIngressInterpret {
InterpretStatus::SUCCESS => {
let details = match &self.namespace {
Some(namespace) => {
vec![format!("{} ({namespace}): {}", self.service, self.host)]
vec![format!(
"{} ({namespace}): http://{}",
self.service, self.host
)]
}
None => vec![format!("{}: {}", self.service, self.host)],
};

View File

@@ -35,6 +35,24 @@ pub struct DiscordWebhook {
#[async_trait]
impl AlertReceiver<RHOBObservability> for DiscordWebhook {
async fn install(&self, sender: &RHOBObservability) -> Result<Outcome, InterpretError> {
let ns = sender.namespace.clone();
let secret_name = format!("{}-secret", self.name.clone());
let webhook_key = format!("{}", self.url.clone());
let mut string_data = BTreeMap::new();
string_data.insert("webhook-url".to_string(), webhook_key.clone());
let secret = Secret {
metadata: kube::core::ObjectMeta {
name: Some(secret_name.clone()),
..Default::default()
},
string_data: Some(string_data),
type_: Some("Opaque".to_string()),
..Default::default()
};
let _ = sender.client.apply(&secret, Some(&ns)).await;
let spec = crate::modules::monitoring::kube_prometheus::crd::rhob_alertmanager_config::AlertmanagerConfigSpec {
data: json!({
"route": {
@@ -43,9 +61,14 @@ impl AlertReceiver<RHOBObservability> for DiscordWebhook {
"receivers": [
{
"name": self.name,
"webhookConfigs": [
"discordConfigs": [
{
"url": self.url,
"apiURL": {
"name": secret_name,
"key": "webhook-url",
},
"title": "{{ template \"discord.default.title\" . }}",
"message": "{{ template \"discord.default.message\" . }}"
}
]
}

View File

@@ -43,6 +43,11 @@ impl AlertReceiver<RHOBObservability> for WebhookReceiver {
"webhookConfigs": [
{
"url": self.url,
"httpConfig": {
"tlsConfig": {
"insecureSkipVerify": true
}
}
}
]
}

View File

@@ -9,9 +9,7 @@ use crate::{
inventory::Inventory,
modules::{
application::Application,
monitoring::kube_prometheus::crd::{
crd_alertmanager_config::CRDPrometheus, rhob_alertmanager_config::RHOBObservability,
},
monitoring::kube_prometheus::crd::rhob_alertmanager_config::RHOBObservability,
prometheus::prometheus::PrometheusApplicationMonitoring,
},
score::Score,

View File

@@ -1,12 +1,8 @@
use std::collections::BTreeMap;
use kube::CustomResource;
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
use crate::modules::monitoring::kube_prometheus::crd::rhob_prometheuses::{
LabelSelector, PrometheusSpec,
};
use crate::modules::monitoring::kube_prometheus::crd::rhob_prometheuses::LabelSelector;
/// MonitoringStack CRD for monitoring.rhobs/v1alpha1
#[derive(CustomResource, Serialize, Deserialize, Debug, Clone, JsonSchema)]

View File

@@ -4,4 +4,5 @@ pub mod application_monitoring;
pub mod grafana;
pub mod kube_prometheus;
pub mod ntfy;
pub mod okd;
pub mod prometheus;

View File

@@ -0,0 +1,149 @@
use std::{collections::BTreeMap, sync::Arc};
use crate::{
data::Version,
interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
inventory::Inventory,
score::Score,
topology::{K8sclient, Topology, k8s::K8sClient},
};
use async_trait::async_trait;
use harmony_types::id::Id;
use k8s_openapi::api::core::v1::ConfigMap;
use kube::api::ObjectMeta;
use serde::Serialize;
#[derive(Clone, Debug, Serialize)]
pub struct OpenshiftUserWorkloadMonitoring {}
impl<T: Topology + K8sclient> Score<T> for OpenshiftUserWorkloadMonitoring {
fn name(&self) -> String {
"OpenshiftUserWorkloadMonitoringScore".to_string()
}
fn create_interpret(&self) -> Box<dyn Interpret<T>> {
Box::new(OpenshiftUserWorkloadMonitoringInterpret {})
}
}
#[derive(Clone, Debug, Serialize)]
pub struct OpenshiftUserWorkloadMonitoringInterpret {}
#[async_trait]
impl<T: Topology + K8sclient> Interpret<T> for OpenshiftUserWorkloadMonitoringInterpret {
async fn execute(
&self,
_inventory: &Inventory,
topology: &T,
) -> Result<Outcome, InterpretError> {
let client = topology.k8s_client().await.unwrap();
self.update_cluster_monitoring_config_cm(&client).await?;
self.update_user_workload_monitoring_config_cm(&client)
.await?;
self.verify_user_workload(&client).await?;
Ok(Outcome::success(
"successfully enabled user-workload-monitoring".to_string(),
))
}
fn get_name(&self) -> InterpretName {
InterpretName::Custom("OpenshiftUserWorkloadMonitoring")
}
fn get_version(&self) -> Version {
todo!()
}
fn get_status(&self) -> InterpretStatus {
todo!()
}
fn get_children(&self) -> Vec<Id> {
todo!()
}
}
impl OpenshiftUserWorkloadMonitoringInterpret {
pub async fn update_cluster_monitoring_config_cm(
&self,
client: &Arc<K8sClient>,
) -> Result<Outcome, InterpretError> {
let mut data = BTreeMap::new();
data.insert(
"config.yaml".to_string(),
r#"
enableUserWorkload: true
alertmanagerMain:
enableUserAlertmanagerConfig: true
"#
.to_string(),
);
let cm = ConfigMap {
metadata: ObjectMeta {
name: Some("cluster-monitoring-config".to_string()),
namespace: Some("openshift-monitoring".to_string()),
..Default::default()
},
data: Some(data),
..Default::default()
};
client.apply(&cm, Some("openshift-monitoring")).await?;
Ok(Outcome::success(
"updated cluster-monitoring-config-map".to_string(),
))
}
pub async fn update_user_workload_monitoring_config_cm(
&self,
client: &Arc<K8sClient>,
) -> Result<Outcome, InterpretError> {
let mut data = BTreeMap::new();
data.insert(
"config.yaml".to_string(),
r#"
alertmanager:
enabled: true
enableAlertmanagerConfig: true
"#
.to_string(),
);
let cm = ConfigMap {
metadata: ObjectMeta {
name: Some("user-workload-monitoring-config".to_string()),
namespace: Some("openshift-user-workload-monitoring".to_string()),
..Default::default()
},
data: Some(data),
..Default::default()
};
client
.apply(&cm, Some("openshift-user-workload-monitoring"))
.await?;
Ok(Outcome::success(
"updated openshift-user-monitoring-config-map".to_string(),
))
}
pub async fn verify_user_workload(
&self,
client: &Arc<K8sClient>,
) -> Result<Outcome, InterpretError> {
let namespace = "openshift-user-workload-monitoring";
let alertmanager_name = "alertmanager-user-workload-0";
let prometheus_name = "prometheus-user-workload-0";
client
.wait_for_pod_ready(alertmanager_name, Some(namespace))
.await?;
client
.wait_for_pod_ready(prometheus_name, Some(namespace))
.await?;
Ok(Outcome::success(format!(
"pods: {}, {} ready in ns: {}",
alertmanager_name, prometheus_name, namespace
)))
}
}
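// Usage sketch (hypothetical wiring, mirroring the README pattern):
//   harmony_cli::run(
//       Inventory::autoload(),
//       K8sAnywhereTopology::from_env(),
//       vec![Box::new(OpenshiftUserWorkloadMonitoring {})],
//       None,
//   ).await.unwrap();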

View File

@@ -0,0 +1 @@
pub mod enable_user_workload;

View File

@@ -52,6 +52,12 @@ pub struct OKDSetup02BootstrapInterpret {
status: InterpretStatus,
}
impl Default for OKDSetup02BootstrapInterpret {
fn default() -> Self {
Self::new()
}
}
impl OKDSetup02BootstrapInterpret {
pub fn new() -> Self {
let version = Version::from("1.0.0").unwrap();
@@ -98,9 +104,9 @@ impl OKDSetup02BootstrapInterpret {
InterpretError::new(format!("Failed to create okd installation directory : {e}"))
})?;
if !exit_status.success() {
return Err(InterpretError::new(format!(
"Failed to create okd installation directory"
)));
return Err(InterpretError::new(
"Failed to create okd installation directory".to_string(),
));
} else {
info!(
"Created OKD installation directory {}",

View File

@@ -42,7 +42,7 @@ pub fn alert_pod_not_ready() -> PrometheusAlertRule {
PrometheusAlertRule {
alert: "PodNotReady".into(),
expr: "kube_pod_status_ready{condition=\"true\"} == 0".into(),
r#for: Some("2m".into()),
r#for: Some("30s".into()),
labels: HashMap::from([("severity".into(), "warning".into())]),
annotations: HashMap::from([
("summary".into(), "Pod is not ready".into()),

View File

@@ -12,9 +12,6 @@ use std::process::Command;
use crate::modules::k8s::ingress::{K8sIngressScore, PathType};
use crate::modules::monitoring::kube_prometheus::crd::grafana_default_dashboard::build_default_dashboard;
use crate::modules::monitoring::kube_prometheus::crd::rhob_alertmanager_config::RHOBObservability;
use crate::modules::monitoring::kube_prometheus::crd::rhob_alertmanagers::{
Alertmanager, AlertmanagerSpec,
};
use crate::modules::monitoring::kube_prometheus::crd::rhob_grafana::{
Grafana, GrafanaDashboard, GrafanaDashboardSpec, GrafanaDatasource, GrafanaDatasourceConfig,
GrafanaDatasourceSpec, GrafanaSpec,
@@ -25,13 +22,8 @@ use crate::modules::monitoring::kube_prometheus::crd::rhob_monitoring_stack::{
use crate::modules::monitoring::kube_prometheus::crd::rhob_prometheus_rules::{
PrometheusRule, PrometheusRuleSpec, RuleGroup,
};
use crate::modules::monitoring::kube_prometheus::crd::rhob_prometheuses::{
AlertmanagerEndpoints, LabelSelector, PrometheusSpec, PrometheusSpecAlerting,
};
use crate::modules::monitoring::kube_prometheus::crd::rhob_prometheuses::LabelSelector;
use crate::modules::monitoring::kube_prometheus::crd::rhob_role::{
build_prom_role, build_prom_rolebinding, build_prom_service_account,
};
use crate::modules::monitoring::kube_prometheus::crd::rhob_service_monitor::{
ServiceMonitor, ServiceMonitorSpec,
};
@@ -262,7 +254,7 @@ impl RHOBAlertingInterpret {
let stack = MonitoringStack {
metadata: ObjectMeta {
name: Some(format!("{}-monitoring", self.sender.namespace.clone()).into()),
name: Some(format!("{}-monitoring", self.sender.namespace.clone())),
namespace: Some(self.sender.namespace.clone()),
labels: Some([("monitoring-stack".into(), "true".into())].into()),
..Default::default()
@@ -286,7 +278,7 @@ impl RHOBAlertingInterpret {
.get_domain(&format!("alert-manager-{}", self.sender.namespace.clone()))
.await?;
let name = format!("{}-alert-manager", self.sender.namespace.clone());
let backend_service = format!("alertmanager-operated");
let backend_service = "alertmanager-operated".to_string();
let namespace = self.sender.namespace.clone();
let alert_manager_ingress = K8sIngressScore {
name: fqdn!(&name),
@@ -303,7 +295,7 @@ impl RHOBAlertingInterpret {
.get_domain(&format!("prometheus-{}", self.sender.namespace.clone()))
.await?;
let name = format!("{}-prometheus", self.sender.namespace.clone());
let backend_service = format!("prometheus-operated");
let backend_service = "prometheus-operated".to_string();
let prometheus_ingress = K8sIngressScore {
name: fqdn!(&name),
host: fqdn!(&prometheus_domain),

View File

@@ -25,7 +25,7 @@ pub struct CephRemoveOsd {
impl<T: Topology + K8sclient> Score<T> for CephRemoveOsd {
fn name(&self) -> String {
format!("CephRemoveOsdScore")
"CephRemoveOsdScore".to_string()
}
#[doc(hidden)]
@@ -118,14 +118,14 @@ impl CephRemoveOsdInterpret {
if let Some(status) = deployment.status {
let ready_count = status.ready_replicas.unwrap_or(0);
if ready_count >= 1 {
return Ok(Outcome::success(format!(
Ok(Outcome::success(format!(
"'{}' is ready with {} replica(s).",
&toolbox_dep, ready_count
)));
)))
} else {
return Err(InterpretError::new(
Err(InterpretError::new(
"ceph-tool-box not ready in cluster".to_string(),
));
))
}
} else {
Err(InterpretError::new(format!(
@@ -181,15 +181,14 @@ impl CephRemoveOsdInterpret {
)
.await?;
if let Some(deployment) = dep {
if let Some(status) = deployment.status {
if status.replicas.unwrap_or(1) == 0 && status.ready_replicas.unwrap_or(1) == 0
{
return Ok(Outcome::success(
"Deployment successfully scaled down.".to_string(),
));
}
}
if let Some(deployment) = dep
&& let Some(status) = deployment.status
&& status.replicas.unwrap_or(1) == 0
&& status.ready_replicas.unwrap_or(1) == 0
{
return Ok(Outcome::success(
"Deployment successfully scaled down.".to_string(),
));
}
if start.elapsed() > timeout {

View File

@@ -20,7 +20,7 @@ pub struct CephVerifyClusterHealth {
impl<T: Topology + K8sclient> Score<T> for CephVerifyClusterHealth {
fn name(&self) -> String {
format!("CephValidateClusterHealth")
"CephValidateClusterHealth".to_string()
}
fn create_interpret(&self) -> Box<dyn Interpret<T>> {
@@ -80,14 +80,14 @@ impl CephVerifyClusterHealthInterpret {
if let Some(status) = deployment.status {
let ready_count = status.ready_replicas.unwrap_or(0);
if ready_count >= 1 {
return Ok(Outcome::success(format!(
Ok(Outcome::success(format!(
"'{}' is ready with {} replica(s).",
&toolbox_dep, ready_count
)));
)))
} else {
return Err(InterpretError::new(
Err(InterpretError::new(
"ceph-tool-box not ready in cluster".to_string(),
));
))
}
} else {
Err(InterpretError::new(format!(
@@ -123,9 +123,9 @@ impl CephVerifyClusterHealthInterpret {
.await?;
if health.contains("HEALTH_OK") {
return Ok(Outcome::success(
Ok(Outcome::success(
"Ceph Cluster in healthy state".to_string(),
));
))
} else {
Err(InterpretError::new(format!(
"Ceph cluster unhealthy {}",