Compare commits
1 commit: feat/gen-c...secrets-pr

| Author | SHA1 | Date |
|---|---|---|
|  | c5f46d676b |  |
Cargo.lock (generated) · 1 change

@@ -3124,6 +3124,7 @@ dependencies = [
  "fxhash",
  "newline-converter",
  "once_cell",
+ "tempfile",
  "unicode-segmentation",
  "unicode-width 0.1.14",
 ]
Cargo.toml · 15 changes

@@ -14,7 +14,8 @@ members = [
     "harmony_composer",
     "harmony_inventory_agent",
     "harmony_secret_derive",
-    "harmony_secret", "adr/agent_discovery/mdns",
+    "harmony_secret",
+    "adr/agent_discovery/mdns",
 ]
 
 [workspace.package]
@@ -50,7 +51,7 @@ k8s-openapi = { version = "0.25", features = ["v1_30"] }
 serde_yaml = "0.9"
 serde-value = "0.7"
 http = "1.2"
-inquire = "0.7"
+inquire = { version = "0.7", features = ["editor"] }
 convert_case = "0.8"
 chrono = "0.4"
 similar = "2"
@@ -66,5 +67,11 @@ thiserror = "2.0.14"
 serde = { version = "1.0.209", features = ["derive", "rc"] }
 serde_json = "1.0.127"
 askama = "0.14"
-sqlx = { version = "0.8", features = ["runtime-tokio", "sqlite" ] }
+sqlx = { version = "0.8", features = ["runtime-tokio", "sqlite"] }
-reqwest = { version = "0.12", features = ["blocking", "stream", "rustls-tls", "http2", "json"], default-features = false }
+reqwest = { version = "0.12", features = [
+    "blocking",
+    "stream",
+    "rustls-tls",
+    "http2",
+    "json",
+], default-features = false }
README.md · 69 changes

@@ -36,59 +36,48 @@ These principles surface as simple, ergonomic Rust APIs that let teams focus on
 
 ## 2 · Quick Start
 
-The snippet below spins up a complete **production-grade Rust + Leptos Webapp** with monitoring. Swap it for your own scores to deploy anything from microservices to machine-learning pipelines.
+The snippet below spins up a complete **production-grade LAMP stack** with monitoring. Swap it for your own scores to deploy anything from microservices to machine-learning pipelines.
 
 ```rust
 use harmony::{
+    data::Version,
     inventory::Inventory,
+    maestro::Maestro,
     modules::{
-        application::{
-            ApplicationScore, RustWebFramework, RustWebapp,
-            features::{PackagingDeployment, rhob_monitoring::Monitoring},
-        },
-        monitoring::alert_channel::discord_alert_channel::DiscordWebhook,
+        lamp::{LAMPConfig, LAMPScore},
+        monitoring::monitoring_alerting::MonitoringAlertingStackScore,
     },
-    topology::K8sAnywhereTopology,
+    topology::{K8sAnywhereTopology, Url},
 };
-use harmony_macros::hurl;
-use std::{path::PathBuf, sync::Arc};
 
 #[tokio::main]
 async fn main() {
-    let application = Arc::new(RustWebapp {
-        name: "harmony-example-leptos".to_string(),
-        project_root: PathBuf::from(".."), // <== Your project root, usually .. if you use the standard `/harmony` folder
-        framework: Some(RustWebFramework::Leptos),
-        service_port: 8080,
-    });
-
-    // Define your Application deployment and the features you want
-    let app = ApplicationScore {
-        features: vec![
-            Box::new(PackagingDeployment {
-                application: application.clone(),
-            }),
-            Box::new(Monitoring {
-                application: application.clone(),
-                alert_receiver: vec![
-                    Box::new(DiscordWebhook {
-                        name: "test-discord".to_string(),
-                        url: hurl!("https://discord.doesnt.exist.com"), // <== Get your discord webhook url
-                    }),
-                ],
-            }),
-        ],
-        application,
-    };
+    // 1. Describe what you want
+    let lamp_stack = LAMPScore {
+        name: "harmony-lamp-demo".into(),
+        domain: Url::Url(url::Url::parse("https://lampdemo.example.com").unwrap()),
+        php_version: Version::from("8.3.0").unwrap(),
+        config: LAMPConfig {
+            project_root: "./php".into(),
+            database_size: "4Gi".into(),
+            ..Default::default()
+        },
+    };
 
+    // 2. Enhance with extra scores (monitoring, CI/CD, …)
+    let mut monitoring = MonitoringAlertingStackScore::new();
+    monitoring.namespace = Some(lamp_stack.config.namespace.clone());
 
+    // 3. Run your scores on the desired topology & inventory
     harmony_cli::run(
-        Inventory::autoload(),
-        K8sAnywhereTopology::from_env(), // <== Deploy to local automatically provisioned local k3d by default or connect to any kubernetes cluster
-        vec![Box::new(app)],
-        None,
-    )
-    .await
-    .unwrap();
+        Inventory::autoload(), // auto-detect hardware / kube-config
+        K8sAnywhereTopology::from_env(), // local k3d, CI, staging, prod…
+        vec![
+            Box::new(lamp_stack),
+            Box::new(monitoring)
+        ],
+        None
+    ).await.unwrap();
 }
 ```
@@ -1,3 +0,0 @@
-.terraform
-*.tfstate
-venv
(7 image files deleted)
@@ -1,5 +0,0 @@
-To build :
-
-```bash
-npx @marp-team/marp-cli@latest -w slides.md
-```
(1 image file deleted)
@@ -1,9 +0,0 @@
-To run this :
-
-```bash
-virtualenv venv
-source venv/bin/activate
-pip install ansible ansible-dev-tools
-ansible-lint download.yml
-ansible-playbook -i localhost download.yml
-```
@@ -1,8 +0,0 @@
-- name: Test Ansible URL Validation
-  hosts: localhost
-  tasks:
-    - name: Download a file
-      ansible.builtin.get_url:
-        url: "http:/wikipedia.org/"
-        dest: "/tmp/ansible-test/wikipedia.html"
-        mode: '0900'
(5 image files deleted)
@@ -1,241 +0,0 @@
----
-theme: uncover
----
-
-# Voici l'histoire de Petit Poisson
-
----
-
-<img src="./Happy_swimmer.jpg" width="600"/>
-
----
-
-<img src="./happy_landscape_swimmer.jpg" width="1000"/>
-
----
-
-<img src="./Happy_swimmer.jpg" width="200"/>
-
-<img src="./tryrust.org.png" width="600"/>
-
-[https://tryrust.org](https://tryrust.org)
-
----
-
-<img src="./texto_deploy_prod_1.png" width="600"/>
-
----
-
-<img src="./texto_deploy_prod_2.png" width="600"/>
-
----
-
-<img src="./texto_deploy_prod_3.png" width="600"/>
-
----
-
-<img src="./texto_deploy_prod_4.png" width="600"/>
-
----
-
-## Demo time
-
----
-
-<img src="./Happy_swimmer_sunglasses.jpg" width="1000"/>
-
----
-
-<img src="./texto_download_wikipedia.png" width="600"/>
-
----
-
-<img src="./ansible.jpg" width="200"/>
-
-## Ansible❓
-
----
-
-<img src="./Happy_swimmer.jpg" width="200"/>
-
-```yaml
-- name: Download wikipedia
-  hosts: localhost
-  tasks:
-    - name: Download a file
-      ansible.builtin.get_url:
-        url: "https:/wikipedia.org/"
-        dest: "/tmp/ansible-test/wikipedia.html"
-        mode: '0900'
-```
-
----
-
-<img src="./Happy_swimmer.jpg" width="200"/>
-
-```
-ansible-lint download.yml
-
-Passed: 0 failure(s), 0 warning(s) on 1 files. Last profile that met the validation criteria was 'production'.
-```
-
----
-
-```
-git push
-```
-
----
-
-<img src="./75_years_later.jpg" width="1100"/>
-
----
-
-<img src="./texto_download_wikipedia_fail.png" width="600"/>
-
----
-
-<img src="./Happy_swimmer_reversed.jpg" width="600"/>
-
----
-
-<img src="./ansible_output_fail.jpg" width="1100"/>
-
----
-
-<img src="./Happy_swimmer_reversed_1hit.jpg" width="600"/>
-
----
-
-<img src="./ansible_crossed_out.jpg" width="400"/>
-
----
-
-
-<img src="./terraform.jpg" width="400"/>
-
-## Terraform❓❗
-
----
-
-<img src="./Happy_swimmer_reversed_1hit.jpg" width="200"/>
-<img src="./terraform.jpg" width="200"/>
-
-```tf
-provider "docker" {}
-
-resource "docker_network" "invalid_network" {
-  name = "my-invalid-network"
-
-  ipam_config {
-    subnet = "172.17.0.0/33"
-  }
-}
-```
-
----
-
-<img src="./Happy_swimmer_reversed_1hit.jpg" width="100"/>
-<img src="./terraform.jpg" width="200"/>
-
-```
-terraform plan
-
-Terraform used the selected providers to generate the following execution plan.
-Resource actions are indicated with the following symbols:
-  + create
-
-Terraform will perform the following actions:
-
-  # docker_network.invalid_network will be created
-  + resource "docker_network" "invalid_network" {
-      + driver = (known after apply)
-      + id = (known after apply)
-      + internal = (known after apply)
-      + ipam_driver = "default"
-      + name = "my-invalid-network"
-      + options = (known after apply)
-      + scope = (known after apply)
-
-      + ipam_config {
-          + subnet = "172.17.0.0/33"
-            # (2 unchanged attributes hidden)
-        }
-    }
-
-Plan: 1 to add, 0 to change, 0 to destroy.
-```
-
----
-
-✅
-
----
-
-```
-terraform apply
-```
-
----
-
-```
-Plan: 1 to add, 0 to change, 0 to destroy.
-
-Do you want to perform these actions?
-Terraform will perform the actions described above.
-Only 'yes' will be accepted to approve.
-
-Enter a value: yes
-```
-
----
-
-```
-docker_network.invalid_network: Creating...
-╷
-│ Error: Unable to create network: Error response from daemon: invalid network config:
-│ invalid subnet 172.17.0.0/33: invalid CIDR block notation
-│
-│ with docker_network.invalid_network,
-│ on main.tf line 11, in resource "docker_network" "invalid_network":
-│ 11: resource "docker_network" "invalid_network" {
-│
-╵
-```
-
----
-
-
-<img src="./Happy_swimmer_reversed_fullhit.jpg" width="1100"/>
-
----
-
-<img src="./ansible_crossed_out.jpg" width="300"/>
-<img src="./terraform_crossed_out.jpg" width="400"/>
-<img src="./Happy_swimmer_reversed_fullhit.jpg" width="300"/>
-
----
-
-## Harmony❓❗
-
----
-
-Demo time
-
----
-
-<img src="./Happy_swimmer.jpg" width="300"/>
-
----
-
-# 🎼
-
-Harmony : [https://git.nationtech.io/nationtech/harmony](https://git.nationtech.io/nationtech/harmony)
-
-
-<img src="./qrcode_gitea_nationtech.png" width="120"/>
-
-
-LinkedIn : [https://www.linkedin.com/in/jean-gabriel-gill-couture/](https://www.linkedin.com/in/jean-gabriel-gill-couture/)
-
-Courriel : [jg@nationtech.io](mailto:jg@nationtech.io)
(1 image file deleted)
@@ -1,40 +0,0 @@
-# This file is maintained automatically by "terraform init".
-# Manual edits may be lost in future updates.
-
-provider "registry.terraform.io/hashicorp/http" {
-  version = "3.5.0"
-  hashes = [
-    "h1:8bUoPwS4hahOvzCBj6b04ObLVFXCEmEN8T/5eOHmWOM=",
-    "zh:047c5b4920751b13425efe0d011b3a23a3be97d02d9c0e3c60985521c9c456b7",
-    "zh:157866f700470207561f6d032d344916b82268ecd0cf8174fb11c0674c8d0736",
-    "zh:1973eb9383b0d83dd4fd5e662f0f16de837d072b64a6b7cd703410d730499476",
-    "zh:212f833a4e6d020840672f6f88273d62a564f44acb0c857b5961cdb3bbc14c90",
-    "zh:2c8034bc039fffaa1d4965ca02a8c6d57301e5fa9fff4773e684b46e3f78e76a",
-    "zh:5df353fc5b2dd31577def9cc1a4ebf0c9a9c2699d223c6b02087a3089c74a1c6",
-    "zh:672083810d4185076c81b16ad13d1224b9e6ea7f4850951d2ab8d30fa6e41f08",
-    "zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
-    "zh:7b4200f18abdbe39904b03537e1a78f21ebafe60f1c861a44387d314fda69da6",
-    "zh:843feacacd86baed820f81a6c9f7bd32cf302db3d7a0f39e87976ebc7a7cc2ee",
-    "zh:a9ea5096ab91aab260b22e4251c05f08dad2ed77e43e5e4fadcdfd87f2c78926",
-    "zh:d02b288922811739059e90184c7f76d45d07d3a77cc48d0b15fd3db14e928623",
-  ]
-}
-
-provider "registry.terraform.io/hashicorp/local" {
-  version = "2.5.3"
-  hashes = [
-    "h1:1Nkh16jQJMp0EuDmvP/96f5Unnir0z12WyDuoR6HjMo=",
-    "zh:284d4b5b572eacd456e605e94372f740f6de27b71b4e1fd49b63745d8ecd4927",
-    "zh:40d9dfc9c549e406b5aab73c023aa485633c1b6b730c933d7bcc2fa67fd1ae6e",
-    "zh:6243509bb208656eb9dc17d3c525c89acdd27f08def427a0dce22d5db90a4c8b",
-    "zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
-    "zh:885d85869f927853b6fe330e235cd03c337ac3b933b0d9ae827ec32fa1fdcdbf",
-    "zh:bab66af51039bdfcccf85b25fe562cbba2f54f6b3812202f4873ade834ec201d",
-    "zh:c505ff1bf9442a889ac7dca3ac05a8ee6f852e0118dd9a61796a2f6ff4837f09",
-    "zh:d36c0b5770841ddb6eaf0499ba3de48e5d4fc99f4829b6ab66b0fab59b1aaf4f",
-    "zh:ddb6a407c7f3ec63efb4dad5f948b54f7f4434ee1a2607a49680d494b1776fe1",
-    "zh:e0dafdd4500bec23d3ff221e3a9b60621c5273e5df867bc59ef6b7e41f5c91f6",
-    "zh:ece8742fd2882a8fc9d6efd20e2590010d43db386b920b2a9c220cfecc18de47",
-    "zh:f4c6b3eb8f39105004cf720e202f04f57e3578441cfb76ca27611139bc116a82",
-  ]
-}
@@ -1,10 +0,0 @@
-provider "http" {}
-
-data "http" "remote_file" {
-  url = "http:/example.com/file.txt"
-}
-
-resource "local_file" "downloaded_file" {
-  content  = data.http.remote_file.body
-  filename = "${path.module}/downloaded_file.txt"
-}
@@ -1,24 +0,0 @@
-# This file is maintained automatically by "terraform init".
-# Manual edits may be lost in future updates.
-
-provider "registry.terraform.io/kreuzwerker/docker" {
-  version     = "3.0.2"
-  constraints = "~> 3.0.1"
-  hashes = [
-    "h1:cT2ccWOtlfKYBUE60/v2/4Q6Stk1KYTNnhxSck+VPlU=",
-    "zh:15b0a2b2b563d8d40f62f83057d91acb02cd0096f207488d8b4298a59203d64f",
-    "zh:23d919de139f7cd5ebfd2ff1b94e6d9913f0977fcfc2ca02e1573be53e269f95",
-    "zh:38081b3fe317c7e9555b2aaad325ad3fa516a886d2dfa8605ae6a809c1072138",
-    "zh:4a9c5065b178082f79ad8160243369c185214d874ff5048556d48d3edd03c4da",
-    "zh:5438ef6afe057945f28bce43d76c4401254073de01a774760169ac1058830ac2",
-    "zh:60b7fadc287166e5c9873dfe53a7976d98244979e0ab66428ea0dea1ebf33e06",
-    "zh:61c5ec1cb94e4c4a4fb1e4a24576d5f39a955f09afb17dab982de62b70a9bdd1",
-    "zh:a38fe9016ace5f911ab00c88e64b156ebbbbfb72a51a44da3c13d442cd214710",
-    "zh:c2c4d2b1fd9ebb291c57f524b3bf9d0994ff3e815c0cd9c9bcb87166dc687005",
-    "zh:d567bb8ce483ab2cf0602e07eae57027a1a53994aba470fa76095912a505533d",
-    "zh:e83bf05ab6a19dd8c43547ce9a8a511f8c331a124d11ac64687c764ab9d5a792",
-    "zh:e90c934b5cd65516fbcc454c89a150bfa726e7cf1fe749790c7480bbeb19d387",
-    "zh:f05f167d2eaf913045d8e7b88c13757e3cf595dd5cd333057fdafc7c4b7fed62",
-    "zh:fcc9c1cea5ce85e8bcb593862e699a881bd36dffd29e2e367f82d15368659c3d",
-  ]
-}
@@ -1,17 +0,0 @@
-terraform {
-  required_providers {
-    docker = {
-      source  = "kreuzwerker/docker"
-      version = "~> 3.0.1" # Adjust version as needed
-    }
-  }
-}
-provider "docker" {}
-
-resource "docker_network" "invalid_network" {
-  name = "my-invalid-network"
-
-  ipam_config {
-    subnet = "172.17.0.0/33"
-  }
-}
(10 image files deleted)
@@ -4,7 +4,8 @@ use harmony::{
     inventory::Inventory,
     modules::{
         application::{
-            ApplicationScore, RustWebFramework, RustWebapp, features::rhob_monitoring::Monitoring,
+            ApplicationScore, RustWebFramework, RustWebapp,
+            features::rhob_monitoring::RHOBMonitoring,
         },
         monitoring::alert_channel::discord_alert_channel::DiscordWebhook,
     },
@@ -28,7 +29,7 @@ async fn main() {
 
     let app = ApplicationScore {
         features: vec![
-            Box::new(Monitoring {
+            Box::new(RHOBMonitoring {
                 application: application.clone(),
                 alert_receiver: vec![Box::new(discord_receiver)],
             }),
@@ -5,7 +5,7 @@ use harmony::{
     modules::{
         application::{
            ApplicationScore, RustWebFramework, RustWebapp,
-            features::{Monitoring, PackagingDeployment},
+            features::{ContinuousDelivery, Monitoring},
         },
         monitoring::alert_channel::{
             discord_alert_channel::DiscordWebhook, webhook_receiver::WebhookReceiver,
@@ -36,7 +36,7 @@ async fn main() {
 
     let app = ApplicationScore {
         features: vec![
-            Box::new(PackagingDeployment {
+            Box::new(ContinuousDelivery {
                 application: application.clone(),
             }),
             Box::new(Monitoring {
@@ -1 +0,0 @@
-harmony
@@ -1,20 +0,0 @@
-[package]
-name = "harmony-tryrust"
-edition = "2024"
-version = "0.1.0"
-
-[dependencies]
-harmony = { path = "../../../nationtech/harmony/harmony" }
-harmony_cli = { path = "../../../nationtech/harmony/harmony_cli" }
-harmony_types = { path = "../../../nationtech/harmony/harmony_types" }
-harmony_macros = { path = "../../../nationtech/harmony/harmony_macros" }
-tokio = { version = "1.40", features = [
-    "io-std",
-    "fs",
-    "macros",
-    "rt-multi-thread",
-] }
-log = { version = "0.4", features = ["kv"] }
-env_logger = "0.11"
-url = "2.5"
-base64 = "0.22.1"
@@ -1,50 +0,0 @@
-use harmony::{
-    inventory::Inventory,
-    modules::{
-        application::{
-            ApplicationScore, RustWebFramework, RustWebapp,
-            features::{PackagingDeployment, rhob_monitoring::Monitoring},
-        },
-        monitoring::alert_channel::discord_alert_channel::DiscordWebhook,
-    },
-    topology::K8sAnywhereTopology,
-};
-use harmony_macros::hurl;
-use std::{path::PathBuf, sync::Arc};
-
-#[tokio::main]
-async fn main() {
-    let application = Arc::new(RustWebapp {
-        name: "tryrust".to_string(),
-        project_root: PathBuf::from(".."),
-        framework: Some(RustWebFramework::Leptos),
-        service_port: 8080,
-    });
-
-    let discord_webhook = DiscordWebhook {
-        name: "harmony_demo".to_string(),
-        url: hurl!("http://not_a_url.com"),
-    };
-
-    let app = ApplicationScore {
-        features: vec![
-            Box::new(PackagingDeployment {
-                application: application.clone(),
-            }),
-            Box::new(Monitoring {
-                application: application.clone(),
-                alert_receiver: vec![Box::new(discord_webhook)],
-            }),
-        ],
-        application,
-    };
-
-    harmony_cli::run(
-        Inventory::autoload(),
-        K8sAnywhereTopology::from_env(),
-        vec![Box::new(app)],
-        None,
-    )
-    .await
-    .unwrap();
-}
@@ -3,7 +3,7 @@ use harmony::{
     modules::{
         application::{
             ApplicationScore, RustWebFramework, RustWebapp,
-            features::{PackagingDeployment, rhob_monitoring::Monitoring},
+            features::{ContinuousDelivery, Monitoring, rhob_monitoring::RHOBMonitoring},
         },
         monitoring::alert_channel::discord_alert_channel::DiscordWebhook,
     },
@@ -16,24 +16,24 @@ use std::{path::PathBuf, sync::Arc};
 async fn main() {
     let application = Arc::new(RustWebapp {
         name: "harmony-example-tryrust".to_string(),
-        project_root: PathBuf::from("./tryrust.org"), // <== Project root, in this case it is a
-        // submodule
+        project_root: PathBuf::from("./tryrust.org"),
         framework: Some(RustWebFramework::Leptos),
         service_port: 8080,
     });
 
-    // Define your Application deployment and the features you want
+    let discord_receiver = DiscordWebhook {
+        name: "test-discord".to_string(),
+        url: hurl!("https://discord.doesnt.exist.com"),
+    };
 
     let app = ApplicationScore {
         features: vec![
-            Box::new(PackagingDeployment {
+            Box::new(ContinuousDelivery {
                 application: application.clone(),
             }),
-            Box::new(Monitoring {
+            Box::new(RHOBMonitoring {
                 application: application.clone(),
-                alert_receiver: vec![Box::new(DiscordWebhook {
-                    name: "test-discord".to_string(),
-                    url: hurl!("https://discord.doesnt.exist.com"),
-                })],
+                alert_receiver: vec![Box::new(discord_receiver)],
             }),
         ],
         application,
@@ -41,7 +41,7 @@ async fn main() {
 
     harmony_cli::run(
         Inventory::autoload(),
-        K8sAnywhereTopology::from_env(), // <== Deploy to local automatically provisioned k3d by default or connect to any kubernetes cluster
+        K8sAnywhereTopology::from_env(),
         vec![Box::new(app)],
         None,
     )
@@ -34,7 +34,6 @@ pub enum InterpretName {
     CephClusterHealth,
     Custom(&'static str),
     RHOBAlerting,
-    K8sIngress,
 }
 
 impl std::fmt::Display for InterpretName {
@@ -65,7 +64,6 @@ impl std::fmt::Display for InterpretName {
             InterpretName::CephClusterHealth => f.write_str("CephClusterHealth"),
             InterpretName::Custom(name) => f.write_str(name),
             InterpretName::RHOBAlerting => f.write_str("RHOBAlerting"),
-            InterpretName::K8sIngress => f.write_str("K8sIngress"),
         }
     }
 }
@@ -84,15 +82,13 @@ pub trait Interpret<T>: std::fmt::Debug + Send {
 pub struct Outcome {
     pub status: InterpretStatus,
     pub message: String,
-    pub details: Vec<String>,
 }
 
 impl Outcome {
-    pub fn noop(message: String) -> Self {
+    pub fn noop() -> Self {
         Self {
             status: InterpretStatus::NOOP,
-            message,
-            details: vec![],
+            message: String::new(),
         }
     }
 
@@ -100,23 +96,6 @@ impl Outcome {
         Self {
             status: InterpretStatus::SUCCESS,
             message,
-            details: vec![],
-        }
-    }
-
-    pub fn success_with_details(message: String, details: Vec<String>) -> Self {
-        Self {
-            status: InterpretStatus::SUCCESS,
-            message,
-            details,
-        }
-    }
-
-    pub fn running(message: String) -> Self {
-        Self {
-            status: InterpretStatus::RUNNING,
-            message,
-            details: vec![],
         }
     }
 }
@@ -194,11 +173,3 @@ impl From<String> for InterpretError {
         }
     }
 }
-
-impl From<serde_yaml::Error> for InterpretError {
-    fn from(value: serde_yaml::Error) -> Self {
-        Self {
-            msg: format!("InterpretError : {value}"),
-        }
-    }
-}
@@ -372,9 +372,7 @@ impl K8sAnywhereTopology {
         if let Some(Some(k8s_state)) = self.k8s_state.get() {
             match k8s_state.source {
                 K8sSource::LocalK3d => {
-                    warn!(
-                        "Installing observability operator is not supported on LocalK3d source"
-                    );
+                    warn!("Installing observability operator is not supported on LocalK3d source");
                     return Ok(PreparationOutcome::Noop);
                     debug!("installing cluster observability operator");
                     todo!();
@@ -1,10 +1,9 @@
 use async_trait::async_trait;
 use derive_new::new;
-use serde::{Deserialize, Serialize};
 
 use super::{HelmCommand, PreparationError, PreparationOutcome, Topology};
 
-#[derive(new, Clone, Debug, Serialize, Deserialize)]
+#[derive(new)]
 pub struct LocalhostTopology;
 
 #[async_trait]
@@ -1,10 +1,7 @@
-use std::error::Error;
-
 use async_trait::async_trait;
-use derive_new::new;
 use serde::Serialize;
 
-use crate::{executors::ExecutorError, topology::Topology};
+use crate::topology::Topology;
 
 /// An ApplicationFeature provided by harmony, such as Backups, Monitoring, MultisiteAvailability,
 /// ContinuousIntegration, ContinuousDelivery
@@ -12,10 +9,7 @@ use crate::{executors::ExecutorError, topology::Topology};
 pub trait ApplicationFeature<T: Topology>:
     std::fmt::Debug + Send + Sync + ApplicationFeatureClone<T>
 {
-    async fn ensure_installed(
-        &self,
-        topology: &T,
-    ) -> Result<InstallationOutcome, InstallationError>;
+    async fn ensure_installed(&self, topology: &T) -> Result<(), String>;
     fn name(&self) -> String;
 }
 
@@ -46,60 +40,3 @@ impl<T: Topology> Clone for Box<dyn ApplicationFeature<T>> {
         self.clone_box()
     }
 }
-
-#[derive(Debug, Clone, PartialEq, Eq)]
-pub enum InstallationOutcome {
-    Success { details: Vec<String> },
-    Noop,
-}
-
-impl InstallationOutcome {
-    pub fn success() -> Self {
-        Self::Success { details: vec![] }
-    }
-
-    pub fn success_with_details(details: Vec<String>) -> Self {
-        Self::Success { details }
-    }
-
-    pub fn noop() -> Self {
-        Self::Noop
-    }
-}
-
-#[derive(Debug, Clone, new)]
-pub struct InstallationError {
-    msg: String,
-}
-
-impl std::fmt::Display for InstallationError {
-    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
-        f.write_str(&self.msg)
-    }
-}
-
-impl Error for InstallationError {}
-
-impl From<ExecutorError> for InstallationError {
-    fn from(value: ExecutorError) -> Self {
-        Self {
-            msg: format!("InstallationError : {value}"),
-        }
-    }
-}
-
-impl From<kube::Error> for InstallationError {
-    fn from(value: kube::Error) -> Self {
-        Self {
-            msg: format!("InstallationError : {value}"),
-        }
-    }
-}
-
-impl From<String> for InstallationError {
-    fn from(value: String) -> Self {
-        Self {
-            msg: format!("PreparationError : {value}"),
-        }
-    }
-}
@@ -10,7 +10,7 @@ use crate::{
     data::Version,
     inventory::Inventory,
     modules::application::{
-        ApplicationFeature, HelmPackage, InstallationError, InstallationOutcome, OCICompliant,
+        ApplicationFeature, HelmPackage, OCICompliant,
         features::{ArgoApplication, ArgoHelmScore},
     },
     score::Score,
@@ -47,11 +47,11 @@ use crate::{
 /// - ArgoCD to install/upgrade/rollback/inspect k8s resources
 /// - Kubernetes for runtime orchestration
 #[derive(Debug, Default, Clone)]
-pub struct PackagingDeployment<A: OCICompliant + HelmPackage> {
+pub struct ContinuousDelivery<A: OCICompliant + HelmPackage> {
     pub application: Arc<A>,
 }
 
-impl<A: OCICompliant + HelmPackage> PackagingDeployment<A> {
+impl<A: OCICompliant + HelmPackage> ContinuousDelivery<A> {
     async fn deploy_to_local_k3d(
         &self,
         app_name: String,
@@ -139,12 +139,9 @@ impl<A: OCICompliant + HelmPackage> PackagingDeployment<A> {
 impl<
     A: OCICompliant + HelmPackage + Clone + 'static,
     T: Topology + HelmCommand + MultiTargetTopology + K8sclient + Ingress + 'static,
-> ApplicationFeature<T> for PackagingDeployment<A>
+> ApplicationFeature<T> for ContinuousDelivery<A>
 {
-    async fn ensure_installed(
-        &self,
-        topology: &T,
-    ) -> Result<InstallationOutcome, InstallationError> {
+    async fn ensure_installed(&self, topology: &T) -> Result<(), String> {
         let image = self.application.image_name();
         let domain = topology
             .get_domain(&self.application.name())
@@ -208,11 +205,7 @@ impl<
                 .unwrap();
             }
         };
-
-        Ok(InstallationOutcome::success_with_details(vec![format!(
-            "{}: http://{domain}",
-            self.application.name()
-        )]))
+        Ok(())
     }
     fn name(&self) -> String {
         "ContinuousDelivery".to_string()
@@ -2,7 +2,7 @@ use async_trait::async_trait;
 use log::info;
 
 use crate::{
-    modules::application::{ApplicationFeature, InstallationError, InstallationOutcome},
+    modules::application::ApplicationFeature,
     topology::{K8sclient, Topology},
 };
 
@@ -29,10 +29,7 @@ impl Default for PublicEndpoint {
 /// For now we only suport K8s ingress, but we will support more stuff at some point
 #[async_trait]
 impl<T: Topology + K8sclient + 'static> ApplicationFeature<T> for PublicEndpoint {
-    async fn ensure_installed(
-        &self,
-        _topology: &T,
-    ) -> Result<InstallationOutcome, InstallationError> {
+    async fn ensure_installed(&self, _topology: &T) -> Result<(), String> {
         info!(
             "Making sure public endpoint is installed for port {}",
             self.application_port
@@ -55,8 +55,7 @@ impl<T: Topology + K8sclient + HelmCommand + Ingress> Interpret<T> for ArgoInter
         topology: &T,
     ) -> Result<Outcome, InterpretError> {
         let k8s_client = topology.k8s_client().await?;
-        let svc = format!("argo-{}", self.score.namespace.clone());
-        let domain = topology.get_domain(&svc).await?;
+        let domain = topology.get_domain("argo").await?;
         let helm_score =
             argo_helm_chart_score(&self.score.namespace, self.score.openshift, &domain);
 
@@ -67,17 +66,14 @@ impl<T: Topology + K8sclient + HelmCommand + Ingress> Interpret<T> for ArgoInter
             .await
             .unwrap();
 
-        Ok(Outcome::success_with_details(
-            format!(
-                "ArgoCD {} {}",
-                self.argo_apps.len(),
-                match self.argo_apps.len() {
-                    1 => "application",
-                    _ => "applications",
-                }
-            ),
-            vec![format!("argo application: http://{}", domain)],
-        ))
+        Ok(Outcome::success(format!(
+            "ArgoCD installed with {} {}",
+            self.argo_apps.len(),
+            match self.argo_apps.len() {
+                1 => "application",
+                _ => "applications",
+            }
+        )))
     }
 
     fn get_name(&self) -> InterpretName {
@@ -160,9 +156,6 @@ global:
   ## Used for ingresses, certificates, SSO, notifications, etc.
   domain: {domain}
 
-  securityContext:
-    runAsUser: null
-
   # -- Runtime class name for all components
   runtimeClassName: ""
 
@@ -474,13 +467,6 @@ redis:
   # -- Redis name
   name: redis
 
-  serviceAccount:
-    create: true
-
-  securityContext:
-    runAsUser: null
-
-
   ## Redis image
   image:
     # -- Redis repository
@@ -5,8 +5,8 @@ pub use endpoint::*;
 mod monitoring;
 pub use monitoring::*;
 
-mod packaging_deployment;
-pub use packaging_deployment::*;
+mod continuous_delivery;
+pub use continuous_delivery::*;
 
 mod helm_argocd_score;
 pub use helm_argocd_score::*;
@@ -1,6 +1,4 @@
-use crate::modules::application::{
-    Application, ApplicationFeature, InstallationError, InstallationOutcome,
-};
+use crate::modules::application::{Application, ApplicationFeature};
 use crate::modules::monitoring::application_monitoring::application_monitoring_score::ApplicationMonitoringScore;
 use crate::modules::monitoring::kube_prometheus::crd::crd_alertmanager_config::CRDPrometheus;
 use crate::topology::MultiTargetTopology;
@@ -45,10 +43,7 @@ impl<
         + std::fmt::Debug,
 > ApplicationFeature<T> for Monitoring
 {
-    async fn ensure_installed(
-        &self,
-        topology: &T,
-    ) -> Result<InstallationOutcome, InstallationError> {
+    async fn ensure_installed(&self, topology: &T) -> Result<(), String> {
         info!("Ensuring monitoring is available for application");
         let namespace = topology
             .get_tenant_config()
@@ -108,7 +103,7 @@ impl<
             .await
             .map_err(|e| e.to_string())?;
 
-        Ok(InstallationOutcome::success())
+        Ok(())
     }
 
     fn name(&self) -> String {
@@ -1,8 +1,6 @@
 use std::sync::Arc;
 
-use crate::modules::application::{
-    Application, ApplicationFeature, InstallationError, InstallationOutcome,
-};
+use crate::modules::application::{Application, ApplicationFeature};
 use crate::modules::monitoring::application_monitoring::application_monitoring_score::ApplicationMonitoringScore;
 use crate::modules::monitoring::application_monitoring::rhobs_application_monitoring_score::ApplicationRHOBMonitoringScore;
 
@@ -27,7 +25,7 @@ use harmony_types::net::Url;
 use log::{debug, info};
 
 #[derive(Debug, Clone)]
-pub struct Monitoring {
+pub struct RHOBMonitoring {
     pub application: Arc<dyn Application>,
     pub alert_receiver: Vec<Box<dyn AlertReceiver<RHOBObservability>>>,
 }
@@ -43,12 +41,9 @@ impl<
         + Ingress
         + std::fmt::Debug
         + PrometheusApplicationMonitoring<RHOBObservability>,
-> ApplicationFeature<T> for Monitoring
+> ApplicationFeature<T> for RHOBMonitoring
 {
-    async fn ensure_installed(
-        &self,
-        topology: &T,
-    ) -> Result<InstallationOutcome, InstallationError> {
+    async fn ensure_installed(&self, topology: &T) -> Result<(), String> {
         info!("Ensuring monitoring is available for application");
         let namespace = topology
             .get_tenant_config()
@@ -64,13 +59,12 @@ impl<
             application: self.application.clone(),
             receivers: self.alert_receiver.clone(),
         };
-        let domain = topology
-            .get_domain("ntfy")
-            .await
-            .map_err(|e| format!("could not get domain {e}"))?;
         let ntfy = NtfyScore {
             namespace: namespace.clone(),
-            host: domain.clone(),
+            host: topology
+                .get_domain("ntfy")
+                .await
+                .map_err(|e| format!("Could not get domain {e}"))?,
         };
         ntfy.interpret(&Inventory::empty(), topology)
             .await
@@ -92,33 +86,27 @@ impl<
             .replace("=", "");
 
         debug!("ntfy_default_auth_param: {ntfy_default_auth_param}");
 
         let ntfy_receiver = WebhookReceiver {
             name: "ntfy-webhook".to_string(),
             url: Url::Url(
                 url::Url::parse(
                     format!(
-                        "http://{domain}/{}?auth={ntfy_default_auth_param}",
-                        self.application.name()
+                        "http://ntfy.{}.svc.cluster.local/rust-web-app?auth={ntfy_default_auth_param}",
+                        namespace.clone()
                     )
                     .as_str(),
                 )
                 .unwrap(),
             ),
         };
-        debug!(
-            "ntfy webhook receiver \n{:#?}\nntfy topic: {}",
-            ntfy_receiver.clone(),
-            self.application.name()
-        );
         alerting_score.receivers.push(Box::new(ntfy_receiver));
         alerting_score
             .interpret(&Inventory::empty(), topology)
             .await
             .map_err(|e| e.to_string())?;
-        Ok(InstallationOutcome::success_with_details(vec![format!(
-            "ntfy topic: {}",
-            self.application.name()
-        )]))
+        Ok(())
     }
     fn name(&self) -> String {
         "Monitoring".to_string()
@@ -24,8 +24,8 @@ use harmony_types::id::Id;
 #[derive(Clone, Debug)]
 pub enum ApplicationFeatureStatus {
     Installing,
-    Installed { details: Vec<String> },
-    Failed { message: String },
+    Installed,
+    Failed { details: String },
 }
 
 pub trait Application: std::fmt::Debug + Send + Sync {
@@ -65,32 +65,27 @@ impl<A: Application, T: Topology + std::fmt::Debug> Interpret<T> for Application
             .unwrap();
 
         let _ = match feature.ensure_installed(topology).await {
-            Ok(outcome) => {
+            Ok(()) => {
                 instrumentation::instrument(HarmonyEvent::ApplicationFeatureStateChanged {
                     topology: topology.name().into(),
                     application: self.application.name(),
                     feature: feature.name(),
-                    status: ApplicationFeatureStatus::Installed {
-                        details: match outcome {
-                            InstallationOutcome::Success { details } => details,
-                            InstallationOutcome::Noop => vec![],
-                        },
-                    },
+                    status: ApplicationFeatureStatus::Installed,
                 })
                 .unwrap();
             }
-            Err(error) => {
+            Err(msg) => {
                 instrumentation::instrument(HarmonyEvent::ApplicationFeatureStateChanged {
                     topology: topology.name().into(),
                     application: self.application.name(),
                     feature: feature.name(),
                     status: ApplicationFeatureStatus::Failed {
-                        message: error.to_string(),
+                        details: msg.clone(),
                     },
                 })
                 .unwrap();
                 return Err(InterpretError::new(format!(
-                    "Application Interpret failed to install feature : {error}"
+                    "Application Interpret failed to install feature : {msg}"
                 )));
             }
         };
@@ -10,7 +10,7 @@ use dockerfile_builder::Dockerfile;
 use dockerfile_builder::instruction::{CMD, COPY, ENV, EXPOSE, FROM, RUN, USER, WORKDIR};
 use dockerfile_builder::instruction_builder::CopyBuilder;
 use futures_util::StreamExt;
-use log::{debug, error, info, log_enabled, trace, warn};
+use log::{debug, info, log_enabled};
 use serde::Serialize;
 use tar::{Builder, Header};
 use walkdir::WalkDir;
@@ -162,7 +162,7 @@ impl RustWebapp {
         &self,
         image_name: &str,
     ) -> Result<String, Box<dyn std::error::Error>> {
-        info!("Generating Dockerfile for '{}'", self.name);
+        debug!("Generating Dockerfile for '{}'", self.name);
         let dockerfile = self.get_or_build_dockerfile();
         let quiet = !log_enabled!(log::Level::Debug);
         match dockerfile
@@ -194,41 +194,8 @@ impl RustWebapp {
             Some(body_full(tar_data.into())),
         );
 
-        while let Some(mut msg) = image_build_stream.next().await {
-            trace!("Got bollard msg {msg:?}");
-            match msg {
-                Ok(mut msg) => {
-                    if let Some(progress) = msg.progress_detail {
-                        info!(
-                            "Build progress {}/{}",
-                            progress.current.unwrap_or(0),
-                            progress.total.unwrap_or(0)
-                        );
-                    }
-
-                    if let Some(mut log) = msg.stream {
-                        if log.ends_with('\n') {
-                            log.pop();
-                            if log.ends_with('\r') {
-                                log.pop();
-                            }
-                        }
-                        info!("{log}");
-                    }
-
-                    if let Some(error) = msg.error {
-                        warn!("Build error : {error:?}");
-                    }
-
-                    if let Some(error) = msg.error_detail {
-                        warn!("Build error : {error:?}");
-                    }
-                }
-                Err(e) => {
-                    error!("Build failed : {e}");
-                    return Err(format!("Build failed : {e}").into());
-                }
-            }
+        while let Some(msg) = image_build_stream.next().await {
+            debug!("Message: {msg:?}");
         }
 
         Ok(image_name.to_string())
@@ -257,7 +224,6 @@ impl RustWebapp {
             ".harmony_generated",
             "harmony",
             "node_modules",
-            "Dockerfile.harmony",
         ];
         let mut entries: Vec<_> = WalkDir::new(project_root)
             .into_iter()
@@ -1,106 +0,0 @@
-use std::sync::Arc;
-
-use async_trait::async_trait;
-use harmony_types::id::Id;
-use serde::Serialize;
-
-use crate::{
-    data::Version,
-    interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
-    inventory::Inventory,
-    score::Score,
-    topology::{K8sclient, Topology, k8s::K8sClient},
-};
-
-#[derive(Clone, Serialize, Debug)]
-pub struct GenerateCaCertScore {
-    cluster_issuer_name: String,
-    dns_names: String,
-    operator_namespace: String,
-}
-
-impl<T: Topology + K8sclient> Score<T> for GenerateCaCertScore {
-    fn name(&self) -> String {
-        "GenerateCaCertScore".to_string()
-    }
-
-    fn create_interpret(&self) -> Box<dyn Interpret<T>> {
-        Box::new(GenerateCaCertIntepret {
-            score: self.clone(),
-        })
-    }
-}
-
-#[derive(Clone, Serialize, Debug)]
-pub struct GenerateCaCertIntepret {
-    score: GenerateCaCertScore,
-}
-
-#[async_trait]
-impl<T: Topology + K8sclient> Interpret<T> for GenerateCaCertIntepret {
-    async fn execute(
-        &self,
-        _inventory: &Inventory,
-        topology: &T,
-    ) -> Result<Outcome, InterpretError> {
-        let client = topology.k8s_client().await.unwrap();
-        let cert_yaml = self
-            .build_cert_request_yaml(&self.score.cluster_issuer_name, &self.score.dns_names)
-            .unwrap();
-        self.apply_cert_request(&client, cert_yaml, &self.score.operator_namespace)
-            .await?;
-        Ok(Outcome::success("created ca cert".to_string()))
-    }
-
-    fn get_name(&self) -> InterpretName {
-        InterpretName::Custom("GenerateCaCertInterpret")
-    }
-
-    fn get_version(&self) -> Version {
-        todo!()
-    }
-
-    fn get_status(&self) -> InterpretStatus {
-        todo!()
-    }
-
-    fn get_children(&self) -> Vec<Id> {
-        todo!()
-    }
-}
-
-impl GenerateCaCertIntepret {
-    pub fn build_cert_request_yaml(
-        &self,
-        cluster_issuer_name: &str,
-        dns_names: &str,
-    ) -> Result<serde_yaml::Value, InterpretError> {
-        let cert_yaml = format!(
-            r#"
-apiVersion: cert-manager.io/v1
-kind: Certificate
-metadata:
-  name: ingress-cert
-  namespace: openshift-ingress
-spec:
-  secretName: ingress-cert-tls
-  issuerRef:
-    name: {cluster_issuer_name}
-    kind: ClusterIssuer
-  dnsNames:
-    - "*.{dns_names}"
-"#
-        );
-        Ok(serde_yaml::to_value(cert_yaml)?)
-    }
-    pub async fn apply_cert_request(
-        &self,
-        client: &Arc<K8sClient>,
-        cert_yaml: serde_yaml::Value,
-        operator_namespace: &str,
-    ) -> Result<(), InterpretError> {
-        Ok(client
-            .apply_yaml(&cert_yaml, Some(operator_namespace))
-            .await?)
-    }
-}
@@ -1,3 +1,2 @@
-mod gen_ca_cert;
 mod helm;
 pub use helm::*;
@@ -69,14 +69,17 @@ impl DhcpInterpret {
         dhcp_server.set_pxe_options(pxe_options).await?;

-        Ok(Outcome::success(format!(
-            "Dhcp Interpret Set next boot to [{:?}], boot_filename to [{:?}], filename to [{:?}], filename64 to [{:?}], filenameipxe to [:{:?}]",
-            self.score.boot_filename,
-            self.score.boot_filename,
-            self.score.filename,
-            self.score.filename64,
-            self.score.filenameipxe
-        )))
+        Ok(Outcome::new(
+            InterpretStatus::SUCCESS,
+            format!(
+                "Dhcp Interpret Set next boot to [{:?}], boot_filename to [{:?}], filename to [{:?}], filename64 to [{:?}], filenameipxe to [:{:?}]",
+                self.score.boot_filename,
+                self.score.boot_filename,
+                self.score.filename,
+                self.score.filename64,
+                self.score.filenameipxe
+            ),
+        ))
     }
 }

@@ -119,7 +122,8 @@ impl<T: Topology + DhcpServer> Interpret<T> for DhcpInterpret {
         topology.commit_config().await?;

-        Ok(Outcome::success(
+        Ok(Outcome::new(
+            InterpretStatus::SUCCESS,
             "Dhcp Interpret execution successful".to_string(),
         ))
     }

@@ -193,10 +197,10 @@ impl DhcpHostBindingInterpret {
             }
         }

-        Ok(Outcome::success(format!(
-            "Dhcp Interpret registered {} entries",
-            number_new_entries
-        )))
+        Ok(Outcome::new(
+            InterpretStatus::SUCCESS,
+            format!("Dhcp Interpret registered {} entries", number_new_entries),
+        ))
     }
 }

@@ -232,9 +236,12 @@ impl<T: DhcpServer> Interpret<T> for DhcpHostBindingInterpret {
         topology.commit_config().await?;

-        Ok(Outcome::success(format!(
-            "Dhcp Host Binding Interpret execution successful on {} hosts",
-            self.score.host_binding.len()
-        )))
+        Ok(Outcome::new(
+            InterpretStatus::SUCCESS,
+            format!(
+                "Dhcp Host Binding Interpret execution successful on {} hosts",
+                self.score.host_binding.len()
+            ),
+        ))
     }
 }
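Most hunks in this comparison are the same mechanical refactor: the `Outcome::success(...)` and `Outcome::running(...)` helpers are replaced by an explicit `Outcome::new(status, message)` call. A minimal sketch of the shape those call sites assume, with stub definitions that are inferred from the diff and are not the crate's real ones:

```rust
// Minimal sketch only: stub definitions inferred from the call sites in this
// diff (`Outcome::new`, `Outcome::success`, `InterpretStatus::SUCCESS`), not
// the actual harmony definitions.
#[allow(non_camel_case_types)]
#[derive(Debug, Clone, PartialEq)]
pub enum InterpretStatus {
    SUCCESS,
    RUNNING,
}

#[derive(Debug)]
pub struct Outcome {
    pub status: InterpretStatus,
    pub message: String,
}

impl Outcome {
    pub fn new(status: InterpretStatus, message: String) -> Self {
        Self { status, message }
    }

    // The helper the old call sites used; the refactor spells the status out.
    pub fn success(message: String) -> Self {
        Self::new(InterpretStatus::SUCCESS, message)
    }
}

fn main() {
    let old_style = Outcome::success("Dhcp Interpret execution successful".to_string());
    let new_style = Outcome::new(
        InterpretStatus::SUCCESS,
        "Dhcp Interpret execution successful".to_string(),
    );
    assert_eq!(old_style.status, new_style.status);
}
```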
@@ -55,7 +55,8 @@ impl DnsInterpret {
             dns.register_dhcp_leases(register).await?;
         }

-        Ok(Outcome::success(
+        Ok(Outcome::new(
+            InterpretStatus::SUCCESS,
             "DNS Interpret execution successfull".to_string(),
         ))
     }

@@ -67,10 +68,13 @@ impl DnsInterpret {
         let entries = &self.score.dns_entries;
         dns_server.ensure_hosts_registered(entries.clone()).await?;

-        Ok(Outcome::success(format!(
-            "DnsInterpret registered {} hosts successfully",
-            entries.len()
-        )))
+        Ok(Outcome::new(
+            InterpretStatus::SUCCESS,
+            format!(
+                "DnsInterpret registered {} hosts successfully",
+                entries.len()
+            ),
+        ))
     }
 }

@@ -107,7 +111,8 @@ impl<T: Topology + DnsServer> Interpret<T> for DnsInterpret {
         topology.commit_config().await?;

-        Ok(Outcome::success(
+        Ok(Outcome::new(
+            InterpretStatus::SUCCESS,
             "Dns Interpret execution successful".to_string(),
         ))
     }
@@ -197,10 +197,13 @@ impl<T: Topology + HelmCommand> Interpret<T> for HelmChartInterpret {
                 self.score.release_name, ns
             );

-            return Ok(Outcome::success(format!(
-                "Helm Chart '{}' already installed to namespace {ns} and install_only=true",
-                self.score.release_name
-            )));
+            return Ok(Outcome::new(
+                InterpretStatus::SUCCESS,
+                format!(
+                    "Helm Chart '{}' already installed to namespace {ns} and install_only=true",
+                    self.score.release_name
+                ),
+            ));
         } else {
             info!(
                 "Release '{}' not found in namespace '{}'. Proceeding with installation.",

@@ -225,18 +228,18 @@ impl<T: Topology + HelmCommand> Interpret<T> for HelmChartInterpret {
         };

         match status {
-            helm_wrapper_rs::HelmDeployStatus::Deployed => Ok(Outcome::success(format!(
-                "Helm Chart {} deployed",
-                self.score.release_name
-            ))),
-            helm_wrapper_rs::HelmDeployStatus::PendingInstall => Ok(Outcome::running(format!(
-                "Helm Chart {} pending install...",
-                self.score.release_name
-            ))),
-            helm_wrapper_rs::HelmDeployStatus::PendingUpgrade => Ok(Outcome::running(format!(
-                "Helm Chart {} pending upgrade...",
-                self.score.release_name
-            ))),
+            helm_wrapper_rs::HelmDeployStatus::Deployed => Ok(Outcome::new(
+                InterpretStatus::SUCCESS,
+                format!("Helm Chart {} deployed", self.score.release_name),
+            )),
+            helm_wrapper_rs::HelmDeployStatus::PendingInstall => Ok(Outcome::new(
+                InterpretStatus::RUNNING,
+                format!("Helm Chart {} pending install...", self.score.release_name),
+            )),
+            helm_wrapper_rs::HelmDeployStatus::PendingUpgrade => Ok(Outcome::new(
+                InterpretStatus::RUNNING,
+                format!("Helm Chart {} pending upgrade...", self.score.release_name),
+            )),
             helm_wrapper_rs::HelmDeployStatus::Failed => Err(InterpretError::new(format!(
                 "Helm Chart {} installation failed",
                 self.score.release_name
@@ -133,9 +133,10 @@ impl<T: Topology> Interpret<T> for DiscoverInventoryAgentInterpret {
             },
         )
         .await;
-        Ok(Outcome::success(
-            "Discovery process completed successfully".to_string(),
-        ))
+        Ok(Outcome {
+            status: InterpretStatus::SUCCESS,
+            message: "Discovery process completed successfully".to_string(),
+        })
     }

     fn get_name(&self) -> InterpretName {
@@ -1,15 +1,11 @@
-use async_trait::async_trait;
 use harmony_macros::ingress_path;
-use harmony_types::id::Id;
 use k8s_openapi::api::networking::v1::Ingress;
 use log::{debug, trace};
 use serde::Serialize;
 use serde_json::json;

 use crate::{
-    data::Version,
-    interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
-    inventory::Inventory,
+    interpret::Interpret,
     score::Score,
     topology::{K8sclient, Topology},
 };

@@ -61,7 +57,7 @@ impl<T: Topology + K8sclient> Score<T> for K8sIngressScore {
         let ingress_class = match self.ingress_class_name.clone() {
             Some(ingress_class_name) => ingress_class_name,
-            None => "\"default\"".to_string(),
+            None => format!("\"default\""),
         };

         let ingress = json!(

@@ -101,12 +97,11 @@ impl<T: Topology + K8sclient> Score<T> for K8sIngressScore {
             "Successfully built Ingress for host {:?}",
             ingress.metadata.name
         );

-        Box::new(K8sIngressInterpret {
-            ingress,
-            service: self.name.to_string(),
-            namespace: self.namespace.clone().map(|f| f.to_string()),
-            host: self.host.clone(),
+        Box::new(K8sResourceInterpret {
+            score: K8sResourceScore::single(
+                ingress.clone(),
+                self.namespace.clone().map(|f| f.to_string()),
+            ),
         })
     }

@@ -114,62 +109,3 @@ impl<T: Topology + K8sclient> Score<T> for K8sIngressScore {
         format!("{} K8sIngressScore", self.name)
     }
 }
-
-#[derive(std::fmt::Debug)]
-struct K8sIngressInterpret {
-    ingress: Ingress,
-    service: String,
-    namespace: Option<String>,
-    host: fqdn::FQDN,
-}
-
-#[async_trait]
-impl<T: Topology + K8sclient> Interpret<T> for K8sIngressInterpret {
-    async fn execute(
-        &self,
-        inventory: &Inventory,
-        topology: &T,
-    ) -> Result<Outcome, InterpretError> {
-        let result = K8sResourceInterpret {
-            score: K8sResourceScore::single(self.ingress.clone(), self.namespace.clone()),
-        }
-        .execute(inventory, topology)
-        .await;
-
-        match result {
-            Ok(outcome) => match outcome.status {
-                InterpretStatus::SUCCESS => {
-                    let details = match &self.namespace {
-                        Some(namespace) => {
-                            vec![format!(
-                                "{} ({namespace}): http://{}",
-                                self.service, self.host
-                            )]
-                        }
-                        None => vec![format!("{}: {}", self.service, self.host)],
-                    };
-
-                    Ok(Outcome::success_with_details(outcome.message, details))
-                }
-                _ => Ok(outcome),
-            },
-            Err(e) => Err(e),
-        }
-    }
-
-    fn get_name(&self) -> InterpretName {
-        InterpretName::K8sIngress
-    }
-
-    fn get_version(&self) -> Version {
-        Version::from("0.0.1").unwrap()
-    }
-
-    fn get_status(&self) -> InterpretStatus {
-        todo!()
-    }
-
-    fn get_children(&self) -> Vec<Id> {
-        vec![]
-    }
-}
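With `K8sIngressInterpret` removed, the ingress score defers entirely to the generic resource interpret. A rough sketch of that wiring with stand-in types (only the two-argument `K8sResourceScore::single` call and the `score` field come from the diff; everything else here is an assumption for illustration):

```rust
// Stand-in types for illustration only; not the harmony definitions.
struct K8sResourceScore<R> {
    resources: Vec<R>,
    namespace: Option<String>,
}

impl<R> K8sResourceScore<R> {
    // Two-argument constructor mirroring the call in the diff.
    fn single(resource: R, namespace: Option<String>) -> Self {
        Self {
            resources: vec![resource],
            namespace,
        }
    }
}

struct K8sResourceInterpret<R> {
    score: K8sResourceScore<R>,
}

fn main() {
    // A rendered manifest would normally be a k8s_openapi Ingress; a string
    // stands in here so the sketch is self-contained.
    let ingress = "rendered-ingress-manifest".to_string();
    let interpret = K8sResourceInterpret {
        score: K8sResourceScore::single(ingress, Some("my-namespace".to_string())),
    };
    assert_eq!(interpret.score.resources.len(), 1);
    assert_eq!(interpret.score.namespace.as_deref(), Some("my-namespace"));
}
```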
@@ -35,24 +35,6 @@ pub struct DiscordWebhook {
 #[async_trait]
 impl AlertReceiver<RHOBObservability> for DiscordWebhook {
     async fn install(&self, sender: &RHOBObservability) -> Result<Outcome, InterpretError> {
-        let ns = sender.namespace.clone();
-        let secret_name = format!("{}-secret", self.name.clone());
-        let webhook_key = format!("{}", self.url.clone());
-
-        let mut string_data = BTreeMap::new();
-        string_data.insert("webhook-url".to_string(), webhook_key.clone());
-
-        let secret = Secret {
-            metadata: kube::core::ObjectMeta {
-                name: Some(secret_name.clone()),
-                ..Default::default()
-            },
-            string_data: Some(string_data),
-            type_: Some("Opaque".to_string()),
-            ..Default::default()
-        };
-
-        let _ = sender.client.apply(&secret, Some(&ns)).await;
         let spec = crate::modules::monitoring::kube_prometheus::crd::rhob_alertmanager_config::AlertmanagerConfigSpec {
             data: json!({
                 "route": {

@@ -61,14 +43,9 @@ impl AlertReceiver<RHOBObservability> for DiscordWebhook {
                 "receivers": [
                     {
                         "name": self.name,
-                        "discordConfigs": [
+                        "webhookConfigs": [
                             {
-                                "apiURL": {
-                                    "name": secret_name,
-                                    "key": "webhook-url",
-                                },
-                                "title": "{{ template \"discord.default.title\" . }}",
-                                "message": "{{ template \"discord.default.message\" . }}"
+                                "url": self.url,
                             }
                         ]
                     }
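The Discord receiver therefore stops provisioning an Opaque Secret and referencing it through `discordConfigs.apiURL`; the webhook URL now goes inline into a plain `webhookConfigs` entry. A minimal sketch of the receiver fragment the new code builds (the receiver name and URL below are placeholders, not values from the repository):

```rust
use serde_json::json;

fn main() {
    // Placeholder values; the real code fills these from the DiscordWebhook fields.
    let name = "discord-alerts";
    let url = "https://discord.example/webhook";

    let receiver = json!({
        "name": name,
        "webhookConfigs": [
            { "url": url }
        ]
    });

    println!("{}", serde_json::to_string_pretty(&receiver).unwrap());
}
```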
@@ -43,11 +43,6 @@ impl AlertReceiver<RHOBObservability> for WebhookReceiver {
                         "webhookConfigs": [
                             {
                                 "url": self.url,
-                                "httpConfig": {
-                                    "tlsConfig": {
-                                        "insecureSkipVerify": true
-                                    }
-                                }
                             }
                         ]
                     }
@@ -68,9 +68,7 @@ impl<T: Topology + PrometheusApplicationMonitoring<CRDPrometheus>> Interpret<T>
                 PreparationOutcome::Success { details: _ } => {
                     Ok(Outcome::success("Prometheus installed".into()))
                 }
-                PreparationOutcome::Noop => {
-                    Ok(Outcome::noop("Prometheus installation skipped".into()))
-                }
+                PreparationOutcome::Noop => Ok(Outcome::noop()),
             },
             Err(err) => Err(InterpretError::from(err)),
         }

@@ -70,9 +70,7 @@ impl<T: Topology + PrometheusApplicationMonitoring<RHOBObservability>> Interpret
                 PreparationOutcome::Success { details: _ } => {
                     Ok(Outcome::success("Prometheus installed".into()))
                 }
-                PreparationOutcome::Noop => {
-                    Ok(Outcome::noop("Prometheus installation skipped".into()))
-                }
+                PreparationOutcome::Noop => Ok(Outcome::noop()),
             },
             Err(err) => Err(InterpretError::from(err)),
         }
@@ -113,13 +113,7 @@ impl<T: Topology + HelmCommand + K8sclient + MultiTargetTopology> Interpret<T> f
             .await?;
         info!("user added");

-        Ok(Outcome::success_with_details(
-            "Ntfy installed".to_string(),
-            vec![format!(
-                "Ntfy ({}): http://{}",
-                self.score.namespace, self.score.host
-            )],
-        ))
+        Ok(Outcome::success("Ntfy installed".to_string()))
     }

     fn get_name(&self) -> InterpretName {
@@ -1,19 +1,19 @@
+use async_trait::async_trait;
+use derive_new::new;
+use harmony_types::id::Id;
+use log::{error, info, warn};
+use serde::Serialize;
+
 use crate::{
     data::Version,
     hardware::PhysicalHost,
     infra::inventory::InventoryRepositoryFactory,
     interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
     inventory::{HostRole, Inventory},
-    modules::inventory::DiscoverHostForRoleScore,
+    modules::inventory::{DiscoverHostForRoleScore, LaunchDiscoverInventoryAgentScore},
     score::Score,
     topology::HAClusterTopology,
 };
-use async_trait::async_trait;
-use derive_new::new;
-use harmony_types::id::Id;
-use log::info;
-use serde::Serialize;
-
 // -------------------------------------------------------------------------------------------------
 // Step 01: Inventory (default PXE + Kickstart in RAM + Rust agent)
 // - This score exposes/ensures the default inventory assets and waits for discoveries.

@@ -109,9 +109,12 @@ When you can dig them, confirm to continue.
             .await?;
         }

-        Ok(Outcome::success(format!(
-            "Found and assigned bootstrap node: {}",
-            bootstrap_host.unwrap().summary()
-        )))
+        Ok(Outcome::new(
+            InterpretStatus::SUCCESS,
+            format!(
+                "Found and assigned bootstrap node: {}",
+                bootstrap_host.unwrap().summary()
+            ),
+        ))
     }
 }
@@ -1,13 +1,25 @@
+use std::{fmt::Write, path::PathBuf};
+
+use async_trait::async_trait;
+use derive_new::new;
+use harmony_secret::SecretManager;
+use harmony_types::id::Id;
+use log::{debug, error, info, warn};
+use serde::{Deserialize, Serialize};
+use tokio::{fs::File, io::AsyncWriteExt, process::Command};
+
 use crate::{
     config::secret::{RedhatSecret, SshKeyPair},
     data::{FileContent, FilePath, Version},
     hardware::PhysicalHost,
     infra::inventory::InventoryRepositoryFactory,
+    instrumentation::{HarmonyEvent, instrument},
     interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
     inventory::{HostRole, Inventory},
     modules::{
         dhcp::DhcpHostBindingScore,
         http::{IPxeMacBootFileScore, StaticFilesHttpScore},
+        inventory::LaunchDiscoverInventoryAgentScore,
         okd::{
             bootstrap_load_balancer::OKDBootstrapLoadBalancerScore,
             templates::{BootstrapIpxeTpl, InstallConfigYaml},

@@ -16,15 +28,6 @@ use crate::{
     score::Score,
     topology::{HAClusterTopology, HostBinding},
 };
-use async_trait::async_trait;
-use derive_new::new;
-use harmony_secret::SecretManager;
-use harmony_types::id::Id;
-use log::{debug, info};
-use serde::Serialize;
-use std::path::PathBuf;
-use tokio::{fs::File, io::AsyncWriteExt, process::Command};
-
 // -------------------------------------------------------------------------------------------------
 // Step 02: Bootstrap
 // - Select bootstrap node (from discovered set).

@@ -310,7 +313,7 @@ impl OKDSetup02BootstrapInterpret {
         info!("[Bootstrap] Rebooting bootstrap node via SSH");
         // TODO reboot programatically, there are some logical checks and refactoring to do such as
         // accessing the bootstrap node config (ip address) from the inventory
-        let _ = inquire::Confirm::new(
+        let confirmation = inquire::Confirm::new(
            "Now reboot the bootstrap node so it picks up its pxe boot file. Press enter when ready.",
        )
        .prompt()

@@ -376,6 +379,9 @@ impl Interpret<HAClusterTopology> for OKDSetup02BootstrapInterpret {
         self.reboot_target().await?;
         self.wait_for_bootstrap_complete().await?;

-        Ok(Outcome::success("Bootstrap phase complete".into()))
+        Ok(Outcome::new(
+            InterpretStatus::SUCCESS,
+            "Bootstrap phase complete".into(),
+        ))
     }
 }
@@ -1,3 +1,11 @@
+use std::{fmt::Write, path::PathBuf};
+
+use async_trait::async_trait;
+use derive_new::new;
+use harmony_types::id::Id;
+use log::{debug, info};
+use serde::Serialize;
+
 use crate::{
     data::Version,
     hardware::PhysicalHost,

@@ -11,12 +19,6 @@ use crate::{
     score::Score,
     topology::{HAClusterTopology, HostBinding},
 };
-use async_trait::async_trait;
-use derive_new::new;
-use harmony_types::id::Id;
-use log::{debug, info};
-use serde::Serialize;
-
 // -------------------------------------------------------------------------------------------------
 // Step 03: Control Plane
 // - Render per-MAC PXE & ignition for cp0/cp1/cp2.

@@ -267,7 +269,8 @@ impl Interpret<HAClusterTopology> for OKDSetup03ControlPlaneInterpret {
         // the `wait-for bootstrap-complete` command.
         info!("[ControlPlane] Provisioning initiated. Monitor the cluster convergence manually.");

-        Ok(Outcome::success(
+        Ok(Outcome::new(
+            InterpretStatus::SUCCESS,
             "Control plane provisioning has been successfully initiated.".into(),
         ))
     }
@@ -1,17 +1,33 @@
+use std::{fmt::Write, path::PathBuf};
+
 use async_trait::async_trait;
 use derive_new::new;
+use harmony_secret::SecretManager;
 use harmony_types::id::Id;
-use log::info;
-use serde::Serialize;
+use log::{debug, error, info, warn};
+use serde::{Deserialize, Serialize};
+use tokio::{fs::File, io::AsyncWriteExt, process::Command};

 use crate::{
-    data::Version,
+    config::secret::{RedhatSecret, SshKeyPair},
+    data::{FileContent, FilePath, Version},
+    hardware::PhysicalHost,
+    infra::inventory::InventoryRepositoryFactory,
+    instrumentation::{HarmonyEvent, instrument},
     interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
-    inventory::Inventory,
+    inventory::{HostRole, Inventory},
+    modules::{
+        dhcp::DhcpHostBindingScore,
+        http::{IPxeMacBootFileScore, StaticFilesHttpScore},
+        inventory::LaunchDiscoverInventoryAgentScore,
+        okd::{
+            bootstrap_load_balancer::OKDBootstrapLoadBalancerScore,
+            templates::{BootstrapIpxeTpl, InstallConfigYaml},
+        },
+    },
     score::Score,
-    topology::HAClusterTopology,
+    topology::{HAClusterTopology, HostBinding},
 };

 // -------------------------------------------------------------------------------------------------
 // Step 04: Workers
 // - Render per-MAC PXE & ignition for workers; join nodes.

@@ -78,6 +94,9 @@ impl Interpret<HAClusterTopology> for OKDSetup04WorkersInterpret {
         _topology: &HAClusterTopology,
     ) -> Result<Outcome, InterpretError> {
         self.render_and_reboot().await?;
-        Ok(Outcome::success("Workers provisioned".into()))
+        Ok(Outcome::new(
+            InterpretStatus::SUCCESS,
+            "Workers provisioned".into(),
+        ))
     }
 }
@@ -1,16 +1,33 @@
-use crate::{
-    data::Version,
-    interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
-    inventory::Inventory,
-    score::Score,
-    topology::HAClusterTopology,
-};
+use std::{fmt::Write, path::PathBuf};
+
 use async_trait::async_trait;
 use derive_new::new;
+use harmony_secret::SecretManager;
 use harmony_types::id::Id;
-use log::info;
-use serde::Serialize;
+use log::{debug, error, info, warn};
+use serde::{Deserialize, Serialize};
+use tokio::{fs::File, io::AsyncWriteExt, process::Command};

+use crate::{
+    config::secret::{RedhatSecret, SshKeyPair},
+    data::{FileContent, FilePath, Version},
+    hardware::PhysicalHost,
+    infra::inventory::InventoryRepositoryFactory,
+    instrumentation::{HarmonyEvent, instrument},
+    interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
+    inventory::{HostRole, Inventory},
+    modules::{
+        dhcp::DhcpHostBindingScore,
+        http::{IPxeMacBootFileScore, StaticFilesHttpScore},
+        inventory::LaunchDiscoverInventoryAgentScore,
+        okd::{
+            bootstrap_load_balancer::OKDBootstrapLoadBalancerScore,
+            templates::{BootstrapIpxeTpl, InstallConfigYaml},
+        },
+    },
+    score::Score,
+    topology::{HAClusterTopology, HostBinding},
+};
 // -------------------------------------------------------------------------------------------------
 // Step 05: Sanity Check
 // - Validate API reachability, ClusterOperators, ingress, and SDN status.

@@ -76,6 +93,9 @@ impl Interpret<HAClusterTopology> for OKDSetup05SanityCheckInterpret {
         _topology: &HAClusterTopology,
     ) -> Result<Outcome, InterpretError> {
         self.run_checks().await?;
-        Ok(Outcome::success("Sanity checks passed".into()))
+        Ok(Outcome::new(
+            InterpretStatus::SUCCESS,
+            "Sanity checks passed".into(),
+        ))
     }
 }
@@ -1,15 +1,32 @@
+// -------------------------------------------------------------------------------------------------
 use async_trait::async_trait;
 use derive_new::new;
+use harmony_secret::SecretManager;
 use harmony_types::id::Id;
-use log::info;
-use serde::Serialize;
+use log::{debug, error, info, warn};
+use serde::{Deserialize, Serialize};
+use std::{fmt::Write, path::PathBuf};
+use tokio::{fs::File, io::AsyncWriteExt, process::Command};
+
 use crate::{
-    data::Version,
+    config::secret::{RedhatSecret, SshKeyPair},
+    data::{FileContent, FilePath, Version},
+    hardware::PhysicalHost,
+    infra::inventory::InventoryRepositoryFactory,
+    instrumentation::{HarmonyEvent, instrument},
     interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
-    inventory::Inventory,
+    inventory::{HostRole, Inventory},
+    modules::{
+        dhcp::DhcpHostBindingScore,
+        http::{IPxeMacBootFileScore, StaticFilesHttpScore},
+        inventory::LaunchDiscoverInventoryAgentScore,
+        okd::{
+            bootstrap_load_balancer::OKDBootstrapLoadBalancerScore,
+            templates::{BootstrapIpxeTpl, InstallConfigYaml},
+        },
+    },
     score::Score,
-    topology::HAClusterTopology,
+    topology::{HAClusterTopology, HostBinding},
 };

 // Step 06: Installation Report

@@ -76,6 +93,9 @@ impl Interpret<HAClusterTopology> for OKDSetup06InstallationReportInterpret {
         _topology: &HAClusterTopology,
     ) -> Result<Outcome, InterpretError> {
         self.generate().await?;
-        Ok(Outcome::success("Installation report generated".into()))
+        Ok(Outcome::new(
+            InterpretStatus::SUCCESS,
+            "Installation report generated".into(),
+        ))
     }
 }
@@ -42,7 +42,7 @@ pub fn alert_pod_not_ready() -> PrometheusAlertRule {
     PrometheusAlertRule {
         alert: "PodNotReady".into(),
         expr: "kube_pod_status_ready{condition=\"true\"} == 0".into(),
-        r#for: Some("30s".into()),
+        r#for: Some("2m".into()),
         labels: HashMap::from([("severity".into(), "warning".into())]),
         annotations: HashMap::from([
             ("summary".into(), "Pod is not ready".into()),
@@ -12,6 +12,9 @@ use std::process::Command;
 use crate::modules::k8s::ingress::{K8sIngressScore, PathType};
 use crate::modules::monitoring::kube_prometheus::crd::grafana_default_dashboard::build_default_dashboard;
 use crate::modules::monitoring::kube_prometheus::crd::rhob_alertmanager_config::RHOBObservability;
+use crate::modules::monitoring::kube_prometheus::crd::rhob_alertmanagers::{
+    Alertmanager, AlertmanagerSpec,
+};
 use crate::modules::monitoring::kube_prometheus::crd::rhob_grafana::{
     Grafana, GrafanaDashboard, GrafanaDashboardSpec, GrafanaDatasource, GrafanaDatasourceConfig,
     GrafanaDatasourceSpec, GrafanaSpec,

@@ -22,8 +25,13 @@ use crate::modules::monitoring::kube_prometheus::crd::rhob_monitoring_stack::{
 use crate::modules::monitoring::kube_prometheus::crd::rhob_prometheus_rules::{
     PrometheusRule, PrometheusRuleSpec, RuleGroup,
 };
-use crate::modules::monitoring::kube_prometheus::crd::rhob_prometheuses::LabelSelector;
+use crate::modules::monitoring::kube_prometheus::crd::rhob_prometheuses::{
+    AlertmanagerEndpoints, LabelSelector, PrometheusSpec, PrometheusSpecAlerting,
+};
+
+use crate::modules::monitoring::kube_prometheus::crd::rhob_role::{
+    build_prom_role, build_prom_rolebinding, build_prom_service_account,
+};
 use crate::modules::monitoring::kube_prometheus::crd::rhob_service_monitor::{
     ServiceMonitor, ServiceMonitorSpec,
 };
@@ -178,10 +178,10 @@ fn handle_events() {
             ApplicationFeatureStatus::Installing => {
                 info!("Installing feature '{feature}' for '{application}'...");
             }
-            ApplicationFeatureStatus::Installed { details: _ } => {
+            ApplicationFeatureStatus::Installed => {
                 info!(status = "finished"; "Feature '{feature}' installed");
             }
-            ApplicationFeatureStatus::Failed { message: details } => {
+            ApplicationFeatureStatus::Failed { details } => {
                 error!(status = "failed"; "Feature '{feature}' installation failed: {details}");
             }
         },
@@ -1,56 +0,0 @@
-use std::sync::Mutex;
-
-use harmony::{
-    instrumentation::{self, HarmonyEvent},
-    modules::application::ApplicationFeatureStatus,
-};
-
-use crate::theme;
-
-pub fn init() {
-    let details: Mutex<Vec<String>> = Mutex::new(vec![]);
-
-    instrumentation::subscribe("Harmony CLI Reporter", {
-        move |event| {
-            let mut details = details.lock().unwrap();
-
-            match event {
-                HarmonyEvent::InterpretExecutionFinished {
-                    execution_id: _,
-                    topology: _,
-                    interpret: _,
-                    score: _,
-                    outcome: Ok(outcome),
-                } => {
-                    if outcome.status == harmony::interpret::InterpretStatus::SUCCESS {
-                        details.extend(outcome.details.clone());
-                    }
-                }
-                HarmonyEvent::ApplicationFeatureStateChanged {
-                    topology: _,
-                    application: _,
-                    feature: _,
-                    status:
-                        ApplicationFeatureStatus::Installed {
-                            details: feature_details,
-                        },
-                } => {
-                    details.extend(feature_details.clone());
-                }
-                HarmonyEvent::HarmonyFinished => {
-                    if !details.is_empty() {
-                        println!(
-                            "\n{} All done! Here's what's next for you:",
-                            theme::EMOJI_SUMMARY
-                        );
-                        for detail in details.iter() {
-                            println!("- {detail}");
-                        }
-                        println!();
-                    }
-                }
-                _ => {}
-            };
-        }
-    });
-}
@@ -8,7 +8,6 @@ use inquire::Confirm;
 use log::debug;

 pub mod cli_logger; // FIXME: Don't make me pub
-mod cli_reporter;
 pub mod progress;
 pub mod theme;

@@ -117,7 +116,6 @@ pub async fn run_cli<T: Topology + Send + Sync + 'static>(
     args: Args,
 ) -> Result<(), Box<dyn std::error::Error>> {
     cli_logger::init();
-    cli_reporter::init();

     let mut maestro = Maestro::initialize(inventory, topology).await.unwrap();
     maestro.register_all(scores);
@@ -9,7 +9,6 @@ pub static EMOJI_ERROR: Emoji<'_, '_> = Emoji("⚠️", "");
 pub static EMOJI_DEPLOY: Emoji<'_, '_> = Emoji("🚀", "");
 pub static EMOJI_TOPOLOGY: Emoji<'_, '_> = Emoji("📦", "");
 pub static EMOJI_SCORE: Emoji<'_, '_> = Emoji("🎶", "");
-pub static EMOJI_SUMMARY: Emoji<'_, '_> = Emoji("🚀", "");

 lazy_static! {
     pub static ref SECTION_STYLE: ProgressStyle = ProgressStyle::default_spinner()
@@ -21,6 +21,7 @@ pub fn handle_events() {

     instrumentation::subscribe("Harmony Composer Logger", {
         move |event| match event {
+            HarmonyComposerEvent::HarmonyComposerStarted => {}
             HarmonyComposerEvent::ProjectInitializationStarted => {
                 progress_tracker.add_section(
                     SETUP_SECTION,

@@ -5,6 +5,7 @@ use crate::{HarmonyProfile, HarmonyTarget};

 #[derive(Debug, Clone)]
 pub enum HarmonyComposerEvent {
+    HarmonyComposerStarted,
     ProjectInitializationStarted,
     ProjectInitialized,
     ProjectCompilationStarted {
@@ -120,10 +120,26 @@ impl SecretManager {

         let ns = &manager.namespace;
         let key = T::KEY;
-        let secret_json = inquire::Text::new(&format!(
-            "Secret not found for {} {}, paste the JSON here :",
-            ns, key
+        let secret_json = inquire::Editor::new(&format!(
+            "Secret not found for {ns} {key}, paste the JSON here :",
         ))
+        .with_formatter(&|data| {
+            let char_count = data.chars().count();
+            if char_count == 0 {
+                String::from("<skipped>")
+            } else if char_count <= 20 {
+                data.into()
+            } else {
+                let mut substr: String = data.chars().take(17).collect();
+                substr.push_str("...");
+                substr
+            }
+        })
+        .with_render_config(
+            inquire::ui::RenderConfig::default().with_canceled_prompt_indicator(
+                inquire::ui::Styled::new("<skipped>").with_fg(inquire::ui::Color::DarkYellow),
+            ),
+        )
         .prompt()
         .map_err(|e| {
             SecretStoreError::Store(format!("Failed to prompt secret {ns} {key} : {e}").into())
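The secret prompt now opens an external editor via `inquire::Editor` instead of a one-line text prompt, and the formatter in the hunk keeps pasted secrets out of the terminal scrollback by truncating the echoed value. A small standalone sketch of that truncation logic, copied from the hunk so it can be exercised on its own:

```rust
// Standalone copy of the preview formatter from the hunk above: empty input
// renders as "<skipped>", short values are echoed verbatim, and anything longer
// than 20 characters is truncated to 17 characters plus "...".
fn preview(data: &str) -> String {
    let char_count = data.chars().count();
    if char_count == 0 {
        String::from("<skipped>")
    } else if char_count <= 20 {
        data.into()
    } else {
        let mut substr: String = data.chars().take(17).collect();
        substr.push_str("...");
        substr
    }
}

fn main() {
    assert_eq!(preview(""), "<skipped>");
    assert_eq!(preview("{\"user\":\"x\"}"), "{\"user\":\"x\"}");
    assert_eq!(preview("a-very-long-secret-json-document"), "a-very-long-secre...");
}
```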