Compare commits: secrets-pr...be385dccff (25 commits)
| Author | SHA1 | Date |
|---|---|---|
| | be385dccff | |
| | 6651194582 | |
| | f31d21f9da | |
| | 902185daa4 | |
| | 8bcade27a1 | |
| | e7ccfe6969 | |
| | 4fdc2e8a58 | |
| | c5427b983c | |
| | f0d907d92f | |
| | a03327d7e4 | |
| | 680902e450 | |
| | b765e9b7dc | |
| | 160939de21 | |
| | 5142e2dd2d | |
| | ceea03d6ce | |
| | f1209b3823 | |
| | 6f746d4c88 | |
| | 75f27a2b85 | |
| | d24ea23413 | |
| | 0070373714 | |
| | f6e665f990 | |
| | 241980ebec | |
| | 35a459f63c | |
| | f076d36297 | |
| | 138e414727 | |
.gitmodules (vendored): 3 changes
@@ -1,3 +0,0 @@
[submodule "examples/try_rust_webapp/tryrust.org"]
	path = examples/try_rust_webapp/tryrust.org
	url = https://github.com/rust-dd/tryrust.org.git
Cargo.lock (generated): 61 changes
@@ -1858,21 +1858,6 @@ dependencies = [
 "url",
]

[[package]]
name = "example-try-rust-webapp"
version = "0.1.0"
dependencies = [
 "base64 0.22.1",
 "env_logger",
 "harmony",
 "harmony_cli",
 "harmony_macros",
 "harmony_types",
 "log",
 "tokio",
 "url",
]

[[package]]
name = "example-tui"
version = "0.1.0"
@@ -2361,7 +2346,6 @@ dependencies = [
 "tokio-util",
 "url",
 "uuid",
 "walkdir",
]

[[package]]
@@ -2433,7 +2417,6 @@ dependencies = [
 "serde",
 "serde_yaml",
 "syn 2.0.106",
 "url",
]

[[package]]
@@ -3124,7 +3107,6 @@ dependencies = [
 "fxhash",
 "newline-converter",
 "once_cell",
 "tempfile",
 "unicode-segmentation",
 "unicode-width 0.1.14",
]
@@ -4665,21 +4647,6 @@ dependencies = [
 "subtle",
]

[[package]]
name = "rhob-application-monitoring"
version = "0.1.0"
dependencies = [
 "base64 0.22.1",
 "env_logger",
 "harmony",
 "harmony_cli",
 "harmony_macros",
 "harmony_types",
 "log",
 "tokio",
 "url",
]

[[package]]
name = "ring"
version = "0.17.14"
@@ -5020,15 +4987,6 @@ dependencies = [
 "cipher",
]

[[package]]
name = "same-file"
version = "1.0.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502"
dependencies = [
 "winapi-util",
]

[[package]]
name = "schannel"
version = "0.1.27"
@@ -6566,16 +6524,6 @@ dependencies = [
 "libc",
]

[[package]]
name = "walkdir"
version = "2.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "29790946404f91d9c5d06f9874efddea1dc06c5efe94541a7d6863108e3a5e4b"
dependencies = [
 "same-file",
 "winapi-util",
]

[[package]]
name = "want"
version = "0.3.1"
@@ -6758,15 +6706,6 @@ version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6"

[[package]]
name = "winapi-util"
version = "0.1.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0978bf7171b3d90bac376700cb56d606feb40f251a475a5d6634613564460b22"
dependencies = [
 "windows-sys 0.60.2",
]

[[package]]
name = "winapi-x86_64-pc-windows-gnu"
version = "0.4.0"
Cargo.toml: 15 changes
@@ -14,8 +14,7 @@ members = [
    "harmony_composer",
    "harmony_inventory_agent",
    "harmony_secret_derive",
    "harmony_secret",
    "adr/agent_discovery/mdns",
    "harmony_secret", "adr/agent_discovery/mdns",
]

[workspace.package]
@@ -51,7 +50,7 @@ k8s-openapi = { version = "0.25", features = ["v1_30"] }
serde_yaml = "0.9"
serde-value = "0.7"
http = "1.2"
inquire = { version = "0.7", features = ["editor"] }
inquire = "0.7"
convert_case = "0.8"
chrono = "0.4"
similar = "2"
@@ -67,11 +66,5 @@ thiserror = "2.0.14"
serde = { version = "1.0.209", features = ["derive", "rc"] }
serde_json = "1.0.127"
askama = "0.14"
sqlx = { version = "0.8", features = ["runtime-tokio", "sqlite"] }
reqwest = { version = "0.12", features = [
    "blocking",
    "stream",
    "rustls-tls",
    "http2",
    "json",
], default-features = false }
sqlx = { version = "0.8", features = ["runtime-tokio", "sqlite" ] }
reqwest = { version = "0.12", features = ["blocking", "stream", "rustls-tls", "http2", "json"], default-features = false }
@@ -1,132 +0,0 @@
# Harmony, an Open-Source Infrastructure Orchestrator

**Target Duration:** 25 minutes\
**Tone:** Friendly, expert-to-expert, inspiring.

---

#### **Slide 1: Title Slide**

- **Visual:** Clean and simple. Your company logo (NationTech) and the Harmony logo.

---

#### **Slide 2: The YAML Labyrinth**

**Goal:** Get every head in the room nodding in agreement. Start with their world, not yours.

- **Visual:**
  - Option A: "The Pull Request from Hell". A screenshot of a GitHub pull request for a seemingly minor change that touches dozens of YAML files across multiple directories. A sea of red and green diffs that is visually overwhelming.
  - Option B: A complex flowchart connecting dozens of logos: Terraform, Ansible, K8s, Helm, etc.
- **Narration:**\
  [...ADD SOMETHING FOR INTRODUCTION...]\
  "We love the power that tools like Kubernetes and the CNCF landscape have given us. But let's be honest... when did our infrastructure code start looking like _this_?"\
  "We have GitOps, which is great. But it often means we're managing this fragile cathedral of YAML, Helm charts, and brittle scripts. We spend more time debugging indentation and tracing variables than we do building truly resilient systems."

---

#### **Slide 3: The Real Cost of Infrastructure**

- **Visual:** "The Jenga Tower of Tools". A tall, precarious Jenga tower where each block is the logo of a different tool (Terraform, K8s, Helm, Ansible, Prometheus, ArgoCD, etc.). One block near the bottom is being nervously pulled out.
- **Narration:**
  "The real cost isn't just complexity; it's the constant need to choose, learn, integrate, and operate a dozen different tools, each with its own syntax and failure modes. It's the nagging fear that a tiny typo in a config file could bring everything down. Click-ops isn't the answer, but the current state of IaC feels like we've traded one problem for another."

---

#### **Slide 4: The Broken Promise of "Code"**

**Goal:** Introduce the core idea before introducing the product. This makes the solution feel inevitable.

- **(Initial Visual):** A two-panel slide.
  - **Left Panel Title: "The Plan"** - A terminal showing a green, successful `terraform plan` output.
  - **Right Panel Title: "The Reality"** - The _next_ screen in the terminal, showing the `terraform apply` failing with a cascade of red error text.
- **Narration:**
  "We call our discipline **Infrastructure as Code**. And we've all been here. Our 'compiler' is a `terraform plan` that says everything looks perfect. We get the green light."
  (Pause for a beat)
  "And then we `apply`, and reality hits. It fails halfway through, at runtime, when it's most expensive and painful to fix."

**(Click to transition the slide)**

- **(New Visual):** The entire slide is replaced by a clean screenshot of a code editor (like nvim 😉) showing Harmony's Rust DSL. A red squiggly line is under a config line. The error message is clear in the "Problems" panel: `error: Incompatible deployment. Production target 'gcp-prod-cluster' requires a StorageClass with 'snapshots' capability, but 'standard-sc' does not provide it.`
- **Narration (continued):**
  "In software development, we solved these problems years ago. We don't accept 'it compiled, but crashed on startup'. We have real tools, type systems, compilers, test frameworks, and IDEs that catch our mistakes before they ever reach production. **So, what if we could treat our entire infrastructure... like a modern, compiled application?**"
  "What if your infrastructure code could get compile-time checks, straight into the editor... instead of runtime panics and failures at 3 AM in production?"
  _(A type-level sketch of this idea follows this slide.)_
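**Speaker reference:** a minimal, self-contained Rust sketch of the Slide 4 idea. These types are illustrative stand-ins, not Harmony's actual API; they only show how a capability encoded as a trait turns the deployment error into a compile-time failure.

```rust
// Capability marker: a storage backend that supports snapshots.
trait Snapshots {}

struct CephStorage;     // provides snapshots
struct StandardStorage; // does not

impl Snapshots for CephStorage {}

// A production target can only be constructed with snapshot-capable storage.
struct ProductionTarget<S: Snapshots> {
    storage: S,
}

fn main() {
    // Compiles: CephStorage satisfies the Snapshots bound.
    let _ok = ProductionTarget { storage: CephStorage };

    // Uncommenting the next line fails at compile time with
    // "the trait `Snapshots` is not implemented for `StandardStorage`":
    // the Slide 4 error caught in the editor instead of at apply time.
    // let _bad = ProductionTarget { storage: StandardStorage };
}
```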
---

#### **Slide 5: Introducing Harmony**

**Goal:** Introduce Harmony as the answer to the "What If?" question.

- **Visual:** The Harmony logo, large and centered.
- **Tagline:** `Infrastructure in type-safe Rust. No YAML required.`
- **Narration:**
  "This is Harmony. It's an open-source orchestrator that lets you define your entire stack, from a dev laptop to a multi-site bare-metal cluster, in a single, type-safe Rust codebase."

---

#### **Slide 6: Before & After**

- **Visual:** A side-by-side comparison. Left side: a screen full of complex, nested YAML. Right side: 10-15 lines of clean, readable Harmony Rust DSL that accomplishes the same thing (a condensed sketch follows this slide).
- **Narration:**
  "This is the difference. On the left, the fragile world of strings and templates. On the right, a portable, verifiable program that describes your apps, your infra, and your operations. We unify scaffolding, provisioning, and Day-2 ops, all verified by the Rust compiler. But enough slides... let's see it in action."
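**Speaker reference:** the right-hand panel can be lifted almost verbatim from the repository's own `examples/try_rust_webapp` entry point (condensed here from the version removed later in this diff; it builds against the `harmony` crates in this workspace, not standalone):

```rust
use harmony::{
    inventory::Inventory,
    modules::application::{ApplicationScore, RustWebFramework, RustWebapp},
    topology::K8sAnywhereTopology,
};
use std::{path::PathBuf, sync::Arc};

#[tokio::main]
async fn main() {
    // The whole app definition: name, source tree, framework, port.
    let application = Arc::new(RustWebapp {
        name: "harmony-example-tryrust".to_string(),
        project_root: PathBuf::from("./tryrust.org"),
        framework: Some(RustWebFramework::Leptos),
        service_port: 8080,
    });

    // Features (monitoring, CD, ...) plug in here; empty for the minimal case.
    let app = ApplicationScore { features: vec![], application };

    harmony_cli::run(
        Inventory::autoload(),
        K8sAnywhereTopology::from_env(),
        vec![Box::new(app)],
        None,
    )
    .await
    .unwrap();
}
```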
---

#### **Slide 7: Live Demo: Zero to Monitored App**

**Goal:** Show, don't just tell. Make it look effortless. This is where you build the "dream."

- **Visual:** Your terminal/IDE, ready to go.
- **Narration Guide:**
  "Okay, for this demo, we're going to take a standard web app from GitHub. Nothing special about it."
  _(Show the repo)_
  "Now, let's bring it into Harmony. This is the entire definition we need to describe the application and its needs."
  _(Show the Rust DSL)_
  "First, let's run it locally on k3d. The exact same definition for dev as for prod."
  _(Deploy locally, show it works)_
  "Cool. But a real app needs monitoring. In Harmony, that's just adding a feature to our code."
  _(Uncomment one line: `.with_feature(Monitoring)` and redeploy; see the feature sketch after this slide)_
  "And just like that, we have a fully configured Prometheus and Grafana stack, scraping our app. No YAML, no extra config."
  "Finally, let's push this to our production staging cluster. We just change the target and specify our multi-site Ceph storage."
  _(Deploy to the remote cluster)_
  "And there it is. We've gone from a simple web app to a monitored, enterprise-grade service in minutes."
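**Speaker reference:** `.with_feature(Monitoring)` on the slide is shorthand; in the examples in this tree, enabling monitoring is one more entry in `ApplicationScore::features`. Adapted from the tryrust example in this diff (`application` and `discord_receiver` are defined as in that example):

```rust
let app = ApplicationScore {
    features: vec![
        // The single "feature" line from the demo: a monitoring stack,
        // wired to an alert receiver, with no extra YAML.
        Box::new(RHOBMonitoring {
            application: application.clone(),
            alert_receiver: vec![Box::new(discord_receiver)],
        }),
    ],
    application,
};
```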
---

#### **Slide 8: Live Demo: Embracing Chaos**

**Goal:** Prove the "predictable" and "resilient" claims in the most dramatic way possible.

- **Visual:** A slide showing a map or diagram of your distributed infrastructure (the different data centers). Then switch back to your terminal.
- **Narration Guide:**
  "This is great when things are sunny. But production is chaos. So... let's break things. On purpose."
  "First, a network failure." _(Kill a switch/link, show app is still up)_
  "Now, let's power off a storage server." _(Force off a server, show Ceph healing and the app is unaffected)_
  "How about a control plane node?" _(Force off a k8s control plane, show the cluster is still running)_
  "Okay, for the grand finale. What if we have a cascading failure? I'm going to kill _another_ storage server. This should cause a total failure in this data center."
  _(Force off the second server, narrate what's happening)_
  "And there it is... Ceph has lost quorum in this site... and Harmony has automatically failed everything over to our other datacenter. The app is still running."

---

#### **Slide 9: The New Reality**

**Goal:** Summarize the dream and tell the audience what you want them to do.

- **Visual:** The clean, simple Harmony Rust DSL code from Slide 6. A summary of what was just accomplished is listed next to it: `✓ GitHub to Prod in minutes`, `✓ Type-Safe Validation`, `✓ Built-in Monitoring`, `✓ Automated Multi-Site Failover`.
- **Narration:**
  "So, in just a few minutes, we went from a simple web app to a multi-site, monitored, and chaos-proof production deployment. We did it with a small amount of code that is easy to read, easy to verify, and completely portable. This is our vision: to offload the complexity, and make infrastructure simple, predictable, and even fun again."

---

#### **Slide 10: Join Us**

- **Visual:** A clean, final slide with QR codes and links.
  - GitHub Repo (`github.com/nation-tech/harmony`)
  - Website (`harmony.sh` or similar)
  - Your contact info (`jg@nation.tech` / LinkedIn / Twitter)
- **Narration:**
  "Harmony is open-source, AGPLv3. We believe this is the future, but we're just getting started. We know this crowd has great infrastructure minds out there, and we need your feedback. Please, check out the project on GitHub. Star it if you like what you see. Tell us what's missing. Let's build this future together. Thank you."

**(Open for Q&A)**
@@ -1,8 +0,0 @@
## BIOS settings

1. CSM: Disabled (compatibility support; must be disabled to boot GPT-formatted drives)
2. Secure Boot: Disabled
3. Boot order:
   1. Local hard drive
   2. PXE IPv4
4. System clock: make sure it is set correctly, otherwise you will get invalid-certificate errors
@@ -27,9 +27,9 @@ async fn main() {
    };
    let application = Arc::new(RustWebapp {
        name: "example-monitoring".to_string(),
        domain: Url::Url(url::Url::parse("https://rustapp.harmony.example.com").unwrap()),
        project_root: PathBuf::from("./examples/rust/webapp"),
        framework: Some(RustWebFramework::Leptos),
        service_port: 3000,
    });

    let webhook_receiver = WebhookReceiver {
@@ -5,12 +5,7 @@ use std::{

use cidr::Ipv4Cidr;
use harmony::{
    config::secret::SshKeyPair,
    data::{FileContent, FilePath},
    hardware::{HostCategory, Location, PhysicalHost, SwitchGroup},
    infra::opnsense::OPNSenseManagementInterface,
    inventory::Inventory,
    modules::{
    config::secret::SshKeyPair, data::{FileContent, FilePath}, hardware::{HostCategory, Location, PhysicalHost, SwitchGroup}, infra::opnsense::OPNSenseManagementInterface, inventory::Inventory, modules::{
        http::StaticFilesHttpScore,
        okd::{
            bootstrap_dhcp::OKDBootstrapDhcpScore,
@@ -18,8 +13,7 @@ use harmony::{
            dns::OKDDnsScore, ipxe::OKDIpxeScore,
        },
        tftp::TftpScore,
    },
    topology::{LogicalHost, UnmanagedRouter},
    }, topology::{LogicalHost, UnmanagedRouter}
};
use harmony_macros::{ip, mac_address};
use harmony_secret::SecretManager;
@@ -1,17 +0,0 @@
[package]
name = "rhob-application-monitoring"
edition = "2024"
version.workspace = true
readme.workspace = true
license.workspace = true

[dependencies]
harmony = { path = "../../harmony" }
harmony_cli = { path = "../../harmony_cli" }
harmony_types = { path = "../../harmony_types" }
harmony_macros = { path = "../../harmony_macros" }
tokio = { workspace = true }
log = { workspace = true }
env_logger = { workspace = true }
url = { workspace = true }
base64.workspace = true
@@ -1,49 +0,0 @@
use std::{path::PathBuf, sync::Arc};

use harmony::{
    inventory::Inventory,
    modules::{
        application::{
            ApplicationScore, RustWebFramework, RustWebapp,
            features::rhob_monitoring::RHOBMonitoring,
        },
        monitoring::alert_channel::discord_alert_channel::DiscordWebhook,
    },
    topology::K8sAnywhereTopology,
};
use harmony_types::net::Url;

#[tokio::main]
async fn main() {
    let application = Arc::new(RustWebapp {
        name: "test-rhob-monitoring".to_string(),
        project_root: PathBuf::from("./webapp"), // Relative from 'harmony-path' param
        framework: Some(RustWebFramework::Leptos),
        service_port: 3000,
    });

    let discord_receiver = DiscordWebhook {
        name: "test-discord".to_string(),
        url: Url::Url(url::Url::parse("https://discord.doesnt.exist.com").unwrap()),
    };

    let app = ApplicationScore {
        features: vec![
            Box::new(RHOBMonitoring {
                application: application.clone(),
                alert_receiver: vec![Box::new(discord_receiver)],
            }),
            // TODO add backups, multisite ha, etc
        ],
        application,
    };

    harmony_cli::run(
        Inventory::autoload(),
        K8sAnywhereTopology::from_env(),
        vec![Box::new(app)],
        None,
    )
    .await
    .unwrap();
}
@@ -13,25 +13,25 @@ use harmony::{
    },
    topology::K8sAnywhereTopology,
};
use harmony_macros::hurl;
use harmony_types::net::Url;

#[tokio::main]
async fn main() {
    let application = Arc::new(RustWebapp {
        name: "harmony-example-rust-webapp".to_string(),
        project_root: PathBuf::from("./webapp"),
        domain: Url::Url(url::Url::parse("https://rustapp.harmony.example.com").unwrap()),
        project_root: PathBuf::from("./webapp"), // Relative from 'harmony-path' param
        framework: Some(RustWebFramework::Leptos),
        service_port: 3000,
    });

    let discord_receiver = DiscordWebhook {
        name: "test-discord".to_string(),
        url: hurl!("https://discord.doesnt.exist.com"),
        url: Url::Url(url::Url::parse("https://discord.doesnt.exist.com").unwrap()),
    };

    let webhook_receiver = WebhookReceiver {
        name: "sample-webhook-receiver".to_string(),
        url: hurl!("https://webhook-doesnt-exist.com"),
        url: Url::Url(url::Url::parse("https://webhook-doesnt-exist.com").unwrap()),
    };

    let app = ApplicationScore {
@@ -1,17 +0,0 @@
[package]
name = "example-try-rust-webapp"
edition = "2024"
version.workspace = true
readme.workspace = true
license.workspace = true

[dependencies]
harmony = { path = "../../harmony" }
harmony_cli = { path = "../../harmony_cli" }
harmony_types = { path = "../../harmony_types" }
harmony_macros = { path = "../../harmony_macros" }
tokio = { workspace = true }
log = { workspace = true }
env_logger = { workspace = true }
url = { workspace = true }
base64.workspace = true
@@ -1,50 +0,0 @@
use harmony::{
    inventory::Inventory,
    modules::{
        application::{
            ApplicationScore, RustWebFramework, RustWebapp,
            features::{ContinuousDelivery, Monitoring, rhob_monitoring::RHOBMonitoring},
        },
        monitoring::alert_channel::discord_alert_channel::DiscordWebhook,
    },
    topology::K8sAnywhereTopology,
};
use harmony_macros::hurl;
use std::{path::PathBuf, sync::Arc};

#[tokio::main]
async fn main() {
    let application = Arc::new(RustWebapp {
        name: "harmony-example-tryrust".to_string(),
        project_root: PathBuf::from("./tryrust.org"),
        framework: Some(RustWebFramework::Leptos),
        service_port: 8080,
    });

    let discord_receiver = DiscordWebhook {
        name: "test-discord".to_string(),
        url: hurl!("https://discord.doesnt.exist.com"),
    };

    let app = ApplicationScore {
        features: vec![
            Box::new(ContinuousDelivery {
                application: application.clone(),
            }),
            Box::new(RHOBMonitoring {
                application: application.clone(),
                alert_receiver: vec![Box::new(discord_receiver)],
            }),
        ],
        application,
    };

    harmony_cli::run(
        Inventory::autoload(),
        K8sAnywhereTopology::from_env(),
        vec![Box::new(app)],
        None,
    )
    .await
    .unwrap();
}
Submodule examples/try_rust_webapp/tryrust.org deleted from 0f9ba14517
@@ -8,8 +8,7 @@ use harmony::{
        load_balancer::LoadBalancerScore,
    },
    topology::{
        BackendServer, DummyInfra, HealthCheck, HttpMethod, HttpStatusCode, LoadBalancerService,
        SSL,
        BackendServer, DummyInfra, HealthCheck, HttpMethod, HttpStatusCode, LoadBalancerService, SSL,
    },
};
use harmony_macros::ipv4;
@@ -10,11 +10,7 @@ testing = []

[dependencies]
hex = "0.4"
reqwest = { version = "0.11", features = [
    "blocking",
    "json",
    "rustls-tls",
], default-features = false }
reqwest = { version = "0.11", features = ["blocking", "json", "rustls-tls"], default-features = false }
russh = "0.45.0"
rust-ipmi = "0.1.1"
semver = "1.0.23"
@@ -70,7 +66,6 @@ tar.workspace = true
base64.workspace = true
thiserror.workspace = true
once_cell = "1.21.3"
walkdir = "2.5.0"
harmony_inventory_agent = { path = "../harmony_inventory_agent" }
harmony_secret_derive = { path = "../harmony_secret_derive" }
harmony_secret = { path = "../harmony_secret" }
@@ -149,98 +149,6 @@ impl PhysicalHost {
        parts.join(" | ")
    }

    pub fn parts_list(&self) -> String {
        let PhysicalHost {
            id,
            category,
            network,
            storage,
            labels,
            memory_modules,
            cpus,
        } = self;

        let mut parts_list = String::new();
        parts_list.push_str("\n\n=====================");
        parts_list.push_str(&format!("\nHost ID {id}"));
        parts_list.push_str("\n=====================");
        parts_list.push_str("\n\n=====================");
        parts_list.push_str(&format!("\nCPU count {}", cpus.len()));
        parts_list.push_str("\n=====================");
        cpus.iter().for_each(|c| {
            let CPU {
                model,
                vendor,
                cores,
                threads,
                frequency_mhz,
            } = c;
            parts_list.push_str(&format!(
                "\n{vendor} {model}, {cores}/{threads} {}Ghz",
                *frequency_mhz as f64 / 1000.0
            ));
        });

        parts_list.push_str("\n\n=====================");
        parts_list.push_str(&format!("\nNetwork Interfaces count {}", network.len()));
        parts_list.push_str("\n=====================");
        network.iter().for_each(|nic| {
            parts_list.push_str(&format!(
                "\nNic({} {}Gbps mac({}) ipv4({}), ipv6({})",
                nic.name,
                nic.speed_mbps.unwrap_or(0) / 1000,
                nic.mac_address,
                nic.ipv4_addresses.join(","),
                nic.ipv6_addresses.join(",")
            ));
        });

        parts_list.push_str("\n\n=====================");
        parts_list.push_str(&format!("\nStorage drives count {}", storage.len()));
        parts_list.push_str("\n=====================");
        storage.iter().for_each(|drive| {
            let StorageDrive {
                name,
                model,
                serial,
                size_bytes,
                logical_block_size: _,
                physical_block_size: _,
                rotational: _,
                wwn: _,
                interface_type,
                smart_status,
            } = drive;
            parts_list.push_str(&format!(
                "\n{name} {}Gb {model} {interface_type} smart({smart_status:?}) {serial}",
                size_bytes / 1000 / 1000 / 1000
            ));
        });

        parts_list.push_str("\n\n=====================");
        parts_list.push_str(&format!("\nMemory modules count {}", memory_modules.len()));
        parts_list.push_str("\n=====================");
        memory_modules.iter().for_each(|mem| {
            let MemoryModule {
                size_bytes,
                speed_mhz,
                manufacturer,
                part_number,
                serial_number,
                rank,
            } = mem;
            parts_list.push_str(&format!(
                "\n{}Gb, {}Mhz, Manufacturer ({}), Part Number ({})",
                size_bytes / 1000 / 1000 / 1000,
                speed_mhz.unwrap_or(0),
                manufacturer.as_ref().unwrap_or(&String::new()),
                part_number.as_ref().unwrap_or(&String::new()),
            ));
        });

        parts_list
    }

    pub fn cluster_mac(&self) -> MacAddress {
        self.network
            .first()
@@ -33,7 +33,6 @@ pub enum InterpretName {
    DiscoverInventoryAgent,
    CephClusterHealth,
    Custom(&'static str),
    RHOBAlerting,
}

impl std::fmt::Display for InterpretName {
@@ -63,7 +62,6 @@ impl std::fmt::Display for InterpretName {
            InterpretName::DiscoverInventoryAgent => f.write_str("DiscoverInventoryAgent"),
            InterpretName::CephClusterHealth => f.write_str("CephClusterHealth"),
            InterpretName::Custom(name) => f.write_str(name),
            InterpretName::RHOBAlerting => f.write_str("RHOBAlerting"),
        }
    }
}
@@ -18,7 +18,6 @@ impl InventoryFilter {
use derive_new::new;
use log::info;
use serde::{Deserialize, Serialize};
use strum::EnumIter;

use crate::hardware::{ManagementInterface, ManualManagementInterface};

@@ -64,7 +63,7 @@ impl Inventory {
    }
}

#[derive(Debug, Serialize, Deserialize, sqlx::Type, Clone, EnumIter)]
#[derive(Debug, Serialize, Deserialize, sqlx::Type, Clone)]
pub enum HostRole {
    Bootstrap,
    ControlPlane,
@@ -29,7 +29,7 @@ pub trait InventoryRepository: Send + Sync + 'static {
    async fn save(&self, host: &PhysicalHost) -> Result<(), RepoError>;
    async fn get_latest_by_id(&self, host_id: &str) -> Result<Option<PhysicalHost>, RepoError>;
    async fn get_all_hosts(&self) -> Result<Vec<PhysicalHost>, RepoError>;
    async fn get_host_for_role(&self, role: &HostRole) -> Result<Vec<PhysicalHost>, RepoError>;
    async fn get_host_for_role(&self, role: HostRole) -> Result<Vec<PhysicalHost>, RepoError>;
    async fn save_role_mapping(
        &self,
        role: &HostRole,

@@ -1,7 +0,0 @@
use crate::topology::PreparationError;
use async_trait::async_trait;

#[async_trait]
pub trait Ingress {
    async fn get_domain(&self, service: &str) -> Result<String, PreparationError>;
}
@@ -17,7 +17,7 @@ use kube::{
};
use log::{debug, error, trace};
use serde::{Serialize, de::DeserializeOwned};
use serde_json::{Value, json};
use serde_json::json;
use similar::TextDiff;
use tokio::io::AsyncReadExt;

@@ -53,21 +53,6 @@ impl K8sClient {
        })
    }

    pub async fn get_resource_json_value(
        &self,
        name: &str,
        namespace: Option<&str>,
        gvk: &GroupVersionKind,
    ) -> Result<DynamicObject, Error> {
        let gvk = ApiResource::from_gvk(gvk);
        let resource: Api<DynamicObject> = if let Some(ns) = namespace {
            Api::namespaced_with(self.client.clone(), ns, &gvk)
        } else {
            Api::default_namespaced_with(self.client.clone(), &gvk)
        };
        Ok(resource.get(name).await?)
    }

    pub async fn get_deployment(
        &self,
        name: &str,
@@ -1,7 +1,6 @@
use std::{process::Command, sync::Arc};

use async_trait::async_trait;
use kube::api::GroupVersionKind;
use log::{debug, info, warn};
use serde::Serialize;
use tokio::sync::OnceCell;
@@ -15,15 +14,13 @@ use crate::{
        monitoring::kube_prometheus::crd::{
            crd_alertmanager_config::CRDPrometheus,
            prometheus_operator::prometheus_operator_helm_chart_score,
            rhob_alertmanager_config::RHOBObservability,
        },
        prometheus::{
            k8s_prometheus_alerting_score::K8sPrometheusCRDAlertingScore,
            prometheus::PrometheusApplicationMonitoring, rhob_alerting_score::RHOBAlertingScore,
            prometheus::PrometheusApplicationMonitoring,
        },
    },
    score::Score,
    topology::ingress::Ingress,
};

use super::{
@@ -111,43 +108,6 @@ impl PrometheusApplicationMonitoring<CRDPrometheus> for K8sAnywhereTopology {
    }
}

#[async_trait]
impl PrometheusApplicationMonitoring<RHOBObservability> for K8sAnywhereTopology {
    async fn install_prometheus(
        &self,
        sender: &RHOBObservability,
        inventory: &Inventory,
        receivers: Option<Vec<Box<dyn AlertReceiver<RHOBObservability>>>>,
    ) -> Result<PreparationOutcome, PreparationError> {
        let po_result = self.ensure_cluster_observability_operator(sender).await?;

        if po_result == PreparationOutcome::Noop {
            debug!("Skipping Prometheus CR installation due to missing operator.");
            return Ok(po_result);
        }

        let result = self
            .get_cluster_observability_operator_prometheus_application_score(
                sender.clone(),
                receivers,
            )
            .await
            .interpret(inventory, self)
            .await;

        match result {
            Ok(outcome) => match outcome.status {
                InterpretStatus::SUCCESS => Ok(PreparationOutcome::Success {
                    details: outcome.message,
                }),
                InterpretStatus::NOOP => Ok(PreparationOutcome::Noop),
                _ => Err(PreparationError::new(outcome.message)),
            },
            Err(err) => Err(PreparationError::new(err.to_string())),
        }
    }
}

impl Serialize for K8sAnywhereTopology {
    fn serialize<S>(&self, _serializer: S) -> Result<S::Ok, S::Error>
    where
@@ -174,19 +134,6 @@ impl K8sAnywhereTopology {
        }
    }

    async fn get_cluster_observability_operator_prometheus_application_score(
        &self,
        sender: RHOBObservability,
        receivers: Option<Vec<Box<dyn AlertReceiver<RHOBObservability>>>>,
    ) -> RHOBAlertingScore {
        RHOBAlertingScore {
            sender,
            receivers: receivers.unwrap_or_default(),
            service_monitors: vec![],
            prometheus_rules: vec![],
        }
    }

    async fn get_k8s_prometheus_application_score(
        &self,
        sender: CRDPrometheus,
@@ -200,26 +147,6 @@ impl K8sAnywhereTopology {
        }
    }

    async fn openshift_ingress_operator_available(&self) -> Result<(), PreparationError> {
        let client = self.k8s_client().await?;
        let gvk = GroupVersionKind {
            group: "operator.openshift.io".into(),
            version: "v1".into(),
            kind: "IngressController".into(),
        };
        let ic = client
            .get_resource_json_value("default", Some("openshift-ingress-operator"), &gvk)
            .await?;
        let ready_replicas = ic.data["status"]["availableReplicas"].as_i64().unwrap_or(0);
        if ready_replicas >= 1 {
            return Ok(());
        } else {
            return Err(PreparationError::new(
                "openshift-ingress-operator not available".to_string(),
            ));
        }
    }

    fn is_helm_available(&self) -> Result<(), String> {
        let version_result = Command::new("helm")
            .arg("version")
@@ -359,62 +286,6 @@ impl K8sAnywhereTopology {
        }
    }

    async fn ensure_cluster_observability_operator(
        &self,
        sender: &RHOBObservability,
    ) -> Result<PreparationOutcome, PreparationError> {
        let status = Command::new("sh")
            .args(["-c", "kubectl get crd -A | grep -i rhobs"])
            .status()
            .map_err(|e| PreparationError::new(format!("could not connect to cluster: {}", e)))?;

        if !status.success() {
            if let Some(Some(k8s_state)) = self.k8s_state.get() {
                match k8s_state.source {
                    K8sSource::LocalK3d => {
                        warn!("Installing observability operator is not supported on LocalK3d source");
                        return Ok(PreparationOutcome::Noop);
                        debug!("installing cluster observability operator");
                        todo!();
                        let op_score =
                            prometheus_operator_helm_chart_score(sender.namespace.clone());
                        let result = op_score.interpret(&Inventory::empty(), self).await;

                        return match result {
                            Ok(outcome) => match outcome.status {
                                InterpretStatus::SUCCESS => Ok(PreparationOutcome::Success {
                                    details: "installed cluster observability operator".into(),
                                }),
                                InterpretStatus::NOOP => Ok(PreparationOutcome::Noop),
                                _ => Err(PreparationError::new(
                                    "failed to install cluster observability operator (unknown error)".into(),
                                )),
                            },
                            Err(err) => Err(PreparationError::new(err.to_string())),
                        };
                    }
                    K8sSource::Kubeconfig => {
                        debug!(
                            "unable to install cluster observability operator, contact cluster admin"
                        );
                        return Ok(PreparationOutcome::Noop);
                    }
                }
            } else {
                warn!(
                    "Unable to detect k8s_state. Skipping Cluster Observability Operator install."
                );
                return Ok(PreparationOutcome::Noop);
            }
        }

        debug!("Cluster Observability Operator is already present, skipping install");

        Ok(PreparationOutcome::Success {
            details: "cluster observability operator present in cluster".into(),
        })
    }

    async fn ensure_prometheus_operator(
        &self,
        sender: &CRDPrometheus,
@@ -552,7 +423,7 @@ impl MultiTargetTopology for K8sAnywhereTopology {
        match self.config.harmony_profile.to_lowercase().as_str() {
            "staging" => DeploymentTarget::Staging,
            "production" => DeploymentTarget::Production,
            _ => todo!("HARMONY_PROFILE must be set when use_local_k3d is false"),
            _ => todo!("HARMONY_PROFILE must be set when use_local_k3d is not set"),
        }
    }
}
@@ -574,45 +445,3 @@ impl TenantManager for K8sAnywhereTopology {
        .await
    }
}

#[async_trait]
impl Ingress for K8sAnywhereTopology {
    //TODO this is specifically for openshift/okd which violates the k8sanywhere idea
    async fn get_domain(&self, service: &str) -> Result<String, PreparationError> {
        let client = self.k8s_client().await?;

        if let Some(Some(k8s_state)) = self.k8s_state.get() {
            match k8s_state.source {
                K8sSource::LocalK3d => Ok(format!("{service}.local.k3d")),
                K8sSource::Kubeconfig => {
                    self.openshift_ingress_operator_available().await?;

                    let gvk = GroupVersionKind {
                        group: "operator.openshift.io".into(),
                        version: "v1".into(),
                        kind: "IngressController".into(),
                    };
                    let ic = client
                        .get_resource_json_value(
                            "default",
                            Some("openshift-ingress-operator"),
                            &gvk,
                        )
                        .await
                        .map_err(|_| {
                            PreparationError::new("Failed to fetch IngressController".to_string())
                        })?;

                    match ic.data["status"]["domain"].as_str() {
                        Some(domain) => Ok(format!("{service}.{domain}")),
                        None => Err(PreparationError::new("Could not find domain".to_string())),
                    }
                }
            }
        } else {
            Err(PreparationError::new(
                "Cannot get domain: unable to detect K8s state".to_string(),
            ))
        }
    }
}
@@ -102,13 +102,13 @@ pub enum HttpStatusCode {
    ServerError5xx,
}

#[derive(Debug, Clone, PartialEq, Serialize)]
pub enum SSL {
    SSL,
    Disabled,
    Default,
    SNI,
    Other(String),
}

#[derive(Debug, Clone, PartialEq, Serialize)]
@@ -1,5 +1,4 @@
mod ha_cluster;
pub mod ingress;
use harmony_types::net::IpAddress;
mod host_binding;
mod http;

@@ -109,7 +109,7 @@ impl InventoryRepository for SqliteInventoryRepository {
        Ok(())
    }

    async fn get_host_for_role(&self, role: &HostRole) -> Result<Vec<PhysicalHost>, RepoError> {
    async fn get_host_for_role(&self, role: HostRole) -> Result<Vec<PhysicalHost>, RepoError> {
        struct HostIdRow {
            host_id: String,
        }
@@ -1,15 +1,13 @@
use async_trait::async_trait;
use log::{debug, error, info, warn};
use opnsense_config_xml::{
    Frontend, HAProxy, HAProxyBackend, HAProxyHealthCheck, HAProxyServer, MaybeString,
};
use log::{debug, info, warn};
use opnsense_config_xml::{Frontend, HAProxy, HAProxyBackend, HAProxyHealthCheck, HAProxyServer, MaybeString};
use uuid::Uuid;

use crate::{
    executors::ExecutorError,
    topology::{
        BackendServer, HealthCheck, HttpMethod, HttpStatusCode, LoadBalancer, LoadBalancerService,
        LogicalHost, SSL,
        LogicalHost,
    },
};
use harmony_types::net::IpAddress;
@@ -208,22 +206,7 @@ pub(crate) fn get_health_check_for_backend(
            .unwrap_or_default()
            .into();
        let status_code: HttpStatusCode = HttpStatusCode::Success2xx;
        let ssl = match haproxy_health_check
            .ssl
            .content_string()
            .to_uppercase()
            .as_str()
        {
            "SSL" => SSL::SSL,
            "SSLNI" => SSL::SNI,
            "NOSSL" => SSL::Disabled,
            "" => SSL::Default,
            other => {
                error!("Unknown haproxy health check ssl config {other}");
                SSL::Other(other.to_string())
            }
        };
        Some(HealthCheck::HTTP(path, method, status_code, ssl))
        Some(HealthCheck::HTTP(path, method, status_code))
    }
    _ => panic!("Received unsupported health check type {}", uppercase),
}
@@ -260,11 +243,10 @@ pub(crate) fn harmony_load_balancer_service_to_haproxy_xml(
    match health_check {
        HealthCheck::HTTP(path, http_method, _http_status_code, ssl) => {
            let ssl: MaybeString = match ssl {
                SSL::SSL => "ssl".into(),
                SSL::SNI => "sslni".into(),
                SSL::Disabled => "nossl".into(),
                SSL::Default => "".into(),
                SSL::Other(other) => other.as_str().into(),
                crate::topology::SSL::SSL => "ssl".into(),
                crate::topology::SSL::SNI => "sslni".into(),
                crate::topology::SSL::Disabled => "nossl".into(),
                crate::topology::SSL::Default => "".into(),
            };
            let haproxy_check = HAProxyHealthCheck {
                name: format!("HTTP_{http_method}_{path}"),
@@ -14,9 +14,7 @@ use crate::{
        features::{ArgoApplication, ArgoHelmScore},
    },
    score::Score,
    topology::{
        DeploymentTarget, HelmCommand, K8sclient, MultiTargetTopology, Topology, ingress::Ingress,
    },
    topology::{DeploymentTarget, HelmCommand, K8sclient, MultiTargetTopology, Topology},
};

/// ContinuousDelivery in Harmony provides this functionality :
@@ -138,25 +136,18 @@ impl<A: OCICompliant + HelmPackage> ContinuousDelivery<A> {
#[async_trait]
impl<
    A: OCICompliant + HelmPackage + Clone + 'static,
    T: Topology + HelmCommand + MultiTargetTopology + K8sclient + Ingress + 'static,
    T: Topology + HelmCommand + MultiTargetTopology + K8sclient + 'static,
> ApplicationFeature<T> for ContinuousDelivery<A>
{
    async fn ensure_installed(&self, topology: &T) -> Result<(), String> {
        let image = self.application.image_name();
        let domain = topology
            .get_domain(&self.application.name())
            .await
            .map_err(|e| e.to_string())?;

        // TODO Write CI/CD workflow files
        // we can autodetect the CI type using the remote url (default to github action for github
        // url, etc..)
        // Or ask for it when unknown

        let helm_chart = self
            .application
            .build_push_helm_package(&image, &domain)
            .await?;
        let helm_chart = self.application.build_push_helm_package(&image).await?;

        // TODO: Make building image configurable/skippable if image already exists (prompt)
        // https://git.nationtech.io/NationTech/harmony/issues/104
@@ -185,18 +176,18 @@ impl<
            }
            target => {
                info!("Deploying {} to target {target:?}", self.application.name());

                let score = ArgoHelmScore {
                    namespace: format!("{}", self.application.name()),
                    namespace: "harmony-example-rust-webapp".to_string(),
                    openshift: true,
                    domain: "argo.harmonydemo.apps.ncd0.harmony.mcd".to_string(),
                    argo_apps: vec![ArgoApplication::from(CDApplicationConfig {
                        // helm pull oci://hub.nationtech.io/harmony/harmony-example-rust-webapp-chart --version 0.1.0
                        version: Version::from("0.1.0").unwrap(),
                        helm_chart_repo_url: "hub.nationtech.io/harmony".to_string(),
                        helm_chart_name: format!("{}-chart", self.application.name()),
                        helm_chart_name: "harmony-example-rust-webapp-chart".to_string(),
                        values_overrides: None,
                        name: format!("{}", self.application.name()),
                        namespace: format!("{}", self.application.name()),
                        name: "harmony-demo-rust-webapp".to_string(),
                        namespace: "harmony-example-rust-webapp".to_string(),
                    })],
                };
                score
@@ -1,10 +1,7 @@
use async_trait::async_trait;
use kube::{Api, api::GroupVersionKind};
use log::{debug, warn};
use non_blank_string_rs::NonBlankString;
use serde::Serialize;
use serde::de::DeserializeOwned;
use std::{process::Command, str::FromStr, sync::Arc};
use std::str::FromStr;

use crate::{
    data::Version,
@@ -12,10 +9,7 @@ use crate::{
    inventory::Inventory,
    modules::helm::chart::{HelmChartScore, HelmRepository},
    score::Score,
    topology::{
        HelmCommand, K8sclient, PreparationError, PreparationOutcome, Topology, ingress::Ingress,
        k8s::K8sClient,
    },
    topology::{HelmCommand, K8sclient, Topology},
};
use harmony_types::id::Id;

@@ -25,13 +19,15 @@ use super::ArgoApplication;
pub struct ArgoHelmScore {
    pub namespace: String,
    pub openshift: bool,
    pub domain: String,
    pub argo_apps: Vec<ArgoApplication>,
}

impl<T: Topology + HelmCommand + K8sclient + Ingress> Score<T> for ArgoHelmScore {
impl<T: Topology + HelmCommand + K8sclient> Score<T> for ArgoHelmScore {
    fn create_interpret(&self) -> Box<dyn crate::interpret::Interpret<T>> {
        let helm_score = argo_helm_chart_score(&self.namespace, self.openshift, &self.domain);
        Box::new(ArgoInterpret {
            score: self.clone(),
            score: helm_score,
            argo_apps: self.argo_apps.clone(),
        })
    }
@@ -43,24 +39,20 @@ impl<T: Topology + HelmCommand + K8sclient + Ingress> Score<T> for ArgoHelmScore

#[derive(Debug)]
pub struct ArgoInterpret {
    score: ArgoHelmScore,
    score: HelmChartScore,
    argo_apps: Vec<ArgoApplication>,
}

#[async_trait]
impl<T: Topology + K8sclient + HelmCommand + Ingress> Interpret<T> for ArgoInterpret {
impl<T: Topology + K8sclient + HelmCommand> Interpret<T> for ArgoInterpret {
    async fn execute(
        &self,
        inventory: &Inventory,
        topology: &T,
    ) -> Result<Outcome, InterpretError> {
        self.score.interpret(inventory, topology).await?;

        let k8s_client = topology.k8s_client().await?;
        let domain = topology.get_domain("argo").await?;
        let helm_score =
            argo_helm_chart_score(&self.score.namespace, self.score.openshift, &domain);

        helm_score.interpret(inventory, topology).await?;

        k8s_client
            .apply_yaml_many(&self.argo_apps.iter().map(|a| a.to_yaml()).collect(), None)
            .await
@@ -93,38 +85,6 @@ impl<T: Topology + K8sclient + HelmCommand + Ingress> Interpret<T> for ArgoInter
    }
}

impl ArgoInterpret {
    pub async fn get_host_domain(
        &self,
        client: Arc<K8sClient>,
        openshift: bool,
    ) -> Result<String, InterpretError> {
        //This should be the job of the topology to determine if we are in
        //openshift, potentially we need on openshift topology the same way we create a
        //localhosttopology
        match openshift {
            true => {
                let gvk = GroupVersionKind {
                    group: "operator.openshift.io".into(),
                    version: "v1".into(),
                    kind: "IngressController".into(),
                };
                let ic = client
                    .get_resource_json_value("default", Some("openshift-ingress-operator"), &gvk)
                    .await?;

                match ic.data["status"]["domain"].as_str() {
                    Some(domain) => return Ok(domain.to_string()),
                    None => return Err(InterpretError::new("Could not find domain".to_string())),
                }
            }
            false => {
                todo!()
            }
        };
    }
}

pub fn argo_helm_chart_score(namespace: &str, openshift: bool, domain: &str) -> HelmChartScore {
    let values = format!(
        r#"
@@ -700,7 +660,7 @@ server:
    # nginx.ingress.kubernetes.io/ssl-passthrough: "true"

    # -- Defines which ingress controller will implement the resource
    ingressClassName: "openshift-default"
    ingressClassName: ""

    # -- Argo CD server hostname
    # @default -- `""` (defaults to global.domain)
@@ -1,5 +1,4 @@
mod endpoint;
pub mod rhob_monitoring;
pub use endpoint::*;

mod monitoring;
@@ -1,8 +1,10 @@
use std::sync::Arc;

use crate::modules::application::{Application, ApplicationFeature};
use crate::modules::monitoring::application_monitoring::application_monitoring_score::ApplicationMonitoringScore;
use crate::modules::monitoring::kube_prometheus::crd::crd_alertmanager_config::CRDPrometheus;

use crate::topology::MultiTargetTopology;
use crate::topology::ingress::Ingress;
use crate::{
    inventory::Inventory,
    modules::monitoring::{
@@ -17,12 +19,8 @@ use crate::{
};
use async_trait::async_trait;
use base64::{Engine as _, engine::general_purpose};
use harmony_secret::SecretManager;
use harmony_secret_derive::Secret;
use harmony_types::net::Url;
use log::{debug, info};
use serde::{Deserialize, Serialize};
use std::sync::Arc;

#[derive(Debug, Clone)]
pub struct Monitoring {
@@ -38,9 +36,8 @@ impl<
        + TenantManager
        + K8sclient
        + MultiTargetTopology
        + PrometheusApplicationMonitoring<CRDPrometheus>
        + Ingress
        + std::fmt::Debug,
        + std::fmt::Debug
        + PrometheusApplicationMonitoring<CRDPrometheus>,
> ApplicationFeature<T> for Monitoring
{
    async fn ensure_installed(&self, topology: &T) -> Result<(), String> {
@@ -50,7 +47,6 @@ impl<
            .await
            .map(|ns| ns.name.clone())
            .unwrap_or_else(|| self.application.name());
        let domain = topology.get_domain("ntfy").await.unwrap();

        let mut alerting_score = ApplicationMonitoringScore {
            sender: CRDPrometheus {
@@ -62,17 +58,19 @@ impl<
        };
        let ntfy = NtfyScore {
            namespace: namespace.clone(),
            host: domain,
            host: "ntfy.harmonydemo.apps.ncd0.harmony.mcd".to_string(),
        };
        ntfy.interpret(&Inventory::empty(), topology)
            .await
            .map_err(|e| e.to_string())?;

        let config = SecretManager::get_or_prompt::<NtfyAuth>().await.unwrap();

        let ntfy_default_auth_username = "harmony";
        let ntfy_default_auth_password = "harmony";
        let ntfy_default_auth_header = format!(
            "Basic {}",
            general_purpose::STANDARD.encode(format!("{}:{}", config.username, config.password))
            general_purpose::STANDARD.encode(format!(
                "{ntfy_default_auth_username}:{ntfy_default_auth_password}"
            ))
        );

        debug!("ntfy_default_auth_header: {ntfy_default_auth_header}");
@@ -102,17 +100,9 @@ impl<
            .interpret(&Inventory::empty(), topology)
            .await
            .map_err(|e| e.to_string())?;

        Ok(())
    }

    fn name(&self) -> String {
        "Monitoring".to_string()
    }
}

#[derive(Secret, Serialize, Deserialize, Clone, Debug)]
struct NtfyAuth {
    username: String,
    password: String,
}
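The hunk above is the core of the secrets change in this compare: the hard-coded ntfy credentials are replaced by a typed secret. A minimal sketch of the pattern as it appears in this diff; only `derive(Secret)` and `SecretManager::get_or_prompt` are shown by the diff itself, the surrounding function is illustrative:

```rust
use harmony_secret::SecretManager;
use harmony_secret_derive::Secret;
use serde::{Deserialize, Serialize};

// A typed secret: fields come from the secret store, or are prompted
// for interactively when missing (hence "get_or_prompt").
#[derive(Secret, Serialize, Deserialize, Clone, Debug)]
struct NtfyAuth {
    username: String,
    password: String,
}

// Hypothetical helper showing the call site, mirroring the Monitoring
// feature above: load the secret, then build the Basic auth header.
async fn ntfy_auth_header() -> String {
    use base64::{Engine as _, engine::general_purpose};
    let config = SecretManager::get_or_prompt::<NtfyAuth>().await.unwrap();
    format!(
        "Basic {}",
        general_purpose::STANDARD.encode(format!("{}:{}", config.username, config.password))
    )
}
```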
@@ -1,114 +0,0 @@
use std::sync::Arc;

use crate::modules::application::{Application, ApplicationFeature};
use crate::modules::monitoring::application_monitoring::application_monitoring_score::ApplicationMonitoringScore;
use crate::modules::monitoring::application_monitoring::rhobs_application_monitoring_score::ApplicationRHOBMonitoringScore;

use crate::modules::monitoring::kube_prometheus::crd::rhob_alertmanager_config::RHOBObservability;
use crate::topology::MultiTargetTopology;
use crate::topology::ingress::Ingress;
use crate::{
    inventory::Inventory,
    modules::monitoring::{
        alert_channel::webhook_receiver::WebhookReceiver, ntfy::ntfy::NtfyScore,
    },
    score::Score,
    topology::{HelmCommand, K8sclient, Topology, tenant::TenantManager},
};
use crate::{
    modules::prometheus::prometheus::PrometheusApplicationMonitoring,
    topology::oberservability::monitoring::AlertReceiver,
};
use async_trait::async_trait;
use base64::{Engine as _, engine::general_purpose};
use harmony_types::net::Url;
use log::{debug, info};

#[derive(Debug, Clone)]
pub struct RHOBMonitoring {
    pub application: Arc<dyn Application>,
    pub alert_receiver: Vec<Box<dyn AlertReceiver<RHOBObservability>>>,
}

#[async_trait]
impl<
    T: Topology
        + HelmCommand
        + 'static
        + TenantManager
        + K8sclient
        + MultiTargetTopology
        + Ingress
        + std::fmt::Debug
        + PrometheusApplicationMonitoring<RHOBObservability>,
> ApplicationFeature<T> for RHOBMonitoring
{
    async fn ensure_installed(&self, topology: &T) -> Result<(), String> {
        info!("Ensuring monitoring is available for application");
        let namespace = topology
            .get_tenant_config()
            .await
            .map(|ns| ns.name.clone())
            .unwrap_or_else(|| self.application.name());

        let mut alerting_score = ApplicationRHOBMonitoringScore {
            sender: RHOBObservability {
                namespace: namespace.clone(),
                client: topology.k8s_client().await.unwrap(),
            },
            application: self.application.clone(),
            receivers: self.alert_receiver.clone(),
        };
        let ntfy = NtfyScore {
            namespace: namespace.clone(),
            host: topology
                .get_domain("ntfy")
                .await
                .map_err(|e| format!("Could not get domain {e}"))?,
        };
        ntfy.interpret(&Inventory::empty(), topology)
            .await
            .map_err(|e| e.to_string())?;

        let ntfy_default_auth_username = "harmony";
        let ntfy_default_auth_password = "harmony";
        let ntfy_default_auth_header = format!(
            "Basic {}",
            general_purpose::STANDARD.encode(format!(
                "{ntfy_default_auth_username}:{ntfy_default_auth_password}"
            ))
        );

        debug!("ntfy_default_auth_header: {ntfy_default_auth_header}");

        let ntfy_default_auth_param = general_purpose::STANDARD
            .encode(ntfy_default_auth_header)
            .replace("=", "");

        debug!("ntfy_default_auth_param: {ntfy_default_auth_param}");

        let ntfy_receiver = WebhookReceiver {
            name: "ntfy-webhook".to_string(),
            url: Url::Url(
                url::Url::parse(
                    format!(
                        "http://ntfy.{}.svc.cluster.local/rust-web-app?auth={ntfy_default_auth_param}",
                        namespace.clone()
                    )
                    .as_str(),
                )
                .unwrap(),
            ),
        };

        alerting_score.receivers.push(Box::new(ntfy_receiver));
        alerting_score
            .interpret(&Inventory::empty(), topology)
            .await
            .map_err(|e| e.to_string())?;
        Ok(())
    }
    fn name(&self) -> String {
        "Monitoring".to_string()
    }
}
@@ -1,6 +1,7 @@
use super::Application;
use async_trait::async_trait;

use super::Application;

#[async_trait]
pub trait OCICompliant: Application {
    async fn build_push_oci_image(&self) -> Result<String, String>; // TODO consider using oci-spec and friends crates here
@@ -16,10 +17,5 @@ pub trait HelmPackage: Application {
    ///
    /// # Arguments
    /// * `image_url` - The full URL of the OCI container image to be used in the Deployment.
    /// * `domain` - The domain where the application is hosted.
    async fn build_push_helm_package(
        &self,
        image_url: &str,
        domain: &str,
    ) -> Result<String, String>;
    async fn build_push_helm_package(&self, image_url: &str) -> Result<String, String>;
}
@@ -1,4 +1,4 @@
use std::fs::{self};
use std::fs;
use std::path::{Path, PathBuf};
use std::process;
use std::sync::Arc;
@@ -12,11 +12,11 @@ use dockerfile_builder::instruction_builder::CopyBuilder;
use futures_util::StreamExt;
use log::{debug, info, log_enabled};
use serde::Serialize;
use tar::{Builder, Header};
use walkdir::WalkDir;
use tar::Archive;

use crate::config::{REGISTRY_PROJECT, REGISTRY_URL};
use crate::{score::Score, topology::Topology};
use harmony_types::net::Url;

use super::{Application, ApplicationFeature, ApplicationInterpret, HelmPackage, OCICompliant};

@@ -56,9 +56,9 @@ pub enum RustWebFramework {
#[derive(Debug, Clone, Serialize)]
pub struct RustWebapp {
pub name: String,
pub domain: Url,
/// The path to the root of the Rust project to be containerized.
pub project_root: PathBuf,
pub service_port: u32,
pub framework: Option<RustWebFramework>,
}

@@ -70,17 +70,12 @@ impl Application for RustWebapp {

#[async_trait]
impl HelmPackage for RustWebapp {
async fn build_push_helm_package(
&self,
image_url: &str,
domain: &str,
) -> Result<String, String> {
async fn build_push_helm_package(&self, image_url: &str) -> Result<String, String> {
info!("Starting Helm chart build and push for '{}'", self.name);

// 1. Create the Helm chart files on disk.
let chart_dir = self
.create_helm_chart_files(image_url, domain)
.await
.create_helm_chart_files(image_url)
.map_err(|e| format!("Failed to create Helm chart files: {}", e))?;
info!("Successfully created Helm chart files in {:?}", chart_dir);

@@ -163,100 +158,45 @@ impl RustWebapp {
image_name: &str,
) -> Result<String, Box<dyn std::error::Error>> {
debug!("Generating Dockerfile for '{}'", self.name);
let dockerfile = self.get_or_build_dockerfile();
let _dockerfile_path = self.build_dockerfile()?;

let docker = Docker::connect_with_socket_defaults().unwrap();

let quiet = !log_enabled!(log::Level::Debug);
match dockerfile

let build_image_options = bollard::query_parameters::BuildImageOptionsBuilder::default()
.dockerfile("Dockerfile.harmony")
.t(image_name)
.q(quiet)
.version(bollard::query_parameters::BuilderVersion::BuilderV1)
.platform("linux/x86_64");

let mut temp_tar_builder = tar::Builder::new(Vec::new());
temp_tar_builder
.append_dir_all("", self.project_root.clone())
.unwrap();
let archive = temp_tar_builder
.into_inner()
.expect("couldn't finish creating tar");
let archived_files = Archive::new(archive.as_slice())
.entries()
.unwrap()
.file_name()
.and_then(|os_str| os_str.to_str())
{
Some(path_str) => {
debug!("Building from dockerfile {}", path_str);
.map(|entry| entry.unwrap().path().unwrap().into_owned())
.collect::<Vec<_>>();

let tar_data = self
.create_deterministic_tar(&self.project_root.clone())
.await
.unwrap();
debug!("files in docker tar: {:#?}", archived_files);

let docker = Docker::connect_with_socket_defaults().unwrap();
let mut image_build_stream = docker.build_image(
build_image_options.build(),
None,
Some(body_full(archive.into())),
);

let build_image_options =
bollard::query_parameters::BuildImageOptionsBuilder::default()
.dockerfile(path_str)
.t(image_name)
.q(quiet)
.version(bollard::query_parameters::BuilderVersion::BuilderV1)
.platform("linux/x86_64");

let mut image_build_stream = docker.build_image(
build_image_options.build(),
None,
Some(body_full(tar_data.into())),
);

while let Some(msg) = image_build_stream.next().await {
debug!("Message: {msg:?}");
}

Ok(image_name.to_string())
}

None => Err(Box::new(std::io::Error::new(
std::io::ErrorKind::InvalidData,
"Path is not valid UTF-8",
))),
while let Some(msg) = image_build_stream.next().await {
debug!("Message: {msg:?}");
}
}

/// Normalizes timestamps and ignores files that would bust the Docker cache.
async fn create_deterministic_tar(
&self,
project_root: &std::path::Path,
) -> Result<Vec<u8>, Box<dyn std::error::Error>> {
debug!("building tar file from project root {:#?}", project_root);
let mut tar_data = Vec::new();
{
let mut builder = Builder::new(&mut tar_data);
let ignore_prefixes = [
"target",
".git",
".github",
".harmony_generated",
"harmony",
"node_modules",
];
let mut entries: Vec<_> = WalkDir::new(project_root)
.into_iter()
.filter_map(Result::ok)
.filter(|e| e.file_type().is_file())
.filter(|e| {
let rel_path = e.path().strip_prefix(project_root).unwrap();
!ignore_prefixes
.iter()
.any(|prefix| rel_path.starts_with(prefix))
})
.collect();
entries.sort_by_key(|e| e.path().to_owned());

for entry in entries {
let path = entry.path();
let rel_path = path.strip_prefix(project_root).unwrap();

let mut file = fs::File::open(path)?;
let mut header = Header::new_gnu();

header.set_size(entry.metadata()?.len());
header.set_mode(0o644);
header.set_mtime(0);
header.set_uid(0);
header.set_gid(0);

builder.append_data(&mut header, rel_path, &mut file)?;
}

builder.finish()?;
}
Ok(tar_data)
Ok(image_name.to_string())
}
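Since create_deterministic_tar exists to keep Docker's layer cache stable across rebuilds, the property worth checking is that the same input produces byte-identical archives. A standalone sketch of that invariant using only the tar crate already imported above; the file name and payload are made up:

```rust
// Builds the same entry twice with the normalized metadata the diff relies on
// (mtime 0, uid/gid 0, mode 0o644) and asserts the archives are byte-identical.
use tar::{Builder, Header};

fn tiny_tar(payload: &[u8]) -> Vec<u8> {
    let mut builder = Builder::new(Vec::new());
    let mut header = Header::new_gnu();
    header.set_size(payload.len() as u64);
    header.set_mode(0o644);
    header.set_mtime(0); // the normalized timestamp is what makes this reproducible
    header.set_uid(0);
    header.set_gid(0);
    builder
        .append_data(&mut header, "src/main.rs", payload)
        .unwrap();
    builder.into_inner().unwrap()
}

fn main() {
    assert_eq!(tiny_tar(b"fn main() {}"), tiny_tar(b"fn main() {}"));
    println!("archives are byte-identical");
}
```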

/// Tags and pushes a Docker image to the configured remote registry.
@@ -268,6 +208,8 @@ impl RustWebapp {

let docker = Docker::connect_with_socket_defaults().unwrap();

// let push_options = PushImageOptionsBuilder::new().tag(tag);

let mut push_image_stream = docker.push_image(
image_tag,
Some(PushImageOptionsBuilder::new().build()),
@@ -275,8 +217,6 @@ impl RustWebapp {
);

while let Some(msg) = push_image_stream.next().await {
// let msg = msg?;
// TODO this fails silently, for some reason bollard cannot push to hub.nationtech.io
debug!("Message: {msg:?}");
}

@@ -332,11 +272,8 @@ impl RustWebapp {
"groupadd -r appgroup && useradd -r -s /bin/false -g appgroup appuser",
));

dockerfile.push(ENV::from(format!(
"LEPTOS_SITE_ADDR=0.0.0.0:{}",
self.service_port
)));
dockerfile.push(EXPOSE::from(format!("{}/tcp", self.service_port)));
dockerfile.push(ENV::from("LEPTOS_SITE_ADDR=0.0.0.0:3000"));
dockerfile.push(EXPOSE::from("3000/tcp"));
dockerfile.push(WORKDIR::from("/home/appuser"));

// Copy static files
@@ -411,10 +348,9 @@ impl RustWebapp {
}

/// Creates all necessary files for a basic Helm chart.
async fn create_helm_chart_files(
fn create_helm_chart_files(
&self,
image_url: &str,
domain: &str,
) -> Result<PathBuf, Box<dyn std::error::Error>> {
let chart_name = format!("{}-chart", self.name);
let chart_dir = self
@@ -458,137 +394,132 @@ image:

service:
type: ClusterIP
port: {}
port: 3000

ingress:
enabled: true
# Annotations for cert-manager to handle SSL.
annotations:
cert-manager.io/cluster-issuer: "letsencrypt-prod"
# Add other annotations like nginx ingress class if needed
# kubernetes.io/ingress.class: nginx
hosts:
- host: {}
- host: chart-example.local
paths:
- path: /
pathType: ImplementationSpecific
tls:
- secretName: {}-tls
hosts:
- chart-example.local

"#,
chart_name, image_repo, image_tag, self.service_port, domain,
chart_name, image_repo, image_tag, self.name
);
fs::write(chart_dir.join("values.yaml"), values_yaml)?;

// Create templates/_helpers.tpl
let helpers_tpl = format!(
r#"
{{{{/*
let helpers_tpl = r#"
{{/*
Expand the name of the chart.
*/}}}}
{{{{- define "chart.name" -}}}}
{{{{- default .Chart.Name $.Values.nameOverride | trunc 63 | trimSuffix "-" }}}}
{{{{- end }}}}
*/}}
{{- define "chart.name" -}}
{{- default .Chart.Name $.Values.nameOverride | trunc 63 | trimSuffix "-" }}
{{- end }}

{{{{/*
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
*/}}}}
{{{{- define "chart.fullname" -}}}}
{{{{- $name := default .Chart.Name $.Values.nameOverride }}}}
{{{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}}}
{{{{- end }}}}
"#
);
*/}}
{{- define "chart.fullname" -}}
{{- $name := default .Chart.Name $.Values.nameOverride }}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
{{- end }}
"#;
fs::write(templates_dir.join("_helpers.tpl"), helpers_tpl)?;

// Create templates/service.yaml
let service_yaml = format!(
r#"
let service_yaml = r#"
apiVersion: v1
kind: Service
metadata:
name: {{{{ include "chart.fullname" . }}}}
name: {{ include "chart.fullname" . }}
spec:
type: {{{{ $.Values.service.type }}}}
type: {{ $.Values.service.type }}
ports:
- name: main
port: {{{{ $.Values.service.port | default {} }}}}
targetPort: {{{{ $.Values.service.port | default {} }}}}
port: {{ $.Values.service.port | default 3000 }}
targetPort: {{ $.Values.service.port | default 3000 }}
protocol: TCP
selector:
app: {{{{ include "chart.name" . }}}}
"#,
self.service_port, self.service_port
);
app: {{ include "chart.name" . }}
"#;
fs::write(templates_dir.join("service.yaml"), service_yaml)?;

// Create templates/deployment.yaml
let deployment_yaml = format!(
r#"
let deployment_yaml = r#"
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{{{ include "chart.fullname" . }}}}
name: {{ include "chart.fullname" . }}
spec:
replicas: {{{{ $.Values.replicaCount }}}}
replicas: {{ $.Values.replicaCount }}
selector:
matchLabels:
app: {{{{ include "chart.name" . }}}}
app: {{ include "chart.name" . }}
template:
metadata:
labels:
app: {{{{ include "chart.name" . }}}}
app: {{ include "chart.name" . }}
spec:
containers:
- name: {{{{ .Chart.Name }}}}
image: "{{{{ $.Values.image.repository }}}}:{{{{ $.Values.image.tag | default .Chart.AppVersion }}}}"
imagePullPolicy: {{{{ $.Values.image.pullPolicy }}}}
- name: {{ .Chart.Name }}
image: "{{ $.Values.image.repository }}:{{ $.Values.image.tag | default .Chart.AppVersion }}"
imagePullPolicy: {{ $.Values.image.pullPolicy }}
ports:
- name: main
containerPort: {{{{ $.Values.service.port | default {} }}}}
containerPort: {{ $.Values.service.port | default 3000 }}
protocol: TCP
"#,
self.service_port
);
"#;
fs::write(templates_dir.join("deployment.yaml"), deployment_yaml)?;

// Create templates/ingress.yaml
let ingress_yaml = format!(
r#"
{{{{- if $.Values.ingress.enabled -}}}}
let ingress_yaml = r#"
{{- if $.Values.ingress.enabled -}}
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: {{{{ include "chart.fullname" . }}}}
name: {{ include "chart.fullname" . }}
annotations:
{{{{- toYaml $.Values.ingress.annotations | nindent 4 }}}}
{{- toYaml $.Values.ingress.annotations | nindent 4 }}
spec:
{{{{- if $.Values.ingress.tls }}}}
{{- if $.Values.ingress.tls }}
tls:
{{{{- range $.Values.ingress.tls }}}}
{{- range $.Values.ingress.tls }}
- hosts:
{{{{- range .hosts }}}}
- {{{{ . | quote }}}}
{{{{- end }}}}
secretName: {{{{ .secretName }}}}
{{{{- end }}}}
{{{{- end }}}}
{{- range .hosts }}
- {{ . | quote }}
{{- end }}
secretName: {{ .secretName }}
{{- end }}
{{- end }}
rules:
{{{{- range $.Values.ingress.hosts }}}}
- host: {{{{ .host | quote }}}}
{{- range $.Values.ingress.hosts }}
- host: {{ .host | quote }}
http:
paths:
{{{{- range .paths }}}}
- path: {{{{ .path }}}}
pathType: {{{{ .pathType }}}}
{{- range .paths }}
- path: {{ .path }}
pathType: {{ .pathType }}
backend:
service:
name: {{{{ include "chart.fullname" $ }}}}
name: {{ include "chart.fullname" $ }}
port:
number: {{{{ $.Values.service.port | default {} }}}}
{{{{- end }}}}
{{{{- end }}}}
{{{{- end }}}}
"#,
self.service_port
);
number: {{ $.Values.service.port | default 3000 }}
{{- end }}
{{- end }}
{{- end }}
"#;
fs::write(templates_dir.join("ingress.yaml"), ingress_yaml)?;

Ok(chart_dir)
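A note on the brace churn above: inside format!, a literal { must be doubled, so a Helm expression like {{ .Values.x }} has to be written with four braces. Dropping format! in favor of plain raw strings, as this hunk does, removes both the escaping and the risk of a stray placeholder. A small demonstration:

```rust
// Why the removed templates were full of quadruple braces: format! treats `{`
// as a placeholder delimiter, so each literal brace must be doubled.
fn main() {
    let port = 3000;
    let with_format = format!("port: {{{{ $.Values.service.port | default {} }}}}", port);
    assert_eq!(with_format, "port: {{ $.Values.service.port | default 3000 }}");

    // With no runtime interpolation left, a plain raw string needs no escaping.
    let plain = r#"port: {{ $.Values.service.port | default 3000 }}"#;
    assert_eq!(with_format, plain);
}
```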
@@ -640,6 +571,7 @@ spec:
let chart_file_name = packaged_chart_path.file_stem().unwrap().to_str().unwrap();
let oci_push_url = format!("oci://{}/{}", *REGISTRY_URL, *REGISTRY_PROJECT);
let oci_pull_url = format!("{oci_push_url}/{}-chart", self.name);

debug!(
"Pushing Helm chart {} to {}",
packaged_chart_path.to_string_lossy(),
@@ -658,20 +590,4 @@ spec:
debug!("push url {oci_push_url}");
Ok(format!("{}:{}", oci_pull_url, version))
}

fn get_or_build_dockerfile(&self) -> Result<PathBuf, Box<dyn std::error::Error>> {
let existing_dockerfile = self.project_root.join("Dockerfile");

debug!("project_root = {:?}", self.project_root);

debug!("checking = {:?}", existing_dockerfile);
if existing_dockerfile.exists() {
debug!(
"Checking path {:#?} for existing Dockerfile",
self.project_root.clone()
);
return Ok(existing_dockerfile);
}
self.build_dockerfile()
}
}

@@ -153,10 +153,6 @@ impl<T: Topology + HelmCommand> Interpret<T> for HelmChartInterpret {
let yaml_path: Option<&Path> = match self.score.values_yaml.as_ref() {
Some(yaml_str) => {
tf = temp_file::with_contents(yaml_str.as_bytes());
debug!(
"values yaml string for chart {} :\n {yaml_str}",
self.score.chart_name
);
Some(tf.path())
}
None => None,

@@ -76,7 +76,7 @@ impl<T: Topology> Interpret<T> for DiscoverHostForRoleInterpret {
Ok(choice) => {
info!("Selected {} as the bootstrap node.", choice.summary());
host_repo
.save_role_mapping(&self.score.role, &choice)
.save_role_mapping(&HostRole::Bootstrap, &choice)
.await?;
host = choice;
break;

@@ -1,72 +0,0 @@
use async_trait::async_trait;
use harmony_types::id::Id;
use log::info;
use serde::{Deserialize, Serialize};
use strum::IntoEnumIterator;

use crate::{
data::Version,
infra::inventory::InventoryRepositoryFactory,
interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
inventory::{HostRole, Inventory},
score::Score,
topology::Topology,
};

#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct InspectInventoryScore {}

impl<T: Topology> Score<T> for InspectInventoryScore {
fn name(&self) -> String {
"InspectInventoryScore".to_string()
}

#[doc(hidden)]
fn create_interpret(&self) -> Box<dyn Interpret<T>> {
Box::new(InspectInventoryInterpret {})
}
}

#[derive(Debug)]
pub struct InspectInventoryInterpret;

#[async_trait]
impl<T: Topology> Interpret<T> for InspectInventoryInterpret {
async fn execute(
&self,
_inventory: &Inventory,
_topology: &T,
) -> Result<Outcome, InterpretError> {
let repo = InventoryRepositoryFactory::build().await?;
for role in HostRole::iter() {
info!("Inspecting hosts for role {role:?}");
let hosts = repo.get_host_for_role(&role).await?;
info!("Hosts with role {role:?} : {}", hosts.len());
hosts.iter().enumerate().for_each(|(idx, h)| {
info!(
"Found host index {idx} with role {role:?} => \n{}\n{}",
h.summary(),
h.parts_list()
)
});
}
Ok(Outcome::success(
"Inventory inspection complete".to_string(),
))
}
fn get_name(&self) -> InterpretName {
InterpretName::Custom("InspectInventoryInterpret")
}

fn get_version(&self) -> Version {
todo!()
}

fn get_status(&self) -> InterpretStatus {
todo!()
}

fn get_children(&self) -> Vec<Id> {
todo!()
}
}
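The deleted inspect module above walks every HostRole variant via strum's IntoEnumIterator. The pattern in isolation looks like this; the variant list is illustrative, not the real HostRole definition:

```rust
// Iterating all variants of an enum with strum, as InspectInventoryInterpret did.
use strum::IntoEnumIterator;
use strum_macros::EnumIter;

#[derive(Debug, EnumIter)]
enum HostRole {
    Bootstrap,
    ControlPlane,
    Worker,
}

fn main() {
    for role in HostRole::iter() {
        println!("Inspecting hosts for role {role:?}");
    }
}
```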
@@ -1,5 +1,4 @@
mod discovery;
pub mod inspect;
pub use discovery::*;

use async_trait::async_trait;

@@ -40,7 +40,6 @@ pub struct K8sIngressScore {
pub path: Option<IngressPath>,
pub path_type: Option<PathType>,
pub namespace: Option<fqdn::FQDN>,
pub ingress_class_name: Option<String>,
}

impl<T: Topology + K8sclient> Score<T> for K8sIngressScore {
@@ -55,18 +54,12 @@ impl<T: Topology + K8sclient> Score<T> for K8sIngressScore {
None => PathType::Prefix,
};

let ingress_class = match self.ingress_class_name.clone() {
Some(ingress_class_name) => ingress_class_name,
None => format!("\"default\""),
};

let ingress = json!(
{
"metadata": {
"name": self.name.to_string(),
},
"spec": {
"ingressClassName": ingress_class.as_str(),
"rules": [
{ "host": self.host.to_string(),
"http": {

@@ -147,7 +147,6 @@ impl<T: Topology + K8sclient + HelmCommand> Interpret<T> for LAMPInterpret {
port: 8080,
path: Some(ingress_path),
path_type: None,
ingress_class_name: None,
namespace: self
.get_namespace()
.map(|nbs| fqdn!(nbs.to_string().as_str())),

@@ -4,7 +4,6 @@ use std::collections::BTreeMap;
use async_trait::async_trait;
use k8s_openapi::api::core::v1::Secret;
use kube::api::ObjectMeta;
use log::debug;
use serde::Serialize;
use serde_json::json;
use serde_yaml::{Mapping, Value};
@@ -12,7 +11,6 @@ use serde_yaml::{Mapping, Value};
use crate::modules::monitoring::kube_prometheus::crd::crd_alertmanager_config::{
AlertmanagerConfig, AlertmanagerConfigSpec, CRDPrometheus,
};
use crate::modules::monitoring::kube_prometheus::crd::rhob_alertmanager_config::RHOBObservability;
use crate::{
interpret::{InterpretError, Outcome},
modules::monitoring::{
@@ -32,71 +30,6 @@ pub struct DiscordWebhook {
pub url: Url,
}

#[async_trait]
impl AlertReceiver<RHOBObservability> for DiscordWebhook {
async fn install(&self, sender: &RHOBObservability) -> Result<Outcome, InterpretError> {
let spec = crate::modules::monitoring::kube_prometheus::crd::rhob_alertmanager_config::AlertmanagerConfigSpec {
data: json!({
"route": {
"receiver": self.name,
},
"receivers": [
{
"name": self.name,
"webhookConfigs": [
{
"url": self.url,
}
]
}
]
}),
};

let alertmanager_configs = crate::modules::monitoring::kube_prometheus::crd::rhob_alertmanager_config::AlertmanagerConfig {
metadata: ObjectMeta {
name: Some(self.name.clone()),
labels: Some(std::collections::BTreeMap::from([(
"alertmanagerConfig".to_string(),
"enabled".to_string(),
)])),
namespace: Some(sender.namespace.clone()),
..Default::default()
},
spec,
};
debug!(
"alertmanager_configs yaml:\n{:#?}",
serde_yaml::to_string(&alertmanager_configs)
);
debug!(
"alert manager configs: \n{:#?}",
alertmanager_configs.clone()
);

sender
.client
.apply(&alertmanager_configs, Some(&sender.namespace))
.await?;
Ok(Outcome::success(format!(
"installed rhob-alertmanagerconfigs for {}",
self.name
)))
}

fn name(&self) -> String {
"webhook-receiver".to_string()
}

fn clone_box(&self) -> Box<dyn AlertReceiver<RHOBObservability>> {
Box::new(self.clone())
}

fn as_any(&self) -> &dyn Any {
self
}
}

#[async_trait]
impl AlertReceiver<CRDPrometheus> for DiscordWebhook {
async fn install(&self, sender: &CRDPrometheus) -> Result<Outcome, InterpretError> {

@@ -11,8 +11,8 @@ use crate::{
interpret::{InterpretError, Outcome},
modules::monitoring::{
kube_prometheus::{
crd::{
crd_alertmanager_config::CRDPrometheus, rhob_alertmanager_config::RHOBObservability,
crd::crd_alertmanager_config::{
AlertmanagerConfig, AlertmanagerConfigSpec, CRDPrometheus,
},
prometheus::{KubePrometheus, KubePrometheusReceiver},
types::{AlertChannelConfig, AlertManagerChannelConfig},
@@ -29,71 +29,10 @@ pub struct WebhookReceiver {
pub url: Url,
}

#[async_trait]
impl AlertReceiver<RHOBObservability> for WebhookReceiver {
async fn install(&self, sender: &RHOBObservability) -> Result<Outcome, InterpretError> {
let spec = crate::modules::monitoring::kube_prometheus::crd::rhob_alertmanager_config::AlertmanagerConfigSpec {
data: json!({
"route": {
"receiver": self.name,
},
"receivers": [
{
"name": self.name,
"webhookConfigs": [
{
"url": self.url,
}
]
}
]
}),
};

let alertmanager_configs = crate::modules::monitoring::kube_prometheus::crd::rhob_alertmanager_config::AlertmanagerConfig {
metadata: ObjectMeta {
name: Some(self.name.clone()),
labels: Some(std::collections::BTreeMap::from([(
"alertmanagerConfig".to_string(),
"enabled".to_string(),
)])),
namespace: Some(sender.namespace.clone()),
..Default::default()
},
spec,
};
debug!(
"alert manager configs: \n{:#?}",
alertmanager_configs.clone()
);

sender
.client
.apply(&alertmanager_configs, Some(&sender.namespace))
.await?;
Ok(Outcome::success(format!(
"installed rhob-alertmanagerconfigs for {}",
self.name
)))
}

fn name(&self) -> String {
"webhook-receiver".to_string()
}

fn clone_box(&self) -> Box<dyn AlertReceiver<RHOBObservability>> {
Box::new(self.clone())
}

fn as_any(&self) -> &dyn Any {
self
}
}

#[async_trait]
impl AlertReceiver<CRDPrometheus> for WebhookReceiver {
async fn install(&self, sender: &CRDPrometheus) -> Result<Outcome, InterpretError> {
let spec = crate::modules::monitoring::kube_prometheus::crd::crd_alertmanager_config::AlertmanagerConfigSpec {
let spec = AlertmanagerConfigSpec {
data: json!({
"route": {
"receiver": self.name,
@@ -111,7 +50,7 @@ impl AlertReceiver<CRDPrometheus> for WebhookReceiver {
}),
};

let alertmanager_configs = crate::modules::monitoring::kube_prometheus::crd::crd_alertmanager_config::AlertmanagerConfig {
let alertmanager_configs = AlertmanagerConfig {
metadata: ObjectMeta {
name: Some(self.name.clone()),
labels: Some(std::collections::BTreeMap::from([(
@@ -176,7 +115,6 @@ impl PrometheusReceiver for WebhookReceiver {
self.get_config().await
}
}

#[async_trait]
impl AlertReceiver<KubePrometheus> for WebhookReceiver {
async fn install(&self, sender: &KubePrometheus) -> Result<Outcome, InterpretError> {

@@ -1,2 +1 @@
pub mod application_monitoring_score;
pub mod rhobs_application_monitoring_score;

@@ -1,94 +0,0 @@
use std::sync::Arc;

use async_trait::async_trait;
use serde::Serialize;

use crate::{
data::Version,
interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
inventory::Inventory,
modules::{
application::Application,
monitoring::kube_prometheus::crd::{
crd_alertmanager_config::CRDPrometheus, rhob_alertmanager_config::RHOBObservability,
},
prometheus::prometheus::PrometheusApplicationMonitoring,
},
score::Score,
topology::{PreparationOutcome, Topology, oberservability::monitoring::AlertReceiver},
};
use harmony_types::id::Id;

#[derive(Debug, Clone, Serialize)]
pub struct ApplicationRHOBMonitoringScore {
pub sender: RHOBObservability,
pub application: Arc<dyn Application>,
pub receivers: Vec<Box<dyn AlertReceiver<RHOBObservability>>>,
}

impl<T: Topology + PrometheusApplicationMonitoring<RHOBObservability>> Score<T>
for ApplicationRHOBMonitoringScore
{
fn create_interpret(&self) -> Box<dyn Interpret<T>> {
Box::new(ApplicationRHOBMonitoringInterpret {
score: self.clone(),
})
}

fn name(&self) -> String {
format!(
"{} monitoring [ApplicationRHOBMonitoringScore]",
self.application.name()
)
}
}

#[derive(Debug)]
pub struct ApplicationRHOBMonitoringInterpret {
score: ApplicationRHOBMonitoringScore,
}

#[async_trait]
impl<T: Topology + PrometheusApplicationMonitoring<RHOBObservability>> Interpret<T>
for ApplicationRHOBMonitoringInterpret
{
async fn execute(
&self,
inventory: &Inventory,
topology: &T,
) -> Result<Outcome, InterpretError> {
let result = topology
.install_prometheus(
&self.score.sender,
inventory,
Some(self.score.receivers.clone()),
)
.await;

match result {
Ok(outcome) => match outcome {
PreparationOutcome::Success { details: _ } => {
Ok(Outcome::success("Prometheus installed".into()))
}
PreparationOutcome::Noop => Ok(Outcome::noop()),
},
Err(err) => Err(InterpretError::from(err)),
}
}

fn get_name(&self) -> InterpretName {
InterpretName::ApplicationMonitoring
}

fn get_version(&self) -> Version {
todo!()
}

fn get_status(&self) -> InterpretStatus {
todo!()
}

fn get_children(&self) -> Vec<Id> {
todo!()
}
}
@@ -7,15 +7,5 @@ pub mod crd_prometheuses;
pub mod grafana_default_dashboard;
pub mod grafana_operator;
pub mod prometheus_operator;
pub mod rhob_alertmanager_config;
pub mod rhob_alertmanagers;
pub mod rhob_cluster_observability_operator;
pub mod rhob_default_rules;
pub mod rhob_grafana;
pub mod rhob_monitoring_stack;
pub mod rhob_prometheus_rules;
pub mod rhob_prometheuses;
pub mod rhob_role;
pub mod rhob_service_monitor;
pub mod role;
pub mod service_monitor;

@@ -1,50 +0,0 @@
use std::sync::Arc;

use kube::CustomResource;
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};

use crate::topology::{
k8s::K8sClient,
oberservability::monitoring::{AlertReceiver, AlertSender},
};

#[derive(CustomResource, Serialize, Deserialize, Debug, Clone, JsonSchema)]
#[kube(
group = "monitoring.rhobs",
version = "v1alpha1",
kind = "AlertmanagerConfig",
plural = "alertmanagerconfigs",
namespaced
)]
pub struct AlertmanagerConfigSpec {
#[serde(flatten)]
pub data: serde_json::Value,
}

#[derive(Debug, Clone, Serialize)]
pub struct RHOBObservability {
pub namespace: String,
pub client: Arc<K8sClient>,
}

impl AlertSender for RHOBObservability {
fn name(&self) -> String {
"RHOBAlertManager".to_string()
}
}

impl Clone for Box<dyn AlertReceiver<RHOBObservability>> {
fn clone(&self) -> Self {
self.clone_box()
}
}

impl Serialize for Box<dyn AlertReceiver<RHOBObservability>> {
fn serialize<S>(&self, _serializer: S) -> Result<S::Ok, S::Error>
where
S: serde::Serializer,
{
todo!()
}
}
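The deleted CRD wrapper above owes its flexibility to #[serde(flatten)] over a serde_json::Value: the spec serializes to whatever JSON it was handed, so arbitrary Alertmanager routing config passes through without a typed schema. A minimal illustration of that flatten behavior:

```rust
// With `#[serde(flatten)] data: serde_json::Value`, the wrapper serializes to
// the inner object itself; there is no surrounding `data` key.
use serde::Serialize;
use serde_json::json;

#[derive(Serialize)]
struct AlertmanagerConfigSpec {
    #[serde(flatten)]
    data: serde_json::Value,
}

fn main() {
    let spec = AlertmanagerConfigSpec {
        data: json!({ "route": { "receiver": "webhook-receiver" } }),
    };
    let rendered = serde_json::to_string(&spec).unwrap();
    assert_eq!(rendered, r#"{"route":{"receiver":"webhook-receiver"}}"#);
}
```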
@@ -1,52 +0,0 @@
use kube::CustomResource;
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};

use super::crd_prometheuses::LabelSelector;

/// Rust CRD for `Alertmanager` from Prometheus Operator
#[derive(CustomResource, Serialize, Deserialize, Debug, Clone, JsonSchema)]
#[kube(
group = "monitoring.rhobs",
version = "v1",
kind = "Alertmanager",
plural = "alertmanagers",
namespaced
)]
#[serde(rename_all = "camelCase")]
pub struct AlertmanagerSpec {
/// Number of replicas for HA
pub replicas: i32,

/// Selectors for AlertmanagerConfig CRDs
#[serde(default, skip_serializing_if = "Option::is_none")]
pub alertmanager_config_selector: Option<LabelSelector>,

#[serde(default, skip_serializing_if = "Option::is_none")]
pub alertmanager_config_namespace_selector: Option<LabelSelector>,

/// Optional pod template metadata (annotations, labels)
#[serde(default, skip_serializing_if = "Option::is_none")]
pub pod_metadata: Option<LabelSelector>,

/// Optional topology spread settings
#[serde(default, skip_serializing_if = "Option::is_none")]
pub version: Option<String>,
}

impl Default for AlertmanagerSpec {
fn default() -> Self {
AlertmanagerSpec {
replicas: 1,

// Match all AlertmanagerConfigs in the same namespace
alertmanager_config_namespace_selector: None,

// Empty selector matches all AlertmanagerConfigs in that namespace
alertmanager_config_selector: Some(LabelSelector::default()),

pod_metadata: None,
version: None,
}
}
}
@@ -1,22 +0,0 @@
use std::str::FromStr;

use non_blank_string_rs::NonBlankString;

use crate::modules::helm::chart::HelmChartScore;
//TODO package chart or something for COO okd
pub fn rhob_cluster_observability_operator() -> HelmChartScore {
HelmChartScore {
namespace: None,
release_name: NonBlankString::from_str("").unwrap(),
chart_name: NonBlankString::from_str(
"oci://hub.nationtech.io/harmony/nt-prometheus-operator",
)
.unwrap(),
chart_version: None,
values_overrides: None,
values_yaml: None,
create_namespace: true,
install_only: true,
repository: None,
}
}
@@ -1,26 +0,0 @@
use crate::modules::{
monitoring::kube_prometheus::crd::rhob_prometheus_rules::Rule,
prometheus::alerts::k8s::{
deployment::alert_deployment_unavailable,
pod::{alert_container_restarting, alert_pod_not_ready, pod_failed},
pvc::high_pvc_fill_rate_over_two_days,
service::alert_service_down,
},
};

pub fn build_default_application_rules() -> Vec<Rule> {
let pod_failed: Rule = pod_failed().into();
let container_restarting: Rule = alert_container_restarting().into();
let pod_not_ready: Rule = alert_pod_not_ready().into();
let service_down: Rule = alert_service_down().into();
let deployment_unavailable: Rule = alert_deployment_unavailable().into();
let high_pvc_fill_rate: Rule = high_pvc_fill_rate_over_two_days().into();
vec![
pod_failed,
container_restarting,
pod_not_ready,
service_down,
deployment_unavailable,
high_pvc_fill_rate,
]
}
@@ -1,153 +0,0 @@
use std::collections::BTreeMap;

use kube::CustomResource;
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};

use crate::modules::monitoring::kube_prometheus::crd::rhob_prometheuses::LabelSelector;

#[derive(CustomResource, Serialize, Deserialize, Debug, Clone, JsonSchema)]
#[kube(
group = "grafana.integreatly.org",
version = "v1beta1",
kind = "Grafana",
plural = "grafanas",
namespaced
)]
#[serde(rename_all = "camelCase")]
pub struct GrafanaSpec {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub config: Option<GrafanaConfig>,

#[serde(default, skip_serializing_if = "Option::is_none")]
pub admin_user: Option<String>,

#[serde(default, skip_serializing_if = "Option::is_none")]
pub admin_password: Option<String>,

#[serde(default, skip_serializing_if = "Option::is_none")]
pub ingress: Option<GrafanaIngress>,

#[serde(default, skip_serializing_if = "Option::is_none")]
pub persistence: Option<GrafanaPersistence>,

#[serde(default, skip_serializing_if = "Option::is_none")]
pub resources: Option<ResourceRequirements>,
}

#[derive(Serialize, Deserialize, Debug, Clone, JsonSchema)]
#[serde(rename_all = "camelCase")]
pub struct GrafanaConfig {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub log: Option<GrafanaLogConfig>,

#[serde(default, skip_serializing_if = "Option::is_none")]
pub security: Option<GrafanaSecurityConfig>,
}

#[derive(Serialize, Deserialize, Debug, Clone, JsonSchema)]
#[serde(rename_all = "camelCase")]
pub struct GrafanaLogConfig {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub mode: Option<String>,

#[serde(default, skip_serializing_if = "Option::is_none")]
pub level: Option<String>,
}

#[derive(Serialize, Deserialize, Debug, Clone, JsonSchema)]
#[serde(rename_all = "camelCase")]
pub struct GrafanaSecurityConfig {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub admin_user: Option<String>,

#[serde(default, skip_serializing_if = "Option::is_none")]
pub admin_password: Option<String>,
}

#[derive(Serialize, Deserialize, Debug, Clone, JsonSchema)]
#[serde(rename_all = "camelCase")]
pub struct GrafanaIngress {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub enabled: Option<bool>,

#[serde(default, skip_serializing_if = "Option::is_none")]
pub hosts: Option<Vec<String>>,
}

#[derive(Serialize, Deserialize, Debug, Clone, JsonSchema)]
#[serde(rename_all = "camelCase")]
pub struct GrafanaPersistence {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub enabled: Option<bool>,

#[serde(default, skip_serializing_if = "Option::is_none")]
pub storage_class_name: Option<String>,

#[serde(default, skip_serializing_if = "Option::is_none")]
pub size: Option<String>,
}

// ------------------------------------------------------------------------------------------------

#[derive(CustomResource, Serialize, Deserialize, Debug, Clone, JsonSchema)]
#[kube(
group = "grafana.integreatly.org",
version = "v1beta1",
kind = "GrafanaDashboard",
plural = "grafanadashboards",
namespaced
)]
#[serde(rename_all = "camelCase")]
pub struct GrafanaDashboardSpec {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub resync_period: Option<String>,

pub instance_selector: LabelSelector,

pub json: String,
}

// ------------------------------------------------------------------------------------------------

#[derive(CustomResource, Serialize, Deserialize, Debug, Clone, JsonSchema)]
#[kube(
group = "grafana.integreatly.org",
version = "v1beta1",
kind = "GrafanaDatasource",
plural = "grafanadatasources",
namespaced
)]
#[serde(rename_all = "camelCase")]
pub struct GrafanaDatasourceSpec {
pub instance_selector: LabelSelector,

#[serde(default, skip_serializing_if = "Option::is_none")]
pub allow_cross_namespace_import: Option<bool>,

pub datasource: GrafanaDatasourceConfig,
}

#[derive(Serialize, Deserialize, Debug, Clone, JsonSchema)]
#[serde(rename_all = "camelCase")]
pub struct GrafanaDatasourceConfig {
pub access: String,
pub database: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub json_data: Option<BTreeMap<String, String>>,
pub name: String,
pub r#type: String,
pub url: String,
}

// ------------------------------------------------------------------------------------------------

#[derive(Serialize, Deserialize, Debug, Clone, JsonSchema, Default)]
#[serde(rename_all = "camelCase")]
pub struct ResourceRequirements {
#[serde(default, skip_serializing_if = "BTreeMap::is_empty")]
pub limits: BTreeMap<String, String>,

#[serde(default, skip_serializing_if = "BTreeMap::is_empty")]
pub requests: BTreeMap<String, String>,
}
@@ -1,43 +0,0 @@
use std::collections::BTreeMap;

use kube::CustomResource;
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};

use crate::modules::monitoring::kube_prometheus::crd::rhob_prometheuses::{
LabelSelector, PrometheusSpec,
};

/// MonitoringStack CRD for monitoring.rhobs/v1alpha1
#[derive(CustomResource, Serialize, Deserialize, Debug, Clone, JsonSchema)]
#[kube(
group = "monitoring.rhobs",
version = "v1alpha1",
kind = "MonitoringStack",
plural = "monitoringstacks",
namespaced
)]
#[serde(rename_all = "camelCase")]
pub struct MonitoringStackSpec {
/// Verbosity of logs (e.g. "debug", "info", "warn", "error").
#[serde(default, skip_serializing_if = "Option::is_none")]
pub log_level: Option<String>,

/// Retention period for Prometheus TSDB data (e.g. "1d").
#[serde(default, skip_serializing_if = "Option::is_none")]
pub retention: Option<String>,

/// Resource selector for workloads monitored by this stack.
#[serde(default, skip_serializing_if = "Option::is_none")]
pub resource_selector: Option<LabelSelector>,
}

impl Default for MonitoringStackSpec {
fn default() -> Self {
MonitoringStackSpec {
log_level: Some("info".into()),
retention: Some("7d".into()),
resource_selector: Some(LabelSelector::default()),
}
}
}
@@ -1,57 +0,0 @@
use std::collections::BTreeMap;

use kube::CustomResource;
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};

use crate::modules::monitoring::alert_rule::prometheus_alert_rule::PrometheusAlertRule;

#[derive(CustomResource, Debug, Serialize, Deserialize, Clone, JsonSchema)]
#[kube(
group = "monitoring.rhobs",
version = "v1",
kind = "PrometheusRule",
plural = "prometheusrules",
namespaced
)]
#[serde(rename_all = "camelCase")]
pub struct PrometheusRuleSpec {
pub groups: Vec<RuleGroup>,
}

#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)]
pub struct RuleGroup {
pub name: String,
pub rules: Vec<Rule>,
}

#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)]
#[serde(rename_all = "camelCase")]
pub struct Rule {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub alert: Option<String>,

#[serde(default, skip_serializing_if = "Option::is_none")]
pub expr: Option<String>,

#[serde(default, skip_serializing_if = "Option::is_none")]
pub for_: Option<String>,

#[serde(default, skip_serializing_if = "Option::is_none")]
pub labels: Option<std::collections::BTreeMap<String, String>>,

#[serde(default, skip_serializing_if = "Option::is_none")]
pub annotations: Option<std::collections::BTreeMap<String, String>>,
}

impl From<PrometheusAlertRule> for Rule {
fn from(value: PrometheusAlertRule) -> Self {
Rule {
alert: Some(value.alert),
expr: Some(value.expr),
for_: value.r#for,
labels: Some(value.labels.into_iter().collect::<BTreeMap<_, _>>()),
annotations: Some(value.annotations.into_iter().collect::<BTreeMap<_, _>>()),
}
}
}
@@ -1,118 +0,0 @@
use std::collections::BTreeMap;

use kube::CustomResource;
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};

use crate::modules::monitoring::kube_prometheus::types::Operator;

#[derive(CustomResource, Serialize, Deserialize, Debug, Clone, JsonSchema)]
#[kube(
group = "monitoring.rhobs",
version = "v1",
kind = "Prometheus",
plural = "prometheuses",
namespaced
)]
#[serde(rename_all = "camelCase")]
pub struct PrometheusSpec {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub alerting: Option<PrometheusSpecAlerting>,

pub service_account_name: String,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub service_monitor_namespace_selector: Option<LabelSelector>,

#[serde(default, skip_serializing_if = "Option::is_none")]
pub service_monitor_selector: Option<LabelSelector>,

#[serde(default, skip_serializing_if = "Option::is_none")]
pub service_discovery_role: Option<String>,

#[serde(default, skip_serializing_if = "Option::is_none")]
pub pod_monitor_selector: Option<LabelSelector>,

#[serde(default, skip_serializing_if = "Option::is_none")]
pub rule_selector: Option<LabelSelector>,

#[serde(default, skip_serializing_if = "Option::is_none")]
pub rule_namespace_selector: Option<LabelSelector>,
}

#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema, Default)]
#[serde(rename_all = "camelCase")]
pub struct NamespaceSelector {
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub match_names: Vec<String>,
}

/// Contains alerting configuration, specifically Alertmanager endpoints.
#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema, Default)]
pub struct PrometheusSpecAlerting {
#[serde(skip_serializing_if = "Option::is_none")]
pub alertmanagers: Option<Vec<AlertmanagerEndpoints>>,
}

/// Represents an Alertmanager endpoint configuration used by Prometheus.
#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema, Default)]
pub struct AlertmanagerEndpoints {
/// Name of the Alertmanager Service.
#[serde(skip_serializing_if = "Option::is_none")]
pub name: Option<String>,

/// Namespace of the Alertmanager Service.
#[serde(skip_serializing_if = "Option::is_none")]
pub namespace: Option<String>,

/// Port to access on the Alertmanager Service (e.g. "web").
#[serde(skip_serializing_if = "Option::is_none")]
pub port: Option<String>,

/// Scheme to use for connecting (e.g. "http").
#[serde(skip_serializing_if = "Option::is_none")]
pub scheme: Option<String>,
// Other fields like `tls_config`, `path_prefix`, etc., can be added if needed.
}

#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema, Default)]
#[serde(rename_all = "camelCase")]
pub struct LabelSelector {
#[serde(default, skip_serializing_if = "BTreeMap::is_empty")]
pub match_labels: BTreeMap<String, String>,

#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub match_expressions: Vec<LabelSelectorRequirement>,
}

#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)]
#[serde(rename_all = "camelCase")]
pub struct LabelSelectorRequirement {
pub key: String,
pub operator: Operator,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub values: Vec<String>,
}

impl Default for PrometheusSpec {
fn default() -> Self {
PrometheusSpec {
alerting: None,

service_account_name: "prometheus".into(),

// null means "only my namespace"
service_monitor_namespace_selector: None,

// empty selector means match all ServiceMonitors in that namespace
service_monitor_selector: Some(LabelSelector::default()),

service_discovery_role: Some("Endpoints".into()),

pod_monitor_selector: None,

rule_selector: None,

rule_namespace_selector: Some(LabelSelector::default()),
}
}
}
@@ -1,62 +0,0 @@
use k8s_openapi::api::{
core::v1::ServiceAccount,
rbac::v1::{PolicyRule, Role, RoleBinding, RoleRef, Subject},
};
use kube::api::ObjectMeta;

pub fn build_prom_role(role_name: String, namespace: String) -> Role {
Role {
metadata: ObjectMeta {
name: Some(role_name),
namespace: Some(namespace),
..Default::default()
},
rules: Some(vec![PolicyRule {
api_groups: Some(vec!["".into()]), // core API group
resources: Some(vec!["services".into(), "endpoints".into(), "pods".into()]),
verbs: vec!["get".into(), "list".into(), "watch".into()],
..Default::default()
}]),
}
}

pub fn build_prom_rolebinding(
role_name: String,
namespace: String,
service_account_name: String,
) -> RoleBinding {
RoleBinding {
metadata: ObjectMeta {
name: Some(format!("{}-rolebinding", role_name)),
namespace: Some(namespace.clone()),
..Default::default()
},
role_ref: RoleRef {
api_group: "rbac.authorization.k8s.io".into(),
kind: "Role".into(),
name: role_name,
},
subjects: Some(vec![Subject {
kind: "ServiceAccount".into(),
name: service_account_name,
namespace: Some(namespace.clone()),
..Default::default()
}]),
}
}

pub fn build_prom_service_account(
service_account_name: String,
namespace: String,
) -> ServiceAccount {
ServiceAccount {
automount_service_account_token: None,
image_pull_secrets: None,
metadata: ObjectMeta {
name: Some(service_account_name),
namespace: Some(namespace),
..Default::default()
},
secrets: None,
}
}
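The deleted rhob_role module built the least-privilege trio (Role, RoleBinding, ServiceAccount) Prometheus needs for endpoint discovery. Applying the three objects with a kube client would look roughly like this; the names, namespace handling, and error strategy are assumptions, since the real call sites were elsewhere:

```rust
// Hypothetical usage of the builders above with a kube client.
use k8s_openapi::api::core::v1::ServiceAccount;
use k8s_openapi::api::rbac::v1::{Role, RoleBinding};
use kube::{api::PostParams, Api, Client};

async fn apply_prom_rbac(client: Client, ns: &str) -> Result<(), kube::Error> {
    let role = build_prom_role("prometheus-role".into(), ns.into());
    let binding =
        build_prom_rolebinding("prometheus-role".into(), ns.into(), "prometheus".into());
    let sa = build_prom_service_account("prometheus".into(), ns.into());

    let pp = PostParams::default();
    Api::<Role>::namespaced(client.clone(), ns).create(&pp, &role).await?;
    Api::<RoleBinding>::namespaced(client.clone(), ns).create(&pp, &binding).await?;
    Api::<ServiceAccount>::namespaced(client, ns).create(&pp, &sa).await?;
    Ok(())
}
```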
@@ -1,87 +0,0 @@
use std::collections::HashMap;

use kube::CustomResource;
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};

use crate::modules::monitoring::kube_prometheus::types::{
HTTPScheme, MatchExpression, NamespaceSelector, Operator, Selector,
ServiceMonitor as KubeServiceMonitor, ServiceMonitorEndpoint,
};

/// This is the top-level struct for the ServiceMonitor Custom Resource.
/// The `#[derive(CustomResource)]` macro handles all the boilerplate for you,
/// including the `impl Resource`.
#[derive(CustomResource, Serialize, Deserialize, Debug, Clone, JsonSchema)]
#[kube(
group = "monitoring.rhobs",
version = "v1",
kind = "ServiceMonitor",
plural = "servicemonitors",
namespaced
)]
#[serde(rename_all = "camelCase")]
pub struct ServiceMonitorSpec {
/// A label selector to select services to monitor.
pub selector: Selector,

/// A list of endpoints on the selected services to be monitored.
pub endpoints: Vec<ServiceMonitorEndpoint>,

/// Selector to select which namespaces the Kubernetes Endpoints objects
/// are discovered from.
#[serde(default, skip_serializing_if = "Option::is_none")]
pub namespace_selector: Option<NamespaceSelector>,

/// The label to use to retrieve the job name from.
#[serde(default, skip_serializing_if = "Option::is_none")]
pub job_label: Option<String>,

/// Pod-based target labels to transfer from the Kubernetes Pod onto the target.
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub pod_target_labels: Vec<String>,

/// TargetLabels transfers labels on the Kubernetes Service object to the target.
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub target_labels: Vec<String>,
}

impl Default for ServiceMonitorSpec {
fn default() -> Self {
let labels = HashMap::new();
Self {
selector: Selector {
match_labels: { labels },
match_expressions: vec![MatchExpression {
key: "app.kubernetes.io/name".into(),
operator: Operator::Exists,
values: vec![],
}],
},
endpoints: vec![ServiceMonitorEndpoint {
port: Some("http".to_string()),
path: Some("/metrics".into()),
interval: Some("30s".into()),
scheme: Some(HTTPScheme::HTTP),
..Default::default()
}],
namespace_selector: None, // only the same namespace
job_label: Some("app".into()),
pod_target_labels: vec![],
target_labels: vec![],
}
}
}

impl From<KubeServiceMonitor> for ServiceMonitorSpec {
fn from(value: KubeServiceMonitor) -> Self {
Self {
selector: value.selector,
endpoints: value.endpoints,
namespace_selector: value.namespace_selector,
job_label: value.job_label,
pod_target_labels: value.pod_target_labels,
target_labels: value.target_labels,
}
}
}
@@ -45,12 +45,6 @@ service:

ingress:
enabled: {ingress_enabled}
hosts:
- host: {host}
paths:
- path: /
pathType: ImplementationSpecific


route:
enabled: {route_enabled}

@@ -100,7 +100,7 @@ When you can dig them, confirm to continue.
let repo = InventoryRepositoryFactory::build().await?;

while bootstrap_host.is_none() {
let hosts = repo.get_host_for_role(&HostRole::Bootstrap).await?;
let hosts = repo.get_host_for_role(HostRole::Bootstrap).await?;
bootstrap_host = hosts.into_iter().next().to_owned();
DiscoverHostForRoleScore {
role: HostRole::Bootstrap,

@@ -67,7 +67,7 @@ impl OKDSetup02BootstrapInterpret {
async fn get_bootstrap_node(&self) -> Result<PhysicalHost, InterpretError> {
let repo = InventoryRepositoryFactory::build().await?;
match repo
.get_host_for_role(&HostRole::Bootstrap)
.get_host_for_role(HostRole::Bootstrap)
.await?
.into_iter()
.next()
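Several hunks in this range change get_host_for_role to take HostRole by value instead of by reference, which is free for a small fieldless enum and drops a borrow at every call site. The pattern in miniature; all types here are illustrative stand-ins:

```rust
// A small Copy enum is cheaper to pass by value than to borrow.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum HostRole {
    Bootstrap,
    ControlPlane,
    Worker,
}

struct PhysicalHost {
    role: HostRole,
}

fn hosts_for_role(hosts: &[PhysicalHost], role: HostRole) -> Vec<&PhysicalHost> {
    hosts.iter().filter(|h| h.role == role).collect()
}

fn main() {
    let inventory = vec![PhysicalHost { role: HostRole::Bootstrap }];
    // No `&` needed at the call site any more.
    println!("{}", hosts_for_role(&inventory, HostRole::Bootstrap).len());
}
```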
@@ -371,7 +371,7 @@ impl Interpret<HAClusterTopology> for OKDSetup02BootstrapInterpret {
|
||||
self.prepare_ignition_files(inventory, topology).await?;
|
||||
self.render_per_mac_pxe(inventory, topology).await?;
|
||||
self.setup_bootstrap_load_balancer(inventory, topology)
|
||||
.await?;
|
||||
.await?;
|
||||
|
||||
// TODO https://docs.okd.io/latest/installing/installing_bare_metal/upi/installing-bare-metal.html#installation-user-provisioned-validating-dns_installing-bare-metal
|
||||
// self.validate_dns_config(inventory, topology).await?;
|
||||
|
||||
@@ -7,17 +7,19 @@ use log::{debug, info};
|
||||
use serde::Serialize;
|
||||
|
||||
use crate::{
|
||||
data::Version,
|
||||
hardware::PhysicalHost,
|
||||
infra::inventory::InventoryRepositoryFactory,
|
||||
interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
|
||||
inventory::{HostRole, Inventory},
|
||||
modules::{
|
||||
dhcp::DhcpHostBindingScore, http::IPxeMacBootFileScore,
|
||||
inventory::DiscoverHostForRoleScore, okd::templates::BootstrapIpxeTpl,
|
||||
dhcp::DhcpHostBindingScore,
|
||||
http::IPxeMacBootFileScore,
|
||||
inventory::DiscoverHostForRoleScore,
|
||||
okd::templates::BootstrapIpxeTpl,
|
||||
},
|
||||
score::Score,
|
||||
topology::{HAClusterTopology, HostBinding},
|
||||
data::Version,
|
||||
};
|
||||
// -------------------------------------------------------------------------------------------------
|
||||
// Step 03: Control Plane
|
||||
@@ -64,7 +66,7 @@ impl OKDSetup03ControlPlaneInterpret {
|
||||
) -> Result<Vec<PhysicalHost>, InterpretError> {
|
||||
const REQUIRED_HOSTS: usize = 3;
|
||||
let repo = InventoryRepositoryFactory::build().await?;
|
||||
let mut control_plane_hosts = repo.get_host_for_role(&HostRole::ControlPlane).await?;
|
||||
let mut control_plane_hosts = repo.get_host_for_role(HostRole::ControlPlane).await?;
|
||||
|
||||
while control_plane_hosts.len() < REQUIRED_HOSTS {
|
||||
info!(
|
||||
@@ -78,7 +80,7 @@ impl OKDSetup03ControlPlaneInterpret {
|
||||
}
|
||||
.interpret(inventory, topology)
|
||||
.await?;
|
||||
control_plane_hosts = repo.get_host_for_role(&HostRole::ControlPlane).await?;
|
||||
control_plane_hosts = repo.get_host_for_role(HostRole::ControlPlane).await?;
|
||||
}
|
||||
|
||||
if control_plane_hosts.len() < REQUIRED_HOSTS {
|
||||
@@ -89,10 +91,7 @@ impl OKDSetup03ControlPlaneInterpret {
|
||||
)))
|
||||
} else {
|
||||
// Take exactly the number of required hosts to ensure consistency.
|
||||
Ok(control_plane_hosts
|
||||
.into_iter()
|
||||
.take(REQUIRED_HOSTS)
|
||||
.collect())
|
||||
Ok(control_plane_hosts.into_iter().take(REQUIRED_HOSTS).collect())
|
||||
}
|
||||
}
|
||||
|
||||
@@ -122,7 +121,8 @@ impl OKDSetup03ControlPlaneInterpret {
|
||||
.map(|(logical_host, physical_host)| {
|
||||
info!(
|
||||
"Creating binding: Logical Host '{}' -> Physical Host ID '{}'",
|
||||
logical_host.name, physical_host.id
|
||||
logical_host.name,
|
||||
physical_host.id
|
||||
);
|
||||
HostBinding {
|
||||
logical_host: logical_host.clone(),
|
||||
@@ -187,15 +187,12 @@ impl OKDSetup03ControlPlaneInterpret {
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
|
||||
/// Prompts the user to reboot the target control plane nodes.
|
||||
async fn reboot_targets(&self, nodes: &Vec<PhysicalHost>) -> Result<(), InterpretError> {
|
||||
let node_ids: Vec<String> = nodes.iter().map(|n| n.id.to_string()).collect();
|
||||
info!(
|
||||
"[ControlPlane] Requesting reboot for control plane nodes: {:?}",
|
||||
node_ids
|
||||
);
|
||||
|
||||
info!("[ControlPlane] Requesting reboot for control plane nodes: {:?}", node_ids);
|
||||
|
||||
let confirmation = inquire::Confirm::new(
|
||||
&format!("Please reboot the {} control plane nodes ({}) to apply their PXE configuration. Press enter when ready.", nodes.len(), node_ids.join(", ")),
|
||||
)
|
||||
@@ -203,9 +200,7 @@ impl OKDSetup03ControlPlaneInterpret {
|
||||
.map_err(|e| InterpretError::new(format!("User prompt failed: {}", e)))?;
|
||||
|
||||
if !confirmation {
|
||||
return Err(InterpretError::new(
|
||||
"User aborted the operation.".to_string(),
|
||||
));
|
||||
return Err(InterpretError::new("User aborted the operation.".to_string()));
|
||||
}
|
||||
|
||||
Ok(())
|
||||
@@ -252,8 +247,7 @@ impl Interpret<HAClusterTopology> for OKDSetup03ControlPlaneInterpret {
|
||||
let nodes = self.get_nodes(inventory, topology).await?;
|
||||
|
||||
// 2. Create DHCP reservations for the control plane nodes.
|
||||
self.configure_host_binding(inventory, topology, &nodes)
|
||||
.await?;
|
||||
self.configure_host_binding(inventory, topology, &nodes).await?;
|
||||
|
||||
// 3. Create iPXE files for each control plane node to boot from the master ignition.
|
||||
self.configure_ipxe(inventory, topology, &nodes).await?;
|
||||
|
||||
@@ -1,11 +1,11 @@
// -------------------------------------------------------------------------------------------------
use std::{fmt::Write, path::PathBuf};
use async_trait::async_trait;
use derive_new::new;
use harmony_secret::SecretManager;
use harmony_types::id::Id;
use log::{debug, error, info, warn};
use serde::{Deserialize, Serialize};
use std::{fmt::Write, path::PathBuf};
use tokio::{fs::File, io::AsyncWriteExt, process::Command};

use crate::{

@@ -8,7 +8,7 @@ use crate::{
score::Score,
topology::{
BackendServer, HAClusterTopology, HealthCheck, HttpMethod, HttpStatusCode, LoadBalancer,
LoadBalancerService, SSL, Topology,
LoadBalancerService, Topology, SSL,
},
};

@@ -44,7 +44,7 @@ impl OKDBootstrapLoadBalancerScore {
"/readyz".to_string(),
HttpMethod::GET,
HttpStatusCode::Success2xx,
SSL::SSL,
SSL::SSL
)),
},
];

@@ -8,7 +8,7 @@ use crate::{
score::Score,
topology::{
BackendServer, HAClusterTopology, HealthCheck, HttpMethod, HttpStatusCode, LoadBalancer,
LoadBalancerService, SSL, Topology,
LoadBalancerService, Topology, SSL,
},
};

@@ -21,8 +21,8 @@ pub fn pod_failed() -> PrometheusAlertRule {
pub fn alert_container_restarting() -> PrometheusAlertRule {
PrometheusAlertRule {
alert: "ContainerRestarting".into(),
expr: "increase(kube_pod_container_status_restarts_total[30s]) > 3".into(),
r#for: Some("30s".into()),
expr: "increase(kube_pod_container_status_restarts_total[5m]) > 3".into(),
r#for: Some("5m".into()),
labels: HashMap::from([("severity".into(), "warning".into())]),
annotations: HashMap::from([
(
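The rule change above swaps the rate window and hold duration between `30s` and `5m`. With a typical 30s scrape interval, a 30s range vector rarely contains two samples, so `increase()` over it is effectively blind; a multi-minute window spans several scrapes and tolerates counter resets. A sketch of the rule as a parameterized constructor (the struct below is a hypothetical mirror of the one in this module):

```rust
use std::collections::HashMap;

// Hypothetical mirror of the module's rule struct, for illustration only.
struct PrometheusAlertRule {
    alert: String,
    expr: String,
    r#for: Option<String>,
    labels: HashMap<String, String>,
    annotations: HashMap<String, String>,
}

fn container_restarting(window: &str, threshold: u32) -> PrometheusAlertRule {
    PrometheusAlertRule {
        alert: "ContainerRestarting".into(),
        // The range must cover several scrape intervals for increase()
        // to have enough samples to work with.
        expr: format!(
            "increase(kube_pod_container_status_restarts_total[{window}]) > {threshold}"
        ),
        r#for: Some(window.into()),
        labels: HashMap::from([("severity".into(), "warning".into())]),
        annotations: HashMap::new(),
    }
}
```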
@@ -197,6 +197,11 @@ impl K8sPrometheusCRDAlertingInterpret {
}

async fn ensure_grafana_operator(&self) -> Result<Outcome, InterpretError> {
if self.crd_exists("grafanas.grafana.integreatly.org").await {
debug!("grafana CRDs already exist — skipping install.");
return Ok(Outcome::success("Grafana CRDs already exist".to_string()));
}

let _ = Command::new("helm")
.args([
"repo",
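The early return above makes the Grafana operator install idempotent: when the CRDs are already registered, the helm repo/install steps are skipped entirely. The `crd_exists` helper seen later in this diff shells out to `kubectl get crd -A | grep -i {crd}`, which also matches substrings of unrelated CRD names; a stricter sketch asks kubectl for the CRD by its full name and relies on the exit status (`kubectl get crd <name>` exits non-zero when the CRD is absent):

```rust
use std::process::Command;

// Exact-name check: no grep, no substring false positives; the kubectl
// exit status is authoritative.
fn crd_exists(crd_full_name: &str) -> bool {
    Command::new("kubectl")
        .args(["get", "crd", crd_full_name])
        .status()
        .map(|status| status.success())
        .unwrap_or(false)
}
```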
@@ -2,4 +2,3 @@ pub mod alerts;
pub mod k8s_prometheus_alerting_score;
#[allow(clippy::module_inception)]
pub mod prometheus;
pub mod rhob_alerting_score;

@@ -1,535 +0,0 @@
use fqdn::fqdn;
use std::fs;
use std::{collections::BTreeMap, sync::Arc};
use tempfile::tempdir;

use async_trait::async_trait;
use kube::api::ObjectMeta;
use log::{debug, info};
use serde::Serialize;
use std::process::Command;

use crate::modules::k8s::ingress::{K8sIngressScore, PathType};
use crate::modules::monitoring::kube_prometheus::crd::grafana_default_dashboard::build_default_dashboard;
use crate::modules::monitoring::kube_prometheus::crd::rhob_alertmanager_config::RHOBObservability;
use crate::modules::monitoring::kube_prometheus::crd::rhob_alertmanagers::{
Alertmanager, AlertmanagerSpec,
};
use crate::modules::monitoring::kube_prometheus::crd::rhob_grafana::{
Grafana, GrafanaDashboard, GrafanaDashboardSpec, GrafanaDatasource, GrafanaDatasourceConfig,
GrafanaDatasourceSpec, GrafanaSpec,
};
use crate::modules::monitoring::kube_prometheus::crd::rhob_monitoring_stack::{
MonitoringStack, MonitoringStackSpec,
};
use crate::modules::monitoring::kube_prometheus::crd::rhob_prometheus_rules::{
PrometheusRule, PrometheusRuleSpec, RuleGroup,
};
use crate::modules::monitoring::kube_prometheus::crd::rhob_prometheuses::{
AlertmanagerEndpoints, LabelSelector, PrometheusSpec, PrometheusSpecAlerting,
};

use crate::modules::monitoring::kube_prometheus::crd::rhob_role::{
build_prom_role, build_prom_rolebinding, build_prom_service_account,
};
use crate::modules::monitoring::kube_prometheus::crd::rhob_service_monitor::{
ServiceMonitor, ServiceMonitorSpec,
};
use crate::score::Score;
use crate::topology::ingress::Ingress;
use crate::topology::oberservability::monitoring::AlertReceiver;
use crate::topology::{K8sclient, Topology, k8s::K8sClient};
use crate::{
data::Version,
interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
inventory::Inventory,
};
use harmony_types::id::Id;

use super::prometheus::PrometheusApplicationMonitoring;

#[derive(Clone, Debug, Serialize)]
pub struct RHOBAlertingScore {
pub sender: RHOBObservability,
pub receivers: Vec<Box<dyn AlertReceiver<RHOBObservability>>>,
pub service_monitors: Vec<ServiceMonitor>,
pub prometheus_rules: Vec<RuleGroup>,
}

impl<T: Topology + K8sclient + Ingress + PrometheusApplicationMonitoring<RHOBObservability>>
Score<T> for RHOBAlertingScore
{
fn create_interpret(&self) -> Box<dyn crate::interpret::Interpret<T>> {
Box::new(RHOBAlertingInterpret {
sender: self.sender.clone(),
receivers: self.receivers.clone(),
service_monitors: self.service_monitors.clone(),
prometheus_rules: self.prometheus_rules.clone(),
})
}

fn name(&self) -> String {
"RHOB alerting [RHOBAlertingScore]".into()
}
}

#[derive(Clone, Debug)]
pub struct RHOBAlertingInterpret {
pub sender: RHOBObservability,
pub receivers: Vec<Box<dyn AlertReceiver<RHOBObservability>>>,
pub service_monitors: Vec<ServiceMonitor>,
pub prometheus_rules: Vec<RuleGroup>,
}

#[async_trait]
impl<T: Topology + K8sclient + Ingress + PrometheusApplicationMonitoring<RHOBObservability>>
Interpret<T> for RHOBAlertingInterpret
{
async fn execute(
&self,
inventory: &Inventory,
topology: &T,
) -> Result<Outcome, InterpretError> {
let client = topology.k8s_client().await.unwrap();
self.ensure_grafana_operator().await?;
self.install_prometheus(inventory, topology, &client)
.await?;
self.install_client_kube_metrics().await?;
self.install_grafana(inventory, topology, &client).await?;
self.install_receivers(&self.sender, &self.receivers)
.await?;
self.install_rules(&self.prometheus_rules, &client).await?;
self.install_monitors(self.service_monitors.clone(), &client)
.await?;
Ok(Outcome::success(
"K8s monitoring components installed".to_string(),
))
}

fn get_name(&self) -> InterpretName {
InterpretName::RHOBAlerting
}

fn get_version(&self) -> Version {
todo!()
}

fn get_status(&self) -> InterpretStatus {
todo!()
}

fn get_children(&self) -> Vec<Id> {
todo!()
}
}

impl RHOBAlertingInterpret {
async fn crd_exists(&self, crd: &str) -> bool {
let status = Command::new("sh")
.args(["-c", &format!("kubectl get crd -A | grep -i {crd}")])
.status()
.map_err(|e| InterpretError::new(format!("could not connect to cluster: {}", e)))
.unwrap();

status.success()
}

async fn install_chart(
&self,
chart_path: String,
chart_name: String,
) -> Result<(), InterpretError> {
let temp_dir =
tempdir().map_err(|e| InterpretError::new(format!("Tempdir error: {}", e)))?;
let temp_path = temp_dir.path().to_path_buf();
debug!("Using temp directory: {}", temp_path.display());
let chart = format!("{}/{}", chart_path, chart_name);
let pull_output = Command::new("helm")
.args(["pull", &chart, "--destination", temp_path.to_str().unwrap()])
.output()
.map_err(|e| InterpretError::new(format!("Helm pull error: {}", e)))?;

if !pull_output.status.success() {
return Err(InterpretError::new(format!(
"Helm pull failed: {}",
String::from_utf8_lossy(&pull_output.stderr)
)));
}

let tgz_path = fs::read_dir(&temp_path)
.unwrap()
.filter_map(|entry| {
let entry = entry.ok()?;
let path = entry.path();
if path.extension()? == "tgz" {
Some(path)
} else {
None
}
})
.next()
.ok_or_else(|| InterpretError::new("Could not find pulled Helm chart".into()))?;

debug!("Installing chart from: {}", tgz_path.display());

let install_output = Command::new("helm")
.args([
"upgrade",
"--install",
&chart_name,
tgz_path.to_str().unwrap(),
"--namespace",
&self.sender.namespace.clone(),
"--create-namespace",
"--wait",
"--atomic",
])
.output()
.map_err(|e| InterpretError::new(format!("Helm install error: {}", e)))?;

if !install_output.status.success() {
return Err(InterpretError::new(format!(
"Helm install failed: {}",
String::from_utf8_lossy(&install_output.stderr)
)));
}

debug!(
"Installed chart {}/{} in namespace: {}",
&chart_path,
&chart_name,
self.sender.namespace.clone()
);
Ok(())
}

async fn ensure_grafana_operator(&self) -> Result<Outcome, InterpretError> {
let _ = Command::new("helm")
.args([
"repo",
"add",
"grafana-operator",
"https://grafana.github.io/helm-charts",
])
.output()
.unwrap();

let _ = Command::new("helm")
.args(["repo", "update"])
.output()
.unwrap();

let output = Command::new("helm")
.args([
"upgrade",
"--install",
"grafana-operator",
"grafana-operator/grafana-operator",
"--namespace",
&self.sender.namespace.clone(),
"--create-namespace",
"--set",
"namespaceScope=true",
])
.output()
.unwrap();

if !output.status.success() {
return Err(InterpretError::new(format!(
"helm upgrade --install failed:\nstdout: {}\nstderr: {}",
String::from_utf8_lossy(&output.stdout),
String::from_utf8_lossy(&output.stderr)
)));
}

Ok(Outcome::success(format!(
"installed grafana operator in ns {}",
self.sender.namespace.clone()
)))
}

async fn install_prometheus<T: Topology + K8sclient + Ingress>(
&self,
inventory: &Inventory,
topology: &T,
client: &Arc<K8sClient>,
) -> Result<Outcome, InterpretError> {
debug!(
"installing crd-prometheuses in namespace {}",
self.sender.namespace.clone()
);
debug!("building role/rolebinding/serviceaccount for crd-prometheus");

let stack = MonitoringStack {
metadata: ObjectMeta {
name: Some(format!("{}-monitoring", self.sender.namespace.clone()).into()),
namespace: Some(self.sender.namespace.clone()),
labels: Some([("monitoring-stack".into(), "true".into())].into()),
..Default::default()
},
spec: MonitoringStackSpec {
log_level: Some("debug".into()),
retention: Some("1d".into()),
resource_selector: Some(LabelSelector {
match_labels: Default::default(),
match_expressions: vec![],
}),
},
};

client
.apply(&stack, Some(&self.sender.namespace.clone()))
.await
.map_err(|e| InterpretError::new(e.to_string()))?;

let alert_manager_domain = topology
.get_domain(&format!("alert-manager-{}", self.sender.namespace.clone()))
.await?;
let name = format!("{}-alert-manager", self.sender.namespace.clone());
let backend_service = format!("alertmanager-operated");
let namespace = self.sender.namespace.clone();
let alert_manager_ingress = K8sIngressScore {
name: fqdn!(&name),
host: fqdn!(&alert_manager_domain),
backend_service: fqdn!(&backend_service),
port: 9093,
path: Some("/".to_string()),
path_type: Some(PathType::Prefix),
namespace: Some(fqdn!(&namespace)),
ingress_class_name: Some("openshift-default".to_string()),
};

let prometheus_domain = topology
.get_domain(&format!("prometheus-{}", self.sender.namespace.clone()))
.await?;
let name = format!("{}-prometheus", self.sender.namespace.clone());
let backend_service = format!("prometheus-operated");
let prometheus_ingress = K8sIngressScore {
name: fqdn!(&name),
host: fqdn!(&prometheus_domain),
backend_service: fqdn!(&backend_service),
port: 9090,
path: Some("/".to_string()),
path_type: Some(PathType::Prefix),
namespace: Some(fqdn!(&namespace)),
ingress_class_name: Some("openshift-default".to_string()),
};

alert_manager_ingress.interpret(inventory, topology).await?;
prometheus_ingress.interpret(inventory, topology).await?;
info!("installed rhob monitoring stack",);
Ok(Outcome::success(format!(
"successfully deployed rhob-prometheus {:#?}",
stack
)))
}

async fn install_monitors(
&self,
mut monitors: Vec<ServiceMonitor>,
client: &Arc<K8sClient>,
) -> Result<Outcome, InterpretError> {
let default_service_monitor = ServiceMonitor {
metadata: ObjectMeta {
name: Some(self.sender.namespace.clone()),
labels: Some(std::collections::BTreeMap::from([
("alertmanagerConfig".to_string(), "enabled".to_string()),
("client".to_string(), "prometheus".to_string()),
(
"app.kubernetes.io/name".to_string(),
"kube-state-metrics".to_string(),
),
])),
namespace: Some(self.sender.namespace.clone()),
..Default::default()
},
spec: ServiceMonitorSpec::default(),
};
monitors.push(default_service_monitor);
for monitor in monitors.iter() {
client
.apply(monitor, Some(&self.sender.namespace.clone()))
.await
.map_err(|e| InterpretError::new(e.to_string()))?;
}
Ok(Outcome::success(
"succesfully deployed service monitors".to_string(),
))
}

async fn install_rules(
&self,
#[allow(clippy::ptr_arg)] rules: &Vec<RuleGroup>,
client: &Arc<K8sClient>,
) -> Result<Outcome, InterpretError> {
let mut prom_rule_spec = PrometheusRuleSpec {
groups: rules.clone(),
};

let default_rules_group = RuleGroup {
name: "default-rules".to_string(),
rules: crate::modules::monitoring::kube_prometheus::crd::rhob_default_rules::build_default_application_rules(),
};

prom_rule_spec.groups.push(default_rules_group);
let prom_rules = PrometheusRule {
metadata: ObjectMeta {
name: Some(self.sender.namespace.clone()),
labels: Some(std::collections::BTreeMap::from([
("alertmanagerConfig".to_string(), "enabled".to_string()),
("role".to_string(), "prometheus-rule".to_string()),
])),
namespace: Some(self.sender.namespace.clone()),
..Default::default()
},
spec: prom_rule_spec,
};
client
.apply(&prom_rules, Some(&self.sender.namespace.clone()))
.await
.map_err(|e| InterpretError::new(e.to_string()))?;
Ok(Outcome::success(format!(
"successfully deployed rules {:#?}",
prom_rules.metadata.name
)))
}

async fn install_client_kube_metrics(&self) -> Result<Outcome, InterpretError> {
self.install_chart(
"oci://hub.nationtech.io/harmony".to_string(),
"nt-kube-metrics".to_string(),
)
.await?;
Ok(Outcome::success(format!(
"Installed client kube metrics in ns {}",
&self.sender.namespace.clone()
)))
}

async fn install_grafana<T: Topology + K8sclient + Ingress>(
&self,
inventory: &Inventory,
topology: &T,
client: &Arc<K8sClient>,
) -> Result<Outcome, InterpretError> {
let mut label = BTreeMap::new();
label.insert("dashboards".to_string(), "grafana".to_string());
let labels = LabelSelector {
match_labels: label.clone(),
match_expressions: vec![],
};
let mut json_data = BTreeMap::new();
json_data.insert("timeInterval".to_string(), "5s".to_string());
let namespace = self.sender.namespace.clone();

let json = build_default_dashboard(&namespace);

let graf_data_source = GrafanaDatasource {
metadata: ObjectMeta {
name: Some(format!(
"grafana-datasource-{}",
self.sender.namespace.clone()
)),
namespace: Some(self.sender.namespace.clone()),
..Default::default()
},
spec: GrafanaDatasourceSpec {
instance_selector: labels.clone(),
allow_cross_namespace_import: Some(false),
datasource: GrafanaDatasourceConfig {
access: "proxy".to_string(),
database: Some("prometheus".to_string()),
json_data: Some(json_data),
//this is fragile
name: format!("prometheus-{}-0", self.sender.namespace.clone()),
r#type: "prometheus".to_string(),
url: format!(
"http://prometheus-operated.{}.svc.cluster.local:9090",
self.sender.namespace.clone()
),
},
},
};

client
.apply(&graf_data_source, Some(&self.sender.namespace.clone()))
.await
.map_err(|e| InterpretError::new(e.to_string()))?;

let graf_dashboard = GrafanaDashboard {
metadata: ObjectMeta {
name: Some(format!(
"grafana-dashboard-{}",
self.sender.namespace.clone()
)),
namespace: Some(self.sender.namespace.clone()),
..Default::default()
},
spec: GrafanaDashboardSpec {
resync_period: Some("30s".to_string()),
instance_selector: labels.clone(),
json,
},
};

client
.apply(&graf_dashboard, Some(&self.sender.namespace.clone()))
.await
.map_err(|e| InterpretError::new(e.to_string()))?;

let grafana = Grafana {
metadata: ObjectMeta {
name: Some(format!("grafana-{}", self.sender.namespace.clone())),
namespace: Some(self.sender.namespace.clone()),
labels: Some(label.clone()),
..Default::default()
},
spec: GrafanaSpec {
config: None,
admin_user: None,
admin_password: None,
ingress: None,
persistence: None,
resources: None,
},
};
client
.apply(&grafana, Some(&self.sender.namespace.clone()))
.await
.map_err(|e| InterpretError::new(e.to_string()))?;
let domain = topology
.get_domain(&format!("grafana-{}", self.sender.namespace.clone()))
.await?;
let name = format!("{}-grafana", self.sender.namespace.clone());
let backend_service = format!("grafana-{}-service", self.sender.namespace.clone());
let grafana_ingress = K8sIngressScore {
name: fqdn!(&name),
host: fqdn!(&domain),
backend_service: fqdn!(&backend_service),
port: 3000,
path: Some("/".to_string()),
path_type: Some(PathType::Prefix),
namespace: Some(fqdn!(&namespace)),
ingress_class_name: Some("openshift-default".to_string()),
};

grafana_ingress.interpret(inventory, topology).await?;
Ok(Outcome::success(format!(
"successfully deployed grafana instance {:#?}",
grafana.metadata.name
)))
}

async fn install_receivers(
&self,
sender: &RHOBObservability,
receivers: &Vec<Box<dyn AlertReceiver<RHOBObservability>>>,
) -> Result<Outcome, InterpretError> {
for receiver in receivers.iter() {
receiver.install(sender).await.map_err(|err| {
InterpretError::new(format!("failed to install receiver: {}", err))
})?;
}
Ok(Outcome::success("successfully deployed receivers".into()))
}
}
@@ -15,7 +15,6 @@ serde = "1.0.217"
serde_yaml = "0.9.34"
syn = "2.0.90"
cidr.workspace = true
url.workspace = true

[dev-dependencies]
serde = { version = "1.0.217", features = ["derive"] }

@@ -145,71 +145,3 @@ pub fn cidrv4(input: TokenStream) -> TokenStream {

panic!("Invalid IPv4 CIDR : {}", cidr_str);
}

/// Creates a `harmony_types::net::Url::Url` from a string literal.
///
/// This macro parses the input string as a URL at compile time and will cause a
/// compilation error if the string is not a valid URL.
///
/// # Example
///
/// ```
/// use harmony_types::net::Url;
/// use harmony_macros::hurl;
///
/// let url = hurl!("https://example.com/path");
///
/// let expected_url = url::Url::parse("https://example.com/path").unwrap();
/// assert!(matches!(url, Url::Url(expected_url)));
/// ```
///
/// The following example will fail to compile:
///
/// ```rust,compile_fail
/// use harmony_macros::hurl;
///
/// // This is not a valid URL and will cause a compilation error.
/// let _invalid = hurl!("not a valid url");
/// ```
#[proc_macro]
pub fn hurl(input: TokenStream) -> TokenStream {
let input_lit = parse_macro_input!(input as LitStr);
let url_str = input_lit.value();

match ::url::Url::parse(&url_str) {
Ok(_) => {
let expanded = quote! {
::harmony_types::net::Url::Url(::url::Url::parse(#input_lit).unwrap())
};
TokenStream::from(expanded)
}
Err(e) => {
let err_msg = format!("Invalid URL: {e}");
syn::Error::new(input_lit.span(), err_msg)
.to_compile_error()
.into()
}
}
}

/// Creates a `harmony_types::net::Url::LocalFolder` from a string literal.
///
/// # Example
///
/// ```
/// use harmony_types::net::Url;
/// use harmony_macros::local_folder;
///
/// let local_path = local_folder!("/var/data/files");
///
/// let expected_path = String::from("/var/data/files");
/// assert!(matches!(local_path, Url::LocalFolder(expected_path)));
/// ```
#[proc_macro]
pub fn local_folder(input: TokenStream) -> TokenStream {
let input_lit = parse_macro_input!(input as LitStr);
let expanded = quote! {
::harmony_types::net::Url::LocalFolder(#input_lit.to_string())
};
TokenStream::from(expanded)
}

@@ -120,26 +120,10 @@ impl SecretManager {

let ns = &manager.namespace;
let key = T::KEY;
let secret_json = inquire::Editor::new(&format!(
"Secret not found for {ns} {key}, paste the JSON here :",
let secret_json = inquire::Text::new(&format!(
"Secret not found for {} {}, paste the JSON here :",
ns, key
))
.with_formatter(&|data| {
let char_count = data.chars().count();
if char_count == 0 {
String::from("<skipped>")
} else if char_count <= 20 {
data.into()
} else {
let mut substr: String = data.chars().take(17).collect();
substr.push_str("...");
substr
}
})
.with_render_config(
inquire::ui::RenderConfig::default().with_canceled_prompt_indicator(
inquire::ui::Styled::new("<skipped>").with_fg(inquire::ui::Color::DarkYellow),
),
)
.prompt()
.map_err(|e| {
SecretStoreError::Store(format!("Failed to prompt secret {ns} {key} : {e}").into())
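The prompt's formatter keeps pasted secrets off the terminal: it echoes at most 20 characters, truncating longer input to 17 characters plus an ellipsis, and shows `<skipped>` for empty input. The same logic as a standalone sketch (hypothetical helper name):

```rust
// Echo policy for pasted secrets: empty input shows "<skipped>", short
// input is shown as-is, anything longer is cut to 17 chars plus "...".
fn preview(data: &str) -> String {
    let char_count = data.chars().count();
    if char_count == 0 {
        String::from("<skipped>")
    } else if char_count <= 20 {
        data.into()
    } else {
        let mut substr: String = data.chars().take(17).collect();
        substr.push_str("...");
        substr
    }
}

fn main() {
    assert_eq!(preview(""), "<skipped>");
    assert_eq!(preview("short"), "short");
    assert_eq!(preview(&"x".repeat(40)), format!("{}...", "x".repeat(17)));
}
```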
@@ -29,12 +29,20 @@ impl SecretStore for LocalFileSecretStore {
file_path.display()
);

tokio::fs::read(&file_path)
.await
.map_err(|_| SecretStoreError::NotFound {
namespace: ns.to_string(),
key: key.to_string(),
})
let content =
tokio::fs::read(&file_path)
.await
.map_err(|_| SecretStoreError::NotFound {
namespace: ns.to_string(),
key: key.to_string(),
})?;
info!(
"Sum of all vec get {ns} {key} {:?}",
content
.iter()
.fold(0, |acc: u64, val: &u8| { acc + *val as u64 })
);
Ok(content)
}

async fn set_raw(&self, ns: &str, key: &str, val: &[u8]) -> Result<(), SecretStoreError> {
@@ -56,6 +64,12 @@ impl SecretStore for LocalFileSecretStore {
.map_err(|e| SecretStoreError::Store(Box::new(e)))?;
}

info!(
"Sum of all vec set {ns} {key} {:?}",
val.iter()
.fold(0, |acc: u64, val: &u8| { acc + *val as u64 })
);

tokio::fs::write(&file_path, val)
.await
.map_err(|e| SecretStoreError::Store(Box::new(e)))
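The value logged on both reads and writes is just the sum of all payload bytes: a crude fingerprint that lets matching `set`/`get` round-trips be confirmed in the logs without printing the secret itself. As a plain function (hypothetical name):

```rust
// Sum of all bytes: collisions are easy, but equal sums on set and get
// are a quick round-trip sanity signal that leaks nothing readable.
fn byte_sum(bytes: &[u8]) -> u64 {
    bytes.iter().fold(0u64, |acc, b| acc + u64::from(*b))
}

fn main() {
    assert_eq!(byte_sum(b"abc"), 97 + 98 + 99);
}
```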
@@ -51,40 +51,6 @@ impl TryFrom<String> for MacAddress {

pub type IpAddress = std::net::IpAddr;

/// Represents a URL, which can either be a remote URL or a local file path.
///
/// For convenience, the `harmony_macros` crate provides `hurl!` and `local_folder!`
/// macros to construct `Url` variants from string literals.
///
/// # Examples
///
/// ### Manual Construction
///
/// The following example demonstrates how to build `Url` variants directly. This is
/// the standard approach if you are not using the `harmony_macros` crate.
///
/// ```
/// // The `use` statement below is for the doc test. In a real project,
/// // you would use `use harmony_types::Url;`
/// # use harmony_types::net::Url;
/// let url = Url::Url(url::Url::parse("https://example.com").unwrap());
/// let local_path = Url::LocalFolder("/var/data".to_string());
///
/// assert!(matches!(url, Url::Url(_)));
/// assert!(matches!(local_path, Url::LocalFolder(_)));
/// ```
///
/// ### Usage with `harmony_macros`
///
/// If `harmony_macros` is a dependency, you can create `Url`s more concisely.
///
/// ```rust,ignore
/// use harmony_macros::{hurl, local_folder};
/// use harmony_types::Url;
///
/// let hurl = hurl!("https://example.com");
/// let local_path = local_folder!("/var/data");
/// ```
#[derive(Debug, Clone)]
pub enum Url {
LocalFolder(String),

@@ -2,8 +2,8 @@ mod downloadable_asset;
use downloadable_asset::*;

use kube::Client;
use log::{debug, info};
use std::{ffi::OsStr, path::PathBuf};
use log::debug;
use std::path::PathBuf;

const K3D_BIN_FILE_NAME: &str = "k3d";

@@ -213,19 +213,15 @@ impl K3d {
}
}

let client;
if !self.is_cluster_initialized() {
debug!("Cluster is not initialized, initializing now");
client = self.initialize_cluster().await?;
} else {
self.start_cluster().await?;

debug!("K3d and cluster are already properly set up");
client = self.create_kubernetes_client().await?;
return self.initialize_cluster().await;
}

self.ensure_k3d_config_is_default(self.get_cluster_name()?)?;
Ok(client)
self.start_cluster().await?;

debug!("K3d and cluster are already properly set up");
self.create_kubernetes_client().await
}

// Private helper methods
@@ -306,16 +302,7 @@ impl K3d {
S: AsRef<std::ffi::OsStr>,
{
let binary_path = self.get_k3d_binary()?;
self.run_command(binary_path, args)
}

pub fn run_command<I, S, C>(&self, cmd: C, args: I) -> Result<std::process::Output, String>
where
I: IntoIterator<Item = S>,
S: AsRef<std::ffi::OsStr>,
C: AsRef<OsStr>,
{
let output = std::process::Command::new(cmd).args(args).output();
let output = std::process::Command::new(binary_path).args(args).output();
match output {
Ok(output) => {
let stderr = String::from_utf8_lossy(&output.stderr);
@@ -324,7 +311,7 @@ impl K3d {
debug!("stdout : {}", stdout);
Ok(output)
}
Err(e) => Err(format!("Failed to execute command: {}", e)),
Err(e) => Err(format!("Failed to execute k3d command: {}", e)),
}
}

@@ -336,38 +323,12 @@ impl K3d {
return Err(format!("Failed to create cluster: {}", stderr));
}

info!("Successfully created k3d cluster '{}'", cluster_name);
Ok(())
}

fn ensure_k3d_config_is_default(&self, cluster_name: &str) -> Result<(), String> {
let output = self.run_k3d_command(["kubeconfig", "merge", "-d", cluster_name])?;

if !output.status.success() {
let stderr = String::from_utf8_lossy(&output.stderr);
return Err(format!("Failed to setup k3d kubeconfig : {}", stderr));
}

let output = self.run_command(
"kubectl",
["config", "use-context", &format!("k3d-{cluster_name}")],
)?;

if !output.status.success() {
let stderr = String::from_utf8_lossy(&output.stderr);
return Err(format!(
"Failed to switch kubectl context to k3d : {}",
stderr
));
}
info!(
"kubectl is now using 'k3d-{}' as default context",
cluster_name
);
debug!("Successfully created k3d cluster '{}'", cluster_name);
Ok(())
}

async fn create_kubernetes_client(&self) -> Result<Client, String> {
// TODO: Connect the client to the right k3d cluster (see https://git.nationtech.io/NationTech/harmony/issues/92)
Client::try_default()
.await
.map_err(|e| format!("Failed to create Kubernetes client: {}", e))

@@ -51,6 +51,7 @@ pub struct OPNsense {

impl From<String> for OPNsense {
fn from(content: String) -> Self {

yaserde::de::from_str(&content)
.map_err(|e| println!("{}", e))
.expect("OPNSense received invalid string, should be full XML")

@@ -227,14 +227,15 @@ mod tests {
#[tokio::test]
async fn test_load_config_from_local_file() {
for path in [
// "src/tests/data/config-opnsense-25.1.xml",
// "src/tests/data/config-vm-test.xml",
"src/tests/data/config-opnsense-25.1.xml",
"src/tests/data/config-vm-test.xml",
"src/tests/data/config-structure.xml",
"src/tests/data/config-full-1.xml",
// "src/tests/data/config-full-ncd0.xml",
// "src/tests/data/config-full-25.7.xml",
// "src/tests/data/config-full-25.7-dummy-dnsmasq-options.xml",
"src/tests/data/config-full-ncd0.xml",
"src/tests/data/config-full-25.7.xml",
"src/tests/data/config-full-25.7-dummy-dnsmasq-options.xml",
"src/tests/data/config-25.7-dnsmasq-static-host.xml",
"src/tests/data/config-wk1-20250903.xmlDONOTCOMMIT",
] {
let mut test_file_path = PathBuf::from(env!("CARGO_MANIFEST_DIR"));
test_file_path.push(path);
@@ -252,6 +253,8 @@ mod tests {

let serialized = config.opnsense.to_xml();

fs::write("/tmp/serialized.xml", &serialized).unwrap();

// Since the order of all fields is not always the same in opnsense config files
// I think it is good enough to have exactly the same amount of the same lines
let mut before = config_file_str.lines().collect::<Vec<_>>();
@@ -288,6 +291,8 @@ mod tests {

let serialized = config.opnsense.to_xml();

fs::write("/tmp/serialized.xml", &serialized).unwrap();

let mut test_file_path = PathBuf::from(env!("CARGO_MANIFEST_DIR"));
test_file_path.push("src/tests/data/config-structure-with-dhcp-staticmap-entry.xml");

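The comment in the hunk above states the test's strategy: OPNsense does not serialize fields in a stable order, so round-tripped configs are compared as multisets of lines rather than byte-for-byte. A minimal sketch of that comparison (hypothetical helper mirroring the `before`/`after` vectors):

```rust
// Order-insensitive equality: two documents match when they contain the
// same lines the same number of times, regardless of ordering.
fn same_lines(a: &str, b: &str) -> bool {
    let mut before: Vec<&str> = a.lines().collect();
    let mut after: Vec<&str> = b.lines().collect();
    before.sort_unstable();
    after.sort_unstable();
    before == after
}

fn main() {
    assert!(same_lines("<a/>\n<b/>", "<b/>\n<a/>"));
    assert!(!same_lines("<a/>\n<a/>", "<a/>"));
}
```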
@@ -13,7 +13,7 @@
<description>System Administrators</description>
<scope>system</scope>
<gid>1999</gid>
<member>0,2000</member>
<member>0</member>
<priv>page-all</priv>
<source_networks/>
</group>

@@ -215,6 +215,7 @@
<description>System Administrators</description>
<scope>system</scope>
<gid>1999</gid>
<member>0</member>
<member>2000</member>
<priv>page-all</priv>
</group>

@@ -27,6 +27,7 @@
<description>System Administrators</description>
<scope>system</scope>
<gid>1999</gid>
<member>0</member>
<member>2000</member>
<priv>page-all</priv>
</group>

@@ -27,6 +27,7 @@
<description>System Administrators</description>
<scope>system</scope>
<gid>1999</gid>
<member>0</member>
<member>2000</member>
<priv>page-all</priv>
</group>