Compare commits

..

3 Commits

Author SHA1 Message Date
35a459f63c wip: OKD Installation full process automation underway, ready to test bootstrapping very soon
Some checks failed
Run Check Script / check (pull_request) Failing after 30s
2025-09-01 23:21:44 -04:00
f076d36297 wip: bootstrap step of okd installation required some refactoring, it's getting there
Some checks failed
Run Check Script / check (pull_request) Failing after 30s
2025-09-01 19:14:31 -04:00
138e414727 feat(opnsense-config): dnsmasq dhcp static mappings
Some checks failed
Run Check Script / check (pull_request) Failing after 31s
2025-09-01 17:34:19 -04:00
128 changed files with 1493 additions and 5200 deletions

2
.gitattributes vendored
View File

@@ -2,5 +2,3 @@ bootx64.efi filter=lfs diff=lfs merge=lfs -text
grubx64.efi filter=lfs diff=lfs merge=lfs -text
initrd filter=lfs diff=lfs merge=lfs -text
linux filter=lfs diff=lfs merge=lfs -text
data/okd/bin/* filter=lfs diff=lfs merge=lfs -text
data/okd/installer_image/* filter=lfs diff=lfs merge=lfs -text

1
.gitignore vendored
View File

@@ -3,7 +3,6 @@ private_repos/
### Harmony ###
harmony.log
data/okd/installation_files*
### Helm ###
# Chart dependencies

3
.gitmodules vendored
View File

@@ -1,3 +0,0 @@
[submodule "examples/try_rust_webapp/tryrust.org"]
path = examples/try_rust_webapp/tryrust.org
url = https://github.com/rust-dd/tryrust.org.git

View File

@@ -1,20 +0,0 @@
{
"db_name": "SQLite",
"query": "SELECT host_id FROM host_role_mapping WHERE role = ?",
"describe": {
"columns": [
{
"name": "host_id",
"ordinal": 0,
"type_info": "Text"
}
],
"parameters": {
"Right": 1
},
"nullable": [
false
]
},
"hash": "2ea29df2326f7c84bd4100ad510a3fd4878dc2e217dc83f9bf45a402dfd62a91"
}

View File

@@ -1,32 +0,0 @@
{
"db_name": "SQLite",
"query": "\n SELECT\n p1.id,\n p1.version_id,\n p1.data as \"data: Json<PhysicalHost>\"\n FROM\n physical_hosts p1\n INNER JOIN (\n SELECT\n id,\n MAX(version_id) AS max_version\n FROM\n physical_hosts\n GROUP BY\n id\n ) p2 ON p1.id = p2.id AND p1.version_id = p2.max_version\n ",
"describe": {
"columns": [
{
"name": "id",
"ordinal": 0,
"type_info": "Text"
},
{
"name": "version_id",
"ordinal": 1,
"type_info": "Text"
},
{
"name": "data: Json<PhysicalHost>",
"ordinal": 2,
"type_info": "Blob"
}
],
"parameters": {
"Right": 0
},
"nullable": [
false,
false,
false
]
},
"hash": "8d247918eca10a88b784ee353db090c94a222115c543231f2140cba27bd0f067"
}

View File

@@ -1,12 +0,0 @@
{
"db_name": "SQLite",
"query": "\n INSERT INTO host_role_mapping (host_id, role)\n VALUES (?, ?)\n ",
"describe": {
"columns": [],
"parameters": {
"Right": 2
},
"nullable": []
},
"hash": "df7a7c9cfdd0972e2e0ce7ea444ba8bc9d708a4fb89d5593a0be2bbebde62aff"
}
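
The three JSON blobs above are sqlx offline query metadata, the files `cargo sqlx prepare` writes (conventionally under `.sqlx/`) so the `query!`/`query_as!` macros can type-check without a live database. As a rough sketch, the first entry corresponds to a call shaped like this (names borrowed from the repository code further down in this diff):

```rust
use sqlx::SqlitePool;

struct HostIdRow {
    host_id: String,
}

// Sketch only: compiling query_as! requires the offline cache above
// (or a live DATABASE_URL) so sqlx can verify the query against the schema.
async fn host_ids_for_role(pool: &SqlitePool, role_str: String) -> Result<Vec<String>, sqlx::Error> {
    let rows = sqlx::query_as!(
        HostIdRow,
        "SELECT host_id FROM host_role_mapping WHERE role = ?",
        role_str
    )
    .fetch_all(pool)
    .await?;
    Ok(rows.into_iter().map(|r| r.host_id).collect())
}
```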

743
Cargo.lock generated

File diff suppressed because it is too large

View File

@@ -1,3 +1,4 @@
use log::debug;
use mdns_sd::{ServiceDaemon, ServiceEvent};
use crate::SERVICE_TYPE;
@@ -73,7 +74,7 @@ pub async fn discover() {
// }
}
async fn _discover_example() {
async fn discover_example() {
use mdns_sd::{ServiceDaemon, ServiceEvent};
// Create a daemon
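
This hunk renames `_discover_example` to `discover_example` and adds a `log::debug` import. For context, a minimal mdns-sd browse loop looks roughly like this, assuming the `ServiceInfo`-carrying event shape of recent `mdns_sd` releases; `"_harmony._tcp.local."` is a placeholder standing in for the crate's `SERVICE_TYPE` constant:

```rust
use mdns_sd::{ServiceDaemon, ServiceEvent};

fn browse_once(service_type: &str) {
    let mdns = ServiceDaemon::new().expect("failed to create mDNS daemon");
    let receiver = mdns.browse(service_type).expect("failed to start browsing");
    // Block until the first service is fully resolved, then stop.
    while let Ok(event) = receiver.recv() {
        if let ServiceEvent::ServiceResolved(info) = event {
            println!("resolved {} -> {:?}", info.get_fullname(), info.get_addresses());
            break;
        }
    }
}
```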

BIN
data/okd/bin/kubectl (Stored with Git LFS)

Binary file not shown.

BIN
data/okd/bin/oc (Stored with Git LFS)

Binary file not shown.

BIN
data/okd/bin/oc_README.md (Stored with Git LFS)

Binary file not shown.

BIN
data/okd/bin/openshift-install (Stored with Git LFS)

Binary file not shown.

BIN
data/okd/bin/openshift-install_README.md (Stored with Git LFS)

Binary file not shown.

Binary file not shown.

Binary file not shown.

View File

@@ -1 +0,0 @@
scos-9.0.20250510-0-live-initramfs.x86_64.img

View File

@@ -1 +0,0 @@
scos-9.0.20250510-0-live-kernel.x86_64

View File

@@ -1 +0,0 @@
scos-9.0.20250510-0-live-rootfs.x86_64.img

View File

@@ -1,132 +0,0 @@
# Harmony, an Open-Source Infrastructure Orchestrator
**Target Duration:** 25 minutes\
**Tone:** Friendly, expert-to-expert, inspiring.
---
#### **Slide 1: Title Slide**
- **Visual:** Clean and simple. Your company logo (NationTech) and the Harmony logo.
---
#### **Slide 2: The YAML Labyrinth**
**Goal:** Get every head in the room nodding in agreement. Start with their world, not yours.
- **Visual:**
- Option A: "The Pull Request from Hell". A screenshot of a GitHub pull request for a seemingly minor change that touches dozens of YAML files across multiple directories. A sea of red and green diffs that is visually overwhelming.
- Option B: A complex flowchart connecting dozens of logos: Terraform, Ansible, K8s, Helm, etc.
- **Narration:**\
[...ADD SOMETHING FOR INTRODUCTION...]\
"We love the power that tools like Kubernetes and the CNCF landscape have given us. But let's be honest... when did our infrastructure code start looking like _this_?"\
"We have GitOps, which is great. But it often means we're managing this fragile cathedral of YAML, Helm charts, and brittle scripts. We spend more time debugging indentation and tracing variables than we do building truly resilient systems."
---
#### **Slide 3: The Real Cost of Infrastructure**
- **Visual:** "The Jenga Tower of Tools". A tall, precarious Jenga tower where each block is the logo of a different tool (Terraform, K8s, Helm, Ansible, Prometheus, ArgoCD, etc.). One block near the bottom is being nervously pulled out.
- **Narration:**
"The real cost isn't just complexity; it's the constant need to choose, learn, integrate, and operate a dozen different tools, each with its own syntax and failure modes. It's the nagging fear that a tiny typo in a config file could bring everything down. Click-ops isn't the answer, but the current state of IaC feels like we've traded one problem for another."
---
#### **Slide 4: The Broken Promise of "Code"**
**Goal:** Introduce the core idea before introducing the product. This makes the solution feel inevitable.
- **(Initial Visual):** A two-panel slide.
- **Left Panel Title: "The Plan"** - A terminal showing a green, successful `terraform plan` output.
- **Right Panel Title: "The Reality"** - The _next_ screen in the terminal, showing the `terraform apply` failing with a cascade of red error text.
- **Narration:**
"We call our discipline **Infrastructure as Code**. And we've all been here. Our 'compiler' is a `terraform plan` that says everything looks perfect. We get the green light."
(Pause for a beat)
"And then we `apply`, and reality hits. It fails halfway through, at runtime, when it's most expensive and painful to fix."
**(Click to transition the slide)**
- **(New Visual):** The entire slide is replaced by a clean screenshot of a code editor (like nvim 😉) showing Harmony's Rust DSL. A red squiggly line is under a config line. The error message is clear in the "Problems" panel: `error: Incompatible deployment. Production target 'gcp-prod-cluster' requires a StorageClass with 'snapshots' capability, but 'standard-sc' does not provide it.`
- **Narration (continued):**
"In software development, we solved these problems years ago. We don't accept 'it compiled, but crashed on startup'. We have real tools, type systems, compilers, test frameworks, and IDEs that catch our mistakes before they ever reach production. **So, what if we could treat our entire infrastructure... like a modern, compiled application?**"
"What if your infrastructure code could get compile-time checks, straight into the editor... instead of runtime panics and failures at 3 AM in production?"
---
#### **Slide 5: Introducing Harmony**
**Goal:** Introduce Harmony as the answer to the "What If?" question.
- **Visual:** The Harmony logo, large and centered.
- **Tagline:** `Infrastructure in type-safe Rust. No YAML required.`
- **Narration:**
"This is Harmony. It's an open-source orchestrator that lets you define your entire stack — from a dev laptop to a multi-site bare-metal cluster—in a single, type-safe Rust codebase."
---
#### **Slide 6: Before & After**
- **Visual:** A side-by-side comparison. Left side: A screen full of complex, nested YAML. Right side: 10-15 lines of clean, readable Harmony Rust DSL that accomplishes the same thing.
- **Narration:**
"This is the difference. On the left, the fragile world of strings and templates. On the right, a portable, verifiable program that describes your apps, your infra, and your operations. We unify scaffolding, provisioning, and Day-2 ops, all verified by the Rust compiler. But enough slides... let's see it in action."
---
#### **Slide 7: Live Demo: Zero to Monitored App**
**Goal:** Show, don't just tell. Make it look effortless. This is where you build the "dream."
- **Visual:** Your terminal/IDE, ready to go.
- **Narration Guide:**
"Okay, for this demo, we're going to take a standard web app from GitHub. Nothing special about it."
_(Show the repo)_
"Now, let's bring it into Harmony. This is the entire definition we need to describe the application and its needs."
_(Show the Rust DSL)_
"First, let's run it locally on k3d. The exact same definition for dev as for prod."
_(Deploy locally, show it works)_
"Cool. But a real app needs monitoring. In Harmony, that's just adding a feature to our code."
_(Uncomment one line: `.with_feature(Monitoring)` and redeploy)_
"And just like that, we have a fully configured Prometheus and Grafana stack, scraping our app. No YAML, no extra config."
"Finally, let's push this to our production staging cluster. We just change the target and specify our multi-site Ceph storage."
_(Deploy to the remote cluster)_
"And there it is. We've gone from a simple web app to a monitored, enterprise-grade service in minutes."
---
#### **Slide 8: Live Demo: Embracing Chaos**
**Goal:** Prove the "predictable" and "resilient" claims in the most dramatic way possible.
- **Visual:** A slide showing a map or diagram of your distributed infrastructure (the different data centers). Then switch back to your terminal.
- **Narration Guide:**
"This is great when things are sunny. But production is chaos. So... let's break things. On purpose."
"First, a network failure." _(Kill a switch/link, show app is still up)_
"Now, let's power off a storage server." _(Force off a server, show Ceph healing and the app is unaffected)_
"How about a control plane node?" _(Force off a k8s control plane, show the cluster is still running)_
"Okay, for the grand finale. What if we have a cascading failure? I'm going to kill _another_ storage server. This should cause a total failure in this data center."
_(Force off the second server, narrate what's happening)_
"And there it is... Ceph has lost quorum in this site... and Harmony has automatically failed everything over to our other datacenter. The app is still running."
---
#### **Slide 9: The New Reality**
**Goal:** Summarize the dream and tell the audience what you want them to do.
- **Visual:** The clean, simple Harmony Rust DSL code from Slide 6. A summary of what was just accomplished is listed next to it: `✓ GitHub to Prod in minutes`, `✓ Type-Safe Validation`, `✓ Built-in Monitoring`, `✓ Automated Multi-Site Failover`.
- **Narration:**
"So, in just a few minutes, we went from a simple web app to a multi-site, monitored, and chaos-proof production deployment. We did it with a small amount of code that is easy to read, easy to verify, and completely portable. This is our vision: to offload the complexity, and make infrastructure simple, predictable, and even fun again."
---
#### **Slide 10: Join Us**
- **Visual:** A clean, final slide with QR codes and links.
- GitHub Repo (`github.com/nation-tech/harmony`)
- Website (`harmony.sh` or similar)
- Your contact info (`jg@nation.tech` / LinkedIn / Twitter)
- **Narration:**
"Harmony is open-source, AGPLv3. We believe this is the future, but we're just getting started. We know this crowd has great infrastructure minds out there, and we need your feedback. Please, check out the project on GitHub. Star it if you like what you see. Tell us what's missing. Let's build this future together. Thank you."
**(Open for Q&A)**
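
The "clean Harmony Rust DSL" the deck keeps referring to appears verbatim in the example crates later in this diff; condensed here for context (this trims the tryrust example below, dropping its Discord alert receiver, and is not a verbatim snippet):

```rust
use std::{path::PathBuf, sync::Arc};
use harmony::{
    inventory::Inventory,
    modules::application::{
        ApplicationScore, RustWebFramework, RustWebapp,
        features::{ContinuousDelivery, Monitoring},
    },
    topology::K8sAnywhereTopology,
};
use harmony_types::net::Url;

#[tokio::main]
async fn main() {
    // One typed definition covers the app, its delivery, and its monitoring.
    let application = Arc::new(RustWebapp {
        name: "harmony-example-tryrust".to_string(),
        domain: Url::Url(url::Url::parse("https://tryrust.harmony.example.com").unwrap()),
        project_root: PathBuf::from("./tryrust.org"),
        framework: Some(RustWebFramework::Leptos),
        service_port: 8080,
    });
    let app = ApplicationScore {
        features: vec![
            Box::new(ContinuousDelivery { application: application.clone() }),
            Box::new(Monitoring { application: application.clone(), alert_receiver: vec![] }),
        ],
        application,
    };
    harmony_cli::run(
        Inventory::autoload(),
        K8sAnywhereTopology::from_env(),
        vec![Box::new(app)],
        None,
    )
    .await
    .unwrap();
}
```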

View File

@@ -1,8 +0,0 @@
## BIOS settings
1. CSM: Disabled (Compatibility Support Module; must be disabled to boot GPT-formatted drives)
2. Secure Boot: Disabled
3. Boot order:
1. Local hard drive
2. PXE IPv4
4. System clock: make sure it is set correctly, otherwise you will get invalid-certificate errors

View File

@@ -30,7 +30,6 @@ async fn main() {
domain: Url::Url(url::Url::parse("https://rustapp.harmony.example.com").unwrap()),
project_root: PathBuf::from("./examples/rust/webapp"),
framework: Some(RustWebFramework::Leptos),
service_port: 3000,
});
let webhook_receiver = WebhookReceiver {

View File

@@ -13,7 +13,6 @@ harmony_types = { path = "../../harmony_types" }
cidr = { workspace = true }
tokio = { workspace = true }
harmony_macros = { path = "../../harmony_macros" }
harmony_secret = { path = "../../harmony_secret" }
log = { workspace = true }
env_logger = { workspace = true }
url = { workspace = true }

View File

@@ -5,9 +5,7 @@ use std::{
use cidr::Ipv4Cidr;
use harmony::{
config::secret::SshKeyPair,
data::{FileContent, FilePath},
hardware::{HostCategory, Location, PhysicalHost, SwitchGroup},
hardware::{Location, PhysicalHost, SwitchGroup},
infra::opnsense::OPNSenseManagementInterface,
inventory::Inventory,
modules::{
@@ -15,14 +13,13 @@ use harmony::{
okd::{
bootstrap_dhcp::OKDBootstrapDhcpScore,
bootstrap_load_balancer::OKDBootstrapLoadBalancerScore, dhcp::OKDDhcpScore,
dns::OKDDnsScore, ipxe::OKDIpxeScore,
dns::OKDDnsScore,
},
tftp::TftpScore,
},
topology::{LogicalHost, UnmanagedRouter},
};
use harmony_macros::{ip, mac_address};
use harmony_secret::SecretManager;
use harmony_types::net::Url;
#[tokio::main]
@@ -126,28 +123,14 @@ async fn main() {
let load_balancer_score =
harmony::modules::okd::load_balancer::OKDLoadBalancerScore::new(&topology);
let ssh_key = SecretManager::get_or_prompt::<SshKeyPair>().await.unwrap();
let tftp_score = TftpScore::new(Url::LocalFolder("./data/watchguard/tftpboot".to_string()));
let http_score = StaticFilesHttpScore {
folder_to_serve: Some(Url::LocalFolder(
"./data/watchguard/pxe-http-files".to_string(),
)),
files: vec![],
remote_path: None,
};
let kickstart_filename = "inventory.kickstart".to_string();
let harmony_inventory_agent = "harmony_inventory_agent".to_string();
let ipxe_score = OKDIpxeScore {
kickstart_filename,
harmony_inventory_agent,
cluster_pubkey: FileContent {
path: FilePath::Relative("cluster_ssh_key.pub".to_string()),
content: ssh_key.public,
},
};
let ipxe_score = IpxeScore::new();
harmony_tui::run(
inventory,

View File

@@ -1,21 +0,0 @@
[package]
name = "example-okd-install"
edition = "2024"
version.workspace = true
readme.workspace = true
license.workspace = true
publish = false
[dependencies]
harmony = { path = "../../harmony" }
harmony_cli = { path = "../../harmony_cli" }
harmony_types = { path = "../../harmony_types" }
harmony_secret = { path = "../../harmony_secret" }
harmony_secret_derive = { path = "../../harmony_secret_derive" }
cidr = { workspace = true }
tokio = { workspace = true }
harmony_macros = { path = "../../harmony_macros" }
log = { workspace = true }
env_logger = { workspace = true }
url = { workspace = true }
serde.workspace = true

View File

@@ -1,4 +0,0 @@
export HARMONY_SECRET_NAMESPACE=example-vms
export HARMONY_SECRET_STORE=file
export HARMONY_DATABASE_URL=sqlite://harmony_vms.sqlite RUST_LOG=info
export RUST_LOG=info

View File

@@ -1,34 +0,0 @@
mod topology;
use crate::topology::{get_inventory, get_topology};
use harmony::{
config::secret::SshKeyPair,
data::{FileContent, FilePath},
modules::okd::{installation::OKDInstallationPipeline, ipxe::OKDIpxeScore},
score::Score,
topology::HAClusterTopology,
};
use harmony_secret::SecretManager;
#[tokio::main]
async fn main() {
let inventory = get_inventory();
let topology = get_topology().await;
let ssh_key = SecretManager::get_or_prompt::<SshKeyPair>().await.unwrap();
let mut scores: Vec<Box<dyn Score<HAClusterTopology>>> = vec![Box::new(OKDIpxeScore {
kickstart_filename: "inventory.kickstart".to_string(),
harmony_inventory_agent: "harmony_inventory_agent".to_string(),
cluster_pubkey: FileContent {
path: FilePath::Relative("cluster_ssh_key.pub".to_string()),
content: ssh_key.public,
},
})];
scores.append(&mut OKDInstallationPipeline::get_all_scores().await);
harmony_cli::run(inventory, topology, scores, None)
.await
.unwrap();
}

View File

@@ -1,77 +0,0 @@
use cidr::Ipv4Cidr;
use harmony::{
hardware::{FirewallGroup, HostCategory, Location, PhysicalHost, SwitchGroup},
infra::opnsense::OPNSenseManagementInterface,
inventory::Inventory,
topology::{HAClusterTopology, LogicalHost, UnmanagedRouter},
};
use harmony_macros::{ip, ipv4};
use harmony_secret::{Secret, SecretManager};
use serde::{Deserialize, Serialize};
use std::{net::IpAddr, sync::Arc};
#[derive(Secret, Serialize, Deserialize, Debug, PartialEq)]
struct OPNSenseFirewallConfig {
username: String,
password: String,
}
pub async fn get_topology() -> HAClusterTopology {
let firewall = harmony::topology::LogicalHost {
ip: ip!("192.168.1.1"),
name: String::from("opnsense-1"),
};
let config = SecretManager::get_or_prompt::<OPNSenseFirewallConfig>().await;
let config = config.unwrap();
let opnsense = Arc::new(
harmony::infra::opnsense::OPNSenseFirewall::new(
firewall,
None,
&config.username,
&config.password,
)
.await,
);
let lan_subnet = ipv4!("192.168.1.0");
let gateway_ipv4 = ipv4!("192.168.1.1");
let gateway_ip = IpAddr::V4(gateway_ipv4);
harmony::topology::HAClusterTopology {
domain_name: "demo.harmony.mcd".to_string(),
router: Arc::new(UnmanagedRouter::new(
gateway_ip,
Ipv4Cidr::new(lan_subnet, 24).unwrap(),
)),
load_balancer: opnsense.clone(),
firewall: opnsense.clone(),
tftp_server: opnsense.clone(),
http_server: opnsense.clone(),
dhcp_server: opnsense.clone(),
dns_server: opnsense.clone(),
control_plane: vec![LogicalHost {
ip: ip!("192.168.1.20"),
name: "master".to_string(),
}],
bootstrap_host: LogicalHost {
ip: ip!("192.168.1.10"),
name: "bootstrap".to_string(),
},
workers: vec![],
switch: vec![],
}
}
pub fn get_inventory() -> Inventory {
Inventory {
location: Location::new(
"Some virtual machine or maybe a physical machine if you're cool".to_string(),
"testopnsense".to_string(),
),
switch: SwitchGroup::from([]),
firewall_mgmt: Box::new(OPNSenseManagementInterface::new()),
storage_host: vec![],
worker_host: vec![],
control_plane_host: vec![],
}
}

View File

@@ -1,7 +0,0 @@
-----BEGIN OPENSSH PRIVATE KEY-----
b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAAAMwAAAAtzc2gtZW
QyNTUxOQAAACAcemw8pbwuvHFaYynxBbS0Cf3ThYuj1Utr7CDqjwySHAAAAJikacCNpGnA
jQAAAAtzc2gtZWQyNTUxOQAAACAcemw8pbwuvHFaYynxBbS0Cf3ThYuj1Utr7CDqjwySHA
AAAECiiKk4V6Q5cVs6axDM4sjAzZn/QCZLQekmYQXS9XbEYxx6bDylvC68cVpjKfEFtLQJ
/dOFi6PVS2vsIOqPDJIcAAAAEGplYW5nYWJAbGlsaWFuZTIBAgMEBQ==
-----END OPENSSH PRIVATE KEY-----

View File

@@ -1 +0,0 @@
ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIBx6bDylvC68cVpjKfEFtLQJ/dOFi6PVS2vsIOqPDJIc jeangab@liliane2

View File

@@ -1,12 +1,7 @@
mod topology;
use crate::topology::{get_inventory, get_topology};
use harmony::{
config::secret::SshKeyPair,
data::{FileContent, FilePath},
modules::okd::ipxe::OKDIpxeScore,
};
use harmony_secret::SecretManager;
use harmony::modules::okd::ipxe::OkdIpxeScore;
#[tokio::main]
async fn main() {
@@ -14,16 +9,13 @@ async fn main() {
let topology = get_topology().await;
let kickstart_filename = "inventory.kickstart".to_string();
let cluster_pubkey_filename = "cluster_ssh_key.pub".to_string();
let harmony_inventory_agent = "harmony_inventory_agent".to_string();
let ssh_key = SecretManager::get_or_prompt::<SshKeyPair>().await.unwrap();
let ipxe_score = OKDIpxeScore {
let ipxe_score = OkdIpxeScore {
kickstart_filename,
harmony_inventory_agent,
cluster_pubkey: FileContent {
path: FilePath::Relative("cluster_ssh_key.pub".to_string()),
content: ssh_key.public,
},
cluster_pubkey_filename,
};
harmony_cli::run(inventory, topology, vec![Box::new(ipxe_score)], None)

View File

@@ -1,22 +1,28 @@
use cidr::Ipv4Cidr;
use harmony::{
config::secret::OPNSenseFirewallCredentials,
hardware::{Location, SwitchGroup},
hardware::{FirewallGroup, HostCategory, Location, PhysicalHost, SwitchGroup},
infra::opnsense::OPNSenseManagementInterface,
inventory::Inventory,
topology::{HAClusterTopology, LogicalHost, UnmanagedRouter},
};
use harmony_macros::{ip, ipv4};
use harmony_secret::SecretManager;
use harmony_secret::{Secret, SecretManager};
use serde::{Deserialize, Serialize};
use std::{net::IpAddr, sync::Arc};
#[derive(Secret, Serialize, Deserialize, Debug, PartialEq)]
struct OPNSenseFirewallConfig {
username: String,
password: String,
}
pub async fn get_topology() -> HAClusterTopology {
let firewall = harmony::topology::LogicalHost {
ip: ip!("192.168.1.1"),
name: String::from("opnsense-1"),
};
let config = SecretManager::get_or_prompt::<OPNSenseFirewallCredentials>().await;
let config = SecretManager::get::<OPNSenseFirewallConfig>().await;
let config = config.unwrap();
let opnsense = Arc::new(

View File

@@ -5,7 +5,7 @@ use std::{
use cidr::Ipv4Cidr;
use harmony::{
hardware::{HostCategory, Location, PhysicalHost, SwitchGroup},
hardware::{FirewallGroup, HostCategory, Location, PhysicalHost, SwitchGroup},
infra::opnsense::OPNSenseManagementInterface,
inventory::Inventory,
modules::{
@@ -85,7 +85,6 @@ async fn main() {
"./data/watchguard/pxe-http-files".to_string(),
)),
files: vec![],
remote_path: None,
};
harmony_tui::run(

View File

@@ -1,17 +0,0 @@
[package]
name = "rhob-application-monitoring"
edition = "2024"
version.workspace = true
readme.workspace = true
license.workspace = true
[dependencies]
harmony = { path = "../../harmony" }
harmony_cli = { path = "../../harmony_cli" }
harmony_types = { path = "../../harmony_types" }
harmony_macros = { path = "../../harmony_macros" }
tokio = { workspace = true }
log = { workspace = true }
env_logger = { workspace = true }
url = { workspace = true }
base64.workspace = true

View File

@@ -1,50 +0,0 @@
use std::{path::PathBuf, sync::Arc};
use harmony::{
inventory::Inventory,
modules::{
application::{
ApplicationScore, RustWebFramework, RustWebapp,
features::rhob_monitoring::RHOBMonitoring,
},
monitoring::alert_channel::discord_alert_channel::DiscordWebhook,
},
topology::K8sAnywhereTopology,
};
use harmony_types::net::Url;
#[tokio::main]
async fn main() {
let application = Arc::new(RustWebapp {
name: "test-rhob-monitoring".to_string(),
domain: Url::Url(url::Url::parse("htps://some-fake-url").unwrap()),
project_root: PathBuf::from("./webapp"), // Relative from 'harmony-path' param
framework: Some(RustWebFramework::Leptos),
service_port: 3000,
});
let discord_receiver = DiscordWebhook {
name: "test-discord".to_string(),
url: Url::Url(url::Url::parse("https://discord.doesnt.exist.com").unwrap()),
};
let app = ApplicationScore {
features: vec![
Box::new(RHOBMonitoring {
application: application.clone(),
alert_receiver: vec![Box::new(discord_receiver)],
}),
// TODO add backups, multisite ha, etc
],
application,
};
harmony_cli::run(
Inventory::autoload(),
K8sAnywhereTopology::from_env(),
vec![Box::new(app)],
None,
)
.await
.unwrap();
}

View File

@@ -13,26 +13,25 @@ use harmony::{
},
topology::K8sAnywhereTopology,
};
use harmony_macros::hurl;
use harmony_types::net::Url;
#[tokio::main]
async fn main() {
let application = Arc::new(RustWebapp {
name: "harmony-example-rust-webapp".to_string(),
domain: hurl!("https://rustapp.harmony.example.com"),
project_root: PathBuf::from("./webapp"),
domain: Url::Url(url::Url::parse("https://rustapp.harmony.example.com").unwrap()),
project_root: PathBuf::from("./webapp"), // Relative from 'harmony-path' param
framework: Some(RustWebFramework::Leptos),
service_port: 3000,
});
let discord_receiver = DiscordWebhook {
name: "test-discord".to_string(),
url: hurl!("https://discord.doesnt.exist.com"),
url: Url::Url(url::Url::parse("https://discord.doesnt.exist.com").unwrap()),
};
let webhook_receiver = WebhookReceiver {
name: "sample-webhook-receiver".to_string(),
url: hurl!("https://webhook-doesnt-exist.com"),
url: Url::Url(url::Url::parse("https://webhook-doesnt-exist.com").unwrap()),
};
let app = ApplicationScore {

View File

@@ -1,17 +0,0 @@
[package]
name = "example-try-rust-webapp"
edition = "2024"
version.workspace = true
readme.workspace = true
license.workspace = true
[dependencies]
harmony = { path = "../../harmony" }
harmony_cli = { path = "../../harmony_cli" }
harmony_types = { path = "../../harmony_types" }
harmony_macros = { path = "../../harmony_macros" }
tokio = { workspace = true }
log = { workspace = true }
env_logger = { workspace = true }
url = { workspace = true }
base64.workspace = true

View File

@@ -1,52 +0,0 @@
use std::{path::PathBuf, sync::Arc};
use harmony::{
inventory::Inventory,
modules::{
application::{
ApplicationScore, RustWebFramework, RustWebapp,
features::{ContinuousDelivery, Monitoring},
},
monitoring::alert_channel::discord_alert_channel::DiscordWebhook,
},
topology::K8sAnywhereTopology,
};
use harmony_types::net::Url;
#[tokio::main]
async fn main() {
let application = Arc::new(RustWebapp {
name: "harmony-example-tryrust".to_string(),
domain: Url::Url(url::Url::parse("https://tryrust.harmony.example.com").unwrap()),
project_root: PathBuf::from("./tryrust.org"),
framework: Some(RustWebFramework::Leptos),
service_port: 8080,
});
let discord_receiver = DiscordWebhook {
name: "test-discord".to_string(),
url: Url::Url(url::Url::parse("https://discord.doesnt.exist.com").unwrap()),
};
let app = ApplicationScore {
features: vec![
Box::new(ContinuousDelivery {
application: application.clone(),
}),
Box::new(Monitoring {
application: application.clone(),
alert_receiver: vec![Box::new(discord_receiver)],
}),
],
application,
};
harmony_cli::run(
Inventory::autoload(),
K8sAnywhereTopology::from_env(),
vec![Box::new(app)],
None,
)
.await
.unwrap();
}

View File

@@ -9,7 +9,6 @@ use harmony::{
},
topology::{
BackendServer, DummyInfra, HealthCheck, HttpMethod, HttpStatusCode, LoadBalancerService,
SSL,
},
};
use harmony_macros::ipv4;
@@ -48,7 +47,6 @@ fn build_large_score() -> LoadBalancerScore {
.to_string(),
HttpMethod::GET,
HttpStatusCode::Success2xx,
SSL::Disabled,
)),
};
LoadBalancerScore {

View File

@@ -66,10 +66,8 @@ tar.workspace = true
base64.workspace = true
thiserror.workspace = true
once_cell = "1.21.3"
walkdir = "2.5.0"
harmony_inventory_agent = { path = "../harmony_inventory_agent" }
harmony_secret_derive = { path = "../harmony_secret_derive" }
harmony_secret = { path = "../harmony_secret" }
harmony_secret_derive = { version = "0.1.0", path = "../harmony_secret_derive" }
askama.workspace = true
sqlx.workspace = true
inquire.workspace = true

View File

@@ -1,5 +1,3 @@
pub mod secret;
use lazy_static::lazy_static;
use std::path::PathBuf;

View File

@@ -1,20 +0,0 @@
use harmony_secret_derive::Secret;
use serde::{Deserialize, Serialize};
#[derive(Secret, Serialize, Deserialize, Debug, PartialEq)]
pub struct OPNSenseFirewallCredentials {
pub username: String,
pub password: String,
}
// TODO we need a better way to handle multiple "instances" of the same secret structure.
#[derive(Secret, Serialize, Deserialize, Debug, PartialEq)]
pub struct SshKeyPair {
pub private: String,
pub public: String,
}
#[derive(Secret, Serialize, Deserialize, Debug, PartialEq)]
pub struct RedhatSecret {
pub pull_secret: String,
}

View File

@@ -1,3 +1,5 @@
use std::sync::Arc;
use derive_new::new;
use harmony_inventory_agent::hwinfo::{CPU, MemoryModule, NetworkInterface, StorageDrive};
use harmony_types::net::MacAddress;
@@ -8,7 +10,7 @@ pub type HostGroup = Vec<PhysicalHost>;
pub type SwitchGroup = Vec<Switch>;
pub type FirewallGroup = Vec<PhysicalHost>;
#[derive(Debug, Clone, Serialize, Deserialize)]
#[derive(Debug, Clone, Serialize)]
pub struct PhysicalHost {
pub id: Id,
pub category: HostCategory,
@@ -149,98 +151,6 @@ impl PhysicalHost {
parts.join(" | ")
}
pub fn parts_list(&self) -> String {
let PhysicalHost {
id,
category,
network,
storage,
labels,
memory_modules,
cpus,
} = self;
let mut parts_list = String::new();
parts_list.push_str("\n\n=====================");
parts_list.push_str(&format!("\nHost ID {id}"));
parts_list.push_str("\n=====================");
parts_list.push_str("\n\n=====================");
parts_list.push_str(&format!("\nCPU count {}", cpus.len()));
parts_list.push_str("\n=====================");
cpus.iter().for_each(|c| {
let CPU {
model,
vendor,
cores,
threads,
frequency_mhz,
} = c;
parts_list.push_str(&format!(
"\n{vendor} {model}, {cores}/{threads} {}Ghz",
*frequency_mhz as f64 / 1000.0
));
});
parts_list.push_str("\n\n=====================");
parts_list.push_str(&format!("\nNetwork Interfaces count {}", network.len()));
parts_list.push_str("\n=====================");
network.iter().for_each(|nic| {
parts_list.push_str(&format!(
"\nNic({} {}Gbps mac({}) ipv4({}), ipv6({})",
nic.name,
nic.speed_mbps.unwrap_or(0) / 1000,
nic.mac_address,
nic.ipv4_addresses.join(","),
nic.ipv6_addresses.join(",")
));
});
parts_list.push_str("\n\n=====================");
parts_list.push_str(&format!("\nStorage drives count {}", storage.len()));
parts_list.push_str("\n=====================");
storage.iter().for_each(|drive| {
let StorageDrive {
name,
model,
serial,
size_bytes,
logical_block_size: _,
physical_block_size: _,
rotational: _,
wwn: _,
interface_type,
smart_status,
} = drive;
parts_list.push_str(&format!(
"\n{name} {}Gb {model} {interface_type} smart({smart_status:?}) {serial}",
size_bytes / 1000 / 1000 / 1000
));
});
parts_list.push_str("\n\n=====================");
parts_list.push_str(&format!("\nMemory modules count {}", memory_modules.len()));
parts_list.push_str("\n=====================");
memory_modules.iter().for_each(|mem| {
let MemoryModule {
size_bytes,
speed_mhz,
manufacturer,
part_number,
serial_number,
rank,
} = mem;
parts_list.push_str(&format!(
"\n{}Gb, {}Mhz, Manufacturer ({}), Part Number ({})",
size_bytes / 1000 / 1000 / 1000,
speed_mhz.unwrap_or(0),
manufacturer.as_ref().unwrap_or(&String::new()),
part_number.as_ref().unwrap_or(&String::new()),
));
});
parts_list
}
pub fn cluster_mac(&self) -> MacAddress {
self.network
.first()
@@ -315,6 +225,15 @@ impl PhysicalHost {
// }
// }
impl<'de> Deserialize<'de> for PhysicalHost {
fn deserialize<D>(_deserializer: D) -> Result<Self, D::Error>
where
D: serde::Deserializer<'de>,
{
todo!()
}
}
#[derive(new, Serialize)]
pub struct ManualManagementInterface;
@@ -358,13 +277,16 @@ where
}
}
#[derive(Debug, Clone, Serialize, Deserialize)]
#[derive(Debug, Clone, Serialize)]
pub enum HostCategory {
Server,
Firewall,
Switch,
}
#[cfg(test)]
use harmony_macros::mac_address;
use harmony_types::id::Id;
#[derive(Debug, Clone, Serialize)]
@@ -373,7 +295,7 @@ pub struct Switch {
_management_interface: NetworkInterface,
}
#[derive(Debug, new, Clone, Serialize, Deserialize)]
#[derive(Debug, new, Clone, Serialize)]
pub struct Label {
pub name: String,
pub value: String,
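
Note that the hand-written `Deserialize` impl added above is a `todo!()` stub: the first attempt to read a `PhysicalHost` back (for example through the sqlite repository's `data: Json<PhysicalHost>` column) will panic. One common shape for filling it in later is to delegate to a derived mirror struct; a hypothetical, self-contained sketch with a stand-in type (`HostCategory` also loses its `Deserialize` derive in this change, so the real impl needs more than this):

```rust
use serde::{Deserialize, Deserializer};

// Stand-in for PhysicalHost; illustrative only.
#[derive(Debug)]
struct Host {
    id: String,
    name: String,
}

// Derived mirror struct that serde can handle directly.
#[derive(Deserialize)]
struct HostRaw {
    id: String,
    name: String,
}

impl<'de> Deserialize<'de> for Host {
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        // Deserialize the mirror, then convert (validation could go here).
        let raw = HostRaw::deserialize(deserializer)?;
        Ok(Host { id: raw.id, name: raw.name })
    }
}
```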

View File

@@ -33,7 +33,6 @@ pub enum InterpretName {
DiscoverInventoryAgent,
CephClusterHealth,
Custom(&'static str),
RHOBAlerting,
}
impl std::fmt::Display for InterpretName {
@@ -63,7 +62,6 @@ impl std::fmt::Display for InterpretName {
InterpretName::DiscoverInventoryAgent => f.write_str("DiscoverInventoryAgent"),
InterpretName::CephClusterHealth => f.write_str("CephClusterHealth"),
InterpretName::Custom(name) => f.write_str(name),
InterpretName::RHOBAlerting => f.write_str("RHOBAlerting"),
}
}
}
@@ -144,12 +142,6 @@ impl From<PreparationError> for InterpretError {
}
}
impl From<harmony_secret::SecretStoreError> for InterpretError {
fn from(value: harmony_secret::SecretStoreError) -> Self {
InterpretError::new(format!("Interpret error : {value}"))
}
}
impl From<ExecutorError> for InterpretError {
fn from(value: ExecutorError) -> Self {
Self {

View File

@@ -17,14 +17,12 @@ impl InventoryFilter {
use derive_new::new;
use log::info;
use serde::{Deserialize, Serialize};
use strum::EnumIter;
use crate::hardware::{ManagementInterface, ManualManagementInterface};
use super::{
filter::Filter,
hardware::{HostGroup, Location, SwitchGroup},
hardware::{FirewallGroup, HostGroup, Location, SwitchGroup},
};
#[derive(Debug)]
@@ -64,7 +62,6 @@ impl Inventory {
}
}
#[derive(Debug, Serialize, Deserialize, sqlx::Type, Clone, EnumIter)]
pub enum HostRole {
Bootstrap,
ControlPlane,

View File

@@ -29,7 +29,6 @@ pub trait InventoryRepository: Send + Sync + 'static {
async fn save(&self, host: &PhysicalHost) -> Result<(), RepoError>;
async fn get_latest_by_id(&self, host_id: &str) -> Result<Option<PhysicalHost>, RepoError>;
async fn get_all_hosts(&self) -> Result<Vec<PhysicalHost>, RepoError>;
async fn get_host_for_role(&self, role: &HostRole) -> Result<Vec<PhysicalHost>, RepoError>;
async fn save_role_mapping(
&self,
role: &HostRole,

View File

@@ -69,26 +69,6 @@ impl K8sclient for HAClusterTopology {
}
impl HAClusterTopology {
// TODO this is a hack to avoid refactoring
pub fn get_cluster_name(&self) -> String {
self.domain_name
.split(".")
.next()
.expect("Cluster domain name must not be empty")
.to_string()
}
pub fn get_cluster_base_domain(&self) -> String {
let base_domain = self
.domain_name
.strip_prefix(&self.get_cluster_name())
.expect("cluster domain must start with cluster name");
base_domain
.strip_prefix(".")
.unwrap_or(base_domain)
.to_string()
}
pub fn autoload() -> Self {
let dummy_infra = Arc::new(DummyInfra {});
let dummy_host = LogicalHost {
@@ -237,12 +217,8 @@ impl Router for HAClusterTopology {
#[async_trait]
impl HttpServer for HAClusterTopology {
async fn serve_files(
&self,
url: &Url,
remote_path: &Option<String>,
) -> Result<(), ExecutorError> {
self.http_server.serve_files(url, remote_path).await
async fn serve_files(&self, url: &Url) -> Result<(), ExecutorError> {
self.http_server.serve_files(url).await
}
async fn serve_file_content(&self, file: &FileContent) -> Result<(), ExecutorError> {
@@ -401,11 +377,7 @@ impl TftpServer for DummyInfra {
#[async_trait]
impl HttpServer for DummyInfra {
async fn serve_files(
&self,
_url: &Url,
_remote_path: &Option<String>,
) -> Result<(), ExecutorError> {
async fn serve_files(&self, _url: &Url) -> Result<(), ExecutorError> {
unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA)
}
async fn serve_file_content(&self, _file: &FileContent) -> Result<(), ExecutorError> {

View File

@@ -5,11 +5,7 @@ use harmony_types::net::IpAddress;
use harmony_types::net::Url;
#[async_trait]
pub trait HttpServer: Send + Sync {
async fn serve_files(
&self,
url: &Url,
remote_path: &Option<String>,
) -> Result<(), ExecutorError>;
async fn serve_files(&self, url: &Url) -> Result<(), ExecutorError>;
async fn serve_file_content(&self, file: &FileContent) -> Result<(), ExecutorError>;
fn get_ip(&self) -> IpAddress;
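
With `remote_path` gone from `serve_files`, callers can no longer pick a subdirectory under the HTTP root. A hypothetical call-site migration, assuming a `server` implementing the trait above (`HttpServer` and `ExecutorError` are the crate items in scope here):

```rust
use harmony_types::net::Url;

async fn publish(server: &dyn HttpServer, url: Url) -> Result<(), ExecutorError> {
    // Before this change the call took an optional remote subdirectory:
    //     server.serve_files(&url, &Some("pxe".to_string())).await
    // After it, content is always placed at the server's HTTP root:
    server.serve_files(&url).await
}
```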

View File

@@ -17,7 +17,7 @@ use kube::{
};
use log::{debug, error, trace};
use serde::{Serialize, de::DeserializeOwned};
use serde_json::{Value, json};
use serde_json::json;
use similar::TextDiff;
use tokio::io::AsyncReadExt;
@@ -53,21 +53,6 @@ impl K8sClient {
})
}
pub async fn get_resource_json_value(
&self,
name: &str,
namespace: Option<&str>,
gvk: &GroupVersionKind,
) -> Result<DynamicObject, Error> {
let gvk = ApiResource::from_gvk(gvk);
let resource: Api<DynamicObject> = if let Some(ns) = namespace {
Api::namespaced_with(self.client.clone(), ns, &gvk)
} else {
Api::default_namespaced_with(self.client.clone(), &gvk)
};
Ok(resource.get(name).await?)
}
pub async fn get_deployment(
&self,
name: &str,
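
The removed `get_resource_json_value` wrapped kube's dynamic-object API; its one caller, `ArgoInterpret::get_host_domain`, is removed further down in this diff. Reconstructed from that hunk, usage looked roughly like this:

```rust
use kube::api::GroupVersionKind;

// `client` is the K8sClient wrapper above; returns the OpenShift ingress domain.
async fn ingress_domain(client: &K8sClient) -> Option<String> {
    let gvk = GroupVersionKind {
        group: "operator.openshift.io".into(),
        version: "v1".into(),
        kind: "IngressController".into(),
    };
    let ic = client
        .get_resource_json_value("default", Some("openshift-ingress-operator"), &gvk)
        .await
        .ok()?;
    ic.data["status"]["domain"].as_str().map(str::to_string)
}
```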

View File

@@ -14,11 +14,10 @@ use crate::{
monitoring::kube_prometheus::crd::{
crd_alertmanager_config::CRDPrometheus,
prometheus_operator::prometheus_operator_helm_chart_score,
rhob_alertmanager_config::RHOBObservability,
},
prometheus::{
k8s_prometheus_alerting_score::K8sPrometheusCRDAlertingScore,
prometheus::PrometheusApplicationMonitoring, rhob_alerting_score::RHOBAlertingScore,
prometheus::PrometheusApplicationMonitoring,
},
},
score::Score,
@@ -109,43 +108,6 @@ impl PrometheusApplicationMonitoring<CRDPrometheus> for K8sAnywhereTopology {
}
}
#[async_trait]
impl PrometheusApplicationMonitoring<RHOBObservability> for K8sAnywhereTopology {
async fn install_prometheus(
&self,
sender: &RHOBObservability,
inventory: &Inventory,
receivers: Option<Vec<Box<dyn AlertReceiver<RHOBObservability>>>>,
) -> Result<PreparationOutcome, PreparationError> {
let po_result = self.ensure_cluster_observability_operator(sender).await?;
if po_result == PreparationOutcome::Noop {
debug!("Skipping Prometheus CR installation due to missing operator.");
return Ok(po_result);
}
let result = self
.get_cluster_observability_operator_prometheus_application_score(
sender.clone(),
receivers,
)
.await
.interpret(inventory, self)
.await;
match result {
Ok(outcome) => match outcome.status {
InterpretStatus::SUCCESS => Ok(PreparationOutcome::Success {
details: outcome.message,
}),
InterpretStatus::NOOP => Ok(PreparationOutcome::Noop),
_ => Err(PreparationError::new(outcome.message)),
},
Err(err) => Err(PreparationError::new(err.to_string())),
}
}
}
impl Serialize for K8sAnywhereTopology {
fn serialize<S>(&self, _serializer: S) -> Result<S::Ok, S::Error>
where
@@ -172,19 +134,6 @@ impl K8sAnywhereTopology {
}
}
async fn get_cluster_observability_operator_prometheus_application_score(
&self,
sender: RHOBObservability,
receivers: Option<Vec<Box<dyn AlertReceiver<RHOBObservability>>>>,
) -> RHOBAlertingScore {
RHOBAlertingScore {
sender,
receivers: receivers.unwrap_or_default(),
service_monitors: vec![],
prometheus_rules: vec![],
}
}
async fn get_k8s_prometheus_application_score(
&self,
sender: CRDPrometheus,
@@ -337,60 +286,6 @@ impl K8sAnywhereTopology {
}
}
async fn ensure_cluster_observability_operator(
&self,
sender: &RHOBObservability,
) -> Result<PreparationOutcome, PreparationError> {
let status = Command::new("sh")
.args(["-c", "kubectl get crd -A | grep -i rhobs"])
.status()
.map_err(|e| PreparationError::new(format!("could not connect to cluster: {}", e)))?;
if !status.success() {
if let Some(Some(k8s_state)) = self.k8s_state.get() {
match k8s_state.source {
K8sSource::LocalK3d => {
debug!("installing cluster observability operator");
todo!();
let op_score =
prometheus_operator_helm_chart_score(sender.namespace.clone());
let result = op_score.interpret(&Inventory::empty(), self).await;
return match result {
Ok(outcome) => match outcome.status {
InterpretStatus::SUCCESS => Ok(PreparationOutcome::Success {
details: "installed cluster observability operator".into(),
}),
InterpretStatus::NOOP => Ok(PreparationOutcome::Noop),
_ => Err(PreparationError::new(
"failed to install cluster observability operator (unknown error)".into(),
)),
},
Err(err) => Err(PreparationError::new(err.to_string())),
};
}
K8sSource::Kubeconfig => {
debug!(
"unable to install cluster observability operator, contact cluster admin"
);
return Ok(PreparationOutcome::Noop);
}
}
} else {
warn!(
"Unable to detect k8s_state. Skipping Cluster Observability Operator install."
);
return Ok(PreparationOutcome::Noop);
}
}
debug!("Cluster Observability Operator is already present, skipping install");
Ok(PreparationOutcome::Success {
details: "cluster observability operator present in cluster".into(),
})
}
async fn ensure_prometheus_operator(
&self,
sender: &CRDPrometheus,
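
Two details of the removed RHOB path are worth noting: the `LocalK3d` branch hits `todo!()` before it ever builds the helm score, so the local install path was still unfinished, and the operator detection shells out to `kubectl get crd -A | grep -i rhobs`. An in-process equivalent of that check via kube-rs would look roughly like this (a sketch of an alternative, not what the removed code did):

```rust
use k8s_openapi::apiextensions_apiserver::pkg::apis::apiextensions::v1::CustomResourceDefinition;
use kube::{Api, Client, api::ListParams};

// True if any CRD name mentions rhobs (e.g. monitoringstacks.monitoring.rhobs).
async fn has_rhobs_crds(client: Client) -> Result<bool, kube::Error> {
    let crds: Api<CustomResourceDefinition> = Api::all(client);
    let list = crds.list(&ListParams::default()).await?;
    Ok(list
        .items
        .iter()
        .filter_map(|crd| crd.metadata.name.as_deref())
        .any(|name| name.to_lowercase().contains("rhobs")))
}
```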

View File

@@ -102,17 +102,8 @@ pub enum HttpStatusCode {
ServerError5xx,
}
#[derive(Debug, Clone, PartialEq, Serialize)]
pub enum SSL {
SSL,
Disabled,
Default,
SNI,
Other(String),
}
#[derive(Debug, Clone, PartialEq, Serialize)]
pub enum HealthCheck {
HTTP(String, HttpMethod, HttpStatusCode, SSL),
HTTP(String, HttpMethod, HttpStatusCode),
TCP(Option<u16>),
}
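
The `SSL` enum disappears from the health-check model, so the HAProxy `ssl` knob is no longer surfaced. At construction sites (see the load-balancer example earlier in this diff) the fourth tuple field simply drops away:

```rust
fn default_health_check() -> HealthCheck {
    // Before this change the variant carried an SSL mode as a fourth field:
    //     HealthCheck::HTTP(path, method, status, SSL::Disabled)
    // After it, the SSL dimension is gone entirely:
    HealthCheck::HTTP("/health".to_string(), HttpMethod::GET, HttpStatusCode::Success2xx)
}
```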

View File

@@ -11,21 +11,15 @@ use super::{LogicalHost, k8s::K8sClient};
#[derive(Debug)]
pub struct DHCPStaticEntry {
pub name: String,
pub mac: Vec<MacAddress>,
pub mac: MacAddress,
pub ip: Ipv4Addr,
}
impl std::fmt::Display for DHCPStaticEntry {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mac = self
.mac
.iter()
.map(|m| m.to_string())
.collect::<Vec<String>>()
.join(",");
f.write_fmt(format_args!(
"DHCPStaticEntry : name {}, mac {}, ip {}",
self.name, mac, self.ip
self.name, self.mac, self.ip
))
}
}
@@ -47,7 +41,6 @@ impl std::fmt::Debug for dyn Firewall {
pub struct NetworkDomain {
pub name: String,
}
#[async_trait]
pub trait K8sclient: Send + Sync {
async fn k8s_client(&self) -> Result<Arc<K8sClient>, String>;
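
`DHCPStaticEntry` now carries exactly one MAC per static lease instead of a list. A hypothetical construction before and after; the `mac_address!` macro is assumed to take a string literal, like the `ip!` macro used in the examples, and the values are invented:

```rust
use std::net::Ipv4Addr;
use harmony_macros::mac_address;

fn bootstrap_lease() -> DHCPStaticEntry {
    DHCPStaticEntry {
        name: "bootstrap".to_string(),
        // Before this change: mac: vec![mac_address!("aa:bb:cc:dd:ee:ff")],
        mac: mac_address!("aa:bb:cc:dd:ee:ff"),
        ip: Ipv4Addr::new(192, 168, 1, 10),
    }
}
```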

View File

@@ -1,6 +1,6 @@
use crate::{
hardware::PhysicalHost,
inventory::{HostRole, InventoryRepository, RepoError},
inventory::{InventoryRepository, RepoError},
};
use async_trait::async_trait;
use harmony_types::id::Id;
@@ -46,104 +46,20 @@ impl InventoryRepository for SqliteInventoryRepository {
}
async fn get_latest_by_id(&self, host_id: &str) -> Result<Option<PhysicalHost>, RepoError> {
let row = sqlx::query_as!(
let _row = sqlx::query_as!(
DbHost,
r#"SELECT id, version_id, data as "data: Json<PhysicalHost>" FROM physical_hosts WHERE id = ? ORDER BY version_id DESC LIMIT 1"#,
host_id
)
.fetch_optional(&self.pool)
.await?;
Ok(row.map(|r| r.data.0))
}
async fn get_all_hosts(&self) -> Result<Vec<PhysicalHost>, RepoError> {
let db_hosts = sqlx::query_as!(
DbHost,
r#"
SELECT
p1.id,
p1.version_id,
p1.data as "data: Json<PhysicalHost>"
FROM
physical_hosts p1
INNER JOIN (
SELECT
id,
MAX(version_id) AS max_version
FROM
physical_hosts
GROUP BY
id
) p2 ON p1.id = p2.id AND p1.version_id = p2.max_version
"#
)
.fetch_all(&self.pool)
.await?;
let hosts = db_hosts.into_iter().map(|row| row.data.0).collect();
Ok(hosts)
}
async fn save_role_mapping(
&self,
role: &HostRole,
host: &PhysicalHost,
) -> Result<(), RepoError> {
let host_id = host.id.to_string();
sqlx::query!(
r#"
INSERT INTO host_role_mapping (host_id, role)
VALUES (?, ?)
"#,
host_id,
role
)
.execute(&self.pool)
.await?;
info!("Saved role mapping for host '{}' as '{:?}'", host.id, role);
Ok(())
}
async fn get_host_for_role(&self, role: &HostRole) -> Result<Vec<PhysicalHost>, RepoError> {
struct HostIdRow {
host_id: String,
}
let role_str = format!("{:?}", role);
let host_id_rows = sqlx::query_as!(
HostIdRow,
"SELECT host_id FROM host_role_mapping WHERE role = ?",
role_str
)
.fetch_all(&self.pool)
.await?;
let mut hosts = Vec::with_capacity(host_id_rows.len());
for row in host_id_rows {
match self.get_latest_by_id(&row.host_id).await? {
Some(host) => hosts.push(host),
None => {
log::warn!(
"Found a role mapping for host_id '{}', but the host does not exist in the physical_hosts table. This may indicate a data integrity issue.",
row.host_id
);
}
}
}
Ok(hosts)
todo!()
}
}
use sqlx::types::Json;
struct DbHost {
data: Json<PhysicalHost>,
id: String,
version_id: String,
id: Id,
version_id: Id,
}

View File

@@ -17,13 +17,13 @@ impl DhcpServer for OPNSenseFirewall {
}
async fn add_static_mapping(&self, entry: &DHCPStaticEntry) -> Result<(), ExecutorError> {
let mac: Vec<String> = entry.mac.iter().map(MacAddress::to_string).collect();
let mac: String = String::from(&entry.mac);
{
let mut writable_opnsense = self.opnsense_config.write().await;
writable_opnsense
.dhcp()
.add_static_mapping(&mac, &entry.ip, &entry.name)
.add_static_mapping(&mac, entry.ip, &entry.name)
.unwrap();
}

View File

@@ -1,3 +1,4 @@
use crate::infra::opnsense::Host;
use crate::infra::opnsense::LogicalHost;
use crate::{
executors::ExecutorError,
@@ -11,22 +12,21 @@ use super::OPNSenseFirewall;
#[async_trait]
impl DnsServer for OPNSenseFirewall {
async fn register_hosts(&self, hosts: Vec<DnsRecord>) -> Result<(), ExecutorError> {
todo!("Refactor this to use dnsmasq")
// let mut writable_opnsense = self.opnsense_config.write().await;
// let mut dns = writable_opnsense.dns();
// let hosts = hosts
// .iter()
// .map(|h| {
// Host::new(
// h.host.clone(),
// h.domain.clone(),
// h.record_type.to_string(),
// h.value.to_string(),
// )
// })
// .collect();
// dns.add_static_mapping(hosts);
// Ok(())
let mut writable_opnsense = self.opnsense_config.write().await;
let mut dns = writable_opnsense.dns();
let hosts = hosts
.iter()
.map(|h| {
Host::new(
h.host.clone(),
h.domain.clone(),
h.record_type.to_string(),
h.value.to_string(),
)
})
.collect();
dns.register_hosts(hosts);
Ok(())
}
fn remove_record(
@@ -38,26 +38,25 @@ impl DnsServer for OPNSenseFirewall {
}
async fn list_records(&self) -> Vec<crate::topology::DnsRecord> {
todo!("Refactor this to use dnsmasq")
// self.opnsense_config
// .write()
// .await
// .dns()
// .get_hosts()
// .iter()
// .map(|h| DnsRecord {
// host: h.hostname.clone(),
// domain: h.domain.clone(),
// record_type: h
// .rr
// .parse()
// .expect("received invalid record type {h.rr} from opnsense"),
// value: h
// .server
// .parse()
// .expect("received invalid ipv4 record from opnsense {h.server}"),
// })
// .collect()
self.opnsense_config
.write()
.await
.dns()
.get_hosts()
.iter()
.map(|h| DnsRecord {
host: h.hostname.clone(),
domain: h.domain.clone(),
record_type: h
.rr
.parse()
.expect("received invalid record type {h.rr} from opnsense"),
value: h
.server
.parse()
.expect("received invalid ipv4 record from opnsense {h.server}"),
})
.collect()
}
fn get_ip(&self) -> IpAddress {
@@ -69,12 +68,11 @@ impl DnsServer for OPNSenseFirewall {
}
async fn register_dhcp_leases(&self, register: bool) -> Result<(), ExecutorError> {
todo!("Refactor this to use dnsmasq")
// let mut writable_opnsense = self.opnsense_config.write().await;
// let mut dns = writable_opnsense.dns();
// dns.register_dhcp_leases(register);
//
// Ok(())
let mut writable_opnsense = self.opnsense_config.write().await;
let mut dns = writable_opnsense.dns();
dns.register_dhcp_leases(register);
Ok(())
}
async fn commit_config(&self) -> Result<(), ExecutorError> {

View File

@@ -10,21 +10,13 @@ const OPNSENSE_HTTP_ROOT_PATH: &str = "/usr/local/http";
#[async_trait]
impl HttpServer for OPNSenseFirewall {
async fn serve_files(
&self,
url: &Url,
remote_path: &Option<String>,
) -> Result<(), ExecutorError> {
async fn serve_files(&self, url: &Url) -> Result<(), ExecutorError> {
let config = self.opnsense_config.read().await;
info!("Uploading files from url {url} to {OPNSENSE_HTTP_ROOT_PATH}");
let remote_upload_path = remote_path
.clone()
.map(|r| format!("{OPNSENSE_HTTP_ROOT_PATH}/{r}"))
.unwrap_or(OPNSENSE_HTTP_ROOT_PATH.to_string());
match url {
Url::LocalFolder(path) => {
config
.upload_files(path, &remote_upload_path)
.upload_files(path, OPNSENSE_HTTP_ROOT_PATH)
.await
.map_err(|e| ExecutorError::UnexpectedError(e.to_string()))?;
}

View File

@@ -1,15 +1,13 @@
use async_trait::async_trait;
use log::{debug, error, info, warn};
use opnsense_config_xml::{
Frontend, HAProxy, HAProxyBackend, HAProxyHealthCheck, HAProxyServer, MaybeString,
};
use log::{debug, info, warn};
use opnsense_config_xml::{Frontend, HAProxy, HAProxyBackend, HAProxyHealthCheck, HAProxyServer};
use uuid::Uuid;
use crate::{
executors::ExecutorError,
topology::{
BackendServer, HealthCheck, HttpMethod, HttpStatusCode, LoadBalancer, LoadBalancerService,
LogicalHost, SSL,
LogicalHost,
},
};
use harmony_types::net::IpAddress;
@@ -208,22 +206,7 @@ pub(crate) fn get_health_check_for_backend(
.unwrap_or_default()
.into();
let status_code: HttpStatusCode = HttpStatusCode::Success2xx;
let ssl = match haproxy_health_check
.ssl
.content_string()
.to_uppercase()
.as_str()
{
"SSL" => SSL::SSL,
"SSLNI" => SSL::SNI,
"NOSSL" => SSL::Disabled,
"" => SSL::Default,
other => {
error!("Unknown haproxy health check ssl config {other}");
SSL::Other(other.to_string())
}
};
Some(HealthCheck::HTTP(path, method, status_code, ssl))
Some(HealthCheck::HTTP(path, method, status_code))
}
_ => panic!("Received unsupported health check type {}", uppercase),
}
@@ -258,14 +241,7 @@ pub(crate) fn harmony_load_balancer_service_to_haproxy_xml(
// frontend points to backend
let healthcheck = if let Some(health_check) = &service.health_check {
match health_check {
HealthCheck::HTTP(path, http_method, _http_status_code, ssl) => {
let ssl: MaybeString = match ssl {
SSL::SSL => "ssl".into(),
SSL::SNI => "sslni".into(),
SSL::Disabled => "nossl".into(),
SSL::Default => "".into(),
SSL::Other(other) => other.as_str().into(),
};
HealthCheck::HTTP(path, http_method, _http_status_code) => {
let haproxy_check = HAProxyHealthCheck {
name: format!("HTTP_{http_method}_{path}"),
uuid: Uuid::new_v4().to_string(),
@@ -273,7 +249,6 @@ pub(crate) fn harmony_load_balancer_service_to_haproxy_xml(
health_check_type: "http".to_string(),
http_uri: path.clone().into(),
interval: "2s".to_string(),
ssl,
..Default::default()
};

View File

@@ -50,55 +50,6 @@ pub struct ContinuousDelivery<A: OCICompliant + HelmPackage> {
}
impl<A: OCICompliant + HelmPackage> ContinuousDelivery<A> {
pub async fn deploy<T>(&self, topology: &T, helm_chart: String, image: String) -> Result<(), String>
where
T: Topology + HelmCommand + MultiTargetTopology + K8sclient + 'static,
{
// TODO: this is a temporary hack for demo purposes, the deployment target should be driven
// by the topology only and we should not have to know how to perform tasks like this for
// which the topology should be responsible.
//
// That said, this will require some careful architectural decisions, since the concept of
// deployment targets / profiles is probably a layer of complexity that we won't be
// completely able to avoid
//
// I'll try something for now that must be thought through after: add a deployment_profile
// function to the topology trait that returns a profile, then anybody who needs it can
// access it. This forces every Topology to understand the concept of targets though... So
// instead I'll create a new Capability which is MultiTargetTopology and we'll see how it
// goes. It still does not feel right though.
//
// https://git.nationtech.io/NationTech/harmony/issues/106
match topology.current_target() {
DeploymentTarget::LocalDev => {
info!("Deploying {} locally...", self.application.name());
self.deploy_to_local_k3d(self.application.name(), helm_chart, image)
.await?;
}
target => {
info!("Deploying {} to target {target:?}", self.application.name());
let score = ArgoHelmScore {
namespace: format!("{}", self.application.name()),
openshift: true,
argo_apps: vec![ArgoApplication::from(CDApplicationConfig {
// helm pull oci://hub.nationtech.io/harmony/harmony-example-rust-webapp-chart --version 0.1.0
version: Version::from("0.1.0").unwrap(),
helm_chart_repo_url: "hub.nationtech.io/harmony".to_string(),
helm_chart_name: format!("{}-chart", self.application.name()),
values_overrides: None,
name: format!("{}", self.application.name()),
namespace: format!("{}", self.application.name()),
})],
};
score
.interpret(&Inventory::empty(), topology)
.await
.unwrap();
}
};
Ok(())
}
async fn deploy_to_local_k3d(
&self,
app_name: String,
@@ -202,7 +153,50 @@ impl<
// https://git.nationtech.io/NationTech/harmony/issues/104
let image = self.application.build_push_oci_image().await?;
self.deploy(topology, helm_chart, image).await
// TODO: this is a temporary hack for demo purposes, the deployment target should be driven
// by the topology only and we should not have to know how to perform tasks like this for
// which the topology should be responsible.
//
// That said, this will require some careful architectural decisions, since the concept of
// deployment targets / profiles is probably a layer of complexity that we won't be
// completely able to avoid
//
// I'll try something for now that must be thought through after: add a deployment_profile
// function to the topology trait that returns a profile, then anybody who needs it can
// access it. This forces every Topology to understand the concept of targets though... So
// instead I'll create a new Capability which is MultiTargetTopology and we'll see how it
// goes. It still does not feel right though.
//
// https://git.nationtech.io/NationTech/harmony/issues/106
match topology.current_target() {
DeploymentTarget::LocalDev => {
info!("Deploying {} locally...", self.application.name());
self.deploy_to_local_k3d(self.application.name(), helm_chart, image)
.await?;
}
target => {
info!("Deploying {} to target {target:?}", self.application.name());
let score = ArgoHelmScore {
namespace: "harmony-example-rust-webapp".to_string(),
openshift: true,
domain: "argo.harmonydemo.apps.ncd0.harmony.mcd".to_string(),
argo_apps: vec![ArgoApplication::from(CDApplicationConfig {
// helm pull oci://hub.nationtech.io/harmony/harmony-example-rust-webapp-chart --version 0.1.0
version: Version::from("0.1.0").unwrap(),
helm_chart_repo_url: "hub.nationtech.io/harmony".to_string(),
helm_chart_name: "harmony-example-rust-webapp-chart".to_string(),
values_overrides: None,
name: "harmony-demo-rust-webapp".to_string(),
namespace: "harmony-example-rust-webapp".to_string(),
})],
};
score
.interpret(&Inventory::empty(), topology)
.await
.unwrap();
}
};
Ok(())
}
fn name(&self) -> String {
"ContinuousDelivery".to_string()

View File

@@ -1,10 +1,7 @@
use async_trait::async_trait;
use kube::{Api, api::GroupVersionKind};
use log::{debug, warn};
use non_blank_string_rs::NonBlankString;
use serde::Serialize;
use serde::de::DeserializeOwned;
use std::{process::Command, str::FromStr, sync::Arc};
use std::str::FromStr;
use crate::{
data::Version,
@@ -12,9 +9,7 @@ use crate::{
inventory::Inventory,
modules::helm::chart::{HelmChartScore, HelmRepository},
score::Score,
topology::{
HelmCommand, K8sclient, PreparationError, PreparationOutcome, Topology, k8s::K8sClient,
},
topology::{HelmCommand, K8sclient, Topology},
};
use harmony_types::id::Id;
@@ -24,13 +19,15 @@ use super::ArgoApplication;
pub struct ArgoHelmScore {
pub namespace: String,
pub openshift: bool,
pub domain: String,
pub argo_apps: Vec<ArgoApplication>,
}
impl<T: Topology + HelmCommand + K8sclient> Score<T> for ArgoHelmScore {
fn create_interpret(&self) -> Box<dyn crate::interpret::Interpret<T>> {
let helm_score = argo_helm_chart_score(&self.namespace, self.openshift, &self.domain);
Box::new(ArgoInterpret {
score: self.clone(),
score: helm_score,
argo_apps: self.argo_apps.clone(),
})
}
@@ -42,7 +39,7 @@ impl<T: Topology + HelmCommand + K8sclient> Score<T> for ArgoHelmScore {
#[derive(Debug)]
pub struct ArgoInterpret {
score: ArgoHelmScore,
score: HelmChartScore,
argo_apps: Vec<ArgoApplication>,
}
@@ -53,16 +50,9 @@ impl<T: Topology + K8sclient + HelmCommand> Interpret<T> for ArgoInterpret {
inventory: &Inventory,
topology: &T,
) -> Result<Outcome, InterpretError> {
self.score.interpret(inventory, topology).await?;
let k8s_client = topology.k8s_client().await?;
let domain = self
.get_host_domain(k8s_client.clone(), self.score.openshift)
.await?;
let domain = format!("argo.{domain}");
let helm_score =
argo_helm_chart_score(&self.score.namespace, self.score.openshift, &domain);
helm_score.interpret(inventory, topology).await?;
k8s_client
.apply_yaml_many(&self.argo_apps.iter().map(|a| a.to_yaml()).collect(), None)
.await
@@ -95,38 +85,6 @@ impl<T: Topology + K8sclient + HelmCommand> Interpret<T> for ArgoInterpret {
}
}
impl ArgoInterpret {
pub async fn get_host_domain(
&self,
client: Arc<K8sClient>,
openshift: bool,
) -> Result<String, InterpretError> {
//This should be the job of the topology to determine if we are in
//openshift, potentially we need an openshift topology the same way we create a
//localhosttopology
match openshift {
true => {
let gvk = GroupVersionKind {
group: "operator.openshift.io".into(),
version: "v1".into(),
kind: "IngressController".into(),
};
let ic = client
.get_resource_json_value("default", Some("openshift-ingress-operator"), &gvk)
.await?;
match ic.data["status"]["domain"].as_str() {
Some(domain) => return Ok(domain.to_string()),
None => return Err(InterpretError::new("Could not find domain".to_string())),
}
}
false => {
todo!()
}
};
}
}
pub fn argo_helm_chart_score(namespace: &str, openshift: bool, domain: &str) -> HelmChartScore {
let values = format!(
r#"
@@ -702,7 +660,7 @@ server:
# nginx.ingress.kubernetes.io/ssl-passthrough: "true"
# -- Defines which ingress controller will implement the resource
ingressClassName: "openshift-default"
ingressClassName: ""
# -- Argo CD server hostname
# @default -- `""` (defaults to global.domain)
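
`ArgoHelmScore` gains a caller-supplied `domain` instead of discovering it from the cluster's IngressController at interpret time. The construction now matches what the `ContinuousDelivery` hunk earlier in this diff does:

```rust
fn example_argo_score() -> ArgoHelmScore {
    ArgoHelmScore {
        namespace: "harmony-example-rust-webapp".to_string(),
        openshift: true,
        // Now provided up front by the caller rather than queried from the cluster.
        domain: "argo.harmonydemo.apps.ncd0.harmony.mcd".to_string(),
        argo_apps: vec![],
    }
}
```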

View File

@@ -1,6 +1,4 @@
mod endpoint;
pub mod rhob_monitoring;
mod multisite;
pub use endpoint::*;
mod monitoring;

View File

@@ -1,49 +0,0 @@
use std::sync::Arc;
use crate::modules::application::{Application, ApplicationFeature, StatelessApplication};
use crate::modules::monitoring::application_monitoring::application_monitoring_score::ApplicationMonitoringScore;
use crate::modules::monitoring::kube_prometheus::crd::crd_alertmanager_config::CRDPrometheus;
use crate::topology::{K8sAnywhereTopology, MultiTargetTopology};
use crate::{
inventory::Inventory,
modules::monitoring::{
alert_channel::webhook_receiver::WebhookReceiver, ntfy::ntfy::NtfyScore,
},
score::Score,
topology::{HelmCommand, K8sclient, Topology, tenant::TenantManager},
};
use crate::{
modules::prometheus::prometheus::PrometheusApplicationMonitoring,
topology::oberservability::monitoring::AlertReceiver,
};
use async_trait::async_trait;
use base64::{Engine as _, engine::general_purpose};
use harmony_types::net::Url;
use log::{debug, info};
trait DebugTopology: Topology + std::fmt::Debug {}
#[derive(Debug, Clone)]
pub struct Multisite {
app: Arc<dyn StatelessApplication>,
secondary_site: Arc<K8sAnywhereTopology>,
}
#[async_trait]
impl<T: Topology> ApplicationFeature<T> for Multisite {
async fn ensure_installed(&self, topology: &T) -> Result<(), String> {
todo!(
"
- Find a way to get the PVs for this application
- Find the PV CSI volume UUIDs
- Run `rbd mirror image enable --pool mirrored-pool csi-vol-<UUID_PV> snapshot`
- Enjoy
"
)
}
fn name(&self) -> String {
"Multisite".to_string()
}
}

View File

@@ -1,109 +0,0 @@
use std::sync::Arc;
use crate::modules::application::{Application, ApplicationFeature};
use crate::modules::monitoring::application_monitoring::application_monitoring_score::ApplicationMonitoringScore;
use crate::modules::monitoring::application_monitoring::rhobs_application_monitoring_score::ApplicationRHOBMonitoringScore;
use crate::modules::monitoring::kube_prometheus::crd::rhob_alertmanager_config::RHOBObservability;
use crate::topology::MultiTargetTopology;
use crate::{
inventory::Inventory,
modules::monitoring::{
alert_channel::webhook_receiver::WebhookReceiver, ntfy::ntfy::NtfyScore,
},
score::Score,
topology::{HelmCommand, K8sclient, Topology, tenant::TenantManager},
};
use crate::{
modules::prometheus::prometheus::PrometheusApplicationMonitoring,
topology::oberservability::monitoring::AlertReceiver,
};
use async_trait::async_trait;
use base64::{Engine as _, engine::general_purpose};
use harmony_types::net::Url;
use log::{debug, info};
#[derive(Debug, Clone)]
pub struct RHOBMonitoring {
pub application: Arc<dyn Application>,
pub alert_receiver: Vec<Box<dyn AlertReceiver<RHOBObservability>>>,
}
#[async_trait]
impl<
T: Topology
+ HelmCommand
+ 'static
+ TenantManager
+ K8sclient
+ MultiTargetTopology
+ std::fmt::Debug
+ PrometheusApplicationMonitoring<RHOBObservability>,
> ApplicationFeature<T> for RHOBMonitoring
{
async fn ensure_installed(&self, topology: &T) -> Result<(), String> {
info!("Ensuring monitoring is available for application");
let namespace = topology
.get_tenant_config()
.await
.map(|ns| ns.name.clone())
.unwrap_or_else(|| self.application.name());
let mut alerting_score = ApplicationRHOBMonitoringScore {
sender: RHOBObservability {
namespace: namespace.clone(),
client: topology.k8s_client().await.unwrap(),
},
application: self.application.clone(),
receivers: self.alert_receiver.clone(),
};
let ntfy = NtfyScore {
namespace: namespace.clone(),
host: "ntfy.harmonydemo.apps.ncd0.harmony.mcd".to_string(),
};
ntfy.interpret(&Inventory::empty(), topology)
.await
.map_err(|e| e.to_string())?;
let ntfy_default_auth_username = "harmony";
let ntfy_default_auth_password = "harmony";
let ntfy_default_auth_header = format!(
"Basic {}",
general_purpose::STANDARD.encode(format!(
"{ntfy_default_auth_username}:{ntfy_default_auth_password}"
))
);
debug!("ntfy_default_auth_header: {ntfy_default_auth_header}");
let ntfy_default_auth_param = general_purpose::STANDARD
.encode(ntfy_default_auth_header)
.replace("=", "");
debug!("ntfy_default_auth_param: {ntfy_default_auth_param}");
let ntfy_receiver = WebhookReceiver {
name: "ntfy-webhook".to_string(),
url: Url::Url(
url::Url::parse(
format!(
"http://ntfy.{}.svc.cluster.local/rust-web-app?auth={ntfy_default_auth_param}",
namespace.clone()
)
.as_str(),
)
.unwrap(),
),
};
alerting_score.receivers.push(Box::new(ntfy_receiver));
alerting_score
.interpret(&Inventory::empty(), topology)
.await
.map_err(|e| e.to_string())?;
Ok(())
}
fn name(&self) -> String {
"Monitoring".to_string()
}
}

View File

@@ -2,10 +2,6 @@ mod feature;
pub mod features;
pub mod oci;
mod rust;
mod stateless;
mod stateful;
pub use stateless::*;
pub use stateful::*;
use std::sync::Arc;
pub use feature::*;

View File

@@ -1,5 +1,4 @@
use std::fs::{self, File};
use std::io::Read;
use std::fs;
use std::path::{Path, PathBuf};
use std::process;
use std::sync::Arc;
@@ -13,8 +12,7 @@ use dockerfile_builder::instruction_builder::CopyBuilder;
use futures_util::StreamExt;
use log::{debug, info, log_enabled};
use serde::Serialize;
use tar::{Archive, Builder, Header};
use walkdir::WalkDir;
use tar::Archive;
use crate::config::{REGISTRY_PROJECT, REGISTRY_URL};
use crate::{score::Score, topology::Topology};
@@ -61,7 +59,6 @@ pub struct RustWebapp {
pub domain: Url,
/// The path to the root of the Rust project to be containerized.
pub project_root: PathBuf,
pub service_port: u32,
pub framework: Option<RustWebFramework>,
}
@@ -161,99 +158,45 @@ impl RustWebapp {
image_name: &str,
) -> Result<String, Box<dyn std::error::Error>> {
debug!("Generating Dockerfile for '{}'", self.name);
let dockerfile = self.get_or_build_dockerfile();
let _dockerfile_path = self.build_dockerfile()?;
let docker = Docker::connect_with_socket_defaults().unwrap();
let quiet = !log_enabled!(log::Level::Debug);
match dockerfile
let build_image_options = bollard::query_parameters::BuildImageOptionsBuilder::default()
.dockerfile("Dockerfile.harmony")
.t(image_name)
.q(quiet)
.version(bollard::query_parameters::BuilderVersion::BuilderV1)
.platform("linux/x86_64");
let mut temp_tar_builder = tar::Builder::new(Vec::new());
temp_tar_builder
.append_dir_all("", self.project_root.clone())
.unwrap();
let archive = temp_tar_builder
.into_inner()
.expect("couldn't finish creating tar");
let archived_files = Archive::new(archive.as_slice())
.entries()
.unwrap()
.file_name()
.and_then(|os_str| os_str.to_str())
{
Some(path_str) => {
debug!("Building from dockerfile {}", path_str);
.map(|entry| entry.unwrap().path().unwrap().into_owned())
.collect::<Vec<_>>();
let tar_data = self
.create_deterministic_tar(&self.project_root.clone())
.await
.unwrap();
debug!("files in docker tar: {:#?}", archived_files);
let docker = Docker::connect_with_socket_defaults().unwrap();
let mut image_build_stream = docker.build_image(
build_image_options.build(),
None,
Some(body_full(archive.into())),
);
let build_image_options =
bollard::query_parameters::BuildImageOptionsBuilder::default()
.dockerfile(path_str)
.t(image_name)
.q(quiet)
.version(bollard::query_parameters::BuilderVersion::BuilderV1)
.platform("linux/x86_64");
let mut image_build_stream = docker.build_image(
build_image_options.build(),
None,
Some(body_full(tar_data.into())),
);
while let Some(msg) = image_build_stream.next().await {
debug!("Message: {msg:?}");
}
Ok(image_name.to_string())
}
None => Err(Box::new(std::io::Error::new(
std::io::ErrorKind::InvalidData,
"Path is not valid UTF-8",
))),
while let Some(msg) = image_build_stream.next().await {
debug!("Message: {msg:?}");
}
}
/// Normalizes timestamps and ignores files that would bust the docker cache
async fn create_deterministic_tar(
&self,
project_root: &std::path::Path,
) -> Result<Vec<u8>, Box<dyn std::error::Error>> {
debug!("building tar file from project root {:#?}", project_root);
let mut tar_data = Vec::new();
{
let mut builder = Builder::new(&mut tar_data);
let ignore_prefixes = [
"target",
".git",
".github",
".harmony_generated",
"node_modules",
];
let mut entries: Vec<_> = WalkDir::new(project_root)
.into_iter()
.filter_map(Result::ok)
.filter(|e| e.file_type().is_file())
.filter(|e| {
let rel_path = e.path().strip_prefix(project_root).unwrap();
!ignore_prefixes
.iter()
.any(|prefix| rel_path.starts_with(prefix))
})
.collect();
entries.sort_by_key(|e| e.path().to_owned());
for entry in entries {
let path = entry.path();
let rel_path = path.strip_prefix(project_root).unwrap();
let mut file = fs::File::open(path)?;
let mut header = Header::new_gnu();
header.set_size(entry.metadata()?.len());
header.set_mode(0o644);
header.set_mtime(0);
header.set_uid(0);
header.set_gid(0);
builder.append_data(&mut header, rel_path, &mut file)?;
}
builder.finish()?;
}
Ok(tar_data)
Ok(image_name.to_string())
}
/// Tags and pushes a Docker image to the configured remote registry.
@@ -329,11 +272,8 @@ impl RustWebapp {
"groupadd -r appgroup && useradd -r -s /bin/false -g appgroup appuser",
));
dockerfile.push(ENV::from(format!(
"LEPTOS_SITE_ADDR=0.0.0.0:{}",
self.service_port
)));
dockerfile.push(EXPOSE::from(format!("{}/tcp", self.service_port)));
dockerfile.push(ENV::from("LEPTOS_SITE_ADDR=0.0.0.0:3000"));
dockerfile.push(EXPOSE::from("3000/tcp"));
dockerfile.push(WORKDIR::from("/home/appuser"));
// Copy static files
@@ -454,7 +394,7 @@ image:
service:
type: ClusterIP
port: {}
port: 3000
ingress:
enabled: true
@@ -474,123 +414,112 @@ ingress:
- chart-example.local
"#,
chart_name, image_repo, image_tag, self.service_port, self.name
chart_name, image_repo, image_tag, self.name
);
fs::write(chart_dir.join("values.yaml"), values_yaml)?;
// Create templates/_helpers.tpl
let helpers_tpl = format!(
r#"
{{{{/*
let helpers_tpl = r#"
{{/*
Expand the name of the chart.
*/}}}}
{{{{- define "chart.name" -}}}}
{{{{- default .Chart.Name $.Values.nameOverride | trunc 63 | trimSuffix "-" }}}}
{{{{- end }}}}
*/}}
{{- define "chart.name" -}}
{{- default .Chart.Name $.Values.nameOverride | trunc 63 | trimSuffix "-" }}
{{- end }}
{{{{/*
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
*/}}}}
{{{{- define "chart.fullname" -}}}}
{{{{- $name := default .Chart.Name $.Values.nameOverride }}}}
{{{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}}}
{{{{- end }}}}
"#
);
*/}}
{{- define "chart.fullname" -}}
{{- $name := default .Chart.Name $.Values.nameOverride }}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
{{- end }}
"#;
fs::write(templates_dir.join("_helpers.tpl"), helpers_tpl)?;
// Create templates/service.yaml
let service_yaml = format!(
r#"
let service_yaml = r#"
apiVersion: v1
kind: Service
metadata:
name: {{{{ include "chart.fullname" . }}}}
name: {{ include "chart.fullname" . }}
spec:
type: {{{{ $.Values.service.type }}}}
type: {{ $.Values.service.type }}
ports:
- name: main
port: {{{{ $.Values.service.port | default {} }}}}
targetPort: {{{{ $.Values.service.port | default {} }}}}
port: {{ $.Values.service.port | default 3000 }}
targetPort: {{ $.Values.service.port | default 3000 }}
protocol: TCP
selector:
app: {{{{ include "chart.name" . }}}}
"#,
self.service_port, self.service_port
);
app: {{ include "chart.name" . }}
"#;
fs::write(templates_dir.join("service.yaml"), service_yaml)?;
// Create templates/deployment.yaml
let deployment_yaml = format!(
r#"
let deployment_yaml = r#"
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{{{ include "chart.fullname" . }}}}
name: {{ include "chart.fullname" . }}
spec:
replicas: {{{{ $.Values.replicaCount }}}}
replicas: {{ $.Values.replicaCount }}
selector:
matchLabels:
app: {{{{ include "chart.name" . }}}}
app: {{ include "chart.name" . }}
template:
metadata:
labels:
app: {{{{ include "chart.name" . }}}}
app: {{ include "chart.name" . }}
spec:
containers:
- name: {{{{ .Chart.Name }}}}
image: "{{{{ $.Values.image.repository }}}}:{{{{ $.Values.image.tag | default .Chart.AppVersion }}}}"
imagePullPolicy: {{{{ $.Values.image.pullPolicy }}}}
- name: {{ .Chart.Name }}
image: "{{ $.Values.image.repository }}:{{ $.Values.image.tag | default .Chart.AppVersion }}"
imagePullPolicy: {{ $.Values.image.pullPolicy }}
ports:
- name: main
containerPort: {{{{ $.Values.service.port | default {} }}}}
containerPort: {{ $.Values.service.port | default 3000 }}
protocol: TCP
"#,
self.service_port
);
"#;
fs::write(templates_dir.join("deployment.yaml"), deployment_yaml)?;
// Create templates/ingress.yaml
let ingress_yaml = format!(
r#"
{{{{- if $.Values.ingress.enabled -}}}}
let ingress_yaml = r#"
{{- if $.Values.ingress.enabled -}}
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: {{{{ include "chart.fullname" . }}}}
name: {{ include "chart.fullname" . }}
annotations:
{{{{- toYaml $.Values.ingress.annotations | nindent 4 }}}}
{{- toYaml $.Values.ingress.annotations | nindent 4 }}
spec:
{{{{- if $.Values.ingress.tls }}}}
{{- if $.Values.ingress.tls }}
tls:
{{{{- range $.Values.ingress.tls }}}}
{{- range $.Values.ingress.tls }}
- hosts:
{{{{- range .hosts }}}}
- {{{{ . | quote }}}}
{{{{- end }}}}
secretName: {{{{ .secretName }}}}
{{{{- end }}}}
{{{{- end }}}}
{{- range .hosts }}
- {{ . | quote }}
{{- end }}
secretName: {{ .secretName }}
{{- end }}
{{- end }}
rules:
{{{{- range $.Values.ingress.hosts }}}}
- host: {{{{ .host | quote }}}}
{{- range $.Values.ingress.hosts }}
- host: {{ .host | quote }}
http:
paths:
{{{{- range .paths }}}}
- path: {{{{ .path }}}}
pathType: {{{{ .pathType }}}}
{{- range .paths }}
- path: {{ .path }}
pathType: {{ .pathType }}
backend:
service:
name: {{{{ include "chart.fullname" $ }}}}
name: {{ include "chart.fullname" $ }}
port:
number: {{{{ $.Values.service.port | default {} }}}}
{{{{- end }}}}
{{{{- end }}}}
{{{{- end }}}}
"#,
self.service_port
);
number: {{ $.Values.service.port | default 3000 }}
{{- end }}
{{- end }}
{{- end }}
"#;
fs::write(templates_dir.join("ingress.yaml"), ingress_yaml)?;
Ok(chart_dir)
@@ -642,6 +571,7 @@ spec:
let chart_file_name = packaged_chart_path.file_stem().unwrap().to_str().unwrap();
let oci_push_url = format!("oci://{}/{}", *REGISTRY_URL, *REGISTRY_PROJECT);
let oci_pull_url = format!("{oci_push_url}/{}-chart", self.name);
debug!(
"Pushing Helm chart {} to {}",
packaged_chart_path.to_string_lossy(),
@@ -660,20 +590,4 @@ spec:
debug!("push url {oci_push_url}");
Ok(format!("{}:{}", oci_pull_url, version))
}
fn get_or_build_dockerfile(&self) -> Result<PathBuf, Box<dyn std::error::Error>> {
let existing_dockerfile = self.project_root.join("Dockerfile");
debug!("project_root = {:?}", self.project_root);
debug!("checking = {:?}", existing_dockerfile);
if existing_dockerfile.exists() {
debug!(
"Checking path {:#?} for existing Dockerfile",
self.project_root.clone()
);
return Ok(existing_dockerfile);
}
self.build_dockerfile()
}
}

View File

@@ -1,6 +0,0 @@
use crate::modules::application::Application;
/// A StatefulApplication is an application bundle that writes persistent data.
///
/// This will enable backup features, stateful multisite replication, etc.
pub trait StatefulApplication: Application {}
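A minimal sketch (editor's addition, not part of this diff) of how the marker trait above might be adopted, assuming a hypothetical `PostgresApp` type; the required `Application` methods are elided since they belong to the crate:

struct PostgresApp;
impl Application for PostgresApp { /* required methods elided */ }
// Opting in to stateful features (backups, multisite replication) is a one-liner:
impl StatefulApplication for PostgresApp {}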

View File

@@ -1,26 +0,0 @@
use crate::modules::application::{Application, features::ContinuousDeliveryApplication};
/// Marker trait for stateless applications that can be deployed anywhere without worrying about
/// data.
///
/// This includes applications fitting these categories:
///
/// - Application with all files built into the docker image and never written to, can be mounted
///   read-only
/// - Application writing to hard drive on an ephemeral volume that can be lost at any time and does
///   not require any replication/backup logic to operate
///   - Not supported: an application that writes state to a volume that must be shared or kept
///     to maintain a quorum across various instances
/// - Application connecting to a database/datastore accessible from anywhere, such as
///   - Public bucket endpoint
///   - Publicly accessible
/// - Application connecting to a private database external to this application, accessible from the
///   deployment target
/// - Ensuring the private database is reachable is out of scope of this trait (for now)
///
/// The entire application definition **must not** require any persistent volume or include a
/// deployment component depending on persistent data such as a transitive PostgreSQL helm chart.
///
/// Typically, applications that can be autoscaled without additional complexity fit the
/// StatelessApplication requirements.
pub trait StatelessApplication: Application + ContinuousDeliveryApplication {}
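A similar hedged sketch for the stateless side, again with hypothetical names and elided trait bodies: a fully baked-in static site with a read-only image and no volumes satisfies every bullet above, so it can opt in safely:

struct StaticSite;
impl Application for StaticSite { /* required methods elided */ }
impl ContinuousDeliveryApplication for StaticSite { /* required methods elided */ }
impl StatelessApplication for StaticSite {}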

View File

@@ -1,7 +1,7 @@
use async_trait::async_trait;
use derive_new::new;
use harmony_types::id::Id;
use log::{info, trace};
use log::info;
use serde::Serialize;
use crate::{
@@ -23,7 +23,6 @@ pub struct DhcpScore {
pub filename64: Option<String>,
pub filenameipxe: Option<String>,
pub dhcp_range: (IpAddress, IpAddress),
pub domain: Option<String>,
}
impl<T: Topology + DhcpServer> Score<T> for DhcpScore {
@@ -115,7 +114,6 @@ impl<T: Topology + DhcpServer> Interpret<T> for DhcpInterpret {
DhcpHostBindingScore {
host_binding: self.score.host_binding.clone(),
domain: self.score.domain.clone(),
}
.interpret(inventory, topology)
.await?;
@@ -132,7 +130,6 @@ impl<T: Topology + DhcpServer> Interpret<T> for DhcpInterpret {
#[derive(Debug, new, Clone, Serialize)]
pub struct DhcpHostBindingScore {
pub host_binding: Vec<HostBinding>,
pub domain: Option<String>,
}
impl<T: Topology + DhcpServer> Score<T> for DhcpHostBindingScore {
@@ -171,22 +168,16 @@ impl DhcpHostBindingInterpret {
}
};
let name = if let Some(domain) = self.score.domain.as_ref() {
format!("{}.{}", binding.logical_host.name, domain)
} else {
binding.logical_host.name.clone()
};
DHCPStaticEntry {
name,
mac: binding.physical_host.get_mac_address(),
name: binding.logical_host.name.clone(),
mac: binding.physical_host.cluster_mac(),
ip,
}
})
.collect();
info!("DHCPStaticEntry : {:?}", dhcp_entries);
trace!("DHCP server : {:?}", dhcp_server);
info!("DHCP server : {:?}", dhcp_server);
let number_new_entries = dhcp_entries.len();

View File

@@ -25,11 +25,8 @@ use harmony_types::{id::Id, net::MacAddress};
/// ```
#[derive(Debug, new, Clone, Serialize)]
pub struct StaticFilesHttpScore {
// TODO this should be split in two scores, one for the folder and
// another for the files
pub folder_to_serve: Option<Url>,
pub files: Vec<FileContent>,
pub remote_path: Option<String>,
}
impl<T: Topology + HttpServer> Score<T> for StaticFilesHttpScore {
@@ -57,9 +54,7 @@ impl<T: Topology + HttpServer> Interpret<T> for StaticFilesHttpInterpret {
http_server.ensure_initialized().await?;
// http_server.set_ip(topology.router.get_gateway()).await?;
if let Some(folder) = self.score.folder_to_serve.as_ref() {
http_server
.serve_files(folder, &self.score.remote_path)
.await?;
http_server.serve_files(folder).await?;
}
for f in self.score.files.iter() {
@@ -110,7 +105,6 @@ impl<T: Topology + HttpServer> Score<T> for IPxeMacBootFileScore {
fn create_interpret(&self) -> Box<dyn Interpret<T>> {
StaticFilesHttpScore {
remote_path: None,
folder_to_serve: None,
files: self
.mac_address

View File

@@ -1,122 +0,0 @@
use async_trait::async_trait;
use harmony_types::id::Id;
use log::{error, info};
use serde::{Deserialize, Serialize};
use crate::{
data::Version,
hardware::PhysicalHost,
infra::inventory::InventoryRepositoryFactory,
interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
inventory::{HostRole, Inventory},
modules::inventory::LaunchDiscoverInventoryAgentScore,
score::Score,
topology::Topology,
};
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct DiscoverHostForRoleScore {
pub role: HostRole,
}
impl<T: Topology> Score<T> for DiscoverHostForRoleScore {
fn name(&self) -> String {
"DiscoverInventoryAgentScore".to_string()
}
fn create_interpret(&self) -> Box<dyn Interpret<T>> {
Box::new(DiscoverHostForRoleInterpret {
score: self.clone(),
})
}
}
#[derive(Debug)]
pub struct DiscoverHostForRoleInterpret {
score: DiscoverHostForRoleScore,
}
#[async_trait]
impl<T: Topology> Interpret<T> for DiscoverHostForRoleInterpret {
async fn execute(
&self,
inventory: &Inventory,
topology: &T,
) -> Result<Outcome, InterpretError> {
info!(
"Launching discovery agent, make sure that your nodes are successfully PXE booted and running inventory agent. They should answer on `http://<node_ip>:8080/inventory`"
);
LaunchDiscoverInventoryAgentScore {
discovery_timeout: None,
}
.interpret(inventory, topology)
.await?;
let host: PhysicalHost;
let host_repo = InventoryRepositoryFactory::build().await?;
loop {
let all_hosts = host_repo.get_all_hosts().await?;
if all_hosts.is_empty() {
info!("No discovered hosts found yet. Waiting for hosts to appear...");
// Sleep to avoid spamming the user and logs while waiting for nodes.
tokio::time::sleep(std::time::Duration::from_secs(3)).await;
continue;
}
let ans = inquire::Select::new(
&format!("Select the node to be used for role {:?}:", self.score.role),
all_hosts,
)
.with_help_message("Press Esc to refresh the list of discovered hosts")
.prompt();
match ans {
Ok(choice) => {
info!("Selected {} as the bootstrap node.", choice.summary());
host_repo
.save_role_mapping(&self.score.role, &choice)
.await?;
host = choice;
break;
}
Err(inquire::InquireError::OperationCanceled) => {
info!("Refresh requested. Fetching list of discovered hosts again...");
continue;
}
Err(e) => {
error!(
"Failed to select node for role {:?} : {}",
self.score.role, e
);
return Err(InterpretError::new(format!(
"Could not select host : {}",
e.to_string()
)));
}
}
}
Ok(Outcome::success(format!(
"Successfully discovered host {} for role {:?}",
host.summary(),
self.score.role
)))
}
fn get_name(&self) -> InterpretName {
InterpretName::Custom("DiscoverHostForRoleScore")
}
fn get_version(&self) -> Version {
todo!()
}
fn get_status(&self) -> InterpretStatus {
todo!()
}
fn get_children(&self) -> Vec<Id> {
todo!()
}
}

View File

@@ -1,72 +0,0 @@
use async_trait::async_trait;
use harmony_types::id::Id;
use log::info;
use serde::{Deserialize, Serialize};
use strum::IntoEnumIterator;
use crate::{
data::Version,
infra::inventory::InventoryRepositoryFactory,
interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
inventory::{HostRole, Inventory},
score::Score,
topology::Topology,
};
#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct InspectInventoryScore {}
impl<T: Topology> Score<T> for InspectInventoryScore {
fn name(&self) -> String {
"InspectInventoryScore".to_string()
}
#[doc(hidden)]
fn create_interpret(&self) -> Box<dyn Interpret<T>> {
Box::new(InspectInventoryInterpret {})
}
}
#[derive(Debug)]
pub struct InspectInventoryInterpret;
#[async_trait]
impl<T: Topology> Interpret<T> for InspectInventoryInterpret {
async fn execute(
&self,
_inventory: &Inventory,
_topology: &T,
) -> Result<Outcome, InterpretError> {
let repo = InventoryRepositoryFactory::build().await?;
for role in HostRole::iter() {
info!("Inspecting hosts for role {role:?}");
let hosts = repo.get_host_for_role(&role).await?;
info!("Hosts with role {role:?} : {}", hosts.len());
hosts.iter().enumerate().for_each(|(idx, h)| {
info!(
"Found host index {idx} with role {role:?} => \n{}\n{}",
h.summary(),
h.parts_list()
)
});
}
Ok(Outcome::success(
"Inventory inspection complete".to_string(),
))
}
fn get_name(&self) -> InterpretName {
InterpretName::Custom("InspectInventoryInterpret")
}
fn get_version(&self) -> Version {
todo!()
}
fn get_status(&self) -> InterpretStatus {
todo!()
}
fn get_children(&self) -> Vec<Id> {
todo!()
}
}

View File

@@ -1,7 +1,3 @@
mod discovery;
pub mod inspect;
pub use discovery::*;
use async_trait::async_trait;
use harmony_inventory_agent::local_presence::DiscoveryEvent;
use log::{debug, info, trace};

View File

@@ -4,7 +4,6 @@ use std::collections::BTreeMap;
use async_trait::async_trait;
use k8s_openapi::api::core::v1::Secret;
use kube::api::ObjectMeta;
use log::debug;
use serde::Serialize;
use serde_json::json;
use serde_yaml::{Mapping, Value};
@@ -12,7 +11,6 @@ use serde_yaml::{Mapping, Value};
use crate::modules::monitoring::kube_prometheus::crd::crd_alertmanager_config::{
AlertmanagerConfig, AlertmanagerConfigSpec, CRDPrometheus,
};
use crate::modules::monitoring::kube_prometheus::crd::rhob_alertmanager_config::RHOBObservability;
use crate::{
interpret::{InterpretError, Outcome},
modules::monitoring::{
@@ -32,71 +30,6 @@ pub struct DiscordWebhook {
pub url: Url,
}
#[async_trait]
impl AlertReceiver<RHOBObservability> for DiscordWebhook {
async fn install(&self, sender: &RHOBObservability) -> Result<Outcome, InterpretError> {
let spec = crate::modules::monitoring::kube_prometheus::crd::rhob_alertmanager_config::AlertmanagerConfigSpec {
data: json!({
"route": {
"receiver": self.name,
},
"receivers": [
{
"name": self.name,
"webhookConfigs": [
{
"url": self.url,
}
]
}
]
}),
};
let alertmanager_configs = crate::modules::monitoring::kube_prometheus::crd::rhob_alertmanager_config::AlertmanagerConfig {
metadata: ObjectMeta {
name: Some(self.name.clone()),
labels: Some(std::collections::BTreeMap::from([(
"alertmanagerConfig".to_string(),
"enabled".to_string(),
)])),
namespace: Some(sender.namespace.clone()),
..Default::default()
},
spec,
};
debug!(
"alertmanager_configs yaml:\n{:#?}",
serde_yaml::to_string(&alertmanager_configs)
);
debug!(
"alert manager configs: \n{:#?}",
alertmanager_configs.clone()
);
sender
.client
.apply(&alertmanager_configs, Some(&sender.namespace))
.await?;
Ok(Outcome::success(format!(
"installed rhob-alertmanagerconfigs for {}",
self.name
)))
}
fn name(&self) -> String {
"webhook-receiver".to_string()
}
fn clone_box(&self) -> Box<dyn AlertReceiver<RHOBObservability>> {
Box::new(self.clone())
}
fn as_any(&self) -> &dyn Any {
self
}
}
#[async_trait]
impl AlertReceiver<CRDPrometheus> for DiscordWebhook {
async fn install(&self, sender: &CRDPrometheus) -> Result<Outcome, InterpretError> {

View File

@@ -11,8 +11,8 @@ use crate::{
interpret::{InterpretError, Outcome},
modules::monitoring::{
kube_prometheus::{
crd::{
crd_alertmanager_config::CRDPrometheus, rhob_alertmanager_config::RHOBObservability,
crd::crd_alertmanager_config::{
AlertmanagerConfig, AlertmanagerConfigSpec, CRDPrometheus,
},
prometheus::{KubePrometheus, KubePrometheusReceiver},
types::{AlertChannelConfig, AlertManagerChannelConfig},
@@ -29,71 +29,10 @@ pub struct WebhookReceiver {
pub url: Url,
}
#[async_trait]
impl AlertReceiver<RHOBObservability> for WebhookReceiver {
async fn install(&self, sender: &RHOBObservability) -> Result<Outcome, InterpretError> {
let spec = crate::modules::monitoring::kube_prometheus::crd::rhob_alertmanager_config::AlertmanagerConfigSpec {
data: json!({
"route": {
"receiver": self.name,
},
"receivers": [
{
"name": self.name,
"webhookConfigs": [
{
"url": self.url,
}
]
}
]
}),
};
let alertmanager_configs = crate::modules::monitoring::kube_prometheus::crd::rhob_alertmanager_config::AlertmanagerConfig {
metadata: ObjectMeta {
name: Some(self.name.clone()),
labels: Some(std::collections::BTreeMap::from([(
"alertmanagerConfig".to_string(),
"enabled".to_string(),
)])),
namespace: Some(sender.namespace.clone()),
..Default::default()
},
spec,
};
debug!(
"alert manager configs: \n{:#?}",
alertmanager_configs.clone()
);
sender
.client
.apply(&alertmanager_configs, Some(&sender.namespace))
.await?;
Ok(Outcome::success(format!(
"installed rhob-alertmanagerconfigs for {}",
self.name
)))
}
fn name(&self) -> String {
"webhook-receiver".to_string()
}
fn clone_box(&self) -> Box<dyn AlertReceiver<RHOBObservability>> {
Box::new(self.clone())
}
fn as_any(&self) -> &dyn Any {
self
}
}
#[async_trait]
impl AlertReceiver<CRDPrometheus> for WebhookReceiver {
async fn install(&self, sender: &CRDPrometheus) -> Result<Outcome, InterpretError> {
let spec = crate::modules::monitoring::kube_prometheus::crd::crd_alertmanager_config::AlertmanagerConfigSpec {
let spec = AlertmanagerConfigSpec {
data: json!({
"route": {
"receiver": self.name,
@@ -111,7 +50,7 @@ impl AlertReceiver<CRDPrometheus> for WebhookReceiver {
}),
};
let alertmanager_configs = crate::modules::monitoring::kube_prometheus::crd::crd_alertmanager_config::AlertmanagerConfig {
let alertmanager_configs = AlertmanagerConfig {
metadata: ObjectMeta {
name: Some(self.name.clone()),
labels: Some(std::collections::BTreeMap::from([(
@@ -176,7 +115,6 @@ impl PrometheusReceiver for WebhookReceiver {
self.get_config().await
}
}
#[async_trait]
impl AlertReceiver<KubePrometheus> for WebhookReceiver {
async fn install(&self, sender: &KubePrometheus) -> Result<Outcome, InterpretError> {

View File

@@ -1,2 +1 @@
pub mod application_monitoring_score;
pub mod rhobs_application_monitoring_score;

View File

@@ -1,94 +0,0 @@
use std::sync::Arc;
use async_trait::async_trait;
use serde::Serialize;
use crate::{
data::Version,
interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
inventory::Inventory,
modules::{
application::Application,
monitoring::kube_prometheus::crd::{
crd_alertmanager_config::CRDPrometheus, rhob_alertmanager_config::RHOBObservability,
},
prometheus::prometheus::PrometheusApplicationMonitoring,
},
score::Score,
topology::{PreparationOutcome, Topology, oberservability::monitoring::AlertReceiver},
};
use harmony_types::id::Id;
#[derive(Debug, Clone, Serialize)]
pub struct ApplicationRHOBMonitoringScore {
pub sender: RHOBObservability,
pub application: Arc<dyn Application>,
pub receivers: Vec<Box<dyn AlertReceiver<RHOBObservability>>>,
}
impl<T: Topology + PrometheusApplicationMonitoring<RHOBObservability>> Score<T>
for ApplicationRHOBMonitoringScore
{
fn create_interpret(&self) -> Box<dyn Interpret<T>> {
Box::new(ApplicationRHOBMonitoringInterpret {
score: self.clone(),
})
}
fn name(&self) -> String {
format!(
"{} monitoring [ApplicationRHOBMonitoringScore]",
self.application.name()
)
}
}
#[derive(Debug)]
pub struct ApplicationRHOBMonitoringInterpret {
score: ApplicationRHOBMonitoringScore,
}
#[async_trait]
impl<T: Topology + PrometheusApplicationMonitoring<RHOBObservability>> Interpret<T>
for ApplicationRHOBMonitoringInterpret
{
async fn execute(
&self,
inventory: &Inventory,
topology: &T,
) -> Result<Outcome, InterpretError> {
let result = topology
.install_prometheus(
&self.score.sender,
inventory,
Some(self.score.receivers.clone()),
)
.await;
match result {
Ok(outcome) => match outcome {
PreparationOutcome::Success { details: _ } => {
Ok(Outcome::success("Prometheus installed".into()))
}
PreparationOutcome::Noop => Ok(Outcome::noop()),
},
Err(err) => Err(InterpretError::from(err)),
}
}
fn get_name(&self) -> InterpretName {
InterpretName::ApplicationMonitoring
}
fn get_version(&self) -> Version {
todo!()
}
fn get_status(&self) -> InterpretStatus {
todo!()
}
fn get_children(&self) -> Vec<Id> {
todo!()
}
}

View File

@@ -7,15 +7,5 @@ pub mod crd_prometheuses;
pub mod grafana_default_dashboard;
pub mod grafana_operator;
pub mod prometheus_operator;
pub mod rhob_alertmanager_config;
pub mod rhob_alertmanagers;
pub mod rhob_cluster_observability_operator;
pub mod rhob_default_rules;
pub mod rhob_grafana;
pub mod rhob_monitoring_stack;
pub mod rhob_prometheus_rules;
pub mod rhob_prometheuses;
pub mod rhob_role;
pub mod rhob_service_monitor;
pub mod role;
pub mod service_monitor;

View File

@@ -1,50 +0,0 @@
use std::sync::Arc;
use kube::CustomResource;
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
use crate::topology::{
k8s::K8sClient,
oberservability::monitoring::{AlertReceiver, AlertSender},
};
#[derive(CustomResource, Serialize, Deserialize, Debug, Clone, JsonSchema)]
#[kube(
group = "monitoring.rhobs",
version = "v1alpha1",
kind = "AlertmanagerConfig",
plural = "alertmanagerconfigs",
namespaced
)]
pub struct AlertmanagerConfigSpec {
#[serde(flatten)]
pub data: serde_json::Value,
}
#[derive(Debug, Clone, Serialize)]
pub struct RHOBObservability {
pub namespace: String,
pub client: Arc<K8sClient>,
}
impl AlertSender for RHOBObservability {
fn name(&self) -> String {
"RHOBAlertManager".to_string()
}
}
impl Clone for Box<dyn AlertReceiver<RHOBObservability>> {
fn clone(&self) -> Self {
self.clone_box()
}
}
impl Serialize for Box<dyn AlertReceiver<RHOBObservability>> {
fn serialize<S>(&self, _serializer: S) -> Result<S::Ok, S::Error>
where
S: serde::Serializer,
{
todo!()
}
}

View File

@@ -1,52 +0,0 @@
use kube::CustomResource;
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
use super::crd_prometheuses::LabelSelector;
/// Rust CRD for `Alertmanager` from Prometheus Operator
#[derive(CustomResource, Serialize, Deserialize, Debug, Clone, JsonSchema)]
#[kube(
group = "monitoring.rhobs",
version = "v1",
kind = "Alertmanager",
plural = "alertmanagers",
namespaced
)]
#[serde(rename_all = "camelCase")]
pub struct AlertmanagerSpec {
/// Number of replicas for HA
pub replicas: i32,
/// Selectors for AlertmanagerConfig CRDs
#[serde(default, skip_serializing_if = "Option::is_none")]
pub alertmanager_config_selector: Option<LabelSelector>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub alertmanager_config_namespace_selector: Option<LabelSelector>,
/// Optional pod template metadata (annotations, labels)
#[serde(default, skip_serializing_if = "Option::is_none")]
pub pod_metadata: Option<LabelSelector>,
/// Optional Alertmanager version to deploy
#[serde(default, skip_serializing_if = "Option::is_none")]
pub version: Option<String>,
}
impl Default for AlertmanagerSpec {
fn default() -> Self {
AlertmanagerSpec {
replicas: 1,
// Match all AlertmanagerConfigs in the same namespace
alertmanager_config_namespace_selector: None,
// Empty selector matches all AlertmanagerConfigs in that namespace
alertmanager_config_selector: Some(LabelSelector::default()),
pod_metadata: None,
version: None,
}
}
}

View File

@@ -1,22 +0,0 @@
use std::str::FromStr;
use non_blank_string_rs::NonBlankString;
use crate::modules::helm::chart::HelmChartScore;
// TODO: package the Cluster Observability Operator (COO) chart for OKD
pub fn rhob_cluster_observability_operator() -> HelmChartScore {
HelmChartScore {
namespace: None,
release_name: NonBlankString::from_str("").unwrap(),
chart_name: NonBlankString::from_str(
"oci://hub.nationtech.io/harmony/nt-prometheus-operator",
)
.unwrap(),
chart_version: None,
values_overrides: None,
values_yaml: None,
create_namespace: true,
install_only: true,
repository: None,
}
}

View File

@@ -1,26 +0,0 @@
use crate::modules::{
monitoring::kube_prometheus::crd::rhob_prometheus_rules::Rule,
prometheus::alerts::k8s::{
deployment::alert_deployment_unavailable,
pod::{alert_container_restarting, alert_pod_not_ready, pod_failed},
pvc::high_pvc_fill_rate_over_two_days,
service::alert_service_down,
},
};
pub fn build_default_application_rules() -> Vec<Rule> {
let pod_failed: Rule = pod_failed().into();
let container_restarting: Rule = alert_container_restarting().into();
let pod_not_ready: Rule = alert_pod_not_ready().into();
let service_down: Rule = alert_service_down().into();
let deployment_unavailable: Rule = alert_deployment_unavailable().into();
let high_pvc_fill_rate: Rule = high_pvc_fill_rate_over_two_days().into();
vec![
pod_failed,
container_restarting,
pod_not_ready,
service_down,
deployment_unavailable,
high_pvc_fill_rate,
]
}

View File

@@ -1,153 +0,0 @@
use std::collections::BTreeMap;
use kube::CustomResource;
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
use crate::modules::monitoring::kube_prometheus::crd::rhob_prometheuses::LabelSelector;
#[derive(CustomResource, Serialize, Deserialize, Debug, Clone, JsonSchema)]
#[kube(
group = "grafana.integreatly.org",
version = "v1beta1",
kind = "Grafana",
plural = "grafanas",
namespaced
)]
#[serde(rename_all = "camelCase")]
pub struct GrafanaSpec {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub config: Option<GrafanaConfig>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub admin_user: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub admin_password: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub ingress: Option<GrafanaIngress>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub persistence: Option<GrafanaPersistence>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub resources: Option<ResourceRequirements>,
}
#[derive(Serialize, Deserialize, Debug, Clone, JsonSchema)]
#[serde(rename_all = "camelCase")]
pub struct GrafanaConfig {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub log: Option<GrafanaLogConfig>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub security: Option<GrafanaSecurityConfig>,
}
#[derive(Serialize, Deserialize, Debug, Clone, JsonSchema)]
#[serde(rename_all = "camelCase")]
pub struct GrafanaLogConfig {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub mode: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub level: Option<String>,
}
#[derive(Serialize, Deserialize, Debug, Clone, JsonSchema)]
#[serde(rename_all = "camelCase")]
pub struct GrafanaSecurityConfig {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub admin_user: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub admin_password: Option<String>,
}
#[derive(Serialize, Deserialize, Debug, Clone, JsonSchema)]
#[serde(rename_all = "camelCase")]
pub struct GrafanaIngress {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub enabled: Option<bool>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub hosts: Option<Vec<String>>,
}
#[derive(Serialize, Deserialize, Debug, Clone, JsonSchema)]
#[serde(rename_all = "camelCase")]
pub struct GrafanaPersistence {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub enabled: Option<bool>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub storage_class_name: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub size: Option<String>,
}
// ------------------------------------------------------------------------------------------------
#[derive(CustomResource, Serialize, Deserialize, Debug, Clone, JsonSchema)]
#[kube(
group = "grafana.integreatly.org",
version = "v1beta1",
kind = "GrafanaDashboard",
plural = "grafanadashboards",
namespaced
)]
#[serde(rename_all = "camelCase")]
pub struct GrafanaDashboardSpec {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub resync_period: Option<String>,
pub instance_selector: LabelSelector,
pub json: String,
}
// ------------------------------------------------------------------------------------------------
#[derive(CustomResource, Serialize, Deserialize, Debug, Clone, JsonSchema)]
#[kube(
group = "grafana.integreatly.org",
version = "v1beta1",
kind = "GrafanaDatasource",
plural = "grafanadatasources",
namespaced
)]
#[serde(rename_all = "camelCase")]
pub struct GrafanaDatasourceSpec {
pub instance_selector: LabelSelector,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub allow_cross_namespace_import: Option<bool>,
pub datasource: GrafanaDatasourceConfig,
}
#[derive(Serialize, Deserialize, Debug, Clone, JsonSchema)]
#[serde(rename_all = "camelCase")]
pub struct GrafanaDatasourceConfig {
pub access: String,
pub database: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub json_data: Option<BTreeMap<String, String>>,
pub name: String,
pub r#type: String,
pub url: String,
}
// ------------------------------------------------------------------------------------------------
#[derive(Serialize, Deserialize, Debug, Clone, JsonSchema, Default)]
#[serde(rename_all = "camelCase")]
pub struct ResourceRequirements {
#[serde(default, skip_serializing_if = "BTreeMap::is_empty")]
pub limits: BTreeMap<String, String>,
#[serde(default, skip_serializing_if = "BTreeMap::is_empty")]
pub requests: BTreeMap<String, String>,
}

View File

@@ -1,41 +0,0 @@
use std::collections::BTreeMap;
use kube::CustomResource;
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
use crate::modules::monitoring::kube_prometheus::crd::rhob_prometheuses::LabelSelector;
/// MonitoringStack CRD for monitoring.rhobs/v1alpha1
#[derive(CustomResource, Serialize, Deserialize, Debug, Clone, JsonSchema)]
#[kube(
group = "monitoring.rhobs",
version = "v1alpha1",
kind = "MonitoringStack",
plural = "monitoringstacks",
namespaced
)]
#[serde(rename_all = "camelCase")]
pub struct MonitoringStackSpec {
/// Verbosity of logs (e.g. "debug", "info", "warn", "error").
#[serde(default, skip_serializing_if = "Option::is_none")]
pub log_level: Option<String>,
/// Retention period for Prometheus TSDB data (e.g. "1d").
#[serde(default, skip_serializing_if = "Option::is_none")]
pub retention: Option<String>,
/// Resource selector for workloads monitored by this stack.
#[serde(default, skip_serializing_if = "Option::is_none")]
pub resource_selector: Option<LabelSelector>,
}
impl Default for MonitoringStackSpec {
fn default() -> Self {
MonitoringStackSpec {
log_level: Some("info".into()),
retention: Some("7d".into()),
resource_selector: Some(LabelSelector::default()),
}
}
}

View File

@@ -1,57 +0,0 @@
use std::collections::BTreeMap;
use kube::CustomResource;
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
use crate::modules::monitoring::alert_rule::prometheus_alert_rule::PrometheusAlertRule;
#[derive(CustomResource, Debug, Serialize, Deserialize, Clone, JsonSchema)]
#[kube(
group = "monitoring.rhobs",
version = "v1",
kind = "PrometheusRule",
plural = "prometheusrules",
namespaced
)]
#[serde(rename_all = "camelCase")]
pub struct PrometheusRuleSpec {
pub groups: Vec<RuleGroup>,
}
#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)]
pub struct RuleGroup {
pub name: String,
pub rules: Vec<Rule>,
}
#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)]
#[serde(rename_all = "camelCase")]
pub struct Rule {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub alert: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub expr: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub for_: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub labels: Option<std::collections::BTreeMap<String, String>>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub annotations: Option<std::collections::BTreeMap<String, String>>,
}
impl From<PrometheusAlertRule> for Rule {
fn from(value: PrometheusAlertRule) -> Self {
Rule {
alert: Some(value.alert),
expr: Some(value.expr),
for_: value.r#for,
labels: Some(value.labels.into_iter().collect::<BTreeMap<_, _>>()),
annotations: Some(value.annotations.into_iter().collect::<BTreeMap<_, _>>()),
}
}
}

View File

@@ -1,118 +0,0 @@
use std::collections::BTreeMap;
use kube::CustomResource;
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
use crate::modules::monitoring::kube_prometheus::types::Operator;
#[derive(CustomResource, Serialize, Deserialize, Debug, Clone, JsonSchema)]
#[kube(
group = "monitoring.rhobs",
version = "v1",
kind = "Prometheus",
plural = "prometheuses",
namespaced
)]
#[serde(rename_all = "camelCase")]
pub struct PrometheusSpec {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub alerting: Option<PrometheusSpecAlerting>,
pub service_account_name: String,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub service_monitor_namespace_selector: Option<LabelSelector>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub service_monitor_selector: Option<LabelSelector>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub service_discovery_role: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub pod_monitor_selector: Option<LabelSelector>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub rule_selector: Option<LabelSelector>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub rule_namespace_selector: Option<LabelSelector>,
}
#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema, Default)]
#[serde(rename_all = "camelCase")]
pub struct NamespaceSelector {
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub match_names: Vec<String>,
}
/// Contains alerting configuration, specifically Alertmanager endpoints.
#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema, Default)]
pub struct PrometheusSpecAlerting {
#[serde(skip_serializing_if = "Option::is_none")]
pub alertmanagers: Option<Vec<AlertmanagerEndpoints>>,
}
/// Represents an Alertmanager endpoint configuration used by Prometheus.
#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema, Default)]
pub struct AlertmanagerEndpoints {
/// Name of the Alertmanager Service.
#[serde(skip_serializing_if = "Option::is_none")]
pub name: Option<String>,
/// Namespace of the Alertmanager Service.
#[serde(skip_serializing_if = "Option::is_none")]
pub namespace: Option<String>,
/// Port to access on the Alertmanager Service (e.g. "web").
#[serde(skip_serializing_if = "Option::is_none")]
pub port: Option<String>,
/// Scheme to use for connecting (e.g. "http").
#[serde(skip_serializing_if = "Option::is_none")]
pub scheme: Option<String>,
// Other fields like `tls_config`, `path_prefix`, etc., can be added if needed.
}
#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema, Default)]
#[serde(rename_all = "camelCase")]
pub struct LabelSelector {
#[serde(default, skip_serializing_if = "BTreeMap::is_empty")]
pub match_labels: BTreeMap<String, String>,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub match_expressions: Vec<LabelSelectorRequirement>,
}
#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)]
#[serde(rename_all = "camelCase")]
pub struct LabelSelectorRequirement {
pub key: String,
pub operator: Operator,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub values: Vec<String>,
}
impl Default for PrometheusSpec {
fn default() -> Self {
PrometheusSpec {
alerting: None,
service_account_name: "prometheus".into(),
// null means "only my namespace"
service_monitor_namespace_selector: None,
// empty selector means match all ServiceMonitors in that namespace
service_monitor_selector: Some(LabelSelector::default()),
service_discovery_role: Some("Endpoints".into()),
pod_monitor_selector: None,
rule_selector: None,
rule_namespace_selector: Some(LabelSelector::default()),
}
}
}

View File

@@ -1,62 +0,0 @@
use k8s_openapi::api::{
core::v1::ServiceAccount,
rbac::v1::{PolicyRule, Role, RoleBinding, RoleRef, Subject},
};
use kube::api::ObjectMeta;
pub fn build_prom_role(role_name: String, namespace: String) -> Role {
Role {
metadata: ObjectMeta {
name: Some(role_name),
namespace: Some(namespace),
..Default::default()
},
rules: Some(vec![PolicyRule {
api_groups: Some(vec!["".into()]), // core API group
resources: Some(vec!["services".into(), "endpoints".into(), "pods".into()]),
verbs: vec!["get".into(), "list".into(), "watch".into()],
..Default::default()
}]),
}
}
pub fn build_prom_rolebinding(
role_name: String,
namespace: String,
service_account_name: String,
) -> RoleBinding {
RoleBinding {
metadata: ObjectMeta {
name: Some(format!("{}-rolebinding", role_name)),
namespace: Some(namespace.clone()),
..Default::default()
},
role_ref: RoleRef {
api_group: "rbac.authorization.k8s.io".into(),
kind: "Role".into(),
name: role_name,
},
subjects: Some(vec![Subject {
kind: "ServiceAccount".into(),
name: service_account_name,
namespace: Some(namespace.clone()),
..Default::default()
}]),
}
}
pub fn build_prom_service_account(
service_account_name: String,
namespace: String,
) -> ServiceAccount {
ServiceAccount {
automount_service_account_token: None,
image_pull_secrets: None,
metadata: ObjectMeta {
name: Some(service_account_name),
namespace: Some(namespace),
..Default::default()
},
secrets: None,
}
}

View File

@@ -1,87 +0,0 @@
use std::collections::HashMap;
use kube::CustomResource;
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
use crate::modules::monitoring::kube_prometheus::types::{
HTTPScheme, MatchExpression, NamespaceSelector, Operator, Selector,
ServiceMonitor as KubeServiceMonitor, ServiceMonitorEndpoint,
};
/// This is the top-level struct for the ServiceMonitor Custom Resource.
/// The `#[derive(CustomResource)]` macro handles all the boilerplate for you,
/// including the `impl Resource`.
#[derive(CustomResource, Serialize, Deserialize, Debug, Clone, JsonSchema)]
#[kube(
group = "monitoring.rhobs",
version = "v1",
kind = "ServiceMonitor",
plural = "servicemonitors",
namespaced
)]
#[serde(rename_all = "camelCase")]
pub struct ServiceMonitorSpec {
/// A label selector to select services to monitor.
pub selector: Selector,
/// A list of endpoints on the selected services to be monitored.
pub endpoints: Vec<ServiceMonitorEndpoint>,
/// Selector to select which namespaces the Kubernetes Endpoints objects
/// are discovered from.
#[serde(default, skip_serializing_if = "Option::is_none")]
pub namespace_selector: Option<NamespaceSelector>,
/// The label to use to retrieve the job name from.
#[serde(default, skip_serializing_if = "Option::is_none")]
pub job_label: Option<String>,
/// Pod-based target labels to transfer from the Kubernetes Pod onto the target.
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub pod_target_labels: Vec<String>,
/// TargetLabels transfers labels on the Kubernetes Service object to the target.
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub target_labels: Vec<String>,
}
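// Editor's note, a hedged sketch (not in this diff) of what the derive above
// generates: a `ServiceMonitor` wrapper type with `metadata` and `spec` fields
// that plugs straight into kube-rs, e.g.:
//
// use kube::{Api, Client};
// async fn list_monitors(client: Client) -> Result<(), kube::Error> {
//     let api: Api<ServiceMonitor> = Api::namespaced(client, "monitoring");
//     for sm in api.list(&Default::default()).await? {
//         println!("{:?}", sm.metadata.name);
//     }
//     Ok(())
// }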
impl Default for ServiceMonitorSpec {
fn default() -> Self {
let labels = HashMap::new();
Self {
selector: Selector {
match_labels: { labels },
match_expressions: vec![MatchExpression {
key: "app.kubernetes.io/name".into(),
operator: Operator::Exists,
values: vec![],
}],
},
endpoints: vec![ServiceMonitorEndpoint {
port: Some("http".to_string()),
path: Some("/metrics".into()),
interval: Some("30s".into()),
scheme: Some(HTTPScheme::HTTP),
..Default::default()
}],
namespace_selector: None, // only the same namespace
job_label: Some("app".into()),
pod_target_labels: vec![],
target_labels: vec![],
}
}
}
impl From<KubeServiceMonitor> for ServiceMonitorSpec {
fn from(value: KubeServiceMonitor) -> Self {
Self {
selector: value.selector,
endpoints: value.endpoints,
namespace_selector: value.namespace_selector,
job_label: value.job_label,
pod_target_labels: value.pod_target_labels,
target_labels: value.target_labels,
}
}
}

View File

@@ -1,120 +0,0 @@
use async_trait::async_trait;
use derive_new::new;
use harmony_types::id::Id;
use log::{error, info, warn};
use serde::Serialize;
use crate::{
data::Version,
hardware::PhysicalHost,
infra::inventory::InventoryRepositoryFactory,
interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
inventory::{HostRole, Inventory},
modules::inventory::{DiscoverHostForRoleScore, LaunchDiscoverInventoryAgentScore},
score::Score,
topology::HAClusterTopology,
};
// -------------------------------------------------------------------------------------------------
// Step 01: Inventory (default PXE + Kickstart in RAM + Rust agent)
// - This score exposes/ensures the default inventory assets and waits for discoveries.
// - No early bonding. Simple access DHCP.
// -------------------------------------------------------------------------------------------------
#[derive(Debug, Clone, Serialize, new)]
pub struct OKDSetup01InventoryScore {}
impl Score<HAClusterTopology> for OKDSetup01InventoryScore {
fn create_interpret(&self) -> Box<dyn Interpret<HAClusterTopology>> {
Box::new(OKDSetup01InventoryInterpret::new(self.clone()))
}
fn name(&self) -> String {
"OKDSetup01InventoryScore".to_string()
}
}
#[derive(Debug, Clone)]
pub struct OKDSetup01InventoryInterpret {
score: OKDSetup01InventoryScore,
version: Version,
status: InterpretStatus,
}
impl OKDSetup01InventoryInterpret {
pub fn new(score: OKDSetup01InventoryScore) -> Self {
let version = Version::from("1.0.0").unwrap();
Self {
version,
score,
status: InterpretStatus::QUEUED,
}
}
}
#[async_trait]
impl Interpret<HAClusterTopology> for OKDSetup01InventoryInterpret {
fn get_name(&self) -> InterpretName {
InterpretName::Custom("OKDSetup01Inventory")
}
fn get_version(&self) -> Version {
self.version.clone()
}
fn get_status(&self) -> InterpretStatus {
self.status.clone()
}
fn get_children(&self) -> Vec<Id> {
vec![]
}
async fn execute(
&self,
inventory: &Inventory,
topology: &HAClusterTopology,
) -> Result<Outcome, InterpretError> {
info!("Setting up base DNS config for OKD");
let cluster_domain = &topology.domain_name;
let load_balancer_ip = &topology.load_balancer.get_ip();
inquire::Confirm::new(&format!(
"Set hostnames manually in your opnsense dnsmasq config :
*.apps.{cluster_domain} -> {load_balancer_ip}
api.{cluster_domain} -> {load_balancer_ip}
api-int.{cluster_domain} -> {load_balancer_ip}
When you can dig them, confirm to continue.
"
))
.prompt()
.expect("Prompt error");
// TODO reactivate automatic dns config when migration from unbound to dnsmasq is done
// OKDDnsScore::new(topology)
// .interpret(inventory, topology)
// .await?;
// TODO refactor this section into a function discover_hosts_for_role(...) that can be used
// from anywhere in the project, rather than as a method on this struct
let mut bootstrap_host: Option<PhysicalHost> = None;
let repo = InventoryRepositoryFactory::build().await?;
while bootstrap_host.is_none() {
let hosts = repo.get_host_for_role(&HostRole::Bootstrap).await?;
bootstrap_host = hosts.into_iter().next().to_owned();
DiscoverHostForRoleScore {
role: HostRole::Bootstrap,
}
.interpret(inventory, topology)
.await?;
}
Ok(Outcome::new(
InterpretStatus::SUCCESS,
format!(
"Found and assigned bootstrap node: {}",
bootstrap_host.unwrap().summary()
),
))
}
}

View File

@@ -1,387 +0,0 @@
use std::{fmt::Write, path::PathBuf};
use async_trait::async_trait;
use derive_new::new;
use harmony_secret::SecretManager;
use harmony_types::id::Id;
use log::{debug, error, info, warn};
use serde::{Deserialize, Serialize};
use tokio::{fs::File, io::AsyncWriteExt, process::Command};
use crate::{
config::secret::{RedhatSecret, SshKeyPair},
data::{FileContent, FilePath, Version},
hardware::PhysicalHost,
infra::inventory::InventoryRepositoryFactory,
instrumentation::{HarmonyEvent, instrument},
interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
inventory::{HostRole, Inventory},
modules::{
dhcp::DhcpHostBindingScore,
http::{IPxeMacBootFileScore, StaticFilesHttpScore},
inventory::LaunchDiscoverInventoryAgentScore,
okd::{
bootstrap_load_balancer::OKDBootstrapLoadBalancerScore,
templates::{BootstrapIpxeTpl, InstallConfigYaml},
},
},
score::Score,
topology::{HAClusterTopology, HostBinding},
};
// -------------------------------------------------------------------------------------------------
// Step 02: Bootstrap
// - Select bootstrap node (from discovered set).
// - Render per-MAC iPXE pointing to OKD 4.19 SCOS live assets + bootstrap ignition.
// - Reboot the host via SSH and wait for bootstrap-complete.
// - No bonding at this stage unless absolutely required; prefer persistence via MC later.
// -------------------------------------------------------------------------------------------------
#[derive(Debug, Clone, Serialize, new)]
pub struct OKDSetup02BootstrapScore {}
impl Score<HAClusterTopology> for OKDSetup02BootstrapScore {
fn create_interpret(&self) -> Box<dyn Interpret<HAClusterTopology>> {
Box::new(OKDSetup02BootstrapInterpret::new())
}
fn name(&self) -> String {
"OKDSetup02BootstrapScore".to_string()
}
}
#[derive(Debug, Clone)]
pub struct OKDSetup02BootstrapInterpret {
version: Version,
status: InterpretStatus,
}
impl OKDSetup02BootstrapInterpret {
pub fn new() -> Self {
let version = Version::from("1.0.0").unwrap();
Self {
version,
status: InterpretStatus::QUEUED,
}
}
async fn get_bootstrap_node(&self) -> Result<PhysicalHost, InterpretError> {
let repo = InventoryRepositoryFactory::build().await?;
match repo
.get_host_for_role(&HostRole::Bootstrap)
.await?
.into_iter()
.next()
{
Some(host) => Ok(host),
None => Err(InterpretError::new(
"No bootstrap node available".to_string(),
)),
}
}
async fn prepare_ignition_files(
&self,
inventory: &Inventory,
topology: &HAClusterTopology,
) -> Result<(), InterpretError> {
let okd_bin_path = PathBuf::from("./data/okd/bin");
let okd_installation_path_str =
format!("./data/okd/installation_files_{}", inventory.location.name);
let okd_images_path = &PathBuf::from("./data/okd/installer_image/");
let okd_installation_path = &PathBuf::from(okd_installation_path_str);
let exit_status = Command::new("mkdir")
.arg("-p")
.arg(okd_installation_path)
.spawn()
.expect("Command failed to start")
.wait()
.await
.map_err(|e| {
InterpretError::new(format!("Failed to create okd installation directory : {e}"))
})?;
if !exit_status.success() {
return Err(InterpretError::new(format!(
"Failed to create okd installation directory"
)));
} else {
info!(
"Created OKD installation directory {}",
okd_installation_path.to_string_lossy()
);
}
let redhat_secret = SecretManager::get_or_prompt::<RedhatSecret>().await?;
let ssh_key = SecretManager::get_or_prompt::<SshKeyPair>().await?;
let install_config_yaml = InstallConfigYaml {
cluster_name: &topology.get_cluster_name(),
cluster_domain: &topology.get_cluster_base_domain(),
pull_secret: &redhat_secret.pull_secret,
ssh_public_key: &ssh_key.public,
}
.to_string();
let install_config_file_path = &okd_installation_path.join("install-config.yaml");
self.create_file(install_config_file_path, install_config_yaml.as_bytes())
.await?;
let install_config_backup_extension = install_config_file_path
.extension()
.map(|e| format!("{}.bak", e.to_string_lossy()))
.unwrap_or("bak".to_string());
let mut install_config_backup = install_config_file_path.clone();
install_config_backup.set_extension(install_config_backup_extension);
self.create_file(&install_config_backup, install_config_yaml.as_bytes())
.await?;
info!("Creating manifest files with openshift-install");
let output = Command::new(okd_bin_path.join("openshift-install"))
.args([
"create",
"manifests",
"--dir",
okd_installation_path.to_str().unwrap(),
])
.output()
.await
.map_err(|e| InterpretError::new(format!("Failed to create okd manifest : {e}")))?;
        let stdout = String::from_utf8_lossy(&output.stdout);
        info!("openshift-install stdout :\n\n{stdout}");
        let stderr = String::from_utf8_lossy(&output.stderr);
        info!("openshift-install stderr :\n\n{stderr}");
info!("openshift-install exit status : {}", output.status);
if !output.status.success() {
return Err(InterpretError::new(format!(
"Failed to create okd manifest, exit code {} : {}",
output.status, stderr
)));
}
info!("Creating ignition files with openshift-install");
let output = Command::new(okd_bin_path.join("openshift-install"))
.args([
"create",
"ignition-configs",
"--dir",
okd_installation_path.to_str().unwrap(),
])
.output()
.await
.map_err(|e| {
InterpretError::new(format!("Failed to create okd ignition config : {e}"))
})?;
        let stdout = String::from_utf8_lossy(&output.stdout);
        info!("openshift-install stdout :\n\n{stdout}");
        let stderr = String::from_utf8_lossy(&output.stderr);
        info!("openshift-install stderr :\n\n{stderr}");
info!("openshift-install exit status : {}", output.status);
        if !output.status.success() {
            return Err(InterpretError::new(format!(
                "Failed to create okd ignition configs, exit code {} : {}",
                output.status, stderr
            )));
        }
let ignition_files_http_path = PathBuf::from("okd_ignition_files");
let prepare_file_content = async |filename: &str| -> Result<FileContent, InterpretError> {
let local_path = okd_installation_path.join(filename);
let remote_path = ignition_files_http_path.join(filename);
info!(
"Preparing file content for local file : {} to remote : {}",
local_path.to_string_lossy(),
remote_path.to_string_lossy()
);
let content = tokio::fs::read_to_string(&local_path).await.map_err(|e| {
InterpretError::new(format!(
"Could not read file content {} : {e}",
local_path.to_string_lossy()
))
})?;
Ok(FileContent {
path: FilePath::Relative(remote_path.to_string_lossy().to_string()),
content,
})
};
StaticFilesHttpScore {
remote_path: None,
folder_to_serve: None,
files: vec![
prepare_file_content("bootstrap.ign").await?,
prepare_file_content("master.ign").await?,
prepare_file_content("worker.ign").await?,
prepare_file_content("metadata.json").await?,
],
}
.interpret(inventory, topology)
.await?;
info!("Successfully prepared ignition files for OKD installation");
info!(
r#"Uploading images, they can be refreshed with a command similar to this one: openshift-install coreos print-stream-json | grep -Eo '"https.*(kernel.|initramfs.|rootfs.)\w+(\.img)?"' | grep x86_64 | xargs -n 1 curl -LO"#
);
inquire::Confirm::new(
&format!("push installer image files with `scp -r {}/* root@{}:/usr/local/http/scos/` until performance issue is resolved", okd_images_path.to_string_lossy(), topology.http_server.get_ip())).prompt().expect("Prompt error");
// let scos_http_path = PathBuf::from("scos");
// StaticFilesHttpScore {
// folder_to_serve: Some(Url::LocalFolder(
// okd_images_path.to_string_lossy().to_string(),
// )),
// remote_path: Some(scos_http_path.to_string_lossy().to_string()),
// files: vec![],
// }
// .interpret(inventory, topology)
// .await?;
Ok(())
}
async fn configure_host_binding(
&self,
inventory: &Inventory,
topology: &HAClusterTopology,
) -> Result<(), InterpretError> {
let binding = HostBinding {
logical_host: topology.bootstrap_host.clone(),
physical_host: self.get_bootstrap_node().await?,
};
info!("Configuring host binding for bootstrap node {binding:?}");
DhcpHostBindingScore {
host_binding: vec![binding],
domain: Some(topology.domain_name.clone()),
}
.interpret(inventory, topology)
.await?;
Ok(())
}
async fn render_per_mac_pxe(
&self,
inventory: &Inventory,
topology: &HAClusterTopology,
) -> Result<(), InterpretError> {
let content = BootstrapIpxeTpl {
http_ip: &topology.http_server.get_ip().to_string(),
scos_path: "scos", // TODO use some constant
ignition_http_path: "okd_ignition_files", // TODO use proper variable
installation_device: "/dev/sda",
ignition_file_name: "bootstrap.ign",
}
.to_string();
let bootstrap_node = self.get_bootstrap_node().await?;
let mac_address = bootstrap_node.get_mac_address();
info!("[Bootstrap] Rendering per-MAC PXE for bootstrap node");
debug!("bootstrap ipxe content : {content}");
debug!("bootstrap mac addresses : {mac_address:?}");
IPxeMacBootFileScore {
mac_address,
content,
}
.interpret(inventory, topology)
.await?;
Ok(())
}
async fn setup_bootstrap_load_balancer(
&self,
inventory: &Inventory,
topology: &HAClusterTopology,
) -> Result<(), InterpretError> {
let outcome = OKDBootstrapLoadBalancerScore::new(topology)
.interpret(inventory, topology)
.await?;
info!("Successfully executed OKDBootstrapLoadBalancerScore : {outcome:?}");
Ok(())
}
async fn reboot_target(&self) -> Result<(), InterpretError> {
// Placeholder: ssh reboot using the inventory ephemeral key
info!("[Bootstrap] Rebooting bootstrap node via SSH");
        // TODO reboot programmatically; there are some logical checks and refactoring to
        // do, such as accessing the bootstrap node config (IP address) from the
        // inventory (see the SSH sketch after this function).
        let _confirmation = inquire::Confirm::new(
            "Now reboot the bootstrap node so it picks up its PXE boot file. Press enter when ready.",
        )
        .prompt()
        .expect("Unexpected prompt error");
Ok(())
}
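    // A hedged sketch of the eventual SSH-based reboot, assuming root SSH access to
    // the bootstrap node; the address is a placeholder until it can be read from the
    // inventory. Not yet wired into `execute`.
    #[allow(dead_code)]
    async fn reboot_via_ssh_sketch(&self, bootstrap_ip: &str) -> Result<(), InterpretError> {
        let status = Command::new("ssh")
            .arg(format!("root@{bootstrap_ip}"))
            .args(["systemctl", "reboot"])
            .status()
            .await
            .map_err(|e| {
                InterpretError::new(format!("Failed to reboot bootstrap node via SSH : {e}"))
            })?;
        // `systemctl reboot` often drops the SSH connection before a clean exit, so a
        // non-zero status here is not necessarily fatal; log it and carry on.
        if !status.success() {
            info!("SSH reboot returned non-zero status ({status}); the node may still be rebooting");
        }
        Ok(())
    }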
async fn wait_for_bootstrap_complete(&self) -> Result<(), InterpretError> {
// Placeholder: wait-for bootstrap-complete
info!("[Bootstrap] Waiting for bootstrap-complete …");
todo!("[Bootstrap] Waiting for bootstrap-complete …")
}
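    // A hedged sketch of automating the wait, assuming the bundled openshift-install
    // binary and the installation directory created by `prepare_ignition_files`.
    // Not yet wired into `execute`.
    #[allow(dead_code)]
    async fn wait_for_bootstrap_complete_sketch(
        &self,
        okd_installation_path: &PathBuf,
    ) -> Result<(), InterpretError> {
        let status = Command::new("./data/okd/bin/openshift-install")
            .args([
                "wait-for",
                "bootstrap-complete",
                "--dir",
                okd_installation_path.to_str().unwrap(),
                "--log-level=info",
            ])
            .status()
            .await
            .map_err(|e| {
                InterpretError::new(format!("Failed to run wait-for bootstrap-complete : {e}"))
            })?;
        if !status.success() {
            return Err(InterpretError::new(format!(
                "wait-for bootstrap-complete exited with status {status}"
            )));
        }
        Ok(())
    }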
async fn create_file(&self, path: &PathBuf, content: &[u8]) -> Result<(), InterpretError> {
        let mut file = File::create(path).await.map_err(|e| {
            InterpretError::new(format!(
                "Could not create file {} : {e}",
                path.to_string_lossy()
            ))
        })?;
        // write_all guarantees the whole buffer is written; a bare `write` may stop short.
        file.write_all(content).await.map_err(|e| {
            InterpretError::new(format!(
                "Could not write file {} : {e}",
                path.to_string_lossy()
            ))
        })?;
Ok(())
}
}
#[async_trait]
impl Interpret<HAClusterTopology> for OKDSetup02BootstrapInterpret {
fn get_name(&self) -> InterpretName {
InterpretName::Custom("OKDSetup02Bootstrap")
}
fn get_version(&self) -> Version {
self.version.clone()
}
fn get_status(&self) -> InterpretStatus {
self.status.clone()
}
fn get_children(&self) -> Vec<Id> {
vec![]
}
async fn execute(
&self,
inventory: &Inventory,
topology: &HAClusterTopology,
) -> Result<Outcome, InterpretError> {
self.configure_host_binding(inventory, topology).await?;
self.prepare_ignition_files(inventory, topology).await?;
self.render_per_mac_pxe(inventory, topology).await?;
self.setup_bootstrap_load_balancer(inventory, topology)
.await?;
// TODO https://docs.okd.io/latest/installing/installing_bare_metal/upi/installing-bare-metal.html#installation-user-provisioned-validating-dns_installing-bare-metal
// self.validate_dns_config(inventory, topology).await?;
self.reboot_target().await?;
self.wait_for_bootstrap_complete().await?;
Ok(Outcome::new(
InterpretStatus::SUCCESS,
"Bootstrap phase complete".into(),
))
}
}

View File

@@ -1,277 +0,0 @@
use std::{fmt::Write, path::PathBuf};
use async_trait::async_trait;
use derive_new::new;
use harmony_types::id::Id;
use log::{debug, info};
use serde::Serialize;
use crate::{
data::Version,
hardware::PhysicalHost,
infra::inventory::InventoryRepositoryFactory,
interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
inventory::{HostRole, Inventory},
modules::{
dhcp::DhcpHostBindingScore, http::IPxeMacBootFileScore,
inventory::DiscoverHostForRoleScore, okd::templates::BootstrapIpxeTpl,
},
score::Score,
topology::{HAClusterTopology, HostBinding},
};
// -------------------------------------------------------------------------------------------------
// Step 03: Control Plane
// - Render per-MAC PXE & ignition for cp0/cp1/cp2.
// - Persist bonding via MachineConfigs (or NNCP) once SCOS is active.
// -------------------------------------------------------------------------------------------------
#[derive(Debug, Clone, Serialize, new)]
pub struct OKDSetup03ControlPlaneScore {}
impl Score<HAClusterTopology> for OKDSetup03ControlPlaneScore {
fn create_interpret(&self) -> Box<dyn Interpret<HAClusterTopology>> {
Box::new(OKDSetup03ControlPlaneInterpret::new(self.clone()))
}
fn name(&self) -> String {
"OKDSetup03ControlPlaneScore".to_string()
}
}
#[derive(Debug, Clone)]
pub struct OKDSetup03ControlPlaneInterpret {
score: OKDSetup03ControlPlaneScore,
version: Version,
status: InterpretStatus,
}
impl OKDSetup03ControlPlaneInterpret {
pub fn new(score: OKDSetup03ControlPlaneScore) -> Self {
let version = Version::from("1.0.0").unwrap();
Self {
version,
score,
status: InterpretStatus::QUEUED,
}
}
/// Ensures that three physical hosts are discovered and available for the ControlPlane role.
/// It will trigger discovery if not enough hosts are found.
async fn get_nodes(
&self,
inventory: &Inventory,
topology: &HAClusterTopology,
) -> Result<Vec<PhysicalHost>, InterpretError> {
const REQUIRED_HOSTS: usize = 3;
let repo = InventoryRepositoryFactory::build().await?;
let mut control_plane_hosts = repo.get_host_for_role(&HostRole::ControlPlane).await?;
while control_plane_hosts.len() < REQUIRED_HOSTS {
            info!(
                "Discovering {} control plane hosts; {} found so far",
                REQUIRED_HOSTS,
                control_plane_hosts.len()
            );
// This score triggers the discovery agent for a specific role.
DiscoverHostForRoleScore {
role: HostRole::ControlPlane,
}
.interpret(inventory, topology)
.await?;
control_plane_hosts = repo.get_host_for_role(&HostRole::ControlPlane).await?;
}
        // The discovery loop above only exits once REQUIRED_HOSTS are available, so
        // take exactly the number of required hosts to ensure consistency.
        Ok(control_plane_hosts
            .into_iter()
            .take(REQUIRED_HOSTS)
            .collect())
}
/// Configures DHCP host bindings for all control plane nodes.
async fn configure_host_binding(
&self,
inventory: &Inventory,
topology: &HAClusterTopology,
nodes: &Vec<PhysicalHost>,
) -> Result<(), InterpretError> {
info!("[ControlPlane] Configuring host bindings for control plane nodes.");
// Ensure the topology definition matches the number of physical nodes found.
if topology.control_plane.len() != nodes.len() {
return Err(InterpretError::new(format!(
"Mismatch between logical control plane hosts defined in topology ({}) and physical nodes found ({}).",
topology.control_plane.len(),
nodes.len()
)));
}
// Create a binding for each physical host to its corresponding logical host.
let bindings: Vec<HostBinding> = topology
.control_plane
.iter()
.zip(nodes.iter())
.map(|(logical_host, physical_host)| {
info!(
"Creating binding: Logical Host '{}' -> Physical Host ID '{}'",
logical_host.name, physical_host.id
);
HostBinding {
logical_host: logical_host.clone(),
physical_host: physical_host.clone(),
}
})
.collect();
DhcpHostBindingScore {
host_binding: bindings,
domain: Some(topology.domain_name.clone()),
}
.interpret(inventory, topology)
.await?;
Ok(())
}
/// Renders and deploys a per-MAC iPXE boot file for each control plane node.
async fn configure_ipxe(
&self,
inventory: &Inventory,
topology: &HAClusterTopology,
nodes: &Vec<PhysicalHost>,
) -> Result<(), InterpretError> {
info!("[ControlPlane] Rendering per-MAC iPXE configurations.");
// The iPXE script content is the same for all control plane nodes,
// pointing to the 'master.ign' ignition file.
let content = BootstrapIpxeTpl {
http_ip: &topology.http_server.get_ip().to_string(),
scos_path: "scos",
ignition_http_path: "okd_ignition_files",
installation_device: "/dev/sda", // This might need to be configurable per-host in the future
ignition_file_name: "master.ign", // Control plane nodes use the master ignition file
}
.to_string();
debug!("[ControlPlane] iPXE content template:\n{}", content);
// Create and apply an iPXE boot file for each node.
for node in nodes {
let mac_address = node.get_mac_address();
if mac_address.is_empty() {
return Err(InterpretError::new(format!(
"Physical host with ID '{}' has no MAC addresses defined.",
node.id
)));
}
info!(
"[ControlPlane] Applying iPXE config for node ID '{}' with MACs: {:?}",
node.id, mac_address
);
IPxeMacBootFileScore {
mac_address,
content: content.clone(),
}
.interpret(inventory, topology)
.await?;
}
Ok(())
}
/// Prompts the user to reboot the target control plane nodes.
async fn reboot_targets(&self, nodes: &Vec<PhysicalHost>) -> Result<(), InterpretError> {
let node_ids: Vec<String> = nodes.iter().map(|n| n.id.to_string()).collect();
info!(
"[ControlPlane] Requesting reboot for control plane nodes: {:?}",
node_ids
);
let confirmation = inquire::Confirm::new(
&format!("Please reboot the {} control plane nodes ({}) to apply their PXE configuration. Press enter when ready.", nodes.len(), node_ids.join(", ")),
)
.prompt()
.map_err(|e| InterpretError::new(format!("User prompt failed: {}", e)))?;
if !confirmation {
return Err(InterpretError::new(
"User aborted the operation.".to_string(),
));
}
Ok(())
}
/// Placeholder for automating network bonding configuration.
async fn persist_network_bond(&self) -> Result<(), InterpretError> {
// Generate MC or NNCP from inventory NIC data; apply via ignition or post-join.
info!("[ControlPlane] Ensuring persistent bonding via MachineConfig/NNCP");
inquire::Confirm::new(
"Network configuration for control plane nodes is not automated yet. Configure it manually if needed.",
)
.prompt()
.map_err(|e| InterpretError::new(format!("User prompt failed: {}", e)))?;
Ok(())
}
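    // A hedged sketch of the NNCP manifest this step could emit once automated,
    // assuming the kubernetes-nmstate NodeNetworkConfigurationPolicy CRD; the NIC
    // names (eno1, eno2) and bond mode are placeholders that would come from
    // inventory NIC data.
    #[allow(dead_code)]
    fn bond_nncp_yaml_sketch(node_name: &str) -> String {
        format!(
            r#"apiVersion: nmstate.io/v1
kind: NodeNetworkConfigurationPolicy
metadata:
  name: bond0-{node_name}
spec:
  nodeSelector:
    kubernetes.io/hostname: {node_name}
  desiredState:
    interfaces:
      - name: bond0
        type: bond
        state: up
        link-aggregation:
          mode: 802.3ad
          port:
            - eno1
            - eno2
        ipv4:
          enabled: true
          dhcp: true
"#
        )
    }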
}
#[async_trait]
impl Interpret<HAClusterTopology> for OKDSetup03ControlPlaneInterpret {
fn get_name(&self) -> InterpretName {
InterpretName::Custom("OKDSetup03ControlPlane")
}
fn get_version(&self) -> Version {
self.version.clone()
}
fn get_status(&self) -> InterpretStatus {
self.status.clone()
}
fn get_children(&self) -> Vec<Id> {
vec![]
}
async fn execute(
&self,
inventory: &Inventory,
topology: &HAClusterTopology,
) -> Result<Outcome, InterpretError> {
// 1. Ensure we have 3 physical hosts for the control plane.
let nodes = self.get_nodes(inventory, topology).await?;
// 2. Create DHCP reservations for the control plane nodes.
self.configure_host_binding(inventory, topology, &nodes)
.await?;
// 3. Create iPXE files for each control plane node to boot from the master ignition.
self.configure_ipxe(inventory, topology, &nodes).await?;
// 4. Reboot the nodes to start the OS installation.
self.reboot_targets(&nodes).await?;
// 5. Placeholder for post-boot network configuration (e.g., bonding).
self.persist_network_bond().await?;
// TODO: Implement a step to wait for the control plane nodes to join the cluster
// and for the cluster operators to become available. This would be similar to
// the `wait-for bootstrap-complete` command.
info!("[ControlPlane] Provisioning initiated. Monitor the cluster convergence manually.");
Ok(Outcome::new(
InterpretStatus::SUCCESS,
"Control plane provisioning has been successfully initiated.".into(),
))
}
}

View File

@@ -1,102 +0,0 @@
use std::{fmt::Write, path::PathBuf};
use async_trait::async_trait;
use derive_new::new;
use harmony_secret::SecretManager;
use harmony_types::id::Id;
use log::{debug, error, info, warn};
use serde::{Deserialize, Serialize};
use tokio::{fs::File, io::AsyncWriteExt, process::Command};
use crate::{
config::secret::{RedhatSecret, SshKeyPair},
data::{FileContent, FilePath, Version},
hardware::PhysicalHost,
infra::inventory::InventoryRepositoryFactory,
instrumentation::{HarmonyEvent, instrument},
interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
inventory::{HostRole, Inventory},
modules::{
dhcp::DhcpHostBindingScore,
http::{IPxeMacBootFileScore, StaticFilesHttpScore},
inventory::LaunchDiscoverInventoryAgentScore,
okd::{
bootstrap_load_balancer::OKDBootstrapLoadBalancerScore,
templates::{BootstrapIpxeTpl, InstallConfigYaml},
},
},
score::Score,
topology::{HAClusterTopology, HostBinding},
};
// -------------------------------------------------------------------------------------------------
// Step 04: Workers
// - Render per-MAC PXE & ignition for workers; join nodes.
// - Persist bonding via MC/NNCP as required (same approach as masters).
// -------------------------------------------------------------------------------------------------
#[derive(Debug, Clone, Serialize, new)]
pub struct OKDSetup04WorkersScore {}
impl Score<HAClusterTopology> for OKDSetup04WorkersScore {
fn create_interpret(&self) -> Box<dyn Interpret<HAClusterTopology>> {
Box::new(OKDSetup04WorkersInterpret::new(self.clone()))
}
fn name(&self) -> String {
"OKDSetup04WorkersScore".to_string()
}
}
#[derive(Debug, Clone)]
pub struct OKDSetup04WorkersInterpret {
score: OKDSetup04WorkersScore,
version: Version,
status: InterpretStatus,
}
impl OKDSetup04WorkersInterpret {
pub fn new(score: OKDSetup04WorkersScore) -> Self {
let version = Version::from("1.0.0").unwrap();
Self {
version,
score,
status: InterpretStatus::QUEUED,
}
}
async fn render_and_reboot(&self) -> Result<(), InterpretError> {
info!("[Workers] Rendering per-MAC PXE for workers and rebooting");
Ok(())
}
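    // A hedged sketch of the per-worker iPXE content this step will render, mirroring
    // the control-plane flow but pointing workers at worker.ign; the installation
    // device and HTTP paths are the same placeholders used for the other roles.
    #[allow(dead_code)]
    fn worker_ipxe_content_sketch(topology: &HAClusterTopology) -> String {
        BootstrapIpxeTpl {
            http_ip: &topology.http_server.get_ip().to_string(),
            scos_path: "scos",
            ignition_http_path: "okd_ignition_files",
            installation_device: "/dev/sda",
            ignition_file_name: "worker.ign",
        }
        .to_string()
    }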
}
#[async_trait]
impl Interpret<HAClusterTopology> for OKDSetup04WorkersInterpret {
fn get_name(&self) -> InterpretName {
InterpretName::Custom("OKDSetup04Workers")
}
fn get_version(&self) -> Version {
self.version.clone()
}
fn get_status(&self) -> InterpretStatus {
self.status.clone()
}
fn get_children(&self) -> Vec<Id> {
vec![]
}
async fn execute(
&self,
_inventory: &Inventory,
_topology: &HAClusterTopology,
) -> Result<Outcome, InterpretError> {
self.render_and_reboot().await?;
Ok(Outcome::new(
InterpretStatus::SUCCESS,
"Workers provisioned".into(),
))
}
}

View File

@@ -1,101 +0,0 @@
use std::{fmt::Write, path::PathBuf};
use async_trait::async_trait;
use derive_new::new;
use harmony_secret::SecretManager;
use harmony_types::id::Id;
use log::{debug, error, info, warn};
use serde::{Deserialize, Serialize};
use tokio::{fs::File, io::AsyncWriteExt, process::Command};
use crate::{
config::secret::{RedhatSecret, SshKeyPair},
data::{FileContent, FilePath, Version},
hardware::PhysicalHost,
infra::inventory::InventoryRepositoryFactory,
instrumentation::{HarmonyEvent, instrument},
interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
inventory::{HostRole, Inventory},
modules::{
dhcp::DhcpHostBindingScore,
http::{IPxeMacBootFileScore, StaticFilesHttpScore},
inventory::LaunchDiscoverInventoryAgentScore,
okd::{
bootstrap_load_balancer::OKDBootstrapLoadBalancerScore,
templates::{BootstrapIpxeTpl, InstallConfigYaml},
},
},
score::Score,
topology::{HAClusterTopology, HostBinding},
};
// -------------------------------------------------------------------------------------------------
// Step 05: Sanity Check
// - Validate API reachability, ClusterOperators, ingress, and SDN status.
// -------------------------------------------------------------------------------------------------
#[derive(Debug, Clone, Serialize, new)]
pub struct OKDSetup05SanityCheckScore {}
impl Score<HAClusterTopology> for OKDSetup05SanityCheckScore {
fn create_interpret(&self) -> Box<dyn Interpret<HAClusterTopology>> {
Box::new(OKDSetup05SanityCheckInterpret::new(self.clone()))
}
fn name(&self) -> String {
"OKDSetup05SanityCheckScore".to_string()
}
}
#[derive(Debug, Clone)]
pub struct OKDSetup05SanityCheckInterpret {
score: OKDSetup05SanityCheckScore,
version: Version,
status: InterpretStatus,
}
impl OKDSetup05SanityCheckInterpret {
pub fn new(score: OKDSetup05SanityCheckScore) -> Self {
let version = Version::from("1.0.0").unwrap();
Self {
version,
score,
status: InterpretStatus::QUEUED,
}
}
async fn run_checks(&self) -> Result<(), InterpretError> {
info!("[Sanity] Checking API, COs, Ingress, and SDN health …");
Ok(())
}
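    // A hedged sketch of one concrete check this step could run, shelling out to the
    // bundled `oc` binary; the kubeconfig location is an assumption based on the
    // installation directory layout used elsewhere in this pipeline.
    #[allow(dead_code)]
    async fn check_cluster_operators_sketch(
        &self,
        kubeconfig: &str,
    ) -> Result<(), InterpretError> {
        let output = Command::new("./data/okd/bin/oc")
            .args(["get", "clusteroperators", "--kubeconfig", kubeconfig])
            .output()
            .await
            .map_err(|e| {
                InterpretError::new(format!("Failed to run oc get clusteroperators : {e}"))
            })?;
        info!("{}", String::from_utf8_lossy(&output.stdout));
        if !output.status.success() {
            return Err(InterpretError::new(
                "oc get clusteroperators failed".to_string(),
            ));
        }
        Ok(())
    }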
}
#[async_trait]
impl Interpret<HAClusterTopology> for OKDSetup05SanityCheckInterpret {
fn get_name(&self) -> InterpretName {
InterpretName::Custom("OKDSetup05SanityCheck")
}
fn get_version(&self) -> Version {
self.version.clone()
}
fn get_status(&self) -> InterpretStatus {
self.status.clone()
}
fn get_children(&self) -> Vec<Id> {
vec![]
}
async fn execute(
&self,
_inventory: &Inventory,
_topology: &HAClusterTopology,
) -> Result<Outcome, InterpretError> {
self.run_checks().await?;
Ok(Outcome::new(
InterpretStatus::SUCCESS,
"Sanity checks passed".into(),
))
}
}

View File

@@ -1,101 +0,0 @@
// -------------------------------------------------------------------------------------------------
use async_trait::async_trait;
use derive_new::new;
use harmony_secret::SecretManager;
use harmony_types::id::Id;
use log::{debug, error, info, warn};
use serde::{Deserialize, Serialize};
use std::{fmt::Write, path::PathBuf};
use tokio::{fs::File, io::AsyncWriteExt, process::Command};
use crate::{
config::secret::{RedhatSecret, SshKeyPair},
data::{FileContent, FilePath, Version},
hardware::PhysicalHost,
infra::inventory::InventoryRepositoryFactory,
instrumentation::{HarmonyEvent, instrument},
interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
inventory::{HostRole, Inventory},
modules::{
dhcp::DhcpHostBindingScore,
http::{IPxeMacBootFileScore, StaticFilesHttpScore},
inventory::LaunchDiscoverInventoryAgentScore,
okd::{
bootstrap_load_balancer::OKDBootstrapLoadBalancerScore,
templates::{BootstrapIpxeTpl, InstallConfigYaml},
},
},
score::Score,
topology::{HAClusterTopology, HostBinding},
};
// Step 06: Installation Report
// - Emit JSON and concise human summary of nodes, roles, versions, and health.
// -------------------------------------------------------------------------------------------------
#[derive(Debug, Clone, Serialize, new)]
pub struct OKDSetup06InstallationReportScore {}
impl Score<HAClusterTopology> for OKDSetup06InstallationReportScore {
fn create_interpret(&self) -> Box<dyn Interpret<HAClusterTopology>> {
Box::new(OKDSetup06InstallationReportInterpret::new(self.clone()))
}
fn name(&self) -> String {
"OKDSetup06InstallationReportScore".to_string()
}
}
#[derive(Debug, Clone)]
pub struct OKDSetup06InstallationReportInterpret {
score: OKDSetup06InstallationReportScore,
version: Version,
status: InterpretStatus,
}
impl OKDSetup06InstallationReportInterpret {
pub fn new(score: OKDSetup06InstallationReportScore) -> Self {
let version = Version::from("1.0.0").unwrap();
Self {
version,
score,
status: InterpretStatus::QUEUED,
}
}
async fn generate(&self) -> Result<(), InterpretError> {
info!("[Report] Generating OKD installation report",);
Ok(())
}
}
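// A hedged sketch of the JSON report shape this step could emit; the field names
// are assumptions, serialized with the serde derives already imported above.
#[derive(Debug, Serialize)]
struct InstallationReportSketch {
    cluster_name: String,
    /// (hostname, role) pairs for every provisioned node
    nodes: Vec<(String, String)>,
    okd_version: String,
    all_cluster_operators_available: bool,
}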
#[async_trait]
impl Interpret<HAClusterTopology> for OKDSetup06InstallationReportInterpret {
fn get_name(&self) -> InterpretName {
InterpretName::Custom("OKDSetup06InstallationReport")
}
fn get_version(&self) -> Version {
self.version.clone()
}
fn get_status(&self) -> InterpretStatus {
self.status.clone()
}
fn get_children(&self) -> Vec<Id> {
vec![]
}
async fn execute(
&self,
_inventory: &Inventory,
_topology: &HAClusterTopology,
) -> Result<Outcome, InterpretError> {
self.generate().await?;
Ok(Outcome::new(
InterpretStatus::SUCCESS,
"Installation report generated".into(),
))
}
}

View File

@@ -8,7 +8,7 @@ use crate::{
score::Score,
topology::{
BackendServer, HAClusterTopology, HealthCheck, HttpMethod, HttpStatusCode, LoadBalancer,
LoadBalancerService, SSL, Topology,
LoadBalancerService, Topology,
},
};
@@ -44,7 +44,6 @@ impl OKDBootstrapLoadBalancerScore {
"/readyz".to_string(),
HttpMethod::GET,
HttpStatusCode::Success2xx,
SSL::SSL,
)),
},
];
@@ -55,7 +54,6 @@ impl OKDBootstrapLoadBalancerScore {
},
}
}
fn topology_to_backend_server(topology: &HAClusterTopology, port: u16) -> Vec<BackendServer> {
let mut backend: Vec<_> = topology
.control_plane
@@ -65,14 +63,6 @@ impl OKDBootstrapLoadBalancerScore {
port,
})
.collect();
topology.workers.iter().for_each(|worker| {
backend.push(BackendServer {
address: worker.ip.to_string(),
port,
})
});
backend.push(BackendServer {
address: topology.bootstrap_host.ip.to_string(),
port,

View File

@@ -71,7 +71,6 @@ impl OKDDhcpScore {
topology.router.get_gateway()
)),
dhcp_range: (IpAddress::from(start), IpAddress::from(end)),
domain: Some(topology.domain_name.clone()),
},
}
}

View File

@@ -44,30 +44,784 @@
//! which must be configured on host AND switch to connect properly.
//!
//! Configuration knobs
//! - lan_cidr: CIDR to scan/allow for discovery endpoints.
//! - public_domain: External wildcard/apps domain (e.g., apps.example.com).
//! - internal_domain: Internal cluster domain (e.g., cluster.local or harmony.mcd).
use async_trait::async_trait;
use chrono::Duration;
use derive_new::new;
use harmony_types::id::Id;
use log::{error, info, warn};
use serde::{Deserialize, Serialize};
use crate::{
modules::okd::{
OKDSetup01InventoryScore, OKDSetup02BootstrapScore, OKDSetup03ControlPlaneScore,
OKDSetup04WorkersScore, OKDSetup05SanityCheckScore,
bootstrap_06_installation_report::OKDSetup06InstallationReportScore,
data::Version,
hardware::PhysicalHost,
infra::inventory::InventoryRepositoryFactory,
instrumentation::{HarmonyEvent, instrument},
interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
inventory::{HostRole, Inventory},
modules::{
dhcp::DhcpHostBindingScore, http::IPxeMacBootFileScore,
inventory::LaunchDiscoverInventoryAgentScore,
},
score::Score,
topology::HAClusterTopology,
topology::{HAClusterTopology, HostBinding},
};
pub struct OKDInstallationPipeline;
// -------------------------------------------------------------------------------------------------
// Public Orchestrator Score
// -------------------------------------------------------------------------------------------------
impl OKDInstallationPipeline {
pub async fn get_all_scores() -> Vec<Box<dyn Score<HAClusterTopology>>> {
vec![
Box::new(OKDSetup01InventoryScore::new()),
Box::new(OKDSetup02BootstrapScore::new()),
Box::new(OKDSetup03ControlPlaneScore::new()),
Box::new(OKDSetup04WorkersScore::new()),
Box::new(OKDSetup05SanityCheckScore::new()),
Box::new(OKDSetup06InstallationReportScore::new()),
]
#[derive(Debug, Clone, Serialize, Deserialize, new)]
pub struct OKDInstallationScore {
/// The LAN CIDR where discovery endpoints live (e.g., 192.168.10.0/24)
pub lan_cidr: String,
/// Public external domain (e.g., example.com). Used for api/apps wildcard, etc.
pub public_domain: String,
/// Internal cluster domain (e.g., harmony.mcd). Used for internal svc/ingress and DNS.
pub internal_domain: String,
}
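// A minimal construction sketch with placeholder values matching the doc comments
// above; `new` comes from the derive_new attribute, taking the fields in
// declaration order.
#[allow(dead_code)]
fn example_okd_installation_score() -> OKDInstallationScore {
    OKDInstallationScore::new(
        "192.168.10.0/24".to_string(), // lan_cidr
        "example.com".to_string(),     // public_domain
        "harmony.mcd".to_string(),     // internal_domain
    )
}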
impl Score<HAClusterTopology> for OKDInstallationScore {
fn create_interpret(&self) -> Box<dyn Interpret<HAClusterTopology>> {
Box::new(OKDInstallationInterpret::new(self.clone()))
}
fn name(&self) -> String {
"OKDInstallationScore".to_string()
}
}
// -------------------------------------------------------------------------------------------------
// Orchestrator Interpret
// -------------------------------------------------------------------------------------------------
#[derive(Debug, Clone)]
pub struct OKDInstallationInterpret {
score: OKDInstallationScore,
version: Version,
status: InterpretStatus,
}
impl OKDInstallationInterpret {
pub fn new(score: OKDInstallationScore) -> Self {
let version = Version::from("0.1.0").expect("valid version");
Self {
score,
version,
status: InterpretStatus::QUEUED,
}
}
async fn run_inventory_phase(
&self,
inventory: &Inventory,
topology: &HAClusterTopology,
) -> Result<(), InterpretError> {
// 1) Prepare DNS and DHCP lease registration (optional)
// 2) Serve default iPXE + Kickstart and poll discovery
let discovery_score = OKDSetup01InventoryScore::new(self.score.lan_cidr.clone());
discovery_score.interpret(inventory, topology).await?;
Ok(())
}
async fn run_bootstrap_phase(
&self,
inventory: &Inventory,
topology: &HAClusterTopology,
) -> Result<(), InterpretError> {
// Select and provision bootstrap
let bootstrap_score = OKDSetup02BootstrapScore::new(
self.score.public_domain.clone(),
self.score.internal_domain.clone(),
);
bootstrap_score.interpret(inventory, topology).await?;
Ok(())
}
async fn run_control_plane_phase(
&self,
inventory: &Inventory,
topology: &HAClusterTopology,
) -> Result<(), InterpretError> {
let control_plane_score = OKDSetup03ControlPlaneScore::new();
control_plane_score.interpret(inventory, topology).await?;
Ok(())
}
async fn run_workers_phase(
&self,
inventory: &Inventory,
topology: &HAClusterTopology,
) -> Result<(), InterpretError> {
let workers_score = OKDSetup04WorkersScore::new();
workers_score.interpret(inventory, topology).await?;
Ok(())
}
async fn run_sanity_phase(
&self,
inventory: &Inventory,
topology: &HAClusterTopology,
) -> Result<(), InterpretError> {
let sanity_score = OKDSetup05SanityCheckScore::new();
sanity_score.interpret(inventory, topology).await?;
Ok(())
}
async fn run_report_phase(
&self,
inventory: &Inventory,
topology: &HAClusterTopology,
) -> Result<(), InterpretError> {
let report_score = OKDSetup06InstallationReportScore::new(
self.score.public_domain.clone(),
self.score.internal_domain.clone(),
);
report_score.interpret(inventory, topology).await?;
Ok(())
}
}
#[async_trait]
impl Interpret<HAClusterTopology> for OKDInstallationInterpret {
fn get_name(&self) -> InterpretName {
InterpretName::Custom("OKDInstallationInterpret")
}
fn get_version(&self) -> Version {
self.version.clone()
}
fn get_status(&self) -> InterpretStatus {
self.status.clone()
}
fn get_children(&self) -> Vec<Id> {
vec![]
}
async fn execute(
&self,
inventory: &Inventory,
topology: &HAClusterTopology,
) -> Result<Outcome, InterpretError> {
instrument(HarmonyEvent::HarmonyStarted).ok();
info!(
"Starting OKD installation pipeline for public_domain={} internal_domain={} lan_cidr={}",
self.score.public_domain, self.score.internal_domain, self.score.lan_cidr
);
self.run_inventory_phase(inventory, topology).await?;
self.run_bootstrap_phase(inventory, topology).await?;
self.run_control_plane_phase(inventory, topology).await?;
self.run_workers_phase(inventory, topology).await?;
self.run_sanity_phase(inventory, topology).await?;
self.run_report_phase(inventory, topology).await?;
instrument(HarmonyEvent::HarmonyFinished).ok();
Ok(Outcome::new(
InterpretStatus::SUCCESS,
"OKD installation pipeline completed".into(),
))
}
}
// -------------------------------------------------------------------------------------------------
// Step 01: Inventory (default PXE + Kickstart in RAM + Rust agent)
// - This score exposes/ensures the default inventory assets and waits for discoveries.
// - No early bonding. Simple access DHCP.
// -------------------------------------------------------------------------------------------------
#[derive(Debug, Clone, Serialize, new)]
struct OKDSetup01InventoryScore {}
impl Score<HAClusterTopology> for OKDSetup01InventoryScore {
fn create_interpret(&self) -> Box<dyn Interpret<HAClusterTopology>> {
Box::new(OKDSetup01InventoryInterpret::new(self.clone()))
}
fn name(&self) -> String {
"OKDSetup01InventoryScore".to_string()
}
}
#[derive(Debug, Clone)]
struct OKDSetup01InventoryInterpret {
score: OKDSetup01InventoryScore,
version: Version,
status: InterpretStatus,
}
impl OKDSetup01InventoryInterpret {
pub fn new(score: OKDSetup01InventoryScore) -> Self {
let version = Version::from("1.0.0").unwrap();
Self {
version,
score,
status: InterpretStatus::QUEUED,
}
}
}
#[async_trait]
impl Interpret<HAClusterTopology> for OKDSetup01InventoryInterpret {
fn get_name(&self) -> InterpretName {
InterpretName::Custom("OKDSetup01Inventory")
}
fn get_version(&self) -> Version {
self.version.clone()
}
fn get_status(&self) -> InterpretStatus {
self.status.clone()
}
fn get_children(&self) -> Vec<Id> {
vec![]
}
async fn execute(
&self,
inventory: &Inventory,
topology: &HAClusterTopology,
) -> Result<Outcome, InterpretError> {
        info!(
            "Launching discovery agent; make sure your nodes are successfully PXE booted and running the inventory agent. They should answer on `http://<node_ip>:8080/inventory`"
        );
LaunchDiscoverInventoryAgentScore {
discovery_timeout: Some(60),
}
.interpret(inventory, topology)
.await?;
let bootstrap_host: PhysicalHost;
let host_repo = InventoryRepositoryFactory::build().await?;
loop {
let all_hosts = host_repo.get_all_hosts().await?;
if all_hosts.is_empty() {
warn!("No discovered hosts found yet. Waiting for hosts to appear...");
// Sleep to avoid spamming the user and logs while waiting for nodes.
                tokio::time::sleep(std::time::Duration::from_secs(5)).await;
continue;
}
let ans = inquire::Select::new(
"Select the node to be used as the bootstrap node:",
all_hosts,
)
.with_help_message("Press Esc to refresh the list of discovered hosts")
.prompt();
match ans {
Ok(choice) => {
info!("Selected {} as the bootstrap node.", choice.summary());
host_repo
.save_role_mapping(&HostRole::Bootstrap, &choice)
.await?;
bootstrap_host = choice;
break;
}
Err(inquire::InquireError::OperationCanceled) => {
info!("Refresh requested. Fetching list of discovered hosts again...");
continue;
}
Err(e) => {
error!("Failed to select bootstrap node: {}", e);
                    return Err(InterpretError::new(format!("Could not select host : {e}")));
}
}
}
Ok(Outcome::new(
InterpretStatus::SUCCESS,
format!(
"Found and assigned bootstrap node: {}",
bootstrap_host.summary()
),
))
}
}
// -------------------------------------------------------------------------------------------------
// Step 02: Bootstrap
// - Select bootstrap node (from discovered set).
// - Render per-MAC iPXE pointing to OKD 4.19 SCOS live assets + bootstrap ignition.
// - Reboot the host via SSH and wait for bootstrap-complete.
// - No bonding at this stage unless absolutely required; prefer persistence via MC later.
// -------------------------------------------------------------------------------------------------
#[derive(Debug, Clone, Serialize, new)]
struct OKDSetup02BootstrapScore {
public_domain: String,
internal_domain: String,
}
impl Score<HAClusterTopology> for OKDSetup02BootstrapScore {
fn create_interpret(&self) -> Box<dyn Interpret<HAClusterTopology>> {
Box::new(OKDSetup02BootstrapInterpret::new(self.clone()))
}
fn name(&self) -> String {
"OKDSetup02BootstrapScore".to_string()
}
}
#[derive(Debug, Clone)]
struct OKDSetup02BootstrapInterpret {
score: OKDSetup02BootstrapScore,
version: Version,
status: InterpretStatus,
}
impl OKDSetup02BootstrapInterpret {
pub fn new(score: OKDSetup02BootstrapScore) -> Self {
let version = Version::from("1.0.0").unwrap();
Self {
version,
score,
status: InterpretStatus::QUEUED,
}
}
fn get_bootstrap_node<'a>(&self, inventory: &'a Inventory) -> &'a PhysicalHost {
inventory
.worker_host
.first()
.expect("At least one worker host is required to be used as bootstrap node")
}
async fn configure_host_binding(
&self,
inventory: &Inventory,
topology: &HAClusterTopology,
) -> Result<(), InterpretError> {
let binding = HostBinding {
logical_host: topology.bootstrap_host.clone(),
physical_host: self.get_bootstrap_node(inventory).clone(),
};
info!("Configuring host binding for bootstrap node {binding:?}");
DhcpHostBindingScore {
host_binding: vec![binding],
}
.interpret(inventory, topology)
.await?;
Ok(())
}
async fn render_per_mac_pxe(
&self,
inventory: &Inventory,
topology: &HAClusterTopology,
) -> Result<(), InterpretError> {
// Placeholder: use Harmony templates to emit {MAC}.ipxe selecting SCOS live + bootstrap ignition.
info!("[Bootstrap] Rendering per-MAC PXE for bootstrap node");
let bootstrap_node = self.get_bootstrap_node(inventory);
IPxeMacBootFileScore {
mac_address: bootstrap_node.get_mac_address(),
content: todo!("templace for bootstrap node"),
}
.interpret(inventory, topology)
.await?;
Ok(())
}
async fn reboot_target(&self) -> Result<(), InterpretError> {
// Placeholder: ssh reboot using the inventory ephemeral key
info!("[Bootstrap] Rebooting bootstrap node via SSH");
Ok(())
}
async fn wait_for_bootstrap_complete(&self) -> Result<(), InterpretError> {
// Placeholder: wait-for bootstrap-complete
info!("[Bootstrap] Waiting for bootstrap-complete …");
Ok(())
}
}
#[async_trait]
impl Interpret<HAClusterTopology> for OKDSetup02BootstrapInterpret {
fn get_name(&self) -> InterpretName {
InterpretName::Custom("OKDSetup02Bootstrap")
}
fn get_version(&self) -> Version {
self.version.clone()
}
fn get_status(&self) -> InterpretStatus {
self.status.clone()
}
fn get_children(&self) -> Vec<Id> {
vec![]
}
async fn execute(
&self,
inventory: &Inventory,
topology: &HAClusterTopology,
) -> Result<Outcome, InterpretError> {
self.configure_host_binding(inventory, topology).await?;
self.render_per_mac_pxe(inventory, topology).await?;
self.reboot_target().await?;
self.wait_for_bootstrap_complete().await?;
Ok(Outcome::new(
InterpretStatus::SUCCESS,
"Bootstrap phase complete".into(),
))
}
}
// -------------------------------------------------------------------------------------------------
// Step 03: Control Plane
// - Render per-MAC PXE & ignition for cp0/cp1/cp2.
// - Persist bonding via MachineConfigs (or NNCP) once SCOS is active.
// -------------------------------------------------------------------------------------------------
#[derive(Debug, Clone, Serialize, new)]
struct OKDSetup03ControlPlaneScore {}
impl Score<HAClusterTopology> for OKDSetup03ControlPlaneScore {
fn create_interpret(&self) -> Box<dyn Interpret<HAClusterTopology>> {
Box::new(OKDSetup03ControlPlaneInterpret::new(self.clone()))
}
fn name(&self) -> String {
"OKDSetup03ControlPlaneScore".to_string()
}
}
#[derive(Debug, Clone)]
struct OKDSetup03ControlPlaneInterpret {
score: OKDSetup03ControlPlaneScore,
version: Version,
status: InterpretStatus,
}
impl OKDSetup03ControlPlaneInterpret {
pub fn new(score: OKDSetup03ControlPlaneScore) -> Self {
let version = Version::from("1.0.0").unwrap();
Self {
version,
score,
status: InterpretStatus::QUEUED,
}
}
async fn render_and_reboot(&self) -> Result<(), InterpretError> {
info!("[ControlPlane] Rendering per-MAC PXE for masters and rebooting");
Ok(())
}
async fn persist_network_bond(&self) -> Result<(), InterpretError> {
// Generate MC or NNCP from inventory NIC data; apply via ignition or post-join.
info!("[ControlPlane] Ensuring persistent bonding via MachineConfig/NNCP");
Ok(())
}
}
#[async_trait]
impl Interpret<HAClusterTopology> for OKDSetup03ControlPlaneInterpret {
fn get_name(&self) -> InterpretName {
InterpretName::Custom("OKDSetup03ControlPlane")
}
fn get_version(&self) -> Version {
self.version.clone()
}
fn get_status(&self) -> InterpretStatus {
self.status.clone()
}
fn get_children(&self) -> Vec<Id> {
vec![]
}
async fn execute(
&self,
_inventory: &Inventory,
_topology: &HAClusterTopology,
) -> Result<Outcome, InterpretError> {
self.render_and_reboot().await?;
self.persist_network_bond().await?;
Ok(Outcome::new(
InterpretStatus::SUCCESS,
"Control plane provisioned".into(),
))
}
}
// -------------------------------------------------------------------------------------------------
// Step 04: Workers
// - Render per-MAC PXE & ignition for workers; join nodes.
// - Persist bonding via MC/NNCP as required (same approach as masters).
// -------------------------------------------------------------------------------------------------
#[derive(Debug, Clone, Serialize, new)]
struct OKDSetup04WorkersScore {}
impl Score<HAClusterTopology> for OKDSetup04WorkersScore {
fn create_interpret(&self) -> Box<dyn Interpret<HAClusterTopology>> {
Box::new(OKDSetup04WorkersInterpret::new(self.clone()))
}
fn name(&self) -> String {
"OKDSetup04WorkersScore".to_string()
}
}
#[derive(Debug, Clone)]
struct OKDSetup04WorkersInterpret {
score: OKDSetup04WorkersScore,
version: Version,
status: InterpretStatus,
}
impl OKDSetup04WorkersInterpret {
pub fn new(score: OKDSetup04WorkersScore) -> Self {
let version = Version::from("1.0.0").unwrap();
Self {
version,
score,
status: InterpretStatus::QUEUED,
}
}
async fn render_and_reboot(&self) -> Result<(), InterpretError> {
info!("[Workers] Rendering per-MAC PXE for workers and rebooting");
Ok(())
}
}
#[async_trait]
impl Interpret<HAClusterTopology> for OKDSetup04WorkersInterpret {
fn get_name(&self) -> InterpretName {
InterpretName::Custom("OKDSetup04Workers")
}
fn get_version(&self) -> Version {
self.version.clone()
}
fn get_status(&self) -> InterpretStatus {
self.status.clone()
}
fn get_children(&self) -> Vec<Id> {
vec![]
}
async fn execute(
&self,
_inventory: &Inventory,
_topology: &HAClusterTopology,
) -> Result<Outcome, InterpretError> {
self.render_and_reboot().await?;
Ok(Outcome::new(
InterpretStatus::SUCCESS,
"Workers provisioned".into(),
))
}
}
// -------------------------------------------------------------------------------------------------
// Step 05: Sanity Check
// - Validate API reachability, ClusterOperators, ingress, and SDN status.
// -------------------------------------------------------------------------------------------------
#[derive(Debug, Clone, Serialize, new)]
struct OKDSetup05SanityCheckScore {}
impl Score<HAClusterTopology> for OKDSetup05SanityCheckScore {
fn create_interpret(&self) -> Box<dyn Interpret<HAClusterTopology>> {
Box::new(OKDSetup05SanityCheckInterpret::new(self.clone()))
}
fn name(&self) -> String {
"OKDSetup05SanityCheckScore".to_string()
}
}
#[derive(Debug, Clone)]
struct OKDSetup05SanityCheckInterpret {
score: OKDSetup05SanityCheckScore,
version: Version,
status: InterpretStatus,
}
impl OKDSetup05SanityCheckInterpret {
pub fn new(score: OKDSetup05SanityCheckScore) -> Self {
let version = Version::from("1.0.0").unwrap();
Self {
version,
score,
status: InterpretStatus::QUEUED,
}
}
async fn run_checks(&self) -> Result<(), InterpretError> {
info!("[Sanity] Checking API, COs, Ingress, and SDN health …");
Ok(())
}
}
#[async_trait]
impl Interpret<HAClusterTopology> for OKDSetup05SanityCheckInterpret {
fn get_name(&self) -> InterpretName {
InterpretName::Custom("OKDSetup05SanityCheck")
}
fn get_version(&self) -> Version {
self.version.clone()
}
fn get_status(&self) -> InterpretStatus {
self.status.clone()
}
fn get_children(&self) -> Vec<Id> {
vec![]
}
async fn execute(
&self,
_inventory: &Inventory,
_topology: &HAClusterTopology,
) -> Result<Outcome, InterpretError> {
self.run_checks().await?;
Ok(Outcome::new(
InterpretStatus::SUCCESS,
"Sanity checks passed".into(),
))
}
}
// -------------------------------------------------------------------------------------------------
// Step 06: Installation Report
// - Emit JSON and concise human summary of nodes, roles, versions, and health.
// -------------------------------------------------------------------------------------------------
#[derive(Debug, Clone, Serialize, new)]
struct OKDSetup06InstallationReportScore {
public_domain: String,
internal_domain: String,
}
impl Score<HAClusterTopology> for OKDSetup06InstallationReportScore {
fn create_interpret(&self) -> Box<dyn Interpret<HAClusterTopology>> {
Box::new(OKDSetup06InstallationReportInterpret::new(self.clone()))
}
fn name(&self) -> String {
"OKDSetup06InstallationReportScore".to_string()
}
}
#[derive(Debug, Clone)]
struct OKDSetup06InstallationReportInterpret {
score: OKDSetup06InstallationReportScore,
version: Version,
status: InterpretStatus,
}
impl OKDSetup06InstallationReportInterpret {
pub fn new(score: OKDSetup06InstallationReportScore) -> Self {
let version = Version::from("1.0.0").unwrap();
Self {
version,
score,
status: InterpretStatus::QUEUED,
}
}
async fn generate(&self) -> Result<(), InterpretError> {
info!(
"[Report] Generating installation report for {} / {}",
self.score.public_domain, self.score.internal_domain
);
Ok(())
}
}
#[async_trait]
impl Interpret<HAClusterTopology> for OKDSetup06InstallationReportInterpret {
fn get_name(&self) -> InterpretName {
InterpretName::Custom("OKDSetup06InstallationReport")
}
fn get_version(&self) -> Version {
self.version.clone()
}
fn get_status(&self) -> InterpretStatus {
self.status.clone()
}
fn get_children(&self) -> Vec<Id> {
vec![]
}
async fn execute(
&self,
_inventory: &Inventory,
_topology: &HAClusterTopology,
) -> Result<Outcome, InterpretError> {
self.generate().await?;
Ok(Outcome::new(
InterpretStatus::SUCCESS,
"Installation report generated".into(),
))
}
}

View File

@@ -16,31 +16,29 @@ use crate::{
use harmony_types::id::Id;
#[derive(Debug, new, Clone, Serialize)]
pub struct OKDIpxeScore {
pub struct OkdIpxeScore {
pub kickstart_filename: String,
pub harmony_inventory_agent: String,
pub cluster_pubkey: FileContent,
pub cluster_pubkey_filename: String,
}
impl<T: Topology + DhcpServer + TftpServer + HttpServer + Router> Score<T> for OKDIpxeScore {
impl<T: Topology + DhcpServer + TftpServer + HttpServer + Router> Score<T> for OkdIpxeScore {
fn create_interpret(&self) -> Box<dyn Interpret<T>> {
Box::new(OKDIpxeInterpret::new(self.clone()))
Box::new(IpxeInterpret::new(self.clone()))
}
fn name(&self) -> String {
"OKDipxeScore".to_string()
"OkdIpxeScore".to_string()
}
}
#[derive(Debug, new, Clone)]
pub struct OKDIpxeInterpret {
score: OKDIpxeScore,
pub struct IpxeInterpret {
score: OkdIpxeScore,
}
#[async_trait]
impl<T: Topology + DhcpServer + TftpServer + HttpServer + Router> Interpret<T>
for OKDIpxeInterpret
{
impl<T: Topology + DhcpServer + TftpServer + HttpServer + Router> Interpret<T> for IpxeInterpret {
async fn execute(
&self,
inventory: &Inventory,
@@ -61,7 +59,6 @@ impl<T: Topology + DhcpServer + TftpServer + HttpServer + Router> Interpret<T>
let scores: Vec<Box<dyn Score<T>>> = vec![
Box::new(DhcpScore {
host_binding: vec![],
domain: None,
next_server: Some(topology.get_gateway()),
boot_filename: None,
filename: Some("undionly.kpxe".to_string()),
@@ -73,7 +70,6 @@ impl<T: Topology + DhcpServer + TftpServer + HttpServer + Router> Interpret<T>
files_to_serve: Url::LocalFolder("./data/pxe/okd/tftpboot/".to_string()),
}),
Box::new(StaticFilesHttpScore {
remote_path: None,
// TODO The current russh based copy is way too slow, check for a lib update or use scp
// when available
//
@@ -95,7 +91,7 @@ impl<T: Topology + DhcpServer + TftpServer + HttpServer + Router> Interpret<T>
content: InventoryKickstartTpl {
gateway_ip: &gateway_ip,
harmony_inventory_agent: &self.score.harmony_inventory_agent,
cluster_pubkey_filename: &self.score.cluster_pubkey.path.to_string(),
cluster_pubkey_filename: &self.score.cluster_pubkey_filename,
}
.to_string(),
},
@@ -107,7 +103,6 @@ impl<T: Topology + DhcpServer + TftpServer + HttpServer + Router> Interpret<T>
}
.to_string(),
},
self.score.cluster_pubkey.clone(),
],
}),
];
@@ -123,7 +118,6 @@ impl<T: Topology + DhcpServer + TftpServer + HttpServer + Router> Interpret<T>
Err(e) => return Err(e),
};
}
inquire::Confirm::new(&format!("Execute the copy : `scp -r data/pxe/okd/http_files/* root@{}:/usr/local/http/` and confirm when done to continue", HttpServer::get_ip(topology))).prompt().expect("Prompt error");
Ok(Outcome::success("Ipxe installed".to_string()))
}

View File

@@ -8,7 +8,7 @@ use crate::{
score::Score,
topology::{
BackendServer, HAClusterTopology, HealthCheck, HttpMethod, HttpStatusCode, LoadBalancer,
LoadBalancerService, SSL, Topology,
LoadBalancerService, Topology,
},
};
@@ -62,7 +62,6 @@ impl OKDLoadBalancerScore {
"/readyz".to_string(),
HttpMethod::GET,
HttpStatusCode::Success2xx,
SSL::SSL,
)),
},
];

View File

@@ -1,9 +1,3 @@
mod bootstrap_01_prepare;
mod bootstrap_02_bootstrap;
mod bootstrap_03_control_plane;
mod bootstrap_04_workers;
mod bootstrap_05_sanity_check;
mod bootstrap_06_installation_report;
pub mod bootstrap_dhcp;
pub mod bootstrap_load_balancer;
pub mod dhcp;
@@ -11,11 +5,4 @@ pub mod dns;
pub mod installation;
pub mod ipxe;
pub mod load_balancer;
pub mod templates;
pub mod upgrade;
pub use bootstrap_01_prepare::*;
pub use bootstrap_02_bootstrap::*;
pub use bootstrap_03_control_plane::*;
pub use bootstrap_04_workers::*;
pub use bootstrap_05_sanity_check::*;
pub use bootstrap_06_installation_report::*;

View File

@@ -1,20 +0,0 @@
use askama::Template;
#[derive(Template)]
#[template(path = "okd/install-config.yaml.j2")]
pub struct InstallConfigYaml<'a> {
pub cluster_domain: &'a str,
pub pull_secret: &'a str,
pub ssh_public_key: &'a str,
pub cluster_name: &'a str,
}
#[derive(Template)]
#[template(path = "okd/bootstrap.ipxe.j2")]
pub struct BootstrapIpxeTpl<'a> {
pub http_ip: &'a str,
pub scos_path: &'a str,
pub installation_device: &'a str,
pub ignition_http_path: &'a str,
pub ignition_file_name: &'static str,
}
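// A hedged sketch of the kind of script okd/bootstrap.ipxe.j2 is expected to render
// from these fields (the template itself is not shown here; the asset file names
// are assumptions based on the SCOS live artifacts this boot flow serves):
//
//   #!ipxe
//   kernel http://{http_ip}/{scos_path}/scos-live-kernel-x86_64 initrd=main \
//     coreos.live.rootfs_url=http://{http_ip}/{scos_path}/scos-live-rootfs.x86_64.img \
//     ignition.config.url=http://{http_ip}/{ignition_http_path}/{ignition_file_name} \
//     coreos.inst.install_dev={installation_device}
//   initrd --name main http://{http_ip}/{scos_path}/scos-live-initramfs.x86_64.img
//   boot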

Some files were not shown because too many files have changed in this diff.