Compare commits

14 Commits (545a865f3d ... feat/postg)

| SHA1 |
|---|
| 142300802d |
| 2254641f3d |
| b61e4f9a96 |
| 2e367d88d4 |
| 9edc42a665 |
| f242aafebb |
| 3e14ebd62c |
| 1b19638df4 |
| d39b1957cd |
| 357ca93d90 |
| 8103932f23 |
| 9617e1cfde |
| a953284386 |
| bfde5f58ed |
Cargo.lock (generated, +15)

@@ -1835,6 +1835,21 @@ dependencies = [
 "url",
]

+[[package]]
+name = "example-operatorhub-catalogsource"
+version = "0.1.0"
+dependencies = [
+ "cidr",
+ "env_logger",
+ "harmony",
+ "harmony_cli",
+ "harmony_macros",
+ "harmony_types",
+ "log",
+ "tokio",
+ "url",
+]
+
[[package]]
name = "example-opnsense"
version = "0.1.0"
adr/015-higher-order-topologies.md (new file, +114)

@@ -0,0 +1,114 @@
# Architecture Decision Record: Higher-Order Topologies

**Initial Author:** Jean-Gabriel Gill-Couture
**Initial Date:** 2025-12-08
**Last Updated Date:** 2025-12-08

## Status

Implemented

## Context

Harmony models infrastructure as **Topologies** (deployment targets like `K8sAnywhereTopology`, `LinuxHostTopology`) implementing **Capabilities** (tech traits like `PostgreSQL`, `Docker`).

**Higher-Order Topologies** (e.g., `FailoverTopology<T>`) compose/orchestrate capabilities *across* multiple underlying topologies (e.g., primary+replica `T`).

A naive design requires a manual `impl Capability for HigherOrderTopology<T>` *per T, per capability*, causing:
- **Impl explosion**: N topologies × M capabilities = N×M boilerplate.
- **ISP violation**: Topologies forced to impl unrelated capabilities.
- **Maintenance hell**: A new topology needs impls for *all* orchestrated capabilities; a new capability needs impls for *all* topologies and higher-order types.
- **Barrier to extension**: Users can't easily add topologies without resorting to `todo!()`s or panics.

This makes scaling Harmony impractical as the ecosystem grows.

## Decision

Use **blanket trait impls** on higher-order topologies to *automatically* derive orchestration:

````rust
/// Higher-Order Topology: Orchestrates capabilities across sub-topologies.
pub struct FailoverTopology<T> {
    /// Primary sub-topology.
    primary: T,
    /// Replica sub-topology.
    replica: T,
}

/// Automatically provides PostgreSQL failover for *any* `T: PostgreSQL`.
/// Delegates to primary for queries; orchestrates deploy across both.
#[async_trait]
impl<T: PostgreSQL> PostgreSQL for FailoverTopology<T> {
    async fn deploy(&self, config: &PostgreSQLConfig) -> Result<String, String> {
        // Deploy primary; extract certs/endpoint;
        // deploy replica with pg_basebackup + TLS passthrough.
        // (Full impl logged/elaborated.)
    }

    // Delegate queries to primary.
    async fn get_replication_certs(&self, cluster_name: &str) -> Result<ReplicationCerts, String> {
        self.primary.get_replication_certs(cluster_name).await
    }
    // ...
}

/// Similarly for other capabilities.
#[async_trait]
impl<T: Docker> Docker for FailoverTopology<T> {
    // Failover Docker orchestration.
}
````

**Key properties:**
- **Auto-derivation**: `Failover<K8sAnywhere>` gets `PostgreSQL` iff `K8sAnywhere: PostgreSQL`.
- **No boilerplate**: One blanket impl per capability *per higher-order type*.

## Rationale

- **Composition via generics**: Rust's trait solver auto-selects impls; zero runtime cost.
- **Compile-time safety**: Missing `T: Capability` → compile error (no panics).
- **Scalable**: O(capabilities) impls per higher-order type; a new `T` works automatically.
- **ISP-respecting**: Capabilities only surface if the sub-topology provides them.
- **Centralized logic**: Orchestration (e.g., cert propagation) lives in one place.

**Example usage:**
````rust
// ✅ Works: K8sAnywhere: PostgreSQL → Failover provides failover PG
let pg_failover: FailoverTopology<K8sAnywhereTopology> = ...;
pg_failover.deploy(&config).await;

// ✅ Works: LinuxHost: Docker → Failover provides failover Docker
let docker_failover: FailoverTopology<LinuxHostTopology> = ...;
docker_failover.deploy_docker(...).await;

// ❌ Compile fail: K8sAnywhere !: Docker
let invalid: FailoverTopology<K8sAnywhereTopology>;
invalid.deploy_docker(...); // `T: Docker` bound unsatisfied
````

## Consequences

**Pros:**
- **Extensible**: New topology `AWSTopology: PostgreSQL` → instant `Failover<AWSTopology>: PostgreSQL`.
- **Lean**: No useless impls (e.g., no `K8sAnywhere: Docker`).
- **Observable**: Logs trace every step.

**Cons:**
- **Monomorphization**: Generics generate code per T (mitigated: few Ts).
- **Delegation opacity**: Relies on rustdoc/logs for internals.

## Alternatives considered

| Approach | Pros | Cons |
|----------|------|------|
| **Manual per-T impls**<br>`impl PG for Failover<K8s> {..}`<br>`impl PG for Failover<Linux> {..}` | Explicit control | N×M explosion; violates ISP; hard to extend. |
| **Dynamic trait objects**<br>`Box<dyn AnyCapability>` | Runtime flex | Perf hit; type erasure; error-prone dispatch. |
| **Mega-topology trait**<br>All-in-one `OrchestratedTopology` | Simple wiring | Monolithic; poor composition. |
| **Registry dispatch**<br>Runtime capability lookup | Decoupled | Complex; no compile safety; perf/debug overhead. |

**Selected**: Blanket impls leverage Rust generics for safe, zero-cost composition.

## Additional Notes

- Applies to `MultisiteTopology<T>`, `ShardedTopology<T>`, etc.
- `FailoverTopology` in `failover.rs` is the first implementation.
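As a reading aid (not part of this change set): the Additional Notes say the pattern extends to `MultisiteTopology<T>` and `ShardedTopology<T>`. Below is a minimal sketch of what such a blanket impl could look like, assuming a hypothetical `MultisiteTopology` with a `sites: Vec<T>` field and the simplified two-method `PostgreSQL` trait used in the ADR's example code.

```rust
// Hypothetical illustration only: `MultisiteTopology` and its `sites` field do
// not exist in this change set; the trait is the simplified ADR version.
pub struct MultisiteTopology<T> {
    /// One sub-topology per site; the first entry is treated as the primary site.
    pub sites: Vec<T>,
}

#[async_trait]
impl<T: PostgreSQL + Send + Sync> PostgreSQL for MultisiteTopology<T> {
    async fn deploy(&self, config: &PostgreSQLConfig) -> Result<String, String> {
        // Deploy to every site; a real implementation would assign primary/replica
        // roles per site the way FailoverTopology does.
        let mut last = Err("MultisiteTopology has no sites".to_string());
        for site in &self.sites {
            last = Ok(site.deploy(config).await?);
        }
        last
    }

    async fn get_replication_certs(&self, cluster_name: &str) -> Result<ReplicationCerts, String> {
        // Delegate to the primary site, mirroring FailoverTopology's primary delegation.
        self.sites
            .first()
            .ok_or_else(|| "MultisiteTopology has no sites".to_string())?
            .get_replication_certs(cluster_name)
            .await
    }
}
```

Exactly as with `FailoverTopology`, `MultisiteTopology<K8sAnywhereTopology>` would only surface `PostgreSQL` when `K8sAnywhereTopology: PostgreSQL` holds.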
adr/015-higher-order-topologies/example.rs (new file, +153)

@@ -0,0 +1,153 @@
//! Example of Higher-Order Topologies in Harmony.
//! Demonstrates how `FailoverTopology<T>` automatically provides failover for *any* capability
//! supported by a sub-topology `T` via blanket trait impls.
//!
//! Key insight: No manual impls per T or capability -- scales effortlessly.
//! Users can:
//! - Write new `Topology` (impl capabilities on a struct).
//! - Compose with `FailoverTopology` (gets capabilities if T has them).
//! - Compile fails if capability missing (safety).

use async_trait::async_trait;
use tokio;

/// Capability trait: Deploy and manage PostgreSQL.
#[async_trait]
pub trait PostgreSQL {
    async fn deploy(&self, config: &PostgreSQLConfig) -> Result<String, String>;
    async fn get_replication_certs(&self, cluster_name: &str) -> Result<ReplicationCerts, String>;
}

/// Capability trait: Deploy Docker.
#[async_trait]
pub trait Docker {
    async fn deploy_docker(&self) -> Result<String, String>;
}

/// Configuration for PostgreSQL deployments.
#[derive(Clone)]
pub struct PostgreSQLConfig;

/// Replication certificates.
#[derive(Clone)]
pub struct ReplicationCerts;

/// Concrete topology: Kubernetes Anywhere (supports PostgreSQL).
#[derive(Clone)]
pub struct K8sAnywhereTopology;

#[async_trait]
impl PostgreSQL for K8sAnywhereTopology {
    async fn deploy(&self, _config: &PostgreSQLConfig) -> Result<String, String> {
        // Real impl: Use k8s helm chart, operator, etc.
        Ok("K8sAnywhere PostgreSQL deployed".to_string())
    }

    async fn get_replication_certs(&self, _cluster_name: &str) -> Result<ReplicationCerts, String> {
        Ok(ReplicationCerts)
    }
}

/// Concrete topology: Linux Host (supports Docker).
#[derive(Clone)]
pub struct LinuxHostTopology;

#[async_trait]
impl Docker for LinuxHostTopology {
    async fn deploy_docker(&self) -> Result<String, String> {
        // Real impl: Install/configure Docker on host.
        Ok("LinuxHost Docker deployed".to_string())
    }
}

/// Higher-Order Topology: Composes multiple sub-topologies (primary + replica).
/// Automatically derives *all* capabilities of `T` with failover orchestration.
///
/// - If `T: PostgreSQL`, then `FailoverTopology<T>: PostgreSQL` (blanket impl).
/// - Same for `Docker`, etc. No boilerplate!
/// - Compile-time safe: Missing `T: Capability` → error.
#[derive(Clone)]
pub struct FailoverTopology<T> {
    /// Primary sub-topology.
    pub primary: T,
    /// Replica sub-topology.
    pub replica: T,
}

/// Blanket impl: Failover PostgreSQL if T provides PostgreSQL.
/// Delegates reads to primary; deploys to both.
#[async_trait]
impl<T: PostgreSQL + Send + Sync + Clone> PostgreSQL for FailoverTopology<T> {
    async fn deploy(&self, config: &PostgreSQLConfig) -> Result<String, String> {
        // Orchestrate: Deploy primary first, then replica (e.g., via pg_basebackup).
        let primary_result = self.primary.deploy(config).await?;
        let replica_result = self.replica.deploy(config).await?;
        Ok(format!("Failover PG deployed: {} | {}", primary_result, replica_result))
    }

    async fn get_replication_certs(&self, cluster_name: &str) -> Result<ReplicationCerts, String> {
        // Delegate to primary (replica follows).
        self.primary.get_replication_certs(cluster_name).await
    }
}

/// Blanket impl: Failover Docker if T provides Docker.
#[async_trait]
impl<T: Docker + Send + Sync + Clone> Docker for FailoverTopology<T> {
    async fn deploy_docker(&self) -> Result<String, String> {
        // Orchestrate across primary + replica.
        let primary_result = self.primary.deploy_docker().await?;
        let replica_result = self.replica.deploy_docker().await?;
        Ok(format!("Failover Docker deployed: {} | {}", primary_result, replica_result))
    }
}

#[tokio::main]
async fn main() {
    let config = PostgreSQLConfig;

    println!("=== ✅ PostgreSQL Failover (K8sAnywhere supports PG) ===");
    let pg_failover = FailoverTopology {
        primary: K8sAnywhereTopology,
        replica: K8sAnywhereTopology,
    };
    let result = pg_failover.deploy(&config).await.unwrap();
    println!("Result: {}", result);

    println!("\n=== ✅ Docker Failover (LinuxHost supports Docker) ===");
    let docker_failover = FailoverTopology {
        primary: LinuxHostTopology,
        replica: LinuxHostTopology,
    };
    let result = docker_failover.deploy_docker().await.unwrap();
    println!("Result: {}", result);

    println!("\n=== ❌ Would fail to compile (K8sAnywhere !: Docker) ===");
    // let invalid = FailoverTopology {
    //     primary: K8sAnywhereTopology,
    //     replica: K8sAnywhereTopology,
    // };
    // invalid.deploy_docker().await.unwrap(); // Error: `K8sAnywhereTopology: Docker` not satisfied!
    // Very clear error message:
    // error[E0599]: the method `deploy_docker` exists for struct `FailoverTopology<K8sAnywhereTopology>`, but its trait bounds were not satisfied
    //   --> src/main.rs:90:9
    //    |
    // 4  | pub struct FailoverTopology<T> {
    //    | ------------------------------ method `deploy_docker` not found for this struct because it doesn't satisfy `FailoverTopology<K8sAnywhereTopology>: Docker`
    // ...
    // 37 | struct K8sAnywhereTopology;
    //    | -------------------------- doesn't satisfy `K8sAnywhereTopology: Docker`
    // ...
    // 90 |     invalid.deploy_docker(); // `T: Docker` bound unsatisfied
    //    |             ^^^^^^^^^^^^^ method cannot be called on `FailoverTopology<K8sAnywhereTopology>` due to unsatisfied trait bounds
    //    |
    // note: trait bound `K8sAnywhereTopology: Docker` was not satisfied
    //   --> src/main.rs:61:9
    //    |
    // 61 | impl<T: Docker + Send + Sync> Docker for FailoverTopology<T> {
    //    |         ^^^^^^                ------ -------------------
    //    |         |
    //    |         unsatisfied trait bound introduced here
    // note: the trait `Docker` must be implemented
}
@@ -1,7 +1,7 @@
# Design Document: Harmony PostgreSQL Module

**Status:** Draft
-**Last Updated:** 2023-10-27
+**Last Updated:** 2025-12-01
**Context:** Multi-site Data Replication & Orchestration

## 1. Overview
examples/operatorhub_catalog/Cargo.toml (new file, +18)

@@ -0,0 +1,18 @@
[package]
name = "example-operatorhub-catalogsource"
edition = "2024"
version.workspace = true
readme.workspace = true
license.workspace = true
publish = false

[dependencies]
harmony = { path = "../../harmony" }
harmony_cli = { path = "../../harmony_cli" }
harmony_types = { path = "../../harmony_types" }
cidr = { workspace = true }
tokio = { workspace = true }
harmony_macros = { path = "../../harmony_macros" }
log = { workspace = true }
env_logger = { workspace = true }
url = { workspace = true }
examples/operatorhub_catalog/src/main.rs (new file, +22)

@@ -0,0 +1,22 @@
use std::str::FromStr;

use harmony::{
    inventory::Inventory,
    modules::{k8s::apps::OperatorHubCatalogSourceScore, postgresql::CloudNativePgOperatorScore},
    topology::K8sAnywhereTopology,
};

#[tokio::main]
async fn main() {
    let operatorhub_catalog = OperatorHubCatalogSourceScore::default();
    let cnpg_operator = CloudNativePgOperatorScore::default();

    harmony_cli::run(
        Inventory::autoload(),
        K8sAnywhereTopology::from_env(),
        vec![Box::new(operatorhub_catalog), Box::new(cnpg_operator)],
        None,
    )
    .await
    .unwrap();
}
examples/postgresql/Cargo.toml (new file, +18)

@@ -0,0 +1,18 @@
[package]
name = "example-postgresql"
edition = "2024"
version.workspace = true
readme.workspace = true
license.workspace = true
publish = false

[dependencies]
harmony = { path = "../../harmony" }
harmony_cli = { path = "../../harmony_cli" }
harmony_types = { path = "../../harmony_types" }
cidr = { workspace = true }
tokio = { workspace = true }
harmony_macros = { path = "../../harmony_macros" }
log = { workspace = true }
env_logger = { workspace = true }
url = { workspace = true }
examples/postgresql/src/main.rs (new file, +22)

@@ -0,0 +1,22 @@
use harmony::{
    inventory::Inventory, modules::postgresql::PostgreSQLScore, topology::K8sAnywhereTopology,
};

#[tokio::main]
async fn main() {
    let postgresql = PostgreSQLScore {
        name: "harmony-postgres-example".to_string(), // Override default name
        namespace: "harmony-postgres-example".to_string(),
        ..Default::default() // Use harmony defaults, they are based on CNPG's default values:
                             // "default" namespace, 1 instance, 1Gi storage
    };

    harmony_cli::run(
        Inventory::autoload(),
        K8sAnywhereTopology::from_env(),
        vec![Box::new(postgresql)],
        None,
    )
    .await
    .unwrap();
}
harmony/src/domain/topology/failover.rs (new file, +19)

@@ -0,0 +1,19 @@
use async_trait::async_trait;

use crate::topology::{PreparationError, PreparationOutcome, Topology};

pub struct FailoverTopology<T> {
    pub primary: T,
    pub replica: T,
}

#[async_trait]
impl<T: Send + Sync> Topology for FailoverTopology<T> {
    fn name(&self) -> &str {
        "FailoverTopology"
    }

    async fn ensure_ready(&self) -> Result<PreparationOutcome, PreparationError> {
        todo!()
    }
}
@@ -1,5 +1,7 @@
+mod failover;
mod ha_cluster;
pub mod ingress;
+pub use failover::*;
use harmony_types::net::IpAddress;
mod host_binding;
mod http;
@@ -13,7 +15,7 @@ pub use k8s_anywhere::*;
pub use localhost::*;
pub mod k8s;
mod load_balancer;
-mod router;
+pub mod router;
mod tftp;
use async_trait::async_trait;
pub use ha_cluster::*;
@@ -1,11 +1,19 @@
+use async_trait::async_trait;
use cidr::Ipv4Cidr;
use derive_new::new;

use super::{IpAddress, LogicalHost};

+/// Basic network router abstraction (L3 IP routing/gateway).
+/// Distinguished from TlsRouter (L4 TLS passthrough).
pub trait Router: Send + Sync {
+    /// Gateway IP address for this subnet/router.
    fn get_gateway(&self) -> IpAddress;

+    /// CIDR block managed by this router.
    fn get_cidr(&self) -> Ipv4Cidr;

+    /// Logical host associated with this router.
    fn get_host(&self) -> LogicalHost;
}
@@ -38,3 +46,73 @@ impl Router for UnmanagedRouter {
        todo!()
    }
}
+
+#[derive(Clone, Debug)]
+/// Desired state config for a TLS passthrough route.
+/// Forwards external TLS (port 443) → backend service:target_port (no termination at router).
+/// Inspired by CNPG multisite: exposes `-rw`/`-ro` services publicly via OKD Route/HAProxy/K8s
+/// Gateway etc.
+///
+/// # Example
+/// ```
+/// use harmony::topology::router::TlsRoute;
+/// let postgres_rw = TlsRoute {
+///     hostname: "postgres-cluster-example.public.domain.io".to_string(),
+///     backend: "postgres-cluster-example-rw".to_string(), // k8s Service or HAProxy upstream
+///     target_port: 5432,
+/// };
+/// ```
+pub struct TlsRoute {
+    /// Public hostname clients connect to (TLS SNI, port 443 implicit).
+    /// Router matches this for passthrough forwarding.
+    pub hostname: String,
+
+    /// Backend/host identifier (k8s Service, HAProxy upstream, IP/FQDN, etc.).
+    pub backend: String,
+
+    /// Backend TCP port (Postgres: 5432).
+    pub target_port: u16,
+}
+
+/// Installs and queries TLS passthrough routes (L4 TCP/SNI forwarding, no TLS termination).
+/// Agnostic to impl: OKD Route, AWS NLB+HAProxy, k3s Envoy Gateway, Apache ProxyPass.
+/// Used by PostgreSQL capability to expose CNPG clusters multisite (site1 → site2 replication).
+///
+/// # Usage
+/// ```ignore
+/// use harmony::topology::router::TlsRoute;
+/// // After CNPG deploy, expose RW endpoint
+/// async fn route() {
+///     let topology = okd_topology();
+///     let route = TlsRoute { /* ... */ };
+///     topology.install_route(route).await; // OKD Route, HAProxy reload, etc.
+/// }
+/// ```
+#[async_trait]
+pub trait TlsRouter: Send + Sync {
+    /// Provisions the route (idempotent where possible).
+    /// Example: OKD Route{ host, to: backend:target_port, tls: {passthrough} };
+    /// HAProxy frontend→backend "postgres-upstream".
+    async fn install_route(&self, config: TlsRoute) -> Result<(), String>;
+
+    /// Installed route's public hostname.
+    fn hostname(&self) -> String;
+
+    /// Installed route's backend identifier.
+    fn backend(&self) -> String;
+
+    /// Installed route's backend port.
+    fn target_port(&self) -> u16;
+}
+
+impl std::fmt::Debug for dyn TlsRouter {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        f.write_fmt(format_args!(
+            "TlsRouter[hostname={}, backend={}:{}]",
+            self.hostname(),
+            self.backend(),
+            self.target_port()
+        ))
+    }
+}
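For illustration only (not part of the diff): a minimal sketch of a concrete `TlsRouter` implementation against the trait added above. The `InMemoryTlsRouter` name is hypothetical, and the `harmony::topology::router` import path is assumed from the doc examples; a real implementation would create an OKD Route or reload HAProxy inside `install_route`.

```rust
use std::sync::Mutex;

use async_trait::async_trait;
use harmony::topology::router::{TlsRoute, TlsRouter};

/// Hypothetical stand-in that just remembers the last installed route.
pub struct InMemoryTlsRouter {
    route: Mutex<Option<TlsRoute>>,
}

#[async_trait]
impl TlsRouter for InMemoryTlsRouter {
    async fn install_route(&self, config: TlsRoute) -> Result<(), String> {
        // A real implementation would provision the OKD Route / HAProxy frontend here.
        *self.route.lock().map_err(|e| e.to_string())? = Some(config);
        Ok(())
    }

    fn hostname(&self) -> String {
        self.route
            .lock()
            .unwrap()
            .as_ref()
            .map(|r| r.hostname.clone())
            .unwrap_or_default()
    }

    fn backend(&self) -> String {
        self.route
            .lock()
            .unwrap()
            .as_ref()
            .map(|r| r.backend.clone())
            .unwrap_or_default()
    }

    fn target_port(&self) -> u16 {
        self.route
            .lock()
            .unwrap()
            .as_ref()
            .map(|r| r.target_port)
            .unwrap_or(0)
    }
}
```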
@@ -17,6 +17,12 @@ use crate::{
    topology::{HostNetworkConfig, NetworkError, NetworkManager, k8s::K8sClient},
};

+/// TODO document properly the non-intuitive "roll forward only" behavior of nmstate in general.
+/// It is documented in the official nmstate docs, but worth mentioning here:
+///
+/// - You create a bond, nmstate will apply it
+/// - You delete the bond from nmstate, it will NOT delete it
+/// - To delete it you have to update it with its configuration set to null
pub struct OpenShiftNmStateNetworkManager {
    k8s_client: Arc<K8sClient>,
}
@@ -31,6 +37,7 @@ impl std::fmt::Debug for OpenShiftNmStateNetworkManager {
impl NetworkManager for OpenShiftNmStateNetworkManager {
    async fn ensure_network_manager_installed(&self) -> Result<(), NetworkError> {
        debug!("Installing NMState controller...");
+        // TODO use operatorhub maybe?
        self.k8s_client.apply_url(url::Url::parse("https://github.com/nmstate/kubernetes-nmstate/releases/download/v0.84.0/nmstate.io_nmstates.yaml").unwrap(), Some("nmstate"))
            .await?;
@@ -0,0 +1,157 @@
use std::collections::BTreeMap;

use k8s_openapi::{
    api::core::v1::{Affinity, Toleration},
    apimachinery::pkg::apis::meta::v1::ObjectMeta,
};
use kube::CustomResource;
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
use serde_json::Value;

#[derive(CustomResource, Deserialize, Serialize, Clone, Debug)]
#[kube(
    group = "operators.coreos.com",
    version = "v1alpha1",
    kind = "CatalogSource",
    plural = "catalogsources",
    namespaced = true,
    schema = "disabled"
)]
#[serde(rename_all = "camelCase")]
pub struct CatalogSourceSpec {
    #[serde(skip_serializing_if = "Option::is_none")]
    pub address: Option<String>,

    #[serde(skip_serializing_if = "Option::is_none")]
    pub config_map: Option<String>,

    #[serde(skip_serializing_if = "Option::is_none")]
    pub description: Option<String>,

    #[serde(skip_serializing_if = "Option::is_none")]
    pub display_name: Option<String>,

    #[serde(skip_serializing_if = "Option::is_none")]
    pub grpc_pod_config: Option<GrpcPodConfig>,

    #[serde(skip_serializing_if = "Option::is_none")]
    pub icon: Option<Icon>,

    #[serde(skip_serializing_if = "Option::is_none")]
    pub image: Option<String>,

    #[serde(skip_serializing_if = "Option::is_none")]
    pub priority: Option<i64>,

    #[serde(skip_serializing_if = "Option::is_none")]
    pub publisher: Option<String>,

    #[serde(skip_serializing_if = "Option::is_none")]
    pub run_as_root: Option<bool>,

    #[serde(skip_serializing_if = "Option::is_none")]
    pub secrets: Option<Vec<String>>,

    #[serde(skip_serializing_if = "Option::is_none")]
    pub source_type: Option<String>,

    #[serde(skip_serializing_if = "Option::is_none")]
    pub update_strategy: Option<UpdateStrategy>,
}

#[derive(Deserialize, Serialize, Clone, Debug)]
#[serde(rename_all = "camelCase")]
pub struct GrpcPodConfig {
    #[serde(skip_serializing_if = "Option::is_none")]
    pub affinity: Option<Affinity>,

    #[serde(skip_serializing_if = "Option::is_none")]
    pub extract_content: Option<ExtractContent>,

    #[serde(skip_serializing_if = "Option::is_none")]
    pub memory_target: Option<Value>,

    #[serde(skip_serializing_if = "Option::is_none")]
    pub node_selector: Option<BTreeMap<String, String>>,

    #[serde(skip_serializing_if = "Option::is_none")]
    pub priority_class_name: Option<String>,

    #[serde(skip_serializing_if = "Option::is_none")]
    pub security_context_config: Option<String>,

    #[serde(skip_serializing_if = "Option::is_none")]
    pub tolerations: Option<Vec<Toleration>>,
}

#[derive(Deserialize, Serialize, Clone, Debug, JsonSchema)]
#[serde(rename_all = "camelCase")]
pub struct ExtractContent {
    pub cache_dir: String,
    pub catalog_dir: String,
}

#[derive(Deserialize, Serialize, Clone, Debug, JsonSchema)]
#[serde(rename_all = "camelCase")]
pub struct Icon {
    pub base64data: String,
    pub mediatype: String,
}

#[derive(Deserialize, Serialize, Clone, Debug, JsonSchema)]
#[serde(rename_all = "camelCase")]
pub struct UpdateStrategy {
    #[serde(skip_serializing_if = "Option::is_none")]
    pub registry_poll: Option<RegistryPoll>,
}

#[derive(Deserialize, Serialize, Clone, Debug, JsonSchema)]
#[serde(rename_all = "camelCase")]
pub struct RegistryPoll {
    #[serde(skip_serializing_if = "Option::is_none")]
    pub interval: Option<String>,
}

impl Default for CatalogSource {
    fn default() -> Self {
        Self {
            metadata: ObjectMeta::default(),
            spec: CatalogSourceSpec {
                address: None,
                config_map: None,
                description: None,
                display_name: None,
                grpc_pod_config: None,
                icon: None,
                image: None,
                priority: None,
                publisher: None,
                run_as_root: None,
                secrets: None,
                source_type: None,
                update_strategy: None,
            },
        }
    }
}

impl Default for CatalogSourceSpec {
    fn default() -> Self {
        Self {
            address: None,
            config_map: None,
            description: None,
            display_name: None,
            grpc_pod_config: None,
            icon: None,
            image: None,
            priority: None,
            publisher: None,
            run_as_root: None,
            secrets: None,
            source_type: None,
            update_strategy: None,
        }
    }
}
harmony/src/modules/k8s/apps/crd/mod.rs (new file, +4)

@@ -0,0 +1,4 @@
mod catalogsources_operators_coreos_com;
pub use catalogsources_operators_coreos_com::*;
mod subscriptions_operators_coreos_com;
pub use subscriptions_operators_coreos_com::*;
@@ -0,0 +1,68 @@
use k8s_openapi::apimachinery::pkg::apis::meta::v1::ObjectMeta;
use kube::CustomResource;
use serde::{Deserialize, Serialize};

#[derive(CustomResource, Deserialize, Serialize, Clone, Debug)]
#[kube(
    group = "operators.coreos.com",
    version = "v1alpha1",
    kind = "Subscription",
    plural = "subscriptions",
    namespaced = true,
    schema = "disabled"
)]
#[serde(rename_all = "camelCase")]
pub struct SubscriptionSpec {
    #[serde(skip_serializing_if = "Option::is_none")]
    pub channel: Option<String>,

    #[serde(skip_serializing_if = "Option::is_none")]
    pub config: Option<SubscriptionConfig>,

    #[serde(skip_serializing_if = "Option::is_none")]
    pub install_plan_approval: Option<String>,

    pub name: String,

    pub source: String,

    pub source_namespace: String,

    #[serde(skip_serializing_if = "Option::is_none")]
    pub starting_csv: Option<String>,
}

#[derive(Deserialize, Serialize, Clone, Debug)]
#[serde(rename_all = "camelCase")]
pub struct SubscriptionConfig {
    #[serde(skip_serializing_if = "Option::is_none")]
    pub env: Option<Vec<k8s_openapi::api::core::v1::EnvVar>>,

    #[serde(skip_serializing_if = "Option::is_none")]
    pub node_selector: Option<std::collections::BTreeMap<String, String>>,

    #[serde(skip_serializing_if = "Option::is_none")]
    pub tolerations: Option<Vec<k8s_openapi::api::core::v1::Toleration>>,
}

impl Default for Subscription {
    fn default() -> Self {
        Subscription {
            metadata: ObjectMeta::default(),
            spec: SubscriptionSpec::default(),
        }
    }
}

impl Default for SubscriptionSpec {
    fn default() -> SubscriptionSpec {
        SubscriptionSpec {
            name: String::new(),
            source: String::new(),
            source_namespace: String::new(),
            channel: None,
            config: None,
            install_plan_approval: None,
            starting_csv: None,
        }
    }
}
harmony/src/modules/k8s/apps/mod.rs (new file, +3)

@@ -0,0 +1,3 @@
mod operatorhub;
pub use operatorhub::*;
pub mod crd;
harmony/src/modules/k8s/apps/operatorhub.rs (new file, +107)

@@ -0,0 +1,107 @@
// Write operatorhub catalog score
// For now this only supports OKD with the default catalog and OperatorHub setup, and does not
// verify OLM state or anything else. Very opinionated and bare-bones to start.

use k8s_openapi::apimachinery::pkg::apis::meta::v1::ObjectMeta;
use serde::Serialize;

use crate::interpret::Interpret;
use crate::modules::k8s::apps::crd::{
    CatalogSource, CatalogSourceSpec, RegistryPoll, UpdateStrategy,
};
use crate::modules::k8s::resource::K8sResourceScore;
use crate::score::Score;
use crate::topology::{K8sclient, Topology};

/// Installs the CatalogSource in a cluster which already has the required services and CRDs installed.
///
/// ```rust
/// use harmony::modules::k8s::apps::OperatorHubCatalogSourceScore;
///
/// let score = OperatorHubCatalogSourceScore::default();
/// ```
///
/// Required services:
/// - catalog-operator
/// - olm-operator
///
/// They are installed by default with OKD/OpenShift.
///
/// **Warning**: this initial implementation does not manage the dependencies. They must already
/// exist in the cluster.
#[derive(Debug, Clone, Serialize)]
pub struct OperatorHubCatalogSourceScore {
    pub name: String,
    pub namespace: String,
    pub image: String,
}

impl OperatorHubCatalogSourceScore {
    pub fn new(name: &str, namespace: &str, image: &str) -> Self {
        Self {
            name: name.to_string(),
            namespace: namespace.to_string(),
            image: image.to_string(),
        }
    }
}

impl Default for OperatorHubCatalogSourceScore {
    /// This default implementation will create this k8s resource:
    ///
    /// ```yaml
    /// apiVersion: operators.coreos.com/v1alpha1
    /// kind: CatalogSource
    /// metadata:
    ///   name: operatorhubio-catalog
    ///   namespace: openshift-marketplace
    /// spec:
    ///   sourceType: grpc
    ///   image: quay.io/operatorhubio/catalog:latest
    ///   displayName: Operatorhub Operators
    ///   publisher: OperatorHub.io
    ///   updateStrategy:
    ///     registryPoll:
    ///       interval: 60m
    /// ```
    fn default() -> Self {
        OperatorHubCatalogSourceScore {
            name: "operatorhubio-catalog".to_string(),
            namespace: "openshift-marketplace".to_string(),
            image: "quay.io/operatorhubio/catalog:latest".to_string(),
        }
    }
}

impl<T: Topology + K8sclient> Score<T> for OperatorHubCatalogSourceScore {
    fn create_interpret(&self) -> Box<dyn Interpret<T>> {
        let metadata = ObjectMeta {
            name: Some(self.name.clone()),
            namespace: Some(self.namespace.clone()),
            ..ObjectMeta::default()
        };

        let spec = CatalogSourceSpec {
            source_type: Some("grpc".to_string()),
            image: Some(self.image.clone()),
            display_name: Some("Operatorhub Operators".to_string()),
            publisher: Some("OperatorHub.io".to_string()),
            update_strategy: Some(UpdateStrategy {
                registry_poll: Some(RegistryPoll {
                    interval: Some("60m".to_string()),
                }),
            }),
            ..CatalogSourceSpec::default()
        };

        let catalog_source = CatalogSource { metadata, spec };

        K8sResourceScore::single(catalog_source, Some(self.namespace.clone())).create_interpret()
    }

    fn name(&self) -> String {
        format!("OperatorHubCatalogSourceScore({})", self.name)
    }
}
@@ -1,3 +1,4 @@
+pub mod apps;
pub mod deployment;
pub mod ingress;
pub mod namespace;
@@ -11,8 +11,10 @@ pub mod k8s;
pub mod lamp;
pub mod load_balancer;
pub mod monitoring;
+pub mod network;
pub mod okd;
pub mod opnsense;
+pub mod postgresql;
pub mod prometheus;
pub mod storage;
pub mod tenant;
harmony/src/modules/network/mod.rs (new file, +2)

@@ -0,0 +1,2 @@
mod tls_router;
pub use tls_router::*;
harmony/src/modules/network/tls_router.rs (new file, +99)

@@ -0,0 +1,99 @@
use async_trait::async_trait;
use harmony_types::id::Id;
use serde::Serialize;

use crate::data::Version;
use crate::domain::topology::router::{TlsRoute, TlsRouter};
use crate::interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome};
use crate::inventory::Inventory;
use crate::score::Score;
use crate::topology::{K8sclient, Topology};

/// Score for provisioning a TLS passthrough route.
/// Exposes backend services via TLS passthrough (L4 TCP/SNI forwarding).
/// Agnostic to underlying router impl (OKD Route, HAProxy, Envoy, etc.).
///
/// # Usage
/// ```
/// use harmony::modules::network::TlsRouterScore;
/// let score = TlsRouterScore::new("postgres-cluster-rw", "pg-rw.example.com", 5432);
/// ```
#[derive(Debug, Clone, Serialize)]
pub struct TlsRouterScore {
    /// Backend identifier (k8s Service, HAProxy upstream, IP/FQDN, etc.).
    pub backend: String,
    /// Public hostname clients connect to (TLS SNI, port 443 implicit).
    pub hostname: String,
    /// Backend TCP port.
    pub target_port: u16,
}

impl Default for TlsRouterScore {
    fn default() -> Self {
        Self {
            backend: "default-backend".to_string(),
            hostname: "tls.default.public".to_string(),
            target_port: 5432,
        }
    }
}

impl TlsRouterScore {
    pub fn new(backend: &str, hostname: &str, target_port: u16) -> Self {
        Self {
            backend: backend.to_string(),
            hostname: hostname.to_string(),
            target_port,
        }
    }
}

/// Custom interpret: provisions the TLS passthrough route on the topology.
#[derive(Debug, Clone)]
struct TlsRouterInterpret {
    tls_route: TlsRoute,
}

#[async_trait]
impl<T: Topology + K8sclient + TlsRouter + Send + Sync> Interpret<T> for TlsRouterInterpret {
    fn get_name(&self) -> InterpretName {
        InterpretName::Custom("TlsRouterInterpret")
    }
    fn get_version(&self) -> Version {
        todo!()
    }
    fn get_status(&self) -> InterpretStatus {
        todo!()
    }
    fn get_children(&self) -> Vec<Id> {
        todo!()
    }
    async fn execute(&self, _inventory: &Inventory, topo: &T) -> Result<Outcome, InterpretError> {
        topo.install_route(self.tls_route.clone())
            .await
            .map_err(|e| InterpretError::new(e.to_string()))?;

        Ok(Outcome::success(format!(
            "TLS route installed: {} → {}:{}",
            self.tls_route.hostname, self.tls_route.backend, self.tls_route.target_port
        )))
    }
}

impl<T: Topology + K8sclient + TlsRouter + Send + Sync> Score<T> for TlsRouterScore {
    fn create_interpret(&self) -> Box<dyn Interpret<T>> {
        let tls_route = TlsRoute {
            hostname: self.hostname.clone(),
            backend: self.backend.clone(),
            target_port: self.target_port,
        };
        Box::new(TlsRouterInterpret { tls_route })
    }

    fn name(&self) -> String {
        format!(
            "TlsRouterScore({}:{} → {})",
            self.backend, self.target_port, self.hostname
        )
    }
}
@@ -12,6 +12,7 @@ pub mod dns;
pub mod installation;
pub mod ipxe;
pub mod load_balancer;
+pub mod route;
pub mod templates;
pub mod upgrade;
pub use bootstrap_01_prepare::*;
harmony/src/modules/postgresql/capability.rs (new file, +85)

@@ -0,0 +1,85 @@
use async_trait::async_trait;
use harmony_types::storage::StorageSize;
use serde::Serialize;
use std::collections::HashMap;

#[async_trait]
pub trait PostgreSQL: Send + Sync {
    async fn deploy(&self, config: &PostgreSQLConfig) -> Result<String, String>;

    /// Extracts PostgreSQL-specific replication certs (PEM format) from a deployed primary cluster.
    /// Abstracts away storage/retrieval details (e.g., secrets, files).
    async fn get_replication_certs(&self, cluster_name: &str) -> Result<ReplicationCerts, String>;

    /// Gets the internal/private endpoint (e.g., k8s service FQDN:5432) for the cluster.
    async fn get_endpoint(&self, cluster_name: &str) -> Result<PostgreSQLEndpoint, String>;

    /// Gets the public/externally routable endpoint if configured (e.g., OKD Route:443 for TLS passthrough).
    /// Returns None if no public endpoint (internal-only cluster).
    /// UNSTABLE: This is opinionated for initial multisite use cases. Networking abstraction is complex
    /// (cf. k8s Ingress -> Gateway API evolution); may move to higher-order Networking/PostgreSQLNetworking trait.
    async fn get_public_endpoint(
        &self,
        cluster_name: &str,
    ) -> Result<Option<PostgreSQLEndpoint>, String>;
}

#[derive(Clone, Debug, Serialize)]
pub struct PostgreSQLConfig {
    pub cluster_name: String,
    pub instances: u32,
    pub storage_size: StorageSize,
    pub role: PostgreSQLClusterRole,
}

#[derive(Clone, Debug, Serialize)]
pub enum PostgreSQLClusterRole {
    Primary,
    Replica(ReplicaConfig),
}

#[derive(Clone, Debug, Serialize)]
pub struct ReplicaConfig {
    /// Name of the primary cluster this replica will sync from
    pub primary_cluster_name: String,
    /// Certs extracted from primary via Topology::get_replication_certs()
    pub replication_certs: ReplicationCerts,
    /// Bootstrap method (e.g., pg_basebackup from primary)
    pub bootstrap: BootstrapConfig,
    /// External cluster connection details for CNPG spec.externalClusters
    pub external_cluster: ExternalClusterConfig,
}

#[derive(Clone, Debug, Serialize)]
pub struct BootstrapConfig {
    pub strategy: BootstrapStrategy,
}

#[derive(Clone, Debug, Serialize)]
pub enum BootstrapStrategy {
    PgBasebackup,
}

#[derive(Clone, Debug, Serialize)]
pub struct ExternalClusterConfig {
    /// Name used in CNPG externalClusters list
    pub name: String,
    /// Connection params (host/port set by multisite logic, sslmode='verify-ca', etc.)
    pub connection_parameters: HashMap<String, String>,
}

#[derive(Clone, Debug, Serialize)]
pub struct ReplicationCerts {
    /// PEM-encoded CA cert from primary
    pub ca_cert_pem: String,
    /// PEM-encoded streaming_replica client cert (tls.crt)
    pub streaming_replica_cert_pem: String,
    /// PEM-encoded streaming_replica client key (tls.key)
    pub streaming_replica_key_pem: String,
}

#[derive(Clone, Debug)]
pub struct PostgreSQLEndpoint {
    pub host: String,
    pub port: u16,
}
harmony/src/modules/postgresql/cnpg/crd.rs (new file, +58)

@@ -0,0 +1,58 @@
use kube::{CustomResource, api::ObjectMeta};
use serde::{Deserialize, Serialize};

#[derive(CustomResource, Deserialize, Serialize, Clone, Debug)]
#[kube(
    group = "postgresql.cnpg.io",
    version = "v1",
    kind = "Cluster",
    plural = "clusters",
    namespaced = true,
    schema = "disabled"
)]
#[serde(rename_all = "camelCase")]
pub struct ClusterSpec {
    pub instances: i32,
    pub image_name: Option<String>,
    pub storage: Storage,
    pub bootstrap: Bootstrap,
}

impl Default for Cluster {
    fn default() -> Self {
        Cluster {
            metadata: ObjectMeta::default(),
            spec: ClusterSpec::default(),
        }
    }
}

impl Default for ClusterSpec {
    fn default() -> Self {
        Self {
            instances: 1,
            image_name: None,
            storage: Storage::default(),
            bootstrap: Bootstrap::default(),
        }
    }
}

#[derive(Deserialize, Serialize, Clone, Debug, Default)]
#[serde(rename_all = "camelCase")]
pub struct Storage {
    pub size: String,
}

#[derive(Deserialize, Serialize, Clone, Debug, Default)]
#[serde(rename_all = "camelCase")]
pub struct Bootstrap {
    pub initdb: Initdb,
}

#[derive(Deserialize, Serialize, Clone, Debug, Default)]
#[serde(rename_all = "camelCase")]
pub struct Initdb {
    pub database: String,
    pub owner: String,
}
harmony/src/modules/postgresql/cnpg/mod.rs (new file, +2)

@@ -0,0 +1,2 @@
mod crd;
pub use crd::*;
harmony/src/modules/postgresql/failover.rs (new file, +125)

@@ -0,0 +1,125 @@
use async_trait::async_trait;
use log::debug;
use log::info;
use std::collections::HashMap;

use crate::{
    modules::postgresql::capability::{
        BootstrapConfig, BootstrapStrategy, ExternalClusterConfig, PostgreSQL,
        PostgreSQLClusterRole, PostgreSQLConfig, PostgreSQLEndpoint, ReplicaConfig,
        ReplicationCerts,
    },
    topology::FailoverTopology,
};

#[async_trait]
impl<T: PostgreSQL> PostgreSQL for FailoverTopology<T> {
    async fn deploy(&self, config: &PostgreSQLConfig) -> Result<String, String> {
        info!(
            "Starting deployment of failover topology '{}'",
            config.cluster_name
        );

        let primary_config = PostgreSQLConfig {
            cluster_name: config.cluster_name.clone(),
            instances: config.instances,
            storage_size: config.storage_size.clone(),
            role: PostgreSQLClusterRole::Primary,
        };

        info!(
            "Deploying primary cluster '{}' ({} instances, {:?} storage)",
            primary_config.cluster_name, primary_config.instances, primary_config.storage_size
        );

        let primary_cluster_name = self.primary.deploy(&primary_config).await?;

        info!("Primary cluster '{primary_cluster_name}' deployed successfully");

        info!("Retrieving replication certificates for primary '{primary_cluster_name}'");

        let certs = self
            .primary
            .get_replication_certs(&primary_cluster_name)
            .await?;

        info!("Replication certificates retrieved successfully");

        info!("Retrieving public endpoint for primary '{primary_cluster_name}'");

        let endpoint = self
            .primary
            .get_public_endpoint(&primary_cluster_name)
            .await?
            .ok_or_else(|| "No public endpoint configured on primary cluster".to_string())?;

        info!(
            "Public endpoint '{}:{}' retrieved for primary",
            endpoint.host, endpoint.port
        );

        info!("Configuring replica connection parameters and bootstrap");

        let mut connection_parameters = HashMap::new();
        connection_parameters.insert("host".to_string(), endpoint.host);
        connection_parameters.insert("port".to_string(), endpoint.port.to_string());
        connection_parameters.insert("dbname".to_string(), "postgres".to_string());
        connection_parameters.insert("user".to_string(), "streaming_replica".to_string());
        connection_parameters.insert("sslmode".to_string(), "verify-ca".to_string());
        connection_parameters.insert("sslnegotiation".to_string(), "direct".to_string());

        debug!("Replica connection parameters: {:?}", connection_parameters);

        let external_cluster = ExternalClusterConfig {
            name: primary_cluster_name.clone(),
            connection_parameters,
        };

        let bootstrap_config = BootstrapConfig {
            strategy: BootstrapStrategy::PgBasebackup,
        };

        let replica_cluster_config = ReplicaConfig {
            primary_cluster_name: primary_cluster_name.clone(),
            replication_certs: certs,
            bootstrap: bootstrap_config,
            external_cluster,
        };

        let replica_config = PostgreSQLConfig {
            cluster_name: format!("{}-replica", primary_cluster_name),
            instances: config.instances,
            storage_size: config.storage_size.clone(),
            role: PostgreSQLClusterRole::Replica(replica_cluster_config),
        };

        info!(
            "Deploying replica cluster '{}' ({} instances, {:?} storage) on replica topology",
            replica_config.cluster_name, replica_config.instances, replica_config.storage_size
        );

        self.replica.deploy(&replica_config).await?;

        info!(
            "Replica cluster '{}' deployed successfully; failover topology '{}' ready",
            replica_config.cluster_name, config.cluster_name
        );

        Ok(primary_cluster_name)
    }

    async fn get_replication_certs(&self, cluster_name: &str) -> Result<ReplicationCerts, String> {
        self.primary.get_replication_certs(cluster_name).await
    }

    async fn get_endpoint(&self, cluster_name: &str) -> Result<PostgreSQLEndpoint, String> {
        self.primary.get_endpoint(cluster_name).await
    }

    async fn get_public_endpoint(
        &self,
        cluster_name: &str,
    ) -> Result<Option<PostgreSQLEndpoint>, String> {
        self.primary.get_public_endpoint(cluster_name).await
    }
}
harmony/src/modules/postgresql/mod.rs (new file, +11)

@@ -0,0 +1,11 @@
pub mod capability;
mod score;
pub use score::*;
mod score_public;
pub use score_public::*;

pub mod failover;
mod operator;
pub use operator::*;

pub mod cnpg;
102
harmony/src/modules/postgresql/operator.rs
Normal file
102
harmony/src/modules/postgresql/operator.rs
Normal file
@@ -0,0 +1,102 @@
use k8s_openapi::apimachinery::pkg::apis::meta::v1::ObjectMeta;
use serde::Serialize;

use crate::interpret::Interpret;
use crate::modules::k8s::apps::crd::{Subscription, SubscriptionSpec};
use crate::modules::k8s::resource::K8sResourceScore;
use crate::score::Score;
use crate::topology::{K8sclient, Topology};

/// Install the CloudNativePG (CNPG) Operator via an OperatorHub `Subscription`.
///
/// This Score creates a `Subscription` Custom Resource in the specified namespace.
///
/// The default implementation pulls the `cloudnative-pg` operator from the
/// `operatorhubio-catalog` source.
///
/// # Goals
/// - Deploy the CNPG Operator to manage PostgreSQL clusters in OpenShift/OKD environments.
///
/// # Usage
/// ```
/// use harmony::modules::postgresql::CloudNativePgOperatorScore;
/// let score = CloudNativePgOperatorScore::default();
/// ```
///
/// Alternatively, you can take control of the most relevant fields this way:
///
/// ```
/// use harmony::modules::postgresql::CloudNativePgOperatorScore;
///
/// let score = CloudNativePgOperatorScore {
///     namespace: "custom-cnpg-namespace".to_string(),
///     channel: "unstable-i-want-bleedingedge-v498437".to_string(),
///     install_plan_approval: "Manual".to_string(),
///     source: "operatorhubio-catalog-but-different".to_string(),
///     source_namespace: "i-customize-everything-marketplace".to_string(),
/// };
/// ```
///
/// # Limitations
/// - **OperatorHub dependency**: Requires OperatorHub catalog sources (e.g., `operatorhubio-catalog` in `openshift-marketplace`).
/// - **OKD/OpenShift assumption**: Catalog/source names and namespaces are hardcoded for OKD-like setups; adjust for upstream OpenShift.
/// - **Hardcoded values in Default implementation**: Operator name (`cloudnative-pg`), channel (`stable-v1`), automatic install plan approval.
/// - **No config options**: Does not support custom `SubscriptionConfig` (env vars, node selectors, tolerations).
/// - **Single namespace**: Targets one namespace per score instance.
#[derive(Debug, Clone, Serialize)]
pub struct CloudNativePgOperatorScore {
    pub namespace: String,
    pub channel: String,
    pub install_plan_approval: String,
    pub source: String,
    pub source_namespace: String,
}

impl Default for CloudNativePgOperatorScore {
    fn default() -> Self {
        Self {
            namespace: "openshift-operators".to_string(),
            channel: "stable-v1".to_string(),
            install_plan_approval: "Automatic".to_string(),
            source: "operatorhubio-catalog".to_string(),
            source_namespace: "openshift-marketplace".to_string(),
        }
    }
}

impl CloudNativePgOperatorScore {
    pub fn new(namespace: &str) -> Self {
        Self {
            namespace: namespace.to_string(),
            ..Default::default()
        }
    }
}

impl<T: Topology + K8sclient> Score<T> for CloudNativePgOperatorScore {
    fn create_interpret(&self) -> Box<dyn Interpret<T>> {
        let metadata = ObjectMeta {
            name: Some("cloudnative-pg".to_string()),
            namespace: Some(self.namespace.clone()),
            ..ObjectMeta::default()
        };

        let spec = SubscriptionSpec {
            channel: Some(self.channel.clone()),
            config: None,
            install_plan_approval: Some(self.install_plan_approval.clone()),
            name: "cloudnative-pg".to_string(),
            source: self.source.clone(),
            source_namespace: self.source_namespace.clone(),
            starting_csv: None,
        };

        let subscription = Subscription { metadata, spec };

        K8sResourceScore::single(subscription, Some(self.namespace.clone())).create_interpret()
    }

    fn name(&self) -> String {
        format!("CloudNativePgOperatorScore({})", self.namespace)
    }
}
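Because every field is public and `Default` is implemented, partial overrides can also use struct-update syntax instead of spelling out every field. A small sketch (values illustrative; with `Manual` approval, OLM waits for the generated InstallPlan to be approved before installing):

```rust
use harmony::modules::postgresql::CloudNativePgOperatorScore;

/// Illustrative only: pin the channel and require manual InstallPlan approval,
/// keeping the defaults above for namespace and catalog source.
fn pinned_cnpg_operator_score() -> CloudNativePgOperatorScore {
    CloudNativePgOperatorScore {
        channel: "stable-v1".to_string(),
        install_plan_approval: "Manual".to_string(),
        ..CloudNativePgOperatorScore::default()
    }
}
```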
93
harmony/src/modules/postgresql/score.rs
Normal file
@@ -0,0 +1,93 @@
use serde::Serialize;

use crate::interpret::Interpret;
use crate::modules::k8s::resource::K8sResourceScore;
use crate::modules::postgresql::cnpg::{Bootstrap, Cluster, ClusterSpec, Initdb, Storage};
use crate::score::Score;
use crate::topology::{K8sclient, Topology};
use k8s_openapi::apimachinery::pkg::apis::meta::v1::ObjectMeta;

/// Deploys an opinionated, highly available PostgreSQL cluster managed by CNPG.
///
/// # Goals
/// - Production-ready Postgres HA (3 instances), persistent storage, app DB.
///
/// # Usage
/// ```
/// use harmony::modules::postgresql::PostgreSQLScore;
/// let score = PostgreSQLScore::new("my-app-ns");
/// ```
///
/// # Limitations (Happy Path)
/// - Requires the CNPG operator to be installed (use CloudNativePgOperatorScore).
/// - No backups, monitoring, or extensions configured.
///
/// TODO: refactor this to declare a clean dependency on the CNPG operator. The CNPG operator will
/// then self-deploy either via OperatorHub or a Helm chart depending on the k8s flavor. This is
/// CNPG-specific behavior.
#[derive(Debug, Clone, Serialize)]
pub struct PostgreSQLScore {
    pub name: String,
    /// **Note:** on OpenShift-based clusters, the `default` namespace has security
    /// settings incompatible with the default CNPG behavior.
    pub namespace: String,
    pub instances: i32,
    pub storage_size: String,
    pub image_name: Option<String>,
}

impl Default for PostgreSQLScore {
    fn default() -> Self {
        Self {
            name: "harmony-pg".to_string(),
            // We use the `harmony` namespace by default since some clusters (OpenShift family)
            // configure the `default` namespace in a way that is incompatible with CNPG.
            namespace: "harmony".to_string(),
            instances: 1,
            storage_size: "1Gi".to_string(),
            image_name: None, // This lets CNPG use its default image
        }
    }
}

impl PostgreSQLScore {
    pub fn new(namespace: &str) -> Self {
        Self {
            namespace: namespace.to_string(),
            ..Default::default()
        }
    }
}

impl<T: Topology + K8sclient> Score<T> for PostgreSQLScore {
    fn create_interpret(&self) -> Box<dyn Interpret<T>> {
        let metadata = ObjectMeta {
            name: Some(self.name.clone()),
            namespace: Some(self.namespace.clone()),
            ..ObjectMeta::default()
        };

        let spec = ClusterSpec {
            instances: self.instances,
            image_name: self.image_name.clone(),
            storage: Storage {
                size: self.storage_size.clone(),
            },
            bootstrap: Bootstrap {
                initdb: Initdb {
                    database: "app".to_string(),
                    owner: "app".to_string(),
                },
            },
            ..ClusterSpec::default()
        };

        let cluster = Cluster { metadata, spec };

        K8sResourceScore::single(cluster, Some(self.namespace.clone())).create_interpret()
    }

    fn name(&self) -> String {
        format!("PostgreSQLScore({})", self.namespace)
    }
}
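The public fields make it straightforward to move off the single-instance defaults; a sketch of an HA-sized variant (sizes illustrative, not a recommendation):

```rust
use harmony::modules::postgresql::PostgreSQLScore;

/// Illustrative only: a 3-instance cluster with a larger volume, overriding the
/// single-instance defaults above via struct-update syntax.
fn ha_postgres_score(namespace: &str) -> PostgreSQLScore {
    PostgreSQLScore {
        namespace: namespace.to_string(),
        instances: 3,
        storage_size: "10Gi".to_string(),
        ..PostgreSQLScore::default()
    }
}
```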
116
harmony/src/modules/postgresql/score_public.rs
Normal file
@@ -0,0 +1,116 @@
use async_trait::async_trait;
use harmony_types::id::Id;
use serde::Serialize;

use crate::data::Version;
use crate::domain::topology::router::{TlsRoute, TlsRouter};
use crate::interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome};
use crate::inventory::Inventory;
use crate::modules::k8s::resource::K8sResourceScore;
use crate::modules::postgresql::PostgreSQLScore;
use crate::modules::postgresql::cnpg::{Bootstrap, Cluster, ClusterSpec, Initdb, Storage};
use crate::score::Score;
use crate::topology::{K8sclient, Topology};
use k8s_openapi::apimachinery::pkg::apis::meta::v1::ObjectMeta;

/// Deploys a public PostgreSQL cluster: CNPG + TLS passthrough route for the RW endpoint.
/// For failover/multisite: exposes single-instance or small HA Postgres publicly.
///
/// Sequence: PostgreSQLScore → TlsRouter::install_route (RW backend).
///
/// # Usage
/// ```
/// use harmony::modules::postgresql::PublicPostgreSQLScore;
/// let score = PublicPostgreSQLScore::new("harmony", "pg-rw.example.com");
/// ```
#[derive(Debug, Clone, Serialize)]
pub struct PublicPostgreSQLScore {
    /// Inner non-public Postgres cluster config.
    pub inner: PostgreSQLScore,
    /// Public hostname for RW TLS passthrough (port 443 → cluster-rw:5432).
    pub hostname: String,
}

impl Default for PublicPostgreSQLScore {
    fn default() -> Self {
        Self {
            inner: PostgreSQLScore::default(),
            hostname: "postgres.default.public".to_string(),
        }
    }
}

impl PublicPostgreSQLScore {
    pub fn new(namespace: &str, hostname: &str) -> Self {
        Self {
            inner: PostgreSQLScore::new(namespace),
            hostname: hostname.to_string(),
        }
    }
}

/// Custom interpret: deploy Postgres then install public TLS route.
#[derive(Debug, Clone)]
struct PublicPostgreSQLInterpret {
    postgres_score: PostgreSQLScore,
    tls_route: TlsRoute,
}

#[async_trait]
impl<T: Topology + K8sclient + TlsRouter + Send + Sync> Interpret<T> for PublicPostgreSQLInterpret {
    fn get_name(&self) -> InterpretName {
        InterpretName::Custom("PublicPostgreSQLInterpret")
    }
    fn get_version(&self) -> Version {
        todo!()
    }
    fn get_status(&self) -> InterpretStatus {
        todo!()
    }
    fn get_children(&self) -> Vec<Id> {
        todo!()
    }
    async fn execute(&self, inventory: &Inventory, topo: &T) -> Result<Outcome, InterpretError> {
        // Deploy CNPG cluster first (creates -rw service)
        self.postgres_score
            .create_interpret()
            .execute(inventory, topo)
            .await?;

        // Expose RW publicly via TLS passthrough
        topo.install_route(self.tls_route.clone())
            .await
            .map_err(|e| InterpretError::new(e))?;

        Ok(Outcome::success(format!(
            "Public CNPG cluster '{}' deployed with TLS passthrough route '{}'",
            self.postgres_score.name.clone(),
            self.tls_route.hostname
        )))
    }
}

impl<T: Topology + K8sclient + TlsRouter + Send + Sync> Score<T> for PublicPostgreSQLScore {
    fn create_interpret(&self) -> Box<dyn Interpret<T>> {
        let rw_backend = format!("{}-rw", self.inner.name);
        let tls_route = TlsRoute {
            hostname: self.hostname.clone(),
            backend: rw_backend,
            target_port: 5432,
        };

        Box::new(PublicPostgreSQLInterpret {
            postgres_score: self.inner.clone(),
            tls_route,
        })
    }

    fn name(&self) -> String {
        format!(
            "PublicPostgreSQLScore({}:{})",
            self.inner.namespace, self.hostname
        )
    }
}

// TODO: Add RO route (separate hostname/backend="cluster-ro"), backups, failover logic.
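As with the other scores, both fields are public, so the inner cluster can be customized before wrapping it; a sketch (names and hostname are illustrative):

```rust
use harmony::modules::postgresql::{PostgreSQLScore, PublicPostgreSQLScore};

/// Illustrative only: expose a named cluster publicly. The Score above routes the
/// given hostname to the CNPG `<name>-rw` service on port 5432 via TLS passthrough.
fn public_billing_pg_score() -> PublicPostgreSQLScore {
    PublicPostgreSQLScore {
        inner: PostgreSQLScore {
            name: "billing-pg".to_string(),
            namespace: "billing".to_string(),
            ..PostgreSQLScore::default()
        },
        hostname: "pg.billing.example.com".to_string(),
    }
}
```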
harmony_types/src/lib.rs
@@ -1,3 +1,4 @@
pub mod id;
pub mod net;
pub mod storage;
pub mod switch;
6
harmony_types/src/storage.rs
Normal file
@@ -0,0 +1,6 @@
use serde::{Deserialize, Serialize};

#[derive(Copy, Clone, PartialEq, Eq, Hash, Serialize, Deserialize, PartialOrd, Ord, Debug)]
pub struct StorageSize {
    size_bytes: u64,
}