Compare commits
25 Commits
d0b7502c53
...
feat/postg
| Author | SHA1 | Date | |
|---|---|---|---|
| 204795a74f | |||
| 66a9a76a6b | |||
| 440e684b35 | |||
| b0383454f0 | |||
| 9e8f3ce52f | |||
| c3ec7070ec | |||
| 29821d5e9f | |||
| 446e079595 | |||
| e0da5764fb | |||
| e9cab92585 | |||
| d06bd4dac6 | |||
| 142300802d | |||
| 2254641f3d | |||
| b61e4f9a96 | |||
| 2e367d88d4 | |||
| 9edc42a665 | |||
| f242aafebb | |||
| 3e14ebd62c | |||
| 1b19638df4 | |||
| d39b1957cd | |||
| 357ca93d90 | |||
| 8103932f23 | |||
| 9617e1cfde | |||
| a953284386 | |||
| bfde5f58ed |
31
Cargo.lock
generated
31
Cargo.lock
generated
@@ -1776,6 +1776,21 @@ dependencies = [
|
||||
"url",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "example-multisite-postgres"
|
||||
version = "0.1.0"
|
||||
dependencies = [
|
||||
"cidr",
|
||||
"env_logger",
|
||||
"harmony",
|
||||
"harmony_cli",
|
||||
"harmony_macros",
|
||||
"harmony_types",
|
||||
"log",
|
||||
"tokio",
|
||||
"url",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "example-nanodc"
|
||||
version = "0.1.0"
|
||||
@@ -1835,6 +1850,21 @@ dependencies = [
|
||||
"url",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "example-operatorhub-catalogsource"
|
||||
version = "0.1.0"
|
||||
dependencies = [
|
||||
"cidr",
|
||||
"env_logger",
|
||||
"harmony",
|
||||
"harmony_cli",
|
||||
"harmony_macros",
|
||||
"harmony_types",
|
||||
"log",
|
||||
"tokio",
|
||||
"url",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "example-opnsense"
|
||||
version = "0.1.0"
|
||||
@@ -2546,6 +2576,7 @@ version = "0.1.0"
|
||||
dependencies = [
|
||||
"rand 0.9.2",
|
||||
"serde",
|
||||
"serde_json",
|
||||
"url",
|
||||
]
|
||||
|
||||
|
||||
114
adr/015-higher-order-topologies.md
Normal file
114
adr/015-higher-order-topologies.md
Normal file
@@ -0,0 +1,114 @@
|
||||
# Architecture Decision Record: Higher-Order Topologies
|
||||
|
||||
**Initial Author:** Jean-Gabriel Gill-Couture
|
||||
**Initial Date:** 2025-12-08
|
||||
**Last Updated Date:** 2025-12-08
|
||||
|
||||
## Status
|
||||
|
||||
Implemented
|
||||
|
||||
## Context
|
||||
|
||||
Harmony models infrastructure as **Topologies** (deployment targets like `K8sAnywhereTopology`, `LinuxHostTopology`) implementing **Capabilities** (tech traits like `PostgreSQL`, `Docker`).
|
||||
|
||||
**Higher-Order Topologies** (e.g., `FailoverTopology<T>`) compose/orchestrate capabilities *across* multiple underlying topologies (e.g., primary+replica `T`).
|
||||
|
||||
Naive design requires manual `impl Capability for HigherOrderTopology<T>` *per T per capability*, causing:
|
||||
- **Impl explosion**: N topologies × M capabilities = N×M boilerplate.
|
||||
- **ISP violation**: Topologies forced to impl unrelated capabilities.
|
||||
- **Maintenance hell**: New topology needs impls for *all* orchestrated capabilities; new capability needs impls for *all* topologies/higher-order.
|
||||
- **Barrier to extension**: Users can't easily add topologies without todos/panics.
|
||||
|
||||
This makes scaling Harmony impractical as ecosystem grows.
|
||||
|
||||
## Decision
|
||||
|
||||
Use **blanket trait impls** on higher-order topologies to *automatically* derive orchestration:
|
||||
|
||||
````rust
|
||||
/// Higher-Order Topology: Orchestrates capabilities across sub-topologies.
|
||||
pub struct FailoverTopology<T> {
|
||||
/// Primary sub-topology.
|
||||
primary: T,
|
||||
/// Replica sub-topology.
|
||||
replica: T,
|
||||
}
|
||||
|
||||
/// Automatically provides PostgreSQL failover for *any* `T: PostgreSQL`.
|
||||
/// Delegates to primary for queries; orchestrates deploy across both.
|
||||
#[async_trait]
|
||||
impl<T: PostgreSQL> PostgreSQL for FailoverTopology<T> {
|
||||
async fn deploy(&self, config: &PostgreSQLConfig) -> Result<String, String> {
|
||||
// Deploy primary; extract certs/endpoint;
|
||||
// deploy replica with pg_basebackup + TLS passthrough.
|
||||
// (Full impl logged/elaborated.)
|
||||
}
|
||||
|
||||
// Delegate queries to primary.
|
||||
async fn get_replication_certs(&self, cluster_name: &str) -> Result<ReplicationCerts, String> {
|
||||
self.primary.get_replication_certs(cluster_name).await
|
||||
}
|
||||
// ...
|
||||
}
|
||||
|
||||
/// Similarly for other capabilities.
|
||||
#[async_trait]
|
||||
impl<T: Docker> Docker for FailoverTopology<T> {
|
||||
// Failover Docker orchestration.
|
||||
}
|
||||
````
|
||||
|
||||
**Key properties:**
|
||||
- **Auto-derivation**: `Failover<K8sAnywhere>` gets `PostgreSQL` iff `K8sAnywhere: PostgreSQL`.
|
||||
- **No boilerplate**: One blanket impl per capability *per higher-order type*.
|
||||
|
||||
## Rationale
|
||||
|
||||
- **Composition via generics**: Rust trait solver auto-selects impls; zero runtime cost.
|
||||
- **Compile-time safety**: Missing `T: Capability` → compile error (no panics).
|
||||
- **Scalable**: O(capabilities) impls per higher-order; new `T` auto-works.
|
||||
- **ISP-respecting**: Capabilities only surface if sub-topology provides.
|
||||
- **Centralized logic**: Orchestration (e.g., cert propagation) in one place.
|
||||
|
||||
**Example usage:**
|
||||
````rust
|
||||
// ✅ Works: K8sAnywhere: PostgreSQL → Failover provides failover PG
|
||||
let pg_failover: FailoverTopology<K8sAnywhereTopology> = ...;
|
||||
pg_failover.deploy_pg(config).await;
|
||||
|
||||
// ✅ Works: LinuxHost: Docker → Failover provides failover Docker
|
||||
let docker_failover: FailoverTopology<LinuxHostTopology> = ...;
|
||||
docker_failover.deploy_docker(...).await;
|
||||
|
||||
// ❌ Compile fail: K8sAnywhere !: Docker
|
||||
let invalid: FailoverTopology<K8sAnywhereTopology>;
|
||||
invalid.deploy_docker(...); // `T: Docker` bound unsatisfied
|
||||
````
|
||||
|
||||
## Consequences
|
||||
|
||||
**Pros:**
|
||||
- **Extensible**: New topology `AWSTopology: PostgreSQL` → instant `Failover<AWSTopology>: PostgreSQL`.
|
||||
- **Lean**: No useless impls (e.g., no `K8sAnywhere: Docker`).
|
||||
- **Observable**: Logs trace every step.
|
||||
|
||||
**Cons:**
|
||||
- **Monomorphization**: Generics generate code per T (mitigated: few Ts).
|
||||
- **Delegation opacity**: Relies on rustdoc/logs for internals.
|
||||
|
||||
## Alternatives considered
|
||||
|
||||
| Approach | Pros | Cons |
|
||||
|----------|------|------|
|
||||
| **Manual per-T impls**<br>`impl PG for Failover<K8s> {..}`<br>`impl PG for Failover<Linux> {..}` | Explicit control | N×M explosion; violates ISP; hard to extend. |
|
||||
| **Dynamic trait objects**<br>`Box<dyn AnyCapability>` | Runtime flex | Perf hit; type erasure; error-prone dispatch. |
|
||||
| **Mega-topology trait**<br>All-in-one `OrchestratedTopology` | Simple wiring | Monolithic; poor composition. |
|
||||
| **Registry dispatch**<br>Runtime capability lookup | Decoupled | Complex; no compile safety; perf/debug overhead. |
|
||||
|
||||
**Selected**: Blanket impls leverage Rust generics for safe, zero-cost composition.
|
||||
|
||||
## Additional Notes
|
||||
|
||||
- Applies to `MultisiteTopology<T>`, `ShardedTopology<T>`, etc.
|
||||
- `FailoverTopology` in `failover.rs` is first implementation.
|
||||
153
adr/015-higher-order-topologies/example.rs
Normal file
153
adr/015-higher-order-topologies/example.rs
Normal file
@@ -0,0 +1,153 @@
|
||||
//! Example of Higher-Order Topologies in Harmony.
|
||||
//! Demonstrates how `FailoverTopology<T>` automatically provides failover for *any* capability
|
||||
//! supported by a sub-topology `T` via blanket trait impls.
|
||||
//!
|
||||
//! Key insight: No manual impls per T or capability -- scales effortlessly.
|
||||
//! Users can:
|
||||
//! - Write new `Topology` (impl capabilities on a struct).
|
||||
//! - Compose with `FailoverTopology` (gets capabilities if T has them).
|
||||
//! - Compile fails if capability missing (safety).
|
||||
|
||||
use async_trait::async_trait;
|
||||
use tokio;
|
||||
|
||||
/// Capability trait: Deploy and manage PostgreSQL.
|
||||
#[async_trait]
|
||||
pub trait PostgreSQL {
|
||||
async fn deploy(&self, config: &PostgreSQLConfig) -> Result<String, String>;
|
||||
async fn get_replication_certs(&self, cluster_name: &str) -> Result<ReplicationCerts, String>;
|
||||
}
|
||||
|
||||
/// Capability trait: Deploy Docker.
|
||||
#[async_trait]
|
||||
pub trait Docker {
|
||||
async fn deploy_docker(&self) -> Result<String, String>;
|
||||
}
|
||||
|
||||
/// Configuration for PostgreSQL deployments.
|
||||
#[derive(Clone)]
|
||||
pub struct PostgreSQLConfig;
|
||||
|
||||
/// Replication certificates.
|
||||
#[derive(Clone)]
|
||||
pub struct ReplicationCerts;
|
||||
|
||||
/// Concrete topology: Kubernetes Anywhere (supports PostgreSQL).
|
||||
#[derive(Clone)]
|
||||
pub struct K8sAnywhereTopology;
|
||||
|
||||
#[async_trait]
|
||||
impl PostgreSQL for K8sAnywhereTopology {
|
||||
async fn deploy(&self, _config: &PostgreSQLConfig) -> Result<String, String> {
|
||||
// Real impl: Use k8s helm chart, operator, etc.
|
||||
Ok("K8sAnywhere PostgreSQL deployed".to_string())
|
||||
}
|
||||
|
||||
async fn get_replication_certs(&self, _cluster_name: &str) -> Result<ReplicationCerts, String> {
|
||||
Ok(ReplicationCerts)
|
||||
}
|
||||
}
|
||||
|
||||
/// Concrete topology: Linux Host (supports Docker).
|
||||
#[derive(Clone)]
|
||||
pub struct LinuxHostTopology;
|
||||
|
||||
#[async_trait]
|
||||
impl Docker for LinuxHostTopology {
|
||||
async fn deploy_docker(&self) -> Result<String, String> {
|
||||
// Real impl: Install/configure Docker on host.
|
||||
Ok("LinuxHost Docker deployed".to_string())
|
||||
}
|
||||
}
|
||||
|
||||
/// Higher-Order Topology: Composes multiple sub-topologies (primary + replica).
|
||||
/// Automatically derives *all* capabilities of `T` with failover orchestration.
|
||||
///
|
||||
/// - If `T: PostgreSQL`, then `FailoverTopology<T>: PostgreSQL` (blanket impl).
|
||||
/// - Same for `Docker`, etc. No boilerplate!
|
||||
/// - Compile-time safe: Missing `T: Capability` → error.
|
||||
#[derive(Clone)]
|
||||
pub struct FailoverTopology<T> {
|
||||
/// Primary sub-topology.
|
||||
pub primary: T,
|
||||
/// Replica sub-topology.
|
||||
pub replica: T,
|
||||
}
|
||||
|
||||
/// Blanket impl: Failover PostgreSQL if T provides PostgreSQL.
|
||||
/// Delegates reads to primary; deploys to both.
|
||||
#[async_trait]
|
||||
impl<T: PostgreSQL + Send + Sync + Clone> PostgreSQL for FailoverTopology<T> {
|
||||
async fn deploy(&self, config: &PostgreSQLConfig) -> Result<String, String> {
|
||||
// Orchestrate: Deploy primary first, then replica (e.g., via pg_basebackup).
|
||||
let primary_result = self.primary.deploy(config).await?;
|
||||
let replica_result = self.replica.deploy(config).await?;
|
||||
Ok(format!("Failover PG deployed: {} | {}", primary_result, replica_result))
|
||||
}
|
||||
|
||||
async fn get_replication_certs(&self, cluster_name: &str) -> Result<ReplicationCerts, String> {
|
||||
// Delegate to primary (replica follows).
|
||||
self.primary.get_replication_certs(cluster_name).await
|
||||
}
|
||||
}
|
||||
|
||||
/// Blanket impl: Failover Docker if T provides Docker.
|
||||
#[async_trait]
|
||||
impl<T: Docker + Send + Sync + Clone> Docker for FailoverTopology<T> {
|
||||
async fn deploy_docker(&self) -> Result<String, String> {
|
||||
// Orchestrate across primary + replica.
|
||||
let primary_result = self.primary.deploy_docker().await?;
|
||||
let replica_result = self.replica.deploy_docker().await?;
|
||||
Ok(format!("Failover Docker deployed: {} | {}", primary_result, replica_result))
|
||||
}
|
||||
}
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() {
|
||||
let config = PostgreSQLConfig;
|
||||
|
||||
println!("=== ✅ PostgreSQL Failover (K8sAnywhere supports PG) ===");
|
||||
let pg_failover = FailoverTopology {
|
||||
primary: K8sAnywhereTopology,
|
||||
replica: K8sAnywhereTopology,
|
||||
};
|
||||
let result = pg_failover.deploy(&config).await.unwrap();
|
||||
println!("Result: {}", result);
|
||||
|
||||
println!("\n=== ✅ Docker Failover (LinuxHost supports Docker) ===");
|
||||
let docker_failover = FailoverTopology {
|
||||
primary: LinuxHostTopology,
|
||||
replica: LinuxHostTopology,
|
||||
};
|
||||
let result = docker_failover.deploy_docker().await.unwrap();
|
||||
println!("Result: {}", result);
|
||||
|
||||
println!("\n=== ❌ Would fail to compile (K8sAnywhere !: Docker) ===");
|
||||
// let invalid = FailoverTopology {
|
||||
// primary: K8sAnywhereTopology,
|
||||
// replica: K8sAnywhereTopology,
|
||||
// };
|
||||
// invalid.deploy_docker().await.unwrap(); // Error: `K8sAnywhereTopology: Docker` not satisfied!
|
||||
// Very clear error message:
|
||||
// error[E0599]: the method `deploy_docker` exists for struct `FailoverTopology<K8sAnywhereTopology>`, but its trait bounds were not satisfied
|
||||
// --> src/main.rs:90:9
|
||||
// |
|
||||
// 4 | pub struct FailoverTopology<T> {
|
||||
// | ------------------------------ method `deploy_docker` not found for this struct because it doesn't satisfy `FailoverTopology<K8sAnywhereTopology>: Docker`
|
||||
// ...
|
||||
// 37 | struct K8sAnywhereTopology;
|
||||
// | -------------------------- doesn't satisfy `K8sAnywhereTopology: Docker`
|
||||
// ...
|
||||
// 90 | invalid.deploy_docker(); // `T: Docker` bound unsatisfied
|
||||
// | ^^^^^^^^^^^^^ method cannot be called on `FailoverTopology<K8sAnywhereTopology>` due to unsatisfied trait bounds
|
||||
// |
|
||||
// note: trait bound `K8sAnywhereTopology: Docker` was not satisfied
|
||||
// --> src/main.rs:61:9
|
||||
// |
|
||||
// 61 | impl<T: Docker + Send + Sync> Docker for FailoverTopology<T> {
|
||||
// | ^^^^^^ ------ -------------------
|
||||
// | |
|
||||
// | unsatisfied trait bound introduced here
|
||||
// note: the trait `Docker` must be implemented
|
||||
}
|
||||
|
||||
@@ -56,7 +56,7 @@ enum ExecutionMode {
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct BrocadeInfo {
|
||||
os: BrocadeOs,
|
||||
version: String,
|
||||
_version: String,
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
@@ -263,7 +263,7 @@ async fn get_brocade_info(session: &mut BrocadeSession) -> Result<BrocadeInfo, E
|
||||
|
||||
return Ok(BrocadeInfo {
|
||||
os: BrocadeOs::NetworkOperatingSystem,
|
||||
version,
|
||||
_version: version,
|
||||
});
|
||||
} else if output.contains("ICX") {
|
||||
let re = Regex::new(r"(?m)^\s*SW: Version\s*(?P<version>[a-zA-Z0-9.\-]+)")
|
||||
@@ -276,7 +276,7 @@ async fn get_brocade_info(session: &mut BrocadeSession) -> Result<BrocadeInfo, E
|
||||
|
||||
return Ok(BrocadeInfo {
|
||||
os: BrocadeOs::FastIron,
|
||||
version,
|
||||
_version: version,
|
||||
});
|
||||
}
|
||||
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
# Design Document: Harmony PostgreSQL Module
|
||||
|
||||
**Status:** Draft
|
||||
**Last Updated:** 2023-10-27
|
||||
**Last Updated:** 2025-12-01
|
||||
**Context:** Multi-site Data Replication & Orchestration
|
||||
|
||||
## 1. Overview
|
||||
|
||||
18
examples/multisite_postgres/Cargo.toml
Normal file
18
examples/multisite_postgres/Cargo.toml
Normal file
@@ -0,0 +1,18 @@
|
||||
[package]
|
||||
name = "example-multisite-postgres"
|
||||
edition = "2024"
|
||||
version.workspace = true
|
||||
readme.workspace = true
|
||||
license.workspace = true
|
||||
publish = false
|
||||
|
||||
[dependencies]
|
||||
harmony = { path = "../../harmony" }
|
||||
harmony_cli = { path = "../../harmony_cli" }
|
||||
harmony_types = { path = "../../harmony_types" }
|
||||
cidr = { workspace = true }
|
||||
tokio = { workspace = true }
|
||||
harmony_macros = { path = "../../harmony_macros" }
|
||||
log = { workspace = true }
|
||||
env_logger = { workspace = true }
|
||||
url = { workspace = true }
|
||||
3
examples/multisite_postgres/env_example.sh
Normal file
3
examples/multisite_postgres/env_example.sh
Normal file
@@ -0,0 +1,3 @@
|
||||
export HARMONY_FAILOVER_TOPOLOGY_K8S_PRIMARY="context=default/api-your-openshift-cluster:6443/kube:admin"
|
||||
export HARMONY_FAILOVER_TOPOLOGY_K8S_REPLICA="context=someuser/somecluster"
|
||||
export RUST_LOG="harmony=debug"
|
||||
28
examples/multisite_postgres/src/main.rs
Normal file
28
examples/multisite_postgres/src/main.rs
Normal file
@@ -0,0 +1,28 @@
|
||||
use harmony::{
|
||||
inventory::Inventory,
|
||||
modules::postgresql::{PublicPostgreSQLScore, capability::PostgreSQLConfig},
|
||||
topology::{FailoverTopology, K8sAnywhereTopology},
|
||||
};
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() {
|
||||
// env_logger::init();
|
||||
let postgres = PublicPostgreSQLScore {
|
||||
config: PostgreSQLConfig {
|
||||
cluster_name: "harmony-postgres-example".to_string(), // Override default name
|
||||
namespace: "harmony-public-postgres".to_string(),
|
||||
..Default::default() // Use harmony defaults, they are based on CNPG's default values :
|
||||
// "default" namespace, 1 instance, 1Gi storage
|
||||
},
|
||||
hostname: "postgrestest.sto1.nationtech.io".to_string(),
|
||||
};
|
||||
|
||||
harmony_cli::run(
|
||||
Inventory::autoload(),
|
||||
FailoverTopology::<K8sAnywhereTopology>::from_env(),
|
||||
vec![Box::new(postgres)],
|
||||
None,
|
||||
)
|
||||
.await
|
||||
.unwrap();
|
||||
}
|
||||
@@ -1,4 +1,4 @@
|
||||
use std::{collections::HashMap, str::FromStr};
|
||||
use std::str::FromStr;
|
||||
|
||||
use harmony::{
|
||||
inventory::Inventory,
|
||||
|
||||
18
examples/operatorhub_catalog/Cargo.toml
Normal file
18
examples/operatorhub_catalog/Cargo.toml
Normal file
@@ -0,0 +1,18 @@
|
||||
[package]
|
||||
name = "example-operatorhub-catalogsource"
|
||||
edition = "2024"
|
||||
version.workspace = true
|
||||
readme.workspace = true
|
||||
license.workspace = true
|
||||
publish = false
|
||||
|
||||
[dependencies]
|
||||
harmony = { path = "../../harmony" }
|
||||
harmony_cli = { path = "../../harmony_cli" }
|
||||
harmony_types = { path = "../../harmony_types" }
|
||||
cidr = { workspace = true }
|
||||
tokio = { workspace = true }
|
||||
harmony_macros = { path = "../../harmony_macros" }
|
||||
log = { workspace = true }
|
||||
env_logger = { workspace = true }
|
||||
url = { workspace = true }
|
||||
22
examples/operatorhub_catalog/src/main.rs
Normal file
22
examples/operatorhub_catalog/src/main.rs
Normal file
@@ -0,0 +1,22 @@
|
||||
use std::str::FromStr;
|
||||
|
||||
use harmony::{
|
||||
inventory::Inventory,
|
||||
modules::{k8s::apps::OperatorHubCatalogSourceScore, postgresql::CloudNativePgOperatorScore},
|
||||
topology::K8sAnywhereTopology,
|
||||
};
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() {
|
||||
let operatorhub_catalog = OperatorHubCatalogSourceScore::default();
|
||||
let cnpg_operator = CloudNativePgOperatorScore::default();
|
||||
|
||||
harmony_cli::run(
|
||||
Inventory::autoload(),
|
||||
K8sAnywhereTopology::from_env(),
|
||||
vec![Box::new(operatorhub_catalog), Box::new(cnpg_operator)],
|
||||
None,
|
||||
)
|
||||
.await
|
||||
.unwrap();
|
||||
}
|
||||
18
examples/postgresql/Cargo.toml
Normal file
18
examples/postgresql/Cargo.toml
Normal file
@@ -0,0 +1,18 @@
|
||||
[package]
|
||||
name = "example-postgresql"
|
||||
edition = "2024"
|
||||
version.workspace = true
|
||||
readme.workspace = true
|
||||
license.workspace = true
|
||||
publish = false
|
||||
|
||||
[dependencies]
|
||||
harmony = { path = "../../harmony" }
|
||||
harmony_cli = { path = "../../harmony_cli" }
|
||||
harmony_types = { path = "../../harmony_types" }
|
||||
cidr = { workspace = true }
|
||||
tokio = { workspace = true }
|
||||
harmony_macros = { path = "../../harmony_macros" }
|
||||
log = { workspace = true }
|
||||
env_logger = { workspace = true }
|
||||
url = { workspace = true }
|
||||
26
examples/postgresql/src/main.rs
Normal file
26
examples/postgresql/src/main.rs
Normal file
@@ -0,0 +1,26 @@
|
||||
use harmony::{
|
||||
inventory::Inventory,
|
||||
modules::postgresql::{PostgreSQLScore, capability::PostgreSQLConfig},
|
||||
topology::K8sAnywhereTopology,
|
||||
};
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() {
|
||||
let postgresql = PostgreSQLScore {
|
||||
config: PostgreSQLConfig {
|
||||
cluster_name: "harmony-postgres-example".to_string(), // Override default name
|
||||
namespace: "harmony-postgres-example".to_string(),
|
||||
..Default::default() // Use harmony defaults, they are based on CNPG's default values :
|
||||
// "default" namespace, 1 instance, 1Gi storage
|
||||
},
|
||||
};
|
||||
|
||||
harmony_cli::run(
|
||||
Inventory::autoload(),
|
||||
K8sAnywhereTopology::from_env(),
|
||||
vec![Box::new(postgresql)],
|
||||
None,
|
||||
)
|
||||
.await
|
||||
.unwrap();
|
||||
}
|
||||
18
examples/public_postgres/Cargo.toml
Normal file
18
examples/public_postgres/Cargo.toml
Normal file
@@ -0,0 +1,18 @@
|
||||
[package]
|
||||
name = "example-public-postgres"
|
||||
edition = "2024"
|
||||
version.workspace = true
|
||||
readme.workspace = true
|
||||
license.workspace = true
|
||||
publish = false
|
||||
|
||||
[dependencies]
|
||||
harmony = { path = "../../harmony" }
|
||||
harmony_cli = { path = "../../harmony_cli" }
|
||||
harmony_types = { path = "../../harmony_types" }
|
||||
cidr = { workspace = true }
|
||||
tokio = { workspace = true }
|
||||
harmony_macros = { path = "../../harmony_macros" }
|
||||
log = { workspace = true }
|
||||
env_logger = { workspace = true }
|
||||
url = { workspace = true }
|
||||
38
examples/public_postgres/src/main.rs
Normal file
38
examples/public_postgres/src/main.rs
Normal file
@@ -0,0 +1,38 @@
|
||||
use harmony::{
|
||||
inventory::Inventory,
|
||||
modules::postgresql::{
|
||||
K8sPostgreSQLScore, PostgreSQLConnectionScore, PublicPostgreSQLScore,
|
||||
capability::PostgreSQLConfig,
|
||||
},
|
||||
topology::K8sAnywhereTopology,
|
||||
};
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() {
|
||||
let postgres = PublicPostgreSQLScore {
|
||||
config: PostgreSQLConfig {
|
||||
cluster_name: "harmony-postgres-example".to_string(), // Override default name
|
||||
namespace: "harmony-public-postgres".to_string(),
|
||||
..Default::default() // Use harmony defaults, they are based on CNPG's default values :
|
||||
// 1 instance, 1Gi storage
|
||||
},
|
||||
hostname: "postgrestest.sto1.nationtech.io".to_string(),
|
||||
};
|
||||
|
||||
let test_connection = PostgreSQLConnectionScore {
|
||||
name: "harmony-postgres-example".to_string(),
|
||||
namespace: "harmony-public-postgres".to_string(),
|
||||
cluster_name: "harmony-postgres-example".to_string(),
|
||||
hostname: Some("postgrestest.sto1.nationtech.io".to_string()),
|
||||
port_override: Some(443),
|
||||
};
|
||||
|
||||
harmony_cli::run(
|
||||
Inventory::autoload(),
|
||||
K8sAnywhereTopology::from_env(),
|
||||
vec![Box::new(postgres), Box::new(test_connection)],
|
||||
None,
|
||||
)
|
||||
.await
|
||||
.unwrap();
|
||||
}
|
||||
@@ -152,6 +152,12 @@ pub struct InterpretError {
|
||||
msg: String,
|
||||
}
|
||||
|
||||
impl From<InterpretError> for String {
|
||||
fn from(e: InterpretError) -> String {
|
||||
format!("InterpretError : {}", e.msg)
|
||||
}
|
||||
}
|
||||
|
||||
impl std::fmt::Display for InterpretError {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
f.write_str(&self.msg)
|
||||
|
||||
64
harmony/src/domain/topology/failover.rs
Normal file
64
harmony/src/domain/topology/failover.rs
Normal file
@@ -0,0 +1,64 @@
|
||||
use async_trait::async_trait;
|
||||
|
||||
use crate::topology::k8s_anywhere::K8sAnywhereConfig;
|
||||
use crate::topology::{K8sAnywhereTopology, PreparationError, PreparationOutcome, Topology};
|
||||
|
||||
pub struct FailoverTopology<T> {
|
||||
pub primary: T,
|
||||
pub replica: T,
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl<T: Topology + Send + Sync> Topology for FailoverTopology<T> {
|
||||
fn name(&self) -> &str {
|
||||
"FailoverTopology"
|
||||
}
|
||||
|
||||
async fn ensure_ready(&self) -> Result<PreparationOutcome, PreparationError> {
|
||||
let primary_outcome = self.primary.ensure_ready().await?;
|
||||
let replica_outcome = self.replica.ensure_ready().await?;
|
||||
|
||||
match (primary_outcome, replica_outcome) {
|
||||
(PreparationOutcome::Noop, PreparationOutcome::Noop) => Ok(PreparationOutcome::Noop),
|
||||
(p, r) => {
|
||||
let mut details = Vec::new();
|
||||
if let PreparationOutcome::Success { details: d } = p {
|
||||
details.push(format!("Primary: {}", d));
|
||||
}
|
||||
if let PreparationOutcome::Success { details: d } = r {
|
||||
details.push(format!("Replica: {}", d));
|
||||
}
|
||||
Ok(PreparationOutcome::Success {
|
||||
details: details.join(", "),
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl FailoverTopology<K8sAnywhereTopology> {
|
||||
/// Creates a new `FailoverTopology` from environment variables.
|
||||
///
|
||||
/// Expects two environment variables:
|
||||
/// - `HARMONY_FAILOVER_TOPOLOGY_K8S_PRIMARY`: Comma-separated `key=value` pairs, e.g.,
|
||||
/// `kubeconfig=/path/to/primary.kubeconfig,context=primary-ctx`
|
||||
/// - `HARMONY_FAILOVER_TOPOLOGY_K8S_REPLICA`: Same format for the replica.
|
||||
///
|
||||
/// Parses `kubeconfig` (path to kubeconfig file) and `context` (Kubernetes context name),
|
||||
/// and constructs `K8sAnywhereConfig` with local installs disabled (`use_local_k3d=false`,
|
||||
/// `autoinstall=false`, `use_system_kubeconfig=false`).
|
||||
/// `harmony_profile` is read from `HARMONY_PROFILE` env or defaults to `"dev"`.
|
||||
///
|
||||
/// Panics if required env vars are missing or malformed.
|
||||
pub fn from_env() -> Self {
|
||||
let primary_config =
|
||||
K8sAnywhereConfig::remote_k8s_from_env_var("HARMONY_FAILOVER_TOPOLOGY_K8S_PRIMARY");
|
||||
let replica_config =
|
||||
K8sAnywhereConfig::remote_k8s_from_env_var("HARMONY_FAILOVER_TOPOLOGY_K8S_REPLICA");
|
||||
|
||||
let primary = K8sAnywhereTopology::with_config(primary_config);
|
||||
let replica = K8sAnywhereTopology::with_config(replica_config);
|
||||
|
||||
Self { primary, replica }
|
||||
}
|
||||
}
|
||||
@@ -451,7 +451,20 @@ impl K8sClient {
|
||||
{
|
||||
let mut result = Vec::new();
|
||||
for r in resource.iter() {
|
||||
result.push(self.apply(r, ns).await?);
|
||||
let apply_result = self.apply(r, ns).await;
|
||||
if apply_result.is_err() {
|
||||
// NOTE : We should be careful about this one, it may leak sensitive information in
|
||||
// logs
|
||||
// Maybe just reducing it to debug would be enough as we already know debug logs
|
||||
// are unsafe.
|
||||
// But keeping it at warn makes it much easier to understand what is going on. So be it for now.
|
||||
warn!(
|
||||
"Failed to apply k8s resource : {}",
|
||||
serde_json::to_string_pretty(r).map_err(|e| Error::SerdeError(e))?
|
||||
);
|
||||
}
|
||||
|
||||
result.push(apply_result?);
|
||||
}
|
||||
|
||||
Ok(result)
|
||||
@@ -618,6 +631,23 @@ impl K8sClient {
|
||||
}
|
||||
|
||||
pub async fn from_kubeconfig(path: &str) -> Option<K8sClient> {
|
||||
Self::from_kubeconfig_with_opts(path, &KubeConfigOptions::default()).await
|
||||
}
|
||||
|
||||
pub async fn from_kubeconfig_with_context(
|
||||
path: &str,
|
||||
context: Option<String>,
|
||||
) -> Option<K8sClient> {
|
||||
let mut opts = KubeConfigOptions::default();
|
||||
opts.context = context;
|
||||
|
||||
Self::from_kubeconfig_with_opts(path, &opts).await
|
||||
}
|
||||
|
||||
pub async fn from_kubeconfig_with_opts(
|
||||
path: &str,
|
||||
opts: &KubeConfigOptions,
|
||||
) -> Option<K8sClient> {
|
||||
let k = match Kubeconfig::read_from(path) {
|
||||
Ok(k) => k,
|
||||
Err(e) => {
|
||||
@@ -625,13 +655,9 @@ impl K8sClient {
|
||||
return None;
|
||||
}
|
||||
};
|
||||
|
||||
Some(K8sClient::new(
|
||||
Client::try_from(
|
||||
Config::from_custom_kubeconfig(k, &KubeConfigOptions::default())
|
||||
.await
|
||||
.unwrap(),
|
||||
)
|
||||
.unwrap(),
|
||||
Client::try_from(Config::from_custom_kubeconfig(k, &opts).await.unwrap()).unwrap(),
|
||||
))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -2,6 +2,7 @@ use std::{collections::BTreeMap, process::Command, sync::Arc, time::Duration};
|
||||
|
||||
use async_trait::async_trait;
|
||||
use base64::{Engine, engine::general_purpose};
|
||||
use harmony_types::rfc1123::Rfc1123Name;
|
||||
use k8s_openapi::api::{
|
||||
core::v1::Secret,
|
||||
rbac::v1::{ClusterRoleBinding, RoleRef, Subject},
|
||||
@@ -34,16 +35,17 @@ use crate::{
|
||||
service_monitor::ServiceMonitor,
|
||||
},
|
||||
},
|
||||
okd::route::OKDTlsPassthroughScore,
|
||||
prometheus::{
|
||||
k8s_prometheus_alerting_score::K8sPrometheusCRDAlertingScore,
|
||||
prometheus::PrometheusMonitoring, rhob_alerting_score::RHOBAlertingScore,
|
||||
},
|
||||
},
|
||||
score::Score,
|
||||
topology::ingress::Ingress,
|
||||
topology::{TlsRoute, TlsRouter, ingress::Ingress},
|
||||
};
|
||||
|
||||
use super::{
|
||||
use super::super::{
|
||||
DeploymentTarget, HelmCommand, K8sclient, MultiTargetTopology, PreparationError,
|
||||
PreparationOutcome, Topology,
|
||||
k8s::K8sClient,
|
||||
@@ -102,6 +104,41 @@ impl K8sclient for K8sAnywhereTopology {
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl TlsRouter for K8sAnywhereTopology {
|
||||
async fn get_wildcard_domain(&self) -> Result<Option<String>, String> {
|
||||
todo!()
|
||||
}
|
||||
|
||||
/// Returns the port that this router exposes externally.
|
||||
async fn get_router_port(&self) -> u16 {
|
||||
// TODO un-hardcode this :)
|
||||
443
|
||||
}
|
||||
|
||||
async fn install_route(&self, route: TlsRoute) -> Result<(), String> {
|
||||
let distro = self
|
||||
.get_k8s_distribution()
|
||||
.await
|
||||
.map_err(|e| format!("Could not get k8s distribution {e}"))?;
|
||||
|
||||
match distro {
|
||||
KubernetesDistribution::OpenshiftFamily => {
|
||||
OKDTlsPassthroughScore {
|
||||
name: Rfc1123Name::try_from(route.backend_info_string().as_str())?,
|
||||
route,
|
||||
}
|
||||
.interpret(&Inventory::empty(), self)
|
||||
.await?;
|
||||
Ok(())
|
||||
}
|
||||
KubernetesDistribution::K3sFamily | KubernetesDistribution::Default => Err(format!(
|
||||
"Distribution not supported yet for Tlsrouter {distro:?}"
|
||||
)),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl Grafana for K8sAnywhereTopology {
|
||||
async fn ensure_grafana_operator(
|
||||
@@ -343,6 +380,7 @@ impl K8sAnywhereTopology {
|
||||
pub async fn get_k8s_distribution(&self) -> Result<&KubernetesDistribution, PreparationError> {
|
||||
self.k8s_distribution
|
||||
.get_or_try_init(async || {
|
||||
debug!("Trying to detect k8s distribution");
|
||||
let client = self.k8s_client().await.unwrap();
|
||||
|
||||
let discovery = client.discovery().await.map_err(|e| {
|
||||
@@ -358,14 +396,17 @@ impl K8sAnywhereTopology {
|
||||
.groups()
|
||||
.any(|g| g.name() == "project.openshift.io")
|
||||
{
|
||||
info!("Found KubernetesDistribution OpenshiftFamily");
|
||||
return Ok(KubernetesDistribution::OpenshiftFamily);
|
||||
}
|
||||
|
||||
// K3d / K3s
|
||||
if version.git_version.contains("k3s") {
|
||||
info!("Found KubernetesDistribution K3sFamily");
|
||||
return Ok(KubernetesDistribution::K3sFamily);
|
||||
}
|
||||
|
||||
info!("Could not identify KubernetesDistribution, using Default");
|
||||
return Ok(KubernetesDistribution::Default);
|
||||
})
|
||||
.await
|
||||
@@ -613,7 +654,7 @@ impl K8sAnywhereTopology {
|
||||
}
|
||||
|
||||
async fn try_load_kubeconfig(&self, path: &str) -> Option<K8sClient> {
|
||||
K8sClient::from_kubeconfig(path).await
|
||||
K8sClient::from_kubeconfig_with_context(path, self.config.k8s_context.clone()).await
|
||||
}
|
||||
|
||||
fn get_k3d_installation_score(&self) -> K3DInstallationScore {
|
||||
@@ -651,7 +692,14 @@ impl K8sAnywhereTopology {
|
||||
return Ok(Some(K8sState {
|
||||
client: Arc::new(client),
|
||||
source: K8sSource::Kubeconfig,
|
||||
message: format!("Loaded k8s client from kubeconfig {kubeconfig}"),
|
||||
message: format!(
|
||||
"Loaded k8s client from kubeconfig {kubeconfig} using context {}",
|
||||
self.config
|
||||
.k8s_context
|
||||
.as_ref()
|
||||
.map(|s| s.clone())
|
||||
.unwrap_or_default()
|
||||
),
|
||||
}));
|
||||
}
|
||||
None => {
|
||||
@@ -891,9 +939,71 @@ pub struct K8sAnywhereConfig {
|
||||
/// default: true
|
||||
pub use_local_k3d: bool,
|
||||
pub harmony_profile: String,
|
||||
|
||||
/// Name of the kubeconfig context to use.
|
||||
///
|
||||
/// If None, it will use the current context.
|
||||
///
|
||||
/// If the context name is not found, it will fail to initialize.
|
||||
pub k8s_context: Option<String>,
|
||||
}
|
||||
|
||||
impl K8sAnywhereConfig {
|
||||
/// Reads environment variable `env_var` and parses its content:
/// comma-separated `key=value` pairs, e.g.
/// `kubeconfig=/path/to/primary.kubeconfig,context=primary-ctx`
///
/// Then creates a [`K8sAnywhereConfig`] from it with local installs disabled
/// (`use_local_k3d=false`, `autoinstall=false`, `use_system_kubeconfig=false`).
/// `harmony_profile` is read from the `HARMONY_PROFILE` env var or defaults to `"dev"`.
///
/// If no kubeconfig path is provided it will fall back to the system kubeconfig
/// (the ambient `KUBECONFIG` env var).
///
/// # Panics
///
/// Panics if `env_var` is not set. Malformed or unknown `key=value` pairs are
/// silently ignored, they do not panic (see `remote_k8s_from_env_var_with_profile`).
pub fn remote_k8s_from_env_var(env_var: &str) -> Self {
    Self::remote_k8s_from_env_var_with_profile(env_var, "HARMONY_PROFILE")
}
|
||||
|
||||
pub fn remote_k8s_from_env_var_with_profile(env_var: &str, profile_env_var: &str) -> Self {
|
||||
debug!("Looking for env var named : {env_var}");
|
||||
let env_var_value = std::env::var(env_var)
|
||||
.map_err(|e| format!("Missing required env var {env_var} : {e}"))
|
||||
.unwrap();
|
||||
info!("Initializing remote k8s from env var value : {env_var_value}");
|
||||
|
||||
let mut kubeconfig: Option<String> = None;
|
||||
let mut k8s_context: Option<String> = None;
|
||||
|
||||
for part in env_var_value.split(',') {
|
||||
let kv: Vec<&str> = part.splitn(2, '=').collect();
|
||||
if kv.len() == 2 {
|
||||
match kv[0].trim() {
|
||||
"kubeconfig" => kubeconfig = Some(kv[1].trim().to_string()),
|
||||
"context" => k8s_context = Some(kv[1].trim().to_string()),
|
||||
_ => {}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
debug!("Found in {env_var} : kubeconfig {kubeconfig:?} and context {k8s_context:?}");
|
||||
|
||||
let use_system_kubeconfig = kubeconfig.is_none();
|
||||
|
||||
if let Some(kubeconfig_value) = std::env::var("KUBECONFIG").ok().map(|v| v.to_string()) {
|
||||
kubeconfig.get_or_insert(kubeconfig_value);
|
||||
}
|
||||
info!("Loading k8s environment with kubeconfig {kubeconfig:?} and context {k8s_context:?}");
|
||||
|
||||
K8sAnywhereConfig {
|
||||
kubeconfig,
|
||||
k8s_context,
|
||||
use_system_kubeconfig,
|
||||
autoinstall: false,
|
||||
use_local_k3d: false,
|
||||
harmony_profile: std::env::var(profile_env_var).unwrap_or_else(|_| "dev".to_string()),
|
||||
}
|
||||
}
|
||||
|
||||
fn from_env() -> Self {
|
||||
Self {
|
||||
kubeconfig: std::env::var("KUBECONFIG").ok().map(|v| v.to_string()),
|
||||
@@ -908,6 +1018,7 @@ impl K8sAnywhereConfig {
|
||||
),
|
||||
use_local_k3d: std::env::var("HARMONY_USE_LOCAL_K3D")
|
||||
.map_or_else(|_| true, |v| v.parse().ok().unwrap_or(true)),
|
||||
k8s_context: std::env::var("HARMONY_K8S_CONTEXT").ok(),
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1014,3 +1125,181 @@ impl Ingress for K8sAnywhereTopology {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;
    use std::sync::atomic::{AtomicUsize, Ordering};

    // Monotonic counter so each test gets a unique env var name and cannot
    // collide with a concurrently running test reading the same variable.
    static TEST_COUNTER: AtomicUsize = AtomicUsize::new(0);

    /// Sets environment variables with unique names to avoid concurrency issues between tests.
    /// Returns the names of the (config_var, profile_var) used.
    ///
    /// Passing `None` removes the variable instead of setting it.
    fn setup_env_vars(config_value: Option<&str>, profile_value: Option<&str>) -> (String, String) {
        let id = TEST_COUNTER.fetch_add(1, Ordering::SeqCst);
        let config_var = format!("TEST_VAR_{}", id);
        let profile_var = format!("TEST_PROFILE_{}", id);

        // `set_var`/`remove_var` are `unsafe` on recent Rust editions because
        // mutating the process environment is not thread-safe.
        unsafe {
            if let Some(v) = config_value {
                std::env::set_var(&config_var, v);
            } else {
                std::env::remove_var(&config_var);
            }

            if let Some(v) = profile_value {
                std::env::set_var(&profile_var, v);
            } else {
                std::env::remove_var(&profile_var);
            }
        }

        (config_var, profile_var)
    }

    /// Runs a test closure on a dedicated thread.
    ///
    /// NOTE(review): environment variables are process-global, so spawning a
    /// thread does NOT isolate `set_var`/`remove_var` from other tests — it
    /// only contains panics in the closure. Tests mutating shared variables
    /// (e.g. `KUBECONFIG`) can still race under a parallel test runner;
    /// consider `--test-threads=1` or a shared lock if these become flaky.
    fn run_in_isolated_env<F>(f: F)
    where
        F: FnOnce() + Send + 'static,
    {
        let handle = std::thread::spawn(f);
        handle.join().expect("Test thread panicked");
    }

    // Full input: both keys present plus an explicit profile variable.
    #[test]
    fn test_remote_k8s_from_env_var_full() {
        let (config_var, profile_var) =
            setup_env_vars(Some("kubeconfig=/foo.kc,context=bar"), Some("testprof"));

        let cfg =
            K8sAnywhereConfig::remote_k8s_from_env_var_with_profile(&config_var, &profile_var);

        assert_eq!(cfg.kubeconfig.as_deref(), Some("/foo.kc"));
        assert_eq!(cfg.k8s_context.as_deref(), Some("bar"));
        assert_eq!(cfg.harmony_profile, "testprof");
        // Remote configs must never trigger local installs.
        assert!(!cfg.use_local_k3d);
        assert!(!cfg.autoinstall);
        assert!(!cfg.use_system_kubeconfig);
    }

    // Missing profile var falls back to the "dev" default.
    #[test]
    fn test_remote_k8s_from_env_var_only_kubeconfig() {
        let (config_var, profile_var) = setup_env_vars(Some("kubeconfig=/foo.kc"), None);

        let cfg =
            K8sAnywhereConfig::remote_k8s_from_env_var_with_profile(&config_var, &profile_var);

        assert_eq!(cfg.kubeconfig.as_deref(), Some("/foo.kc"));
        assert_eq!(cfg.k8s_context, None);
        assert_eq!(cfg.harmony_profile, "dev");
    }

    // With no kubeconfig key and no KUBECONFIG env var, kubeconfig stays None.
    #[test]
    fn test_remote_k8s_from_env_var_only_context() {
        run_in_isolated_env(|| {
            unsafe {
                std::env::remove_var("KUBECONFIG");
            }
            let (config_var, profile_var) = setup_env_vars(Some("context=bar"), None);

            let cfg =
                K8sAnywhereConfig::remote_k8s_from_env_var_with_profile(&config_var, &profile_var);

            assert_eq!(cfg.kubeconfig, None);
            assert_eq!(cfg.k8s_context.as_deref(), Some("bar"));
        });
    }

    // Unknown keys are ignored; keys and values are trimmed of whitespace.
    #[test]
    fn test_remote_k8s_from_env_var_unknown_key_trim() {
        run_in_isolated_env(|| {
            unsafe {
                std::env::remove_var("KUBECONFIG");
            }
            let (config_var, profile_var) = setup_env_vars(
                Some(" unknown=bla , kubeconfig= /foo.kc ,context= bar "),
                None,
            );

            let cfg =
                K8sAnywhereConfig::remote_k8s_from_env_var_with_profile(&config_var, &profile_var);

            assert_eq!(cfg.kubeconfig.as_deref(), Some("/foo.kc"));
            assert_eq!(cfg.k8s_context.as_deref(), Some("bar"));
        });
    }

    #[test]
    fn test_remote_k8s_from_env_var_empty_malformed() {
        run_in_isolated_env(|| {
            unsafe {
                std::env::remove_var("KUBECONFIG");
            }
            let (config_var, profile_var) = setup_env_vars(Some("malformed,no=,equal"), None);

            let cfg =
                K8sAnywhereConfig::remote_k8s_from_env_var_with_profile(&config_var, &profile_var);

            // Unknown/malformed ignored, defaults to None
            assert_eq!(cfg.kubeconfig, None);
            assert_eq!(cfg.k8s_context, None);
        });
    }

    // When no explicit kubeconfig is given, the ambient KUBECONFIG is used.
    #[test]
    fn test_remote_k8s_from_env_var_kubeconfig_fallback() {
        run_in_isolated_env(|| {
            unsafe {
                std::env::set_var("KUBECONFIG", "/fallback/path");
            }
            let (config_var, profile_var) = setup_env_vars(Some("context=bar"), None);

            let cfg =
                K8sAnywhereConfig::remote_k8s_from_env_var_with_profile(&config_var, &profile_var);

            assert_eq!(cfg.kubeconfig.as_deref(), Some("/fallback/path"));
            assert_eq!(cfg.k8s_context.as_deref(), Some("bar"));
        });
    }

    #[test]
    fn test_remote_k8s_from_env_var_kubeconfig_no_fallback_if_provided() {
        run_in_isolated_env(|| {
            unsafe {
                std::env::set_var("KUBECONFIG", "/fallback/path");
            }
            let (config_var, profile_var) =
                setup_env_vars(Some("kubeconfig=/primary/path,context=bar"), None);

            let cfg =
                K8sAnywhereConfig::remote_k8s_from_env_var_with_profile(&config_var, &profile_var);

            // Primary path should take precedence
            assert_eq!(cfg.kubeconfig.as_deref(), Some("/primary/path"));
            assert_eq!(cfg.k8s_context.as_deref(), Some("bar"));
        });
    }

    // The documented panic on a missing env var.
    #[test]
    #[should_panic(expected = "Missing required env var")]
    fn test_remote_k8s_from_env_var_missing() {
        let (config_var, profile_var) = setup_env_vars(None, None);
        K8sAnywhereConfig::remote_k8s_from_env_var_with_profile(&config_var, &profile_var);
    }

    // Context values may themselves contain '=' ':' '/' (OpenShift-style names).
    #[test]
    fn test_remote_k8s_from_env_var_context_key() {
        let (config_var, profile_var) = setup_env_vars(
            Some("context=default/api-sto1-harmony-mcd:6443/kube:admin"),
            None,
        );

        let cfg =
            K8sAnywhereConfig::remote_k8s_from_env_var_with_profile(&config_var, &profile_var);

        assert_eq!(
            cfg.k8s_context.as_deref(),
            Some("default/api-sto1-harmony-mcd:6443/kube:admin")
        );
    }
}
|
||||
3
harmony/src/domain/topology/k8s_anywhere/mod.rs
Normal file
3
harmony/src/domain/topology/k8s_anywhere/mod.rs
Normal file
@@ -0,0 +1,3 @@
|
||||
//! K8sAnywhere topology module: the topology itself plus per-capability impls.

mod k8s_anywhere;
// PostgreSQL capability implementation for `K8sAnywhereTopology`.
mod postgres;
pub use k8s_anywhere::*;
|
||||
125
harmony/src/domain/topology/k8s_anywhere/postgres.rs
Normal file
125
harmony/src/domain/topology/k8s_anywhere/postgres.rs
Normal file
@@ -0,0 +1,125 @@
|
||||
use async_trait::async_trait;
|
||||
|
||||
use crate::{
|
||||
interpret::Outcome,
|
||||
inventory::Inventory,
|
||||
modules::postgresql::{
|
||||
K8sPostgreSQLScore,
|
||||
capability::{PostgreSQL, PostgreSQLConfig, PostgreSQLEndpoint, ReplicationCerts},
|
||||
},
|
||||
score::Score,
|
||||
topology::{K8sAnywhereTopology, K8sclient},
|
||||
};
|
||||
|
||||
use k8s_openapi::api::core::v1::{Secret, Service};
|
||||
use log::info;
|
||||
|
||||
#[async_trait]
|
||||
impl PostgreSQL for K8sAnywhereTopology {
|
||||
async fn deploy(&self, config: &PostgreSQLConfig) -> Result<String, String> {
|
||||
K8sPostgreSQLScore {
|
||||
config: config.clone(),
|
||||
}
|
||||
.interpret(&Inventory::empty(), self)
|
||||
.await
|
||||
.map_err(|e| format!("Failed to deploy k8s postgresql : {e}"))?;
|
||||
|
||||
Ok(config.cluster_name.clone())
|
||||
}
|
||||
|
||||
/// Extracts PostgreSQL-specific replication certs (PEM format) from a deployed primary cluster.
|
||||
/// Abstracts away storage/retrieval details (e.g., secrets, files).
|
||||
async fn get_replication_certs(&self, config: &PostgreSQLConfig) -> Result<ReplicationCerts, String> {
|
||||
let cluster_name = &config.cluster_name;
|
||||
let namespace = &config.namespace;
|
||||
let k8s_client = self.k8s_client().await.map_err(|e| e.to_string())?;
|
||||
|
||||
let replication_secret_name = format!("{cluster_name}-replication");
|
||||
let replication_secret = k8s_client
|
||||
.get_resource::<Secret>(&replication_secret_name, Some(namespace))
|
||||
.await
|
||||
.map_err(|e| format!("Failed to get {replication_secret_name}: {e}"))?
|
||||
.ok_or_else(|| format!("Replication secret '{replication_secret_name}' not found"))?;
|
||||
|
||||
let ca_secret_name = format!("{cluster_name}-ca");
|
||||
let ca_secret = k8s_client
|
||||
.get_resource::<Secret>(&ca_secret_name, Some(namespace))
|
||||
.await
|
||||
.map_err(|e| format!("Failed to get {ca_secret_name}: {e}"))?
|
||||
.ok_or_else(|| format!("CA secret '{ca_secret_name}' not found"))?;
|
||||
|
||||
let replication_data = replication_secret
|
||||
.data
|
||||
.as_ref()
|
||||
.ok_or("Replication secret has no data".to_string())?;
|
||||
let ca_data = ca_secret
|
||||
.data
|
||||
.as_ref()
|
||||
.ok_or("CA secret has no data".to_string())?;
|
||||
|
||||
let tls_key_bs = replication_data
|
||||
.get("tls.key")
|
||||
.ok_or("missing tls.key in replication secret".to_string())?;
|
||||
let tls_crt_bs = replication_data
|
||||
.get("tls.crt")
|
||||
.ok_or("missing tls.crt in replication secret".to_string())?;
|
||||
let ca_crt_bs = ca_data
|
||||
.get("ca.crt")
|
||||
.ok_or("missing ca.crt in CA secret".to_string())?;
|
||||
|
||||
let streaming_replica_key_pem = String::from_utf8_lossy(&tls_key_bs.0).to_string();
|
||||
let streaming_replica_cert_pem = String::from_utf8_lossy(&tls_crt_bs.0).to_string();
|
||||
let ca_cert_pem = String::from_utf8_lossy(&ca_crt_bs.0).to_string();
|
||||
|
||||
info!("Successfully extracted replication certs for cluster '{cluster_name}'");
|
||||
|
||||
Ok(ReplicationCerts {
|
||||
ca_cert_pem,
|
||||
streaming_replica_cert_pem,
|
||||
streaming_replica_key_pem,
|
||||
})
|
||||
}
|
||||
|
||||
/// Gets the internal/private endpoint (e.g., k8s service FQDN:5432) for the cluster.
|
||||
async fn get_endpoint(&self, config: &PostgreSQLConfig) -> Result<PostgreSQLEndpoint, String> {
|
||||
let cluster_name = &config.cluster_name;
|
||||
let namespace = &config.namespace;
|
||||
|
||||
let k8s_client = self.k8s_client().await.map_err(|e| e.to_string())?;
|
||||
|
||||
let service_name = format!("{cluster_name}-rw");
|
||||
let service = k8s_client
|
||||
.get_resource::<Service>(&service_name, Some(namespace))
|
||||
.await
|
||||
.map_err(|e| format!("Failed to get service '{service_name}': {e}"))?
|
||||
.ok_or_else(|| {
|
||||
format!("Service '{service_name}' not found for cluster '{cluster_name}")
|
||||
})?;
|
||||
|
||||
let ns = service
|
||||
.metadata
|
||||
.namespace
|
||||
.as_deref()
|
||||
.unwrap_or("default")
|
||||
.to_string();
|
||||
let host = format!("{service_name}.{ns}.svc.cluster.local");
|
||||
|
||||
info!("Internal endpoint for '{cluster_name}': {host}:5432");
|
||||
|
||||
Ok(PostgreSQLEndpoint { host, port: 5432 })
|
||||
}
|
||||
|
||||
// /// Gets the public/externally routable endpoint if configured (e.g., OKD Route:443 for TLS passthrough).
|
||||
// /// Returns None if no public endpoint (internal-only cluster).
|
||||
// /// UNSTABLE: This is opinionated for initial multisite use cases. Networking abstraction is complex
|
||||
// /// (cf. k8s Ingress -> Gateway API evolution); may move to higher-order Networking/PostgreSQLNetworking trait.
|
||||
// async fn get_public_endpoint(
|
||||
// &self,
|
||||
// cluster_name: &str,
|
||||
// ) -> Result<Option<PostgreSQLEndpoint>, String> {
|
||||
// // TODO: Implement OpenShift Route lookup targeting '{cluster_name}-rw' service on port 5432 with TLS passthrough
|
||||
// // For now, return None assuming internal-only access or manual route configuration
|
||||
// info!("Public endpoint lookup not implemented for '{cluster_name}', returning None");
|
||||
// Ok(None)
|
||||
// }
|
||||
}
|
||||
@@ -1,5 +1,7 @@
|
||||
mod failover;
|
||||
mod ha_cluster;
|
||||
pub mod ingress;
|
||||
pub use failover::*;
|
||||
use harmony_types::net::IpAddress;
|
||||
mod host_binding;
|
||||
mod http;
|
||||
@@ -13,7 +15,7 @@ pub use k8s_anywhere::*;
|
||||
pub use localhost::*;
|
||||
pub mod k8s;
|
||||
mod load_balancer;
|
||||
mod router;
|
||||
pub mod router;
|
||||
mod tftp;
|
||||
use async_trait::async_trait;
|
||||
pub use ha_cluster::*;
|
||||
|
||||
@@ -1,11 +1,20 @@
|
||||
use async_trait::async_trait;
|
||||
use cidr::Ipv4Cidr;
|
||||
use derive_new::new;
|
||||
use serde::Serialize;
|
||||
|
||||
use super::{IpAddress, LogicalHost};
|
||||
|
||||
/// Basic network router abstraction (L3 IP routing/gateway).
/// Distinguished from [`TlsRouter`] (L4 TLS passthrough).
pub trait Router: Send + Sync {
    /// Gateway IP address for this subnet/router.
    fn get_gateway(&self) -> IpAddress;

    /// CIDR block managed by this router.
    fn get_cidr(&self) -> Ipv4Cidr;

    /// Logical host associated with this router.
    fn get_host(&self) -> LogicalHost;
}
|
||||
|
||||
@@ -38,3 +47,81 @@ impl Router for UnmanagedRouter {
|
||||
todo!()
|
||||
}
|
||||
}
|
||||
|
||||
/// Desired state config for a TLS passthrough route.
/// Forwards external TLS (port 443) → backend service:target_port (no termination at router).
/// Inspired by CNPG multisite: exposes `-rw`/`-ro` services publicly via OKD Route/HAProxy/K8s
/// Gateway etc.
///
/// # Example
/// ```
/// use harmony::topology::router::TlsRoute;
/// let postgres_rw = TlsRoute {
///     hostname: "postgres-cluster-example.public.domain.io".to_string(),
///     backend: "postgres-cluster-example-rw".to_string(), // k8s Service or HAProxy upstream
///     target_port: 5432,
///     namespace: "default".to_string(), // environment/namespace the route lives in
/// };
/// ```
#[derive(Clone, Debug, Serialize)]
pub struct TlsRoute {
    /// Public hostname clients connect to (TLS SNI, port 443 implicit).
    /// Router matches this for passthrough forwarding.
    pub hostname: String,

    /// Backend/host identifier (k8s Service, HAProxy upstream, IP/FQDN, etc.).
    pub backend: String,

    /// Backend TCP port (Postgres: 5432).
    pub target_port: u16,

    /// The environment in which it lives.
    /// TODO clarify how we handle this in higher level abstractions. The namespace name is a
    /// direct mapping to k8s but that could be misleading for other implementations.
    pub namespace: String,
}
||||
|
||||
impl TlsRoute {
|
||||
pub fn to_string_short(&self) -> String {
|
||||
format!("{}-{}:{}", self.hostname, self.backend, self.target_port)
|
||||
}
|
||||
|
||||
pub fn backend_info_string(&self) -> String {
|
||||
format!("{}:{}", self.backend, self.target_port)
|
||||
}
|
||||
}
|
||||
|
||||
/// Installs and queries TLS passthrough routes (L4 TCP/SNI forwarding, no TLS termination).
/// Agnostic to impl: OKD Route, AWS NLB+HAProxy, k3s Envoy Gateway, Apache ProxyPass.
/// Used by PostgreSQL capability to expose CNPG clusters multisite (site1 → site2 replication).
///
/// # Usage
/// ```ignore
/// use harmony::topology::router::TlsRoute;
/// // After CNPG deploy, expose RW endpoint
/// async fn route() {
///     let topology = okd_topology();
///     let route = TlsRoute { /* ... */ };
///     topology.install_route(route).await; // OKD Route, HAProxy reload, etc.
/// }
/// ```
#[async_trait]
pub trait TlsRouter: Send + Sync {
    /// Provisions the route (idempotent where possible).
    /// Example: OKD Route{ host, to: backend:target_port, tls: {passthrough} };
    /// HAProxy frontend→backend "postgres-upstream".
    async fn install_route(&self, config: TlsRoute) -> Result<(), String>;

    /// Gets the base domain that can be used to deploy applications that will be automatically
    /// routed to this cluster.
    ///
    /// For example, if we have *.apps.nationtech.io pointing to a public load balancer, then this
    /// function would return
    ///
    /// ```
    /// let domain = Some(String::from("apps.nationtech.io"));
    /// ```
    async fn get_wildcard_domain(&self) -> Result<Option<String>, String>;

    /// Returns the port that this router exposes externally.
    async fn get_router_port(&self) -> u16;
}
|
||||
|
||||
@@ -17,6 +17,12 @@ use crate::{
|
||||
topology::{HostNetworkConfig, NetworkError, NetworkManager, k8s::K8sClient},
|
||||
};
|
||||
|
||||
/// TODO document properly the non-intuitive behavior or "roll forward only" of nmstate in general
|
||||
/// It is documented in nmstate official doc, but worth mentionning here :
|
||||
///
|
||||
/// - You create a bond, nmstate will apply it
|
||||
/// - You delete de bond from nmstate, it will NOT delete it
|
||||
/// - To delete it you have to update it with configuration set to null
|
||||
pub struct OpenShiftNmStateNetworkManager {
|
||||
k8s_client: Arc<K8sClient>,
|
||||
}
|
||||
@@ -31,6 +37,7 @@ impl std::fmt::Debug for OpenShiftNmStateNetworkManager {
|
||||
impl NetworkManager for OpenShiftNmStateNetworkManager {
|
||||
async fn ensure_network_manager_installed(&self) -> Result<(), NetworkError> {
|
||||
debug!("Installing NMState controller...");
|
||||
// TODO use operatorhub maybe?
|
||||
self.k8s_client.apply_url(url::Url::parse("https://github.com/nmstate/kubernetes-nmstate/releases/download/v0.84.0/nmstate.io_nmstates.yaml
|
||||
").unwrap(), Some("nmstate"))
|
||||
.await?;
|
||||
|
||||
@@ -1,11 +1,9 @@
|
||||
use async_trait::async_trait;
|
||||
use harmony_macros::hurl;
|
||||
use kube::{Api, api::GroupVersionKind};
|
||||
use log::{debug, warn};
|
||||
use kube::api::GroupVersionKind;
|
||||
use non_blank_string_rs::NonBlankString;
|
||||
use serde::Serialize;
|
||||
use serde::de::DeserializeOwned;
|
||||
use std::{process::Command, str::FromStr, sync::Arc};
|
||||
use std::{str::FromStr, sync::Arc};
|
||||
|
||||
use crate::{
|
||||
data::Version,
|
||||
@@ -13,10 +11,7 @@ use crate::{
|
||||
inventory::Inventory,
|
||||
modules::helm::chart::{HelmChartScore, HelmRepository},
|
||||
score::Score,
|
||||
topology::{
|
||||
HelmCommand, K8sclient, PreparationError, PreparationOutcome, Topology, ingress::Ingress,
|
||||
k8s::K8sClient,
|
||||
},
|
||||
topology::{HelmCommand, K8sclient, Topology, ingress::Ingress, k8s::K8sClient},
|
||||
};
|
||||
use harmony_types::id::Id;
|
||||
|
||||
|
||||
@@ -0,0 +1,157 @@
|
||||
use std::collections::BTreeMap;
|
||||
|
||||
use k8s_openapi::{
|
||||
api::core::v1::{Affinity, Toleration},
|
||||
apimachinery::pkg::apis::meta::v1::ObjectMeta,
|
||||
};
|
||||
use kube::CustomResource;
|
||||
use schemars::JsonSchema;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use serde_json::Value;
|
||||
|
||||
/// Spec for the OLM `CatalogSource` custom resource
/// (`operators.coreos.com/v1alpha1`, kind `CatalogSource`).
///
/// Schema validation is disabled (`schema = "disabled"`), so every field is a
/// plain `Option` mirroring the upstream CRD; unset fields are omitted from
/// the serialized manifest.
#[derive(CustomResource, Deserialize, Serialize, Clone, Debug)]
#[kube(
    group = "operators.coreos.com",
    version = "v1alpha1",
    kind = "CatalogSource",
    plural = "catalogsources",
    namespaced = true,
    schema = "disabled"
)]
#[serde(rename_all = "camelCase")]
pub struct CatalogSourceSpec {
    #[serde(skip_serializing_if = "Option::is_none")]
    pub address: Option<String>,

    #[serde(skip_serializing_if = "Option::is_none")]
    pub config_map: Option<String>,

    #[serde(skip_serializing_if = "Option::is_none")]
    pub description: Option<String>,

    #[serde(skip_serializing_if = "Option::is_none")]
    pub display_name: Option<String>,

    #[serde(skip_serializing_if = "Option::is_none")]
    pub grpc_pod_config: Option<GrpcPodConfig>,

    #[serde(skip_serializing_if = "Option::is_none")]
    pub icon: Option<Icon>,

    /// Catalog index image reference (e.g. `quay.io/operatorhubio/catalog:latest`).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub image: Option<String>,

    #[serde(skip_serializing_if = "Option::is_none")]
    pub priority: Option<i64>,

    #[serde(skip_serializing_if = "Option::is_none")]
    pub publisher: Option<String>,

    #[serde(skip_serializing_if = "Option::is_none")]
    pub run_as_root: Option<bool>,

    #[serde(skip_serializing_if = "Option::is_none")]
    pub secrets: Option<Vec<String>>,

    /// Catalog source type; this codebase uses `"grpc"` (see `OperatorHubCatalogSourceScore`).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub source_type: Option<String>,

    /// How often the catalog image is re-polled (see [`UpdateStrategy`]).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub update_strategy: Option<UpdateStrategy>,
}
|
||||
|
||||
/// Pod-level scheduling/runtime configuration for the grpc catalog pod.
/// Mirrors the upstream OLM `grpcPodConfig` object; no `JsonSchema` derive
/// because `Affinity`/`Toleration` come from `k8s_openapi` (schema disabled anyway).
#[derive(Deserialize, Serialize, Clone, Debug)]
#[serde(rename_all = "camelCase")]
pub struct GrpcPodConfig {
    #[serde(skip_serializing_if = "Option::is_none")]
    pub affinity: Option<Affinity>,

    #[serde(skip_serializing_if = "Option::is_none")]
    pub extract_content: Option<ExtractContent>,

    // Upstream accepts either a quantity string or number here, hence `Value`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub memory_target: Option<Value>,

    #[serde(skip_serializing_if = "Option::is_none")]
    pub node_selector: Option<BTreeMap<String, String>>,

    #[serde(skip_serializing_if = "Option::is_none")]
    pub priority_class_name: Option<String>,

    #[serde(skip_serializing_if = "Option::is_none")]
    pub security_context_config: Option<String>,

    #[serde(skip_serializing_if = "Option::is_none")]
    pub tolerations: Option<Vec<Toleration>>,
}
|
||||
|
||||
/// Directories used when extracting catalog content from the index image
/// (upstream OLM `grpcPodConfig.extractContent`).
#[derive(Deserialize, Serialize, Clone, Debug, JsonSchema)]
#[serde(rename_all = "camelCase")]
pub struct ExtractContent {
    pub cache_dir: String,
    pub catalog_dir: String,
}
|
||||
|
||||
/// Catalog icon: base64-encoded image data plus its media type.
/// Field names are single lowercase words upstream, so `rename_all` is a no-op here.
#[derive(Deserialize, Serialize, Clone, Debug, JsonSchema)]
#[serde(rename_all = "camelCase")]
pub struct Icon {
    pub base64data: String,
    pub mediatype: String,
}
|
||||
|
||||
/// Update strategy for the catalog; currently only registry polling upstream.
#[derive(Deserialize, Serialize, Clone, Debug, JsonSchema)]
#[serde(rename_all = "camelCase")]
pub struct UpdateStrategy {
    #[serde(skip_serializing_if = "Option::is_none")]
    pub registry_poll: Option<RegistryPoll>,
}
|
||||
|
||||
/// Polling configuration: `interval` is a duration string (e.g. `"60m"`).
#[derive(Deserialize, Serialize, Clone, Debug, JsonSchema)]
#[serde(rename_all = "camelCase")]
pub struct RegistryPoll {
    #[serde(skip_serializing_if = "Option::is_none")]
    pub interval: Option<String>,
}
|
||||
|
||||
impl Default for CatalogSource {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
metadata: ObjectMeta::default(),
|
||||
spec: CatalogSourceSpec {
|
||||
address: None,
|
||||
config_map: None,
|
||||
description: None,
|
||||
display_name: None,
|
||||
grpc_pod_config: None,
|
||||
icon: None,
|
||||
image: None,
|
||||
priority: None,
|
||||
publisher: None,
|
||||
run_as_root: None,
|
||||
secrets: None,
|
||||
source_type: None,
|
||||
update_strategy: None,
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for CatalogSourceSpec {
    /// All-`None` spec: nothing is serialized until a field is explicitly set,
    /// since every field is marked `skip_serializing_if = "Option::is_none"`.
    fn default() -> Self {
        Self {
            address: None,
            config_map: None,
            description: None,
            display_name: None,
            grpc_pod_config: None,
            icon: None,
            image: None,
            priority: None,
            publisher: None,
            run_as_root: None,
            secrets: None,
            source_type: None,
            update_strategy: None,
        }
    }
}
|
||||
4
harmony/src/modules/k8s/apps/crd/mod.rs
Normal file
4
harmony/src/modules/k8s/apps/crd/mod.rs
Normal file
@@ -0,0 +1,4 @@
|
||||
// Hand-written mirrors of OLM (operators.coreos.com) CRDs used by the
// OperatorHub scores; file names follow the CRD's plural.group convention.
mod catalogsources_operators_coreos_com;
pub use catalogsources_operators_coreos_com::*;
mod subscriptions_operators_coreos_com;
pub use subscriptions_operators_coreos_com::*;
|
||||
@@ -0,0 +1,68 @@
|
||||
use k8s_openapi::apimachinery::pkg::apis::meta::v1::ObjectMeta;
|
||||
use kube::CustomResource;
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
/// Spec for the OLM `Subscription` custom resource
/// (`operators.coreos.com/v1alpha1`, kind `Subscription`).
///
/// Schema validation is disabled; `name`, `source` and `source_namespace` are
/// the only required fields (non-`Option`).
#[derive(CustomResource, Deserialize, Serialize, Clone, Debug)]
#[kube(
    group = "operators.coreos.com",
    version = "v1alpha1",
    kind = "Subscription",
    plural = "subscriptions",
    namespaced = true,
    schema = "disabled"
)]
#[serde(rename_all = "camelCase")]
pub struct SubscriptionSpec {
    #[serde(skip_serializing_if = "Option::is_none")]
    pub channel: Option<String>,

    #[serde(skip_serializing_if = "Option::is_none")]
    pub config: Option<SubscriptionConfig>,

    // Upstream values are "Automatic" or "Manual" — left as a free string here.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub install_plan_approval: Option<String>,

    /// Name of the operator package to subscribe to.
    pub name: String,

    /// Name of the `CatalogSource` providing the package.
    pub source: String,

    /// Namespace of that `CatalogSource`.
    pub source_namespace: String,

    #[serde(skip_serializing_if = "Option::is_none")]
    pub starting_csv: Option<String>,
}
|
||||
/// Pod-level overrides applied to the operator deployment created by the
/// subscription (upstream OLM `spec.config`).
#[derive(Deserialize, Serialize, Clone, Debug)]
#[serde(rename_all = "camelCase")]
pub struct SubscriptionConfig {
    #[serde(skip_serializing_if = "Option::is_none")]
    pub env: Option<Vec<k8s_openapi::api::core::v1::EnvVar>>,

    #[serde(skip_serializing_if = "Option::is_none")]
    pub node_selector: Option<std::collections::BTreeMap<String, String>>,

    #[serde(skip_serializing_if = "Option::is_none")]
    pub tolerations: Option<Vec<k8s_openapi::api::core::v1::Toleration>>,
}
|
||||
|
||||
impl Default for Subscription {
    /// Empty subscription: default metadata and a default spec (blank
    /// required strings — callers must fill `name`/`source`/`source_namespace`).
    fn default() -> Self {
        Subscription {
            metadata: ObjectMeta::default(),
            spec: SubscriptionSpec::default(),
        }
    }
}
||||
|
||||
impl Default for SubscriptionSpec {
    /// Required fields default to empty strings (not valid to apply as-is);
    /// all optional fields stay unset and are omitted from serialization.
    fn default() -> SubscriptionSpec {
        SubscriptionSpec {
            name: String::new(),
            source: String::new(),
            source_namespace: String::new(),
            channel: None,
            config: None,
            install_plan_approval: None,
            starting_csv: None,
        }
    }
}
|
||||
3
harmony/src/modules/k8s/apps/mod.rs
Normal file
3
harmony/src/modules/k8s/apps/mod.rs
Normal file
@@ -0,0 +1,3 @@
|
||||
// OperatorHub catalog/subscription scores plus the CRD mirrors they apply.
mod operatorhub;
pub use operatorhub::*;
pub mod crd;
|
||||
107
harmony/src/modules/k8s/apps/operatorhub.rs
Normal file
107
harmony/src/modules/k8s/apps/operatorhub.rs
Normal file
@@ -0,0 +1,107 @@
|
||||
// OperatorHub catalog score.
// For now this only supports OKD with the default catalog and OperatorHub setup, and does not
// verify OLM state or anything else. Very opinionated and bare-bones to start.
|
||||
|
||||
use k8s_openapi::apimachinery::pkg::apis::meta::v1::ObjectMeta;
|
||||
use serde::Serialize;
|
||||
|
||||
use crate::interpret::Interpret;
|
||||
use crate::modules::k8s::apps::crd::{
|
||||
CatalogSource, CatalogSourceSpec, RegistryPoll, UpdateStrategy,
|
||||
};
|
||||
use crate::modules::k8s::resource::K8sResourceScore;
|
||||
use crate::score::Score;
|
||||
use crate::topology::{K8sclient, Topology};
|
||||
|
||||
/// Installs the CatalogSource in a cluster which already has the required services and CRDs installed.
///
/// ```rust
/// use harmony::modules::k8s::apps::OperatorHubCatalogSourceScore;
///
/// let score = OperatorHubCatalogSourceScore::default();
/// ```
///
/// Required services:
/// - catalog-operator
/// - olm-operator
///
/// They are installed by default with OKD/Openshift
///
/// **Warning** : this initial implementation does not manage the dependencies. They must already
/// exist in the cluster.
#[derive(Debug, Clone, Serialize)]
pub struct OperatorHubCatalogSourceScore {
    // Name of the CatalogSource resource.
    pub name: String,
    // Namespace the CatalogSource is created in (e.g. "openshift-marketplace").
    pub namespace: String,
    // Catalog index image served over grpc.
    pub image: String,
}
|
||||
|
||||
impl OperatorHubCatalogSourceScore {
|
||||
pub fn new(name: &str, namespace: &str, image: &str) -> Self {
|
||||
Self {
|
||||
name: name.to_string(),
|
||||
namespace: namespace.to_string(),
|
||||
image: image.to_string(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for OperatorHubCatalogSourceScore {
    /// This default implementation will create this k8s resource :
    ///
    /// ```yaml
    /// apiVersion: operators.coreos.com/v1alpha1
    /// kind: CatalogSource
    /// metadata:
    ///   name: operatorhubio-catalog
    ///   namespace: openshift-marketplace
    /// spec:
    ///   sourceType: grpc
    ///   image: quay.io/operatorhubio/catalog:latest
    ///   displayName: Operatorhub Operators
    ///   publisher: OperatorHub.io
    ///   updateStrategy:
    ///     registryPoll:
    ///       interval: 60m
    /// ```
    ///
    /// (`sourceType`, `displayName`, `publisher` and `updateStrategy` are filled
    /// in by `create_interpret`, not stored on the score itself.)
    fn default() -> Self {
        OperatorHubCatalogSourceScore {
            name: "operatorhubio-catalog".to_string(),
            namespace: "openshift-marketplace".to_string(),
            image: "quay.io/operatorhubio/catalog:latest".to_string(),
        }
    }
}
|
||||
|
||||
impl<T: Topology + K8sclient> Score<T> for OperatorHubCatalogSourceScore {
|
||||
fn create_interpret(&self) -> Box<dyn Interpret<T>> {
|
||||
let metadata = ObjectMeta {
|
||||
name: Some(self.name.clone()),
|
||||
namespace: Some(self.namespace.clone()),
|
||||
..ObjectMeta::default()
|
||||
};
|
||||
|
||||
let spec = CatalogSourceSpec {
|
||||
source_type: Some("grpc".to_string()),
|
||||
image: Some(self.image.clone()),
|
||||
display_name: Some("Operatorhub Operators".to_string()),
|
||||
publisher: Some("OperatorHub.io".to_string()),
|
||||
update_strategy: Some(UpdateStrategy {
|
||||
registry_poll: Some(RegistryPoll {
|
||||
interval: Some("60m".to_string()),
|
||||
}),
|
||||
}),
|
||||
..CatalogSourceSpec::default()
|
||||
};
|
||||
|
||||
let catalog_source = CatalogSource {
|
||||
metadata,
|
||||
spec: spec,
|
||||
};
|
||||
|
||||
K8sResourceScore::single(catalog_source, Some(self.namespace.clone())).create_interpret()
|
||||
}
|
||||
|
||||
fn name(&self) -> String {
|
||||
format!("OperatorHubCatalogSourceScore({})", self.name)
|
||||
}
|
||||
}
|
||||
19
harmony/src/modules/k8s/failover.rs
Normal file
19
harmony/src/modules/k8s/failover.rs
Normal file
@@ -0,0 +1,19 @@
|
||||
use std::sync::Arc;
|
||||
|
||||
use async_trait::async_trait;
|
||||
use log::warn;
|
||||
|
||||
use crate::topology::{FailoverTopology, K8sclient, k8s::K8sClient};
|
||||
|
||||
#[async_trait]
impl<T: K8sclient> K8sclient for FailoverTopology<T> {
    // TODO figure out how to structure this properly. This gives access only to the primary k8s
    // client, which will work in many cases but is clearly not good enough for all uses cases
    // where k8s_client can be used. Logging a warning for now.
    /// Returns the kubernetes client of the *primary* topology only; the replica's client
    /// is not reachable through this capability yet (see the TODO above).
    async fn k8s_client(&self) -> Result<Arc<K8sClient>, String> {
        warn!(
            "Failover topology k8s_client capability currently defers to the primary only. Make sure to check this is OK for you"
        );
        self.primary.k8s_client().await
    }
}
|
||||
@@ -1,4 +1,6 @@
|
||||
pub mod apps;
|
||||
pub mod deployment;
|
||||
mod failover;
|
||||
pub mod ingress;
|
||||
pub mod namespace;
|
||||
pub mod resource;
|
||||
|
||||
@@ -79,7 +79,33 @@ where
|
||||
_inventory: &Inventory,
|
||||
topology: &T,
|
||||
) -> Result<Outcome, InterpretError> {
|
||||
info!("Applying {} resources", self.score.resource.len());
|
||||
// TODO improve this log
|
||||
let resource_names: Vec<String> = self
|
||||
.score
|
||||
.resource
|
||||
.iter()
|
||||
.map(|r| {
|
||||
format!(
|
||||
"{}{}",
|
||||
r.meta()
|
||||
.name
|
||||
.as_ref()
|
||||
.map(|n| format!("{n}"))
|
||||
.unwrap_or_default(),
|
||||
r.meta()
|
||||
.namespace
|
||||
.as_ref()
|
||||
.map(|ns| format!("@{}", ns))
|
||||
.unwrap_or_default()
|
||||
)
|
||||
})
|
||||
.collect();
|
||||
|
||||
info!(
|
||||
"Applying {} resources : {}",
|
||||
resource_names.len(),
|
||||
resource_names.join(", ")
|
||||
);
|
||||
topology
|
||||
.k8s_client()
|
||||
.await
|
||||
|
||||
@@ -11,8 +11,10 @@ pub mod k8s;
|
||||
pub mod lamp;
|
||||
pub mod load_balancer;
|
||||
pub mod monitoring;
|
||||
pub mod network;
|
||||
pub mod okd;
|
||||
pub mod opnsense;
|
||||
pub mod postgresql;
|
||||
pub mod prometheus;
|
||||
pub mod storage;
|
||||
pub mod tenant;
|
||||
|
||||
18
harmony/src/modules/network/failover.rs
Normal file
18
harmony/src/modules/network/failover.rs
Normal file
@@ -0,0 +1,18 @@
|
||||
use async_trait::async_trait;
|
||||
use log::warn;
|
||||
|
||||
use crate::topology::{FailoverTopology, TlsRoute, TlsRouter};
|
||||
|
||||
#[async_trait]
|
||||
impl<T: TlsRouter> TlsRouter for FailoverTopology<T> {
|
||||
async fn get_wildcard_domain(&self) -> Result<Option<String>, String> {todo!()}
|
||||
|
||||
/// Returns the port that this router exposes externally.
|
||||
async fn get_router_port(&self) -> u16 {todo!()}
|
||||
async fn install_route(&self, config: TlsRoute) -> Result<(), String> {
|
||||
warn!(
|
||||
"Failover topology TlsRouter capability currently defers to the primary only. Make sure to check this is OK for you. The Replica Topology WILL NOT be affected here"
|
||||
);
|
||||
self.primary.install_route(config).await
|
||||
}
|
||||
}
|
||||
3
harmony/src/modules/network/mod.rs
Normal file
3
harmony/src/modules/network/mod.rs
Normal file
@@ -0,0 +1,3 @@
|
||||
mod failover;
|
||||
mod tls_router;
|
||||
pub use tls_router::*;
|
||||
91
harmony/src/modules/network/tls_router.rs
Normal file
91
harmony/src/modules/network/tls_router.rs
Normal file
@@ -0,0 +1,91 @@
|
||||
use async_trait::async_trait;
|
||||
use harmony_types::id::Id;
|
||||
use serde::Serialize;
|
||||
|
||||
use crate::data::Version;
|
||||
use crate::domain::topology::router::{TlsRoute, TlsRouter};
|
||||
use crate::interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome};
|
||||
use crate::inventory::Inventory;
|
||||
use crate::score::Score;
|
||||
use crate::topology::{K8sclient, Topology};
|
||||
|
||||
/// Score for provisioning a TLS passthrough route.
/// Exposes backend services via TLS passthrough (L4 TCP/SNI forwarding).
/// Agnostic to underlying router impl (OKD Route, HAProxy, Envoy, etc.).
///
/// TlsPassthroughScore relies on the TlsRouter Capability for its entire functionality,
/// the implementation depends entirely on how the Topology implements it.
///
/// # Usage
/// ```
/// use harmony::modules::network::TlsPassthroughScore;
/// use harmony::topology::router::TlsRoute;
/// let score = TlsPassthroughScore {
///     route: TlsRoute {
///         backend: "postgres-cluster-rw".to_string(),
///         hostname: "postgres-rw.example.com".to_string(),
///         target_port: 5432,
///     },
/// };
/// ```
///
/// # Hint
///
/// **This TlsPassthroughScore should be used whenever possible.** It is effectively
/// an abstraction over the concept of tls passthrough, and it will allow much more flexible
/// usage over multiple types of Topology than using a lower level module such as
/// OKDTlsPassthroughScore.
///
/// On the other hand, some implementation specific options might not be available or practical
/// to use through this high level TlsPassthroughScore.
#[derive(Debug, Clone, Serialize)]
pub struct TlsPassthroughScore {
    /// The route to install via the topology's TlsRouter capability.
    pub route: TlsRoute,
}
|
||||
|
||||
impl<T: Topology + K8sclient + TlsRouter + Send + Sync> Score<T> for TlsPassthroughScore {
|
||||
fn create_interpret(&self) -> Box<dyn Interpret<T>> {
|
||||
Box::new(TlsPassthroughInterpret {
|
||||
tls_route: self.route.clone(),
|
||||
})
|
||||
}
|
||||
|
||||
fn name(&self) -> String {
|
||||
format!(
|
||||
"TlsRouterScore({}:{} → {})",
|
||||
self.route.backend, self.route.target_port, self.route.hostname
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
/// Custom interpret: provisions the TLS passthrough route on the topology.
|
||||
#[derive(Debug, Clone)]
|
||||
struct TlsPassthroughInterpret {
|
||||
tls_route: TlsRoute,
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl<T: Topology + K8sclient + TlsRouter + Send + Sync> Interpret<T> for TlsPassthroughInterpret {
|
||||
fn get_name(&self) -> InterpretName {
|
||||
InterpretName::Custom("TlsRouterInterpret")
|
||||
}
|
||||
fn get_version(&self) -> Version {
|
||||
todo!()
|
||||
}
|
||||
fn get_status(&self) -> InterpretStatus {
|
||||
todo!()
|
||||
}
|
||||
fn get_children(&self) -> Vec<Id> {
|
||||
todo!()
|
||||
}
|
||||
async fn execute(&self, _inventory: &Inventory, topo: &T) -> Result<Outcome, InterpretError> {
|
||||
topo.install_route(self.tls_route.clone())
|
||||
.await
|
||||
.map_err(|e| InterpretError::new(e.to_string()))?;
|
||||
|
||||
Ok(Outcome::success(format!(
|
||||
"TLS route installed: {} → {}:{}",
|
||||
self.tls_route.hostname, self.tls_route.backend, self.tls_route.target_port
|
||||
)))
|
||||
}
|
||||
}
|
||||
@@ -1 +1,2 @@
|
||||
pub mod nmstate;
|
||||
pub mod route;
|
||||
|
||||
287
harmony/src/modules/okd/crd/route.rs
Normal file
287
harmony/src/modules/okd/crd/route.rs
Normal file
@@ -0,0 +1,287 @@
|
||||
use k8s_openapi::apimachinery::pkg::apis::meta::v1::{ListMeta, ObjectMeta, Time};
|
||||
use k8s_openapi::apimachinery::pkg::util::intstr::IntOrString;
|
||||
use k8s_openapi::{NamespaceResourceScope, Resource};
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
/// Reference to another object in the same namespace by name only
/// (mirrors `core/v1 LocalObjectReference`).
#[derive(Deserialize, Serialize, Clone, Debug)]
#[serde(rename_all = "camelCase")]
pub struct LocalObjectReference {
    /// Name of the referenced object.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
}
|
||||
|
||||
/// OpenShift/OKD `route.openshift.io/v1 Route` custom resource.
///
/// Hand-written because `k8s_openapi` only covers upstream Kubernetes types.
#[derive(Deserialize, Serialize, Clone, Debug)]
#[serde(rename_all = "camelCase")]
pub struct Route {
    #[serde(skip_serializing_if = "Option::is_none")]
    pub api_version: Option<String>,

    #[serde(skip_serializing_if = "Option::is_none")]
    pub kind: Option<String>,
    pub metadata: ObjectMeta,

    pub spec: RouteSpec,

    /// Populated by the router; never set by clients.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub status: Option<RouteStatus>,
}
|
||||
|
||||
impl Resource for Route {
|
||||
const API_VERSION: &'static str = "route.openshift.io/v1";
|
||||
const GROUP: &'static str = "route.openshift.io";
|
||||
const VERSION: &'static str = "v1";
|
||||
const KIND: &'static str = "Route";
|
||||
const URL_PATH_SEGMENT: &'static str = "routes";
|
||||
type Scope = NamespaceResourceScope;
|
||||
}
|
||||
|
||||
impl k8s_openapi::Metadata for Route {
|
||||
type Ty = ObjectMeta;
|
||||
|
||||
fn metadata(&self) -> &Self::Ty {
|
||||
&self.metadata
|
||||
}
|
||||
|
||||
fn metadata_mut(&mut self) -> &mut Self::Ty {
|
||||
&mut self.metadata
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for Route {
|
||||
fn default() -> Self {
|
||||
Route {
|
||||
api_version: Some("route.openshift.io/v1".to_string()),
|
||||
kind: Some("Route".to_string()),
|
||||
metadata: ObjectMeta::default(),
|
||||
spec: RouteSpec::default(),
|
||||
status: None,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Deserialize, Serialize, Clone, Debug)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct RouteList {
|
||||
pub metadata: ListMeta,
|
||||
pub items: Vec<Route>,
|
||||
}
|
||||
|
||||
impl Default for RouteList {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
metadata: ListMeta::default(),
|
||||
items: Vec::new(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Resource for RouteList {
|
||||
const API_VERSION: &'static str = "route.openshift.io/v1";
|
||||
const GROUP: &'static str = "route.openshift.io";
|
||||
const VERSION: &'static str = "v1";
|
||||
const KIND: &'static str = "RouteList";
|
||||
const URL_PATH_SEGMENT: &'static str = "routes";
|
||||
type Scope = NamespaceResourceScope;
|
||||
}
|
||||
|
||||
impl k8s_openapi::Metadata for RouteList {
|
||||
type Ty = ListMeta;
|
||||
|
||||
fn metadata(&self) -> &Self::Ty {
|
||||
&self.metadata
|
||||
}
|
||||
|
||||
fn metadata_mut(&mut self) -> &mut Self::Ty {
|
||||
&mut self.metadata
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Deserialize, Serialize, Clone, Debug)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct RouteSpec {
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub alternate_backends: Option<Vec<RouteTargetReference>>,
|
||||
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub host: Option<String>,
|
||||
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub http_headers: Option<RouteHTTPHeaders>,
|
||||
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub path: Option<String>,
|
||||
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub port: Option<RoutePort>,
|
||||
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub subdomain: Option<String>,
|
||||
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub tls: Option<TLSConfig>,
|
||||
|
||||
pub to: RouteTargetReference,
|
||||
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub wildcard_policy: Option<String>,
|
||||
}
|
||||
impl Default for RouteSpec {
|
||||
fn default() -> RouteSpec {
|
||||
RouteSpec {
|
||||
alternate_backends: None,
|
||||
host: None,
|
||||
http_headers: None,
|
||||
path: None,
|
||||
port: None,
|
||||
subdomain: None,
|
||||
tls: None,
|
||||
to: RouteTargetReference::default(),
|
||||
wildcard_policy: None,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Deserialize, Serialize, Clone, Debug)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct RouteTargetReference {
|
||||
pub kind: String,
|
||||
pub name: String,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub weight: Option<i32>,
|
||||
}
|
||||
impl Default for RouteTargetReference {
|
||||
fn default() -> RouteTargetReference {
|
||||
RouteTargetReference {
|
||||
kind: String::default(),
|
||||
name: String::default(),
|
||||
weight: None,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Service port the route forwards traffic to.
#[derive(Deserialize, Serialize, Clone, Debug)]
#[serde(rename_all = "camelCase")]
pub struct RoutePort {
    // NOTE(review): the OpenShift API also accepts a named port (IntOrString);
    // this model is restricted to numeric ports — widen if named ports are needed.
    pub target_port: u16,
}
|
||||
|
||||
/// TLS settings for a route (termination mode, certificates, insecure-traffic policy).
#[derive(Deserialize, Serialize, Clone, Debug)]
#[serde(rename_all = "camelCase")]
pub struct TLSConfig {
    #[serde(skip_serializing_if = "Option::is_none")]
    pub ca_certificate: Option<String>,

    #[serde(skip_serializing_if = "Option::is_none")]
    pub certificate: Option<String>,

    #[serde(skip_serializing_if = "Option::is_none")]
    pub destination_ca_certificate: Option<String>,

    #[serde(skip_serializing_if = "Option::is_none")]
    pub external_certificate: Option<LocalObjectReference>,

    #[serde(skip_serializing_if = "Option::is_none")]
    pub insecure_edge_termination_policy: Option<String>,

    #[serde(skip_serializing_if = "Option::is_none")]
    pub key: Option<String>,

    /// Termination mode: "edge", "passthrough" or "reencrypt".
    pub termination: String,
}

impl Default for TLSConfig {
    /// Defaults to edge termination with no certificates configured.
    /// (Manual impl: "edge" is not a derivable default.)
    fn default() -> Self {
        Self {
            ca_certificate: None,
            certificate: None,
            destination_ca_certificate: None,
            external_certificate: None,
            insecure_edge_termination_policy: None,
            key: None,
            termination: "edge".to_string(),
        }
    }
}
|
||||
|
||||
/// Observed state of a [`Route`], written by the router(s) that admitted it.
#[derive(Deserialize, Serialize, Clone, Debug)]
#[serde(rename_all = "camelCase")]
pub struct RouteStatus {
    #[serde(skip_serializing_if = "Option::is_none")]
    pub ingress: Option<Vec<RouteIngress>>,
}

/// Per-router admission record for a route.
#[derive(Deserialize, Serialize, Clone, Debug)]
#[serde(rename_all = "camelCase")]
pub struct RouteIngress {
    #[serde(skip_serializing_if = "Option::is_none")]
    pub host: Option<String>,

    #[serde(skip_serializing_if = "Option::is_none")]
    pub router_canonical_hostname: Option<String>,

    #[serde(skip_serializing_if = "Option::is_none")]
    pub router_name: Option<String>,

    #[serde(skip_serializing_if = "Option::is_none")]
    pub wildcard_policy: Option<String>,

    #[serde(skip_serializing_if = "Option::is_none")]
    pub conditions: Option<Vec<RouteIngressCondition>>,
}

/// Condition reported by a router for a route (e.g. `Admitted`).
#[derive(Deserialize, Serialize, Clone, Debug)]
#[serde(rename_all = "camelCase")]
pub struct RouteIngressCondition {
    #[serde(skip_serializing_if = "Option::is_none")]
    pub last_transition_time: Option<Time>,

    #[serde(skip_serializing_if = "Option::is_none")]
    pub message: Option<String>,

    #[serde(skip_serializing_if = "Option::is_none")]
    pub reason: Option<String>,

    /// "True", "False" or "Unknown".
    pub status: String,
    // `type` is a Rust keyword, hence the rename.
    #[serde(rename = "type")]
    pub condition_type: String,
}
|
||||
|
||||
/// A single HTTP header manipulation (set or delete) applied by the router.
#[derive(Deserialize, Serialize, Clone, Debug)]
#[serde(rename_all = "camelCase")]
pub struct RouteHTTPHeader {
    pub name: String,
    pub action: RouteHTTPHeaderActionUnion,
}

/// Discriminated action: `action_type` selects the behavior, `set` carries the
/// value when `action_type == "Set"`.
#[derive(Deserialize, Serialize, Clone, Debug)]
#[serde(rename_all = "camelCase")]
pub struct RouteHTTPHeaderActionUnion {
    #[serde(skip_serializing_if = "Option::is_none")]
    pub set: Option<RouteSetHTTPHeader>,

    #[serde(rename = "type")]
    pub action_type: String,
}

/// Value assigned when a header is set.
#[derive(Deserialize, Serialize, Clone, Debug)]
#[serde(rename_all = "camelCase")]
pub struct RouteSetHTTPHeader {
    pub value: String,
}

/// Header actions split by direction: request (client → backend) and
/// response (backend → client).
#[derive(Deserialize, Serialize, Clone, Debug, Default)]
#[serde(rename_all = "camelCase")]
pub struct RouteHTTPHeaderActions {
    #[serde(skip_serializing_if = "Option::is_none")]
    pub request: Option<Vec<RouteHTTPHeader>>,

    #[serde(skip_serializing_if = "Option::is_none")]
    pub response: Option<Vec<RouteHTTPHeader>>,
}

/// Top-level header-manipulation config on a route spec.
#[derive(Deserialize, Serialize, Clone, Debug)]
#[serde(rename_all = "camelCase")]
pub struct RouteHTTPHeaders {
    #[serde(skip_serializing_if = "Option::is_none")]
    pub actions: Option<RouteHTTPHeaderActions>,
}
|
||||
@@ -12,6 +12,7 @@ pub mod dns;
|
||||
pub mod installation;
|
||||
pub mod ipxe;
|
||||
pub mod load_balancer;
|
||||
pub mod route;
|
||||
pub mod templates;
|
||||
pub mod upgrade;
|
||||
pub use bootstrap_01_prepare::*;
|
||||
|
||||
105
harmony/src/modules/okd/route.rs
Normal file
105
harmony/src/modules/okd/route.rs
Normal file
@@ -0,0 +1,105 @@
|
||||
// TODO
|
||||
// Write OKDRouteScore : This is the real one which will apply the k8s resource and expose all
|
||||
// relevant option to Harmony's various use cases
|
||||
//
|
||||
// Write OKDTlsPassthroughScore : This one will use an OKDRouteScore under the hood and simply fill
|
||||
// in all settings to make this route a TlsPassthrough
|
||||
//
|
||||
// These scores are meant to be used by an OKD based topology to provide Capabilities like
|
||||
// TlsRouter
|
||||
//
|
||||
// The first use case to serve here is the postgresql multisite setup, so exposing only the
|
||||
// settings relevant to this use case is enough at first, following YAGNI.
|
||||
//
|
||||
// These scores are not intended to be used directly by a user, unless the user knows that he will
|
||||
// always be dealing only with okd/openshift compatible topologies and is ready to manage the
|
||||
// additional maintenance burden that comes with a lower level functionnality.
|
||||
|
||||
use harmony_types::rfc1123::Rfc1123Name;
|
||||
use kube::api::ObjectMeta;
|
||||
use serde::Serialize;
|
||||
|
||||
use crate::modules::k8s::resource::K8sResourceScore;
|
||||
use crate::modules::okd::crd::route::{
|
||||
Route, RoutePort, RouteSpec, RouteTargetReference, TLSConfig,
|
||||
};
|
||||
use crate::score::Score;
|
||||
use crate::topology::{K8sclient, TlsRoute, Topology};
|
||||
|
||||
/// Low-level score that applies an OpenShift/OKD `Route` resource with an arbitrary spec.
///
/// Prefer the topology-agnostic `TlsPassthroughScore` unless you know the target
/// is always OKD/OpenShift-compatible.
#[derive(Debug, Clone, Serialize)]
pub struct OKDRouteScore {
    /// Route resource name (metadata.name).
    pub name: String,
    /// Namespace the Route is created in.
    pub namespace: String,
    /// Full Route spec applied as-is.
    pub spec: RouteSpec,
}

impl OKDRouteScore {
    /// Builds a score from borrowed name/namespace and an owned spec.
    pub fn new(name: &str, namespace: &str, spec: RouteSpec) -> Self {
        Self {
            name: name.to_string(),
            namespace: namespace.to_string(),
            spec,
        }
    }
}
|
||||
|
||||
impl<T: Topology + K8sclient> Score<T> for OKDRouteScore {
|
||||
fn create_interpret(&self) -> Box<dyn crate::interpret::Interpret<T>> {
|
||||
let route = Route {
|
||||
metadata: ObjectMeta {
|
||||
name: Some(self.name.clone()),
|
||||
namespace: Some(self.namespace.clone()),
|
||||
..ObjectMeta::default()
|
||||
},
|
||||
spec: self.spec.clone(),
|
||||
..Default::default()
|
||||
};
|
||||
K8sResourceScore::single(route, Some(self.namespace.clone())).create_interpret()
|
||||
}
|
||||
|
||||
fn name(&self) -> String {
|
||||
format!("OKDRouteScore({})", self.name)
|
||||
}
|
||||
}
|
||||
|
||||
/// OKD-specific TLS passthrough score: builds a passthrough-terminated `Route`
/// via [`OKDRouteScore`]. Prefer the generic `TlsPassthroughScore` when possible.
#[derive(Debug, Clone, Serialize)]
pub struct OKDTlsPassthroughScore {
    /// Backend/hostname/port to expose.
    pub route: TlsRoute,
    /// DNS-safe (RFC 1123) name for the Route resource.
    pub name: Rfc1123Name,
}
|
||||
|
||||
impl<T: Topology + K8sclient> Score<T> for OKDTlsPassthroughScore {
|
||||
fn create_interpret(&self) -> Box<dyn crate::interpret::Interpret<T>> {
|
||||
let passthrough_spec = RouteSpec {
|
||||
host: Some(self.route.hostname.clone()),
|
||||
wildcard_policy: Some("None".to_string()),
|
||||
to: RouteTargetReference {
|
||||
kind: "Service".to_string(),
|
||||
name: self.route.backend.clone(),
|
||||
weight: Some(100),
|
||||
},
|
||||
port: Some(RoutePort {
|
||||
target_port: self.route.target_port,
|
||||
}),
|
||||
tls: Some(TLSConfig {
|
||||
termination: "passthrough".to_string(),
|
||||
insecure_edge_termination_policy: Some("None".to_string()),
|
||||
..Default::default()
|
||||
}),
|
||||
..Default::default()
|
||||
};
|
||||
let route_score = OKDRouteScore::new(
|
||||
&self.name.to_string(),
|
||||
&self.route.namespace,
|
||||
passthrough_spec,
|
||||
);
|
||||
route_score.create_interpret()
|
||||
}
|
||||
|
||||
fn name(&self) -> String {
|
||||
format!(
|
||||
"OKDTlsPassthroughScore({}:{}/{} → {})",
|
||||
self.route.backend, self.route.target_port, self.route.namespace, self.route.hostname
|
||||
)
|
||||
}
|
||||
}
|
||||
107
harmony/src/modules/postgresql/capability.rs
Normal file
107
harmony/src/modules/postgresql/capability.rs
Normal file
@@ -0,0 +1,107 @@
|
||||
use async_trait::async_trait;
|
||||
use harmony_types::storage::StorageSize;
|
||||
use serde::Serialize;
|
||||
use std::collections::HashMap;
|
||||
|
||||
/// Capability trait: a topology that can host CloudNativePG-style PostgreSQL clusters.
#[async_trait]
pub trait PostgreSQL: Send + Sync {
    /// Deploys (or reconciles) a cluster described by `config`; returns the cluster name.
    async fn deploy(&self, config: &PostgreSQLConfig) -> Result<String, String>;

    /// Extracts PostgreSQL-specific replication certs (PEM format) from a deployed primary cluster.
    /// Abstracts away storage/retrieval details (e.g., secrets, files).
    async fn get_replication_certs(&self, config: &PostgreSQLConfig) -> Result<ReplicationCerts, String>;

    /// Gets the internal/private endpoint (e.g., k8s service FQDN:5432) for the cluster.
    async fn get_endpoint(&self, config: &PostgreSQLConfig) -> Result<PostgreSQLEndpoint, String>;

    // /// Gets the public/externally routable endpoint if configured (e.g., OKD Route:443 for TLS passthrough).
    // /// Returns None if no public endpoint (internal-only cluster).
    // /// UNSTABLE: This is opinionated for initial multisite use cases. Networking abstraction is complex
    // /// (cf. k8s Ingress -> Gateway API evolution); may move to higher-order Networking/PostgreSQLNetworking trait.
    // async fn get_public_endpoint(
    //     &self,
    //     cluster_name: &str,
    // ) -> Result<Option<PostgreSQLEndpoint>, String>;
}
|
||||
|
||||
/// Desired configuration of a PostgreSQL cluster managed through the [`PostgreSQL`] capability.
#[derive(Clone, Debug, Serialize)]
pub struct PostgreSQLConfig {
    /// Cluster name; also used as the CNPG resource name.
    pub cluster_name: String,
    /// Number of PostgreSQL instances (1 primary + n-1 standbys).
    pub instances: u32,
    /// Persistent volume size per instance.
    pub storage_size: StorageSize,
    /// Whether this cluster is a primary or a replica of another cluster.
    pub role: PostgreSQLClusterRole,
    /// **Note :** on OpenShift based clusters, the namespace `default` has security
    /// settings incompatible with the default CNPG behavior.
    pub namespace: String,
}
impl PostgreSQLConfig {
    /// Returns a copy of this config targeting a different namespace.
    pub fn with_namespace(&self, namespace: &str) -> PostgreSQLConfig {
        let mut new = self.clone();
        new.namespace = namespace.to_string();
        new
    }
}
|
||||
|
||||
impl Default for PostgreSQLConfig {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
cluster_name: "harmony-pg".to_string(),
|
||||
instances: 1,
|
||||
storage_size: StorageSize::gi(1),
|
||||
role: PostgreSQLClusterRole::Primary,
|
||||
namespace: "harmony".to_string(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Role of a cluster inside a (possibly multisite) deployment.
#[derive(Clone, Debug, Serialize)]
pub enum PostgreSQLClusterRole {
    /// Standalone read-write cluster.
    Primary,
    /// Read-only cluster replicating from a primary.
    Replica(ReplicaConfig),
}

/// Everything a replica cluster needs to sync from its primary.
#[derive(Clone, Debug, Serialize)]
pub struct ReplicaConfig {
    /// Name of the primary cluster this replica will sync from
    pub primary_cluster_name: String,
    /// Certs extracted from primary via Topology::get_replication_certs()
    pub replication_certs: ReplicationCerts,
    /// Bootstrap method (e.g., pg_basebackup from primary)
    pub bootstrap: BootstrapConfig,
    /// External cluster connection details for CNPG spec.externalClusters
    pub external_cluster: ExternalClusterConfig,
}

/// How the replica's initial data copy is obtained.
#[derive(Clone, Debug, Serialize)]
pub struct BootstrapConfig {
    pub strategy: BootstrapStrategy,
}

/// Supported replica bootstrap strategies.
#[derive(Clone, Debug, Serialize)]
pub enum BootstrapStrategy {
    /// Physical base backup streamed from the primary.
    PgBasebackup,
}

/// Connection details for an external (remote) cluster entry.
#[derive(Clone, Debug, Serialize)]
pub struct ExternalClusterConfig {
    /// Name used in CNPG externalClusters list
    pub name: String,
    /// Connection params (host/port set by multisite logic, sslmode='verify-ca', etc.)
    pub connection_parameters: HashMap<String, String>,
}

/// TLS material required for streaming replication against a primary.
#[derive(Clone, Debug, Serialize)]
pub struct ReplicationCerts {
    /// PEM-encoded CA cert from primary
    pub ca_cert_pem: String,
    /// PEM-encoded streaming_replica client cert (tls.crt)
    pub streaming_replica_cert_pem: String,
    /// PEM-encoded streaming_replica client key (tls.key)
    pub streaming_replica_key_pem: String,
}

/// Host/port pair where a cluster accepts connections.
#[derive(Clone, Debug)]
pub struct PostgreSQLEndpoint {
    pub host: String,
    pub port: u16,
}
|
||||
58
harmony/src/modules/postgresql/cnpg/crd.rs
Normal file
58
harmony/src/modules/postgresql/cnpg/crd.rs
Normal file
@@ -0,0 +1,58 @@
|
||||
use kube::{CustomResource, api::ObjectMeta};
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
/// Spec for the CloudNativePG `Cluster` CRD (`postgresql.cnpg.io/v1`).
///
/// Only the fields Harmony currently drives are modelled; `schema = "disabled"`
/// skips schema generation since the CRD is owned by the CNPG operator.
#[derive(CustomResource, Deserialize, Serialize, Clone, Debug)]
#[kube(
    group = "postgresql.cnpg.io",
    version = "v1",
    kind = "Cluster",
    plural = "clusters",
    namespaced = true,
    schema = "disabled"
)]
#[serde(rename_all = "camelCase")]
pub struct ClusterSpec {
    /// Number of PostgreSQL instances in the cluster.
    pub instances: u32,
    /// Optional override of the PostgreSQL container image.
    pub image_name: Option<String>,
    pub storage: Storage,
    pub bootstrap: Bootstrap,
}

impl Default for Cluster {
    /// Empty metadata with a default spec (see `ClusterSpec::default`).
    fn default() -> Self {
        Cluster {
            metadata: ObjectMeta::default(),
            spec: ClusterSpec::default(),
        }
    }
}
|
||||
|
||||
impl Default for ClusterSpec {
    /// Single instance, operator-default image, default storage and initdb bootstrap.
    /// Manual impl because `instances: 1` is not a derivable default.
    fn default() -> Self {
        Self {
            instances: 1,
            image_name: None,
            storage: Storage::default(),
            bootstrap: Bootstrap::default(),
        }
    }
}

/// Persistent storage request, e.g. `size: "1Gi"`.
#[derive(Deserialize, Serialize, Clone, Debug, Default)]
#[serde(rename_all = "camelCase")]
pub struct Storage {
    pub size: String,
}

/// How the cluster's initial database is created.
#[derive(Deserialize, Serialize, Clone, Debug, Default)]
#[serde(rename_all = "camelCase")]
pub struct Bootstrap {
    pub initdb: Initdb,
}

/// `initdb` bootstrap parameters: database name and owning role.
#[derive(Deserialize, Serialize, Clone, Debug, Default)]
#[serde(rename_all = "camelCase")]
pub struct Initdb {
    pub database: String,
    pub owner: String,
}
|
||||
2
harmony/src/modules/postgresql/cnpg/mod.rs
Normal file
2
harmony/src/modules/postgresql/cnpg/mod.rs
Normal file
@@ -0,0 +1,2 @@
|
||||
mod crd;
|
||||
pub use crd::*;
|
||||
130
harmony/src/modules/postgresql/failover.rs
Normal file
130
harmony/src/modules/postgresql/failover.rs
Normal file
@@ -0,0 +1,130 @@
|
||||
use async_trait::async_trait;
|
||||
use log::debug;
|
||||
use log::info;
|
||||
use std::collections::HashMap;
|
||||
|
||||
use crate::topology::TlsRouter;
|
||||
use crate::{
|
||||
modules::postgresql::capability::{
|
||||
BootstrapConfig, BootstrapStrategy, ExternalClusterConfig, PostgreSQL,
|
||||
PostgreSQLClusterRole, PostgreSQLConfig, PostgreSQLEndpoint, ReplicaConfig,
|
||||
ReplicationCerts,
|
||||
},
|
||||
topology::FailoverTopology,
|
||||
};
|
||||
|
||||
#[async_trait]
|
||||
impl<T: PostgreSQL + TlsRouter> PostgreSQL for FailoverTopology<T> {
|
||||
async fn deploy(&self, config: &PostgreSQLConfig) -> Result<String, String> {
|
||||
info!(
|
||||
"Starting deployment of failover topology '{}'",
|
||||
config.cluster_name
|
||||
);
|
||||
|
||||
let primary_config = PostgreSQLConfig {
|
||||
cluster_name: config.cluster_name.clone(),
|
||||
instances: config.instances,
|
||||
storage_size: config.storage_size.clone(),
|
||||
role: PostgreSQLClusterRole::Primary,
|
||||
namespace: config.namespace.clone(),
|
||||
};
|
||||
|
||||
info!(
|
||||
"Deploying primary cluster '{{}}' ({} instances, {:?} storage)",
|
||||
primary_config.cluster_name, primary_config.storage_size
|
||||
);
|
||||
|
||||
let primary_cluster_name = self.primary.deploy(&primary_config).await?;
|
||||
|
||||
info!("Primary cluster '{primary_cluster_name}' deployed successfully");
|
||||
|
||||
info!("Retrieving replication certificates for primary '{primary_cluster_name}'");
|
||||
|
||||
let certs = self.primary.get_replication_certs(&primary_config).await?;
|
||||
|
||||
info!("Replication certificates retrieved successfully");
|
||||
|
||||
info!("Retrieving public endpoint for primary '{primary_cluster_name}");
|
||||
|
||||
// TODO we should be getting the public endpoint for a service by calling a method on
|
||||
// TlsRouter capability.
|
||||
// Something along the lines of `TlsRouter::get_hostname_for_service(...).await?;`
|
||||
let endpoint = PostgreSQLEndpoint {
|
||||
host: "postgrestest.sto1.nationtech.io".to_string(),
|
||||
port: self.primary.get_router_port().await,
|
||||
};
|
||||
|
||||
info!(
|
||||
"Public endpoint '{}:{}' retrieved for primary",
|
||||
endpoint.host, endpoint.port
|
||||
);
|
||||
|
||||
info!("Configuring replica connection parameters and bootstrap");
|
||||
|
||||
let mut connection_parameters = HashMap::new();
|
||||
connection_parameters.insert("host".to_string(), endpoint.host);
|
||||
connection_parameters.insert("port".to_string(), endpoint.port.to_string());
|
||||
connection_parameters.insert("dbname".to_string(), "postgres".to_string());
|
||||
connection_parameters.insert("user".to_string(), "streaming_replica".to_string());
|
||||
connection_parameters.insert("sslmode".to_string(), "verify-ca".to_string());
|
||||
connection_parameters.insert("sslnegotiation".to_string(), "direct".to_string());
|
||||
|
||||
debug!("Replica connection parameters: {:?}", connection_parameters);
|
||||
|
||||
let external_cluster = ExternalClusterConfig {
|
||||
name: primary_cluster_name.clone(),
|
||||
connection_parameters,
|
||||
};
|
||||
|
||||
let bootstrap_config = BootstrapConfig {
|
||||
strategy: BootstrapStrategy::PgBasebackup,
|
||||
};
|
||||
|
||||
let replica_cluster_config = ReplicaConfig {
|
||||
primary_cluster_name: primary_cluster_name.clone(),
|
||||
replication_certs: certs,
|
||||
bootstrap: bootstrap_config,
|
||||
external_cluster,
|
||||
};
|
||||
|
||||
let replica_config = PostgreSQLConfig {
|
||||
cluster_name: format!("{}-replica", primary_cluster_name),
|
||||
instances: config.instances,
|
||||
storage_size: config.storage_size.clone(),
|
||||
role: PostgreSQLClusterRole::Replica(replica_cluster_config),
|
||||
namespace: config.namespace.clone(),
|
||||
};
|
||||
|
||||
info!(
|
||||
"Deploying replica cluster '{}' ({} instances, {:?} storage) on replica topology",
|
||||
replica_config.cluster_name, replica_config.instances, replica_config.storage_size
|
||||
);
|
||||
|
||||
self.replica.deploy(&replica_config).await?;
|
||||
|
||||
info!(
|
||||
"Replica cluster '{}' deployed successfully; failover topology '{}' ready",
|
||||
replica_config.cluster_name, replica_config.cluster_name
|
||||
);
|
||||
|
||||
Ok(primary_cluster_name)
|
||||
}
|
||||
|
||||
/// Fetch the certificates required for streaming replication.
///
/// Delegates to the primary topology, which owns the replication
/// certificates for the cluster described by `config`.
async fn get_replication_certs(
    &self,
    config: &PostgreSQLConfig,
) -> Result<ReplicationCerts, String> {
    self.primary.get_replication_certs(config).await
}
|
||||
|
||||
/// Resolve the connection endpoint for the cluster described by `config`.
///
/// Delegates to the primary topology.
async fn get_endpoint(&self, config: &PostgreSQLConfig) -> Result<PostgreSQLEndpoint, String> {
    self.primary.get_endpoint(config).await
}
|
||||
|
||||
// async fn get_public_endpoint(
|
||||
// &self,
|
||||
// cluster_name: &str,
|
||||
// ) -> Result<Option<PostgreSQLEndpoint>, String> {
|
||||
// self.primary.get_public_endpoint(cluster_name).await
|
||||
// }
|
||||
}
|
||||
16
harmony/src/modules/postgresql/mod.rs
Normal file
16
harmony/src/modules/postgresql/mod.rs
Normal file
@@ -0,0 +1,16 @@
|
||||
pub mod capability;
|
||||
mod score_connect;
|
||||
mod score_k8s;
|
||||
pub use score_connect::*;
|
||||
pub use score_k8s::*;
|
||||
mod score_public;
|
||||
pub use score_public::*;
|
||||
|
||||
pub mod failover;
|
||||
mod operator;
|
||||
pub use operator::*;
|
||||
|
||||
mod score;
|
||||
pub use score::*;
|
||||
|
||||
pub mod cnpg;
|
||||
102
harmony/src/modules/postgresql/operator.rs
Normal file
102
harmony/src/modules/postgresql/operator.rs
Normal file
@@ -0,0 +1,102 @@
|
||||
use k8s_openapi::apimachinery::pkg::apis::meta::v1::ObjectMeta;
|
||||
use serde::Serialize;
|
||||
|
||||
use crate::interpret::Interpret;
|
||||
use crate::modules::k8s::apps::crd::{Subscription, SubscriptionSpec};
|
||||
use crate::modules::k8s::resource::K8sResourceScore;
|
||||
use crate::score::Score;
|
||||
use crate::topology::{K8sclient, Topology};
|
||||
|
||||
/// Install the CloudNativePg (CNPG) Operator via an OperatorHub `Subscription`.
///
/// This Score creates a `Subscription` Custom Resource in the specified namespace.
///
/// The default implementation pulls the `cloudnative-pg` operator from the
/// `operatorhubio-catalog` source.
///
/// # Goals
/// - Deploy the CNPG Operator to manage PostgreSQL clusters in OpenShift/OKD environments.
///
/// # Usage
/// ```
/// use harmony::modules::postgresql::CloudNativePgOperatorScore;
/// let score = CloudNativePgOperatorScore::default();
/// ```
///
/// Or, you can take control of most relevant fields this way:
///
/// ```
/// use harmony::modules::postgresql::CloudNativePgOperatorScore;
///
/// let score = CloudNativePgOperatorScore {
///     namespace: "custom-cnpg-namespace".to_string(),
///     channel: "unstable-i-want-bleedingedge-v498437".to_string(),
///     install_plan_approval: "Manual".to_string(),
///     source: "operatorhubio-catalog-but-different".to_string(),
///     source_namespace: "i-customize-everything-marketplace".to_string(),
/// };
/// ```
///
/// # Limitations
/// - **OperatorHub dependency**: Requires OperatorHub catalog sources (e.g., `operatorhubio-catalog` in `openshift-marketplace`).
/// - **OKD/OpenShift assumption**: Catalog/source names and namespaces are hardcoded for OKD-like setups; adjust for upstream OpenShift.
/// - **Hardcoded values in Default implementation**: Operator name (`cloudnative-pg`), channel (`stable-v1`), automatic install plan approval.
/// - **No config options**: Does not support custom `SubscriptionConfig` (env vars, node selectors, tolerations).
/// - **Single namespace**: Targets one namespace per score instance.
#[derive(Debug, Clone, Serialize)]
pub struct CloudNativePgOperatorScore {
    /// Namespace the `Subscription` (and thus the operator) is installed into.
    pub namespace: String,
    /// OLM channel to subscribe to (e.g. `stable-v1`).
    pub channel: String,
    /// `Automatic` or `Manual` install plan approval.
    pub install_plan_approval: String,
    /// Catalog source name providing the operator package.
    pub source: String,
    /// Namespace where the catalog source lives.
    pub source_namespace: String,
}
|
||||
|
||||
impl Default for CloudNativePgOperatorScore {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
namespace: "openshift-operators".to_string(),
|
||||
channel: "stable-v1".to_string(),
|
||||
install_plan_approval: "Automatic".to_string(),
|
||||
source: "operatorhubio-catalog".to_string(),
|
||||
source_namespace: "openshift-marketplace".to_string(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl CloudNativePgOperatorScore {
|
||||
pub fn new(namespace: &str) -> Self {
|
||||
Self {
|
||||
namespace: namespace.to_string(),
|
||||
..Default::default()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: Topology + K8sclient> Score<T> for CloudNativePgOperatorScore {
|
||||
fn create_interpret(&self) -> Box<dyn Interpret<T>> {
|
||||
let metadata = ObjectMeta {
|
||||
name: Some("cloudnative-pg".to_string()),
|
||||
namespace: Some(self.namespace.clone()),
|
||||
..ObjectMeta::default()
|
||||
};
|
||||
|
||||
let spec = SubscriptionSpec {
|
||||
channel: Some(self.channel.clone()),
|
||||
config: None,
|
||||
install_plan_approval: Some(self.install_plan_approval.clone()),
|
||||
name: "cloudnative-pg".to_string(),
|
||||
source: self.source.clone(),
|
||||
source_namespace: self.source_namespace.clone(),
|
||||
starting_csv: None,
|
||||
};
|
||||
|
||||
let subscription = Subscription { metadata, spec };
|
||||
|
||||
K8sResourceScore::single(subscription, Some(self.namespace.clone())).create_interpret()
|
||||
}
|
||||
|
||||
fn name(&self) -> String {
|
||||
format!("CloudNativePgOperatorScore({})", self.namespace)
|
||||
}
|
||||
}
|
||||
106
harmony/src/modules/postgresql/score.rs
Normal file
106
harmony/src/modules/postgresql/score.rs
Normal file
@@ -0,0 +1,106 @@
|
||||
use async_trait::async_trait;
|
||||
use harmony_types::id::Id;
|
||||
use serde::Serialize;
|
||||
|
||||
use crate::data::Version;
|
||||
use crate::interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome};
|
||||
use crate::inventory::Inventory;
|
||||
use crate::modules::postgresql::capability::{PostgreSQL, PostgreSQLConfig};
|
||||
use crate::score::Score;
|
||||
use crate::topology::Topology;
|
||||
|
||||
/// High-level, infrastructure-agnostic PostgreSQL deployment score.
///
/// Delegates to the Topology's PostgreSQL capability implementation,
/// allowing flexibility in deployment strategy (k8s/CNPG, cloud-managed, etc.).
///
/// # Usage
/// ```
/// use harmony::modules::postgresql::PostgreSQLScore;
/// let score = PostgreSQLScore::new("harmony");
/// ```
///
/// # Design
/// - PostgreSQLScore: High-level, relies on Topology's PostgreSQL implementation
/// - Topology implements PostgreSQL capability (decoupled from score)
/// - K8s topologies use K8sPostgreSQLScore internally for CNPG deployment
///
/// This layered approach gives users choice:
/// - Use PostgreSQLScore for portability across topologies
/// - Use K8sPostgreSQLScore directly for k8s-specific control
#[derive(Debug, Clone, Serialize)]
pub struct PostgreSQLScore {
    /// Cluster configuration forwarded verbatim to the topology's
    /// PostgreSQL capability at execute time.
    pub config: PostgreSQLConfig,
}
|
||||
|
||||
impl Default for PostgreSQLScore {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
config: PostgreSQLConfig::default(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl PostgreSQLScore {
|
||||
pub fn new(namespace: &str) -> Self {
|
||||
Self {
|
||||
config: PostgreSQLConfig {
|
||||
namespace: namespace.to_string(),
|
||||
..Default::default()
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: Topology + PostgreSQL + Send + Sync> Score<T> for PostgreSQLScore {
|
||||
fn create_interpret(&self) -> Box<dyn Interpret<T>> {
|
||||
Box::new(PostgreSQLInterpret {
|
||||
config: self.config.clone(),
|
||||
})
|
||||
}
|
||||
|
||||
fn name(&self) -> String {
|
||||
format!(
|
||||
"PostgreSQLScore({}:{})",
|
||||
self.config.namespace, self.config.cluster_name
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
/// Interpret implementation that delegates to Topology's PostgreSQL capability.
#[derive(Debug, Clone)]
struct PostgreSQLInterpret {
    // Deployment parameters forwarded verbatim to `Topology::deploy`.
    config: PostgreSQLConfig,
}
|
||||
|
||||
#[async_trait]
|
||||
impl<T: Topology + PostgreSQL + Send + Sync> Interpret<T> for PostgreSQLInterpret {
|
||||
fn get_name(&self) -> InterpretName {
|
||||
InterpretName::Custom("PostgreSQLInterpret")
|
||||
}
|
||||
|
||||
fn get_version(&self) -> Version {
|
||||
todo!()
|
||||
}
|
||||
|
||||
fn get_status(&self) -> InterpretStatus {
|
||||
todo!()
|
||||
}
|
||||
|
||||
fn get_children(&self) -> Vec<Id> {
|
||||
todo!()
|
||||
}
|
||||
|
||||
async fn execute(&self, _inventory: &Inventory, topo: &T) -> Result<Outcome, InterpretError> {
|
||||
// Delegate to topology's PostgreSQL capability
|
||||
let cluster_name = topo
|
||||
.deploy(&self.config)
|
||||
.await
|
||||
.map_err(|e| InterpretError::new(e))?;
|
||||
|
||||
Ok(Outcome::success(format!(
|
||||
"PostgreSQL cluster '{}' deployed in namespace '{}'",
|
||||
cluster_name, self.config.namespace
|
||||
)))
|
||||
}
|
||||
}
|
||||
442
harmony/src/modules/postgresql/score_connect.rs
Normal file
442
harmony/src/modules/postgresql/score_connect.rs
Normal file
@@ -0,0 +1,442 @@
|
||||
use async_trait::async_trait;
|
||||
use k8s_openapi::ByteString;
|
||||
use k8s_openapi::api::core::v1::Secret;
|
||||
use log::{debug, error, info, trace};
|
||||
use serde::Serialize;
|
||||
use std::collections::BTreeMap;
|
||||
use std::path::{Path, PathBuf};
|
||||
use tokio::process::Command;
|
||||
|
||||
use crate::data::Version;
|
||||
use crate::interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome};
|
||||
use crate::inventory::Inventory;
|
||||
use crate::score::Score;
|
||||
use crate::topology::{K8sclient, Topology};
|
||||
use harmony_types::id::Id;
|
||||
|
||||
/// PostgreSQLConnectionScore tests PostgreSQL database connectivity and performance metrics
/// for databases exposed via public endpoints. This score is specifically designed to verify
/// that PostgreSQL instances installed using the PublicPostgreSQLScore can be accessed by external clients.
///
/// The score performs the following tests:
/// 1. Verifies TLS/SSL connection using CA certificates from Kubernetes secrets
/// 2. Tests basic connectivity to the database
/// 3. (Optional, when db permissions are setup) Collects comprehensive performance metrics including :
///    - Database size and schema usage statistics
///    - Active connections and query activity
///    - Performance metrics (transactions per second, cache hit ratio)
///    - Index usage and table statistics
///    - Configuration parameters
///
/// The implementation uses a Docker container running PostgreSQL client tools to execute
/// the connection test, ensuring consistent behavior across different environments.
///
/// # Kubernetes Secrets Required
///
/// The score requires two Kubernetes secrets in the target namespace:
/// - `{cluster_name}-app`: Contains connection parameters (host, port, username, password, dbname)
/// - `{cluster_name}-ca`: Contains CA certificate (ca.crt) for TLS verification
///
/// # Usage
///
/// ```rust
/// use harmony::modules::postgresql::PostgreSQLConnectionScore;
///
/// let score = PostgreSQLConnectionScore::new(
///     "default",
///     "my-postgres-cluster",
///     None
/// );
/// ```
///
/// # Parameters
///
/// - `namespace`: Kubernetes namespace where the PostgreSQL secrets are located
/// - `cluster_name`: Name of the PostgreSQL cluster (used to construct secret names)
/// - `hostname_override`: Optional hostname override for connection testing
/// - `port_override`: Optional port override for connection testing
#[derive(Debug, Clone, Serialize)]
pub struct PostgreSQLConnectionScore {
    /// Display name; `postgres-connection-{cluster_name}` when built via `new`.
    pub name: String,
    /// Namespace containing the `{cluster_name}-app` / `{cluster_name}-ca` secrets.
    pub namespace: String,
    /// Cluster name used to derive the secret names.
    pub cluster_name: String,
    /// Optional hostname override; falls back to the app secret's `host` key.
    pub hostname: Option<String>,
    /// Optional port override; falls back to the app secret's `port` key.
    pub port_override: Option<u16>,
}
|
||||
|
||||
fn decode_secret(data: &BTreeMap<String, ByteString>, key: &str) -> Result<String, InterpretError> {
|
||||
let val = data
|
||||
.get(key)
|
||||
.ok_or_else(|| InterpretError::new(format!("Secret missing key {}", key)))?;
|
||||
String::from_utf8(val.0.clone())
|
||||
.map_err(|e| InterpretError::new(format!("Failed to decode {}: {}", key, e)))
|
||||
}
|
||||
|
||||
impl PostgreSQLConnectionScore {
|
||||
pub fn new(namespace: &str, cluster_name: &str, hostname_override: Option<String>) -> Self {
|
||||
Self {
|
||||
name: format!("postgres-connection-{}", cluster_name),
|
||||
namespace: namespace.to_string(),
|
||||
cluster_name: cluster_name.to_string(),
|
||||
hostname: hostname_override,
|
||||
port_override: None,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: Topology + K8sclient + Send + Sync> Score<T> for PostgreSQLConnectionScore {
|
||||
fn create_interpret(&self) -> Box<dyn Interpret<T>> {
|
||||
Box::new(PostgreSQLConnectionInterpret {
|
||||
score: self.clone(),
|
||||
})
|
||||
}
|
||||
|
||||
fn name(&self) -> String {
|
||||
format!("PostgreSQLConnectionScore : {}", self.name)
|
||||
}
|
||||
}
|
||||
|
||||
/// Interpret backing [`PostgreSQLConnectionScore`]: fetches the cluster's
/// `-app`/`-ca` secrets and runs the connection test in a Docker container.
#[derive(Debug, Clone)]
struct PostgreSQLConnectionInterpret {
    // Full copy of the score configuration driving the test.
    score: PostgreSQLConnectionScore,
}
|
||||
|
||||
impl PostgreSQLConnectionInterpret {
|
||||
async fn fetch_app_secret<T: K8sclient>(&self, topo: &T) -> Result<Secret, InterpretError> {
|
||||
let app_secret_name = format!("{}-app", self.score.cluster_name);
|
||||
info!("Fetching app secret {}", app_secret_name);
|
||||
|
||||
let k8s_client = topo.k8s_client().await?;
|
||||
k8s_client
|
||||
.get_resource(&app_secret_name, Some(&self.score.namespace))
|
||||
.await
|
||||
.map_err(|e| InterpretError::new(format!("Failed to get app secret: {e}")))?
|
||||
.ok_or_else(|| InterpretError::new(format!("App secret {} not found", app_secret_name)))
|
||||
}
|
||||
|
||||
async fn fetch_ca_secret<T: K8sclient>(&self, topo: &T) -> Result<Secret, InterpretError> {
|
||||
let ca_secret_name = format!("{}-ca", self.score.cluster_name);
|
||||
info!("Fetching CA secret {}", ca_secret_name);
|
||||
|
||||
let k8s_client = topo.k8s_client().await?;
|
||||
k8s_client
|
||||
.get_resource(&ca_secret_name, Some(&self.score.namespace))
|
||||
.await
|
||||
.map_err(|e| InterpretError::new(format!("Failed to get CA secret: {e}")))?
|
||||
.ok_or_else(|| InterpretError::new(format!("CA secret {} not found", ca_secret_name)))
|
||||
}
|
||||
|
||||
fn get_secret_data(
|
||||
&self,
|
||||
secret: &Secret,
|
||||
secret_type: &str,
|
||||
) -> Result<BTreeMap<String, ByteString>, InterpretError> {
|
||||
secret
|
||||
.data
|
||||
.as_ref()
|
||||
.ok_or_else(|| InterpretError::new(format!("{} secret has no data", secret_type)))
|
||||
.map(|b| b.clone())
|
||||
}
|
||||
|
||||
fn create_temp_dir(&self) -> Result<tempfile::TempDir, InterpretError> {
|
||||
tempfile::Builder::new()
|
||||
.prefix("pg-connection-test-")
|
||||
.tempdir()
|
||||
.map_err(|e| InterpretError::new(format!("Failed to create temp directory: {e}")))
|
||||
}
|
||||
|
||||
fn write_ca_cert(
|
||||
&self,
|
||||
temp_dir: &Path,
|
||||
ca_data: &BTreeMap<String, ByteString>,
|
||||
) -> Result<PathBuf, InterpretError> {
|
||||
let ca_crt = ca_data
|
||||
.get("ca.crt")
|
||||
.ok_or_else(|| InterpretError::new("CA secret missing ca.crt".to_string()))?;
|
||||
let ca_file = temp_dir.join("ca.crt");
|
||||
|
||||
std::fs::write(&ca_file, &ca_crt.0)
|
||||
.map_err(|e| InterpretError::new(format!("Failed to write CA cert: {e}")))?;
|
||||
|
||||
Ok(ca_file)
|
||||
}
|
||||
|
||||
fn get_host(&self, data: &BTreeMap<String, ByteString>) -> Result<String, InterpretError> {
|
||||
self.score
|
||||
.hostname
|
||||
.clone()
|
||||
.or_else(|| decode_secret(data, "host").ok())
|
||||
.ok_or_else(|| {
|
||||
InterpretError::new("No hostname found in secret or override".to_string())
|
||||
})
|
||||
}
|
||||
|
||||
fn get_port(&self, data: &BTreeMap<String, ByteString>) -> Result<u16, InterpretError> {
|
||||
self.score
|
||||
.port_override
|
||||
.or_else(|| {
|
||||
decode_secret(data, "port")
|
||||
.ok()
|
||||
.and_then(|p| p.parse().ok())
|
||||
})
|
||||
.ok_or_else(|| InterpretError::new("Port not found in secret or override".to_string()))
|
||||
}
|
||||
|
||||
fn create_test_script(&self, temp_dir: &Path) -> Result<PathBuf, InterpretError> {
|
||||
let script_path = temp_dir.join("test_connection.sh");
|
||||
|
||||
let script_content = postgres_scipt_content();
|
||||
std::fs::write(&script_path, script_content)
|
||||
.map_err(|e| InterpretError::new(format!("Failed to write test script: {e}")))?;
|
||||
debug!("Wrote script content : \n{script_content}");
|
||||
|
||||
#[cfg(unix)]
|
||||
{
|
||||
use std::os::unix::fs::PermissionsExt;
|
||||
let mut perms = std::fs::metadata(&script_path)
|
||||
.map_err(|e| InterpretError::new(format!("Failed to get script metadata: {e}")))?
|
||||
.permissions();
|
||||
perms.set_mode(0o755);
|
||||
std::fs::set_permissions(&script_path, perms).map_err(|e| {
|
||||
InterpretError::new(format!("Failed to set script permissions: {e}"))
|
||||
})?;
|
||||
}
|
||||
|
||||
Ok(script_path)
|
||||
}
|
||||
|
||||
async fn run_docker_test(
|
||||
&self,
|
||||
temp_dir: &Path,
|
||||
cmd: &str,
|
||||
password: &str,
|
||||
) -> Result<Outcome, InterpretError> {
|
||||
info!("Running connection test in Docker container...");
|
||||
let container_cmd = format!("PGPASSWORD={} /tmp/test_connection.sh {}", password, cmd);
|
||||
debug!("Starting docker container with cmd : {container_cmd}");
|
||||
|
||||
let mut cmd = Command::new("docker");
|
||||
cmd.arg("run")
|
||||
.arg("--rm")
|
||||
.arg("-i")
|
||||
.arg("-v")
|
||||
.arg(format!("{}/:/tmp", temp_dir.display()))
|
||||
.arg("--workdir")
|
||||
.arg("/tmp")
|
||||
.arg("--entrypoint")
|
||||
.arg("/bin/sh")
|
||||
.arg("postgres:latest")
|
||||
.arg("-c")
|
||||
.arg(container_cmd)
|
||||
.env("PGPASSWORD", password)
|
||||
.stdout(std::process::Stdio::inherit())
|
||||
.stderr(std::process::Stdio::inherit());
|
||||
debug!("Running Command {cmd:?}");
|
||||
let output = cmd
|
||||
.spawn()
|
||||
.map_err(|e| InterpretError::new(format!("Failed to spawn docker container: {e}")))?
|
||||
.wait_with_output()
|
||||
.await
|
||||
.map_err(|e| {
|
||||
InterpretError::new(format!("Failed to wait for docker container: {e}"))
|
||||
})?;
|
||||
|
||||
if output.status.success() {
|
||||
info!("Successfully connected to PostgreSQL!");
|
||||
Ok(Outcome::success("Connection successful".to_string()))
|
||||
} else {
|
||||
let stdout = String::from_utf8_lossy(&output.stdout);
|
||||
let stderr = String::from_utf8_lossy(&output.stderr);
|
||||
error!("Connection failed: stdout:\n{stdout}\nstderr:\n{stderr}");
|
||||
Err(InterpretError::new(format!(
|
||||
"Connection failed: stdout:\n{stdout}\nstderr:\n{stderr}",
|
||||
)))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
impl<T: Topology + K8sclient + Send + Sync> Interpret<T> for PostgreSQLConnectionInterpret {
    fn get_name(&self) -> InterpretName {
        InterpretName::Custom("PostgreSQLConnectionInterpret")
    }

    fn get_version(&self) -> Version {
        todo!()
    }

    fn get_status(&self) -> InterpretStatus {
        todo!()
    }

    // No child interprets are spawned by this interpret.
    fn get_children(&self) -> Vec<Id> {
        vec![]
    }

    /// Run the end-to-end connection test: fetch the `-app`/`-ca` secrets,
    /// stage the CA cert and test script in a temp dir, then execute the
    /// script inside a Docker container.
    ///
    /// # Errors
    /// Fails if either secret is missing/malformed, the temp files cannot be
    /// written, or the containerized connection test exits non-zero.
    async fn execute(&self, _inventory: &Inventory, topo: &T) -> Result<Outcome, InterpretError> {
        // Fetch secrets
        let app_secret = self.fetch_app_secret(topo).await?;
        trace!("Got app_secret {app_secret:?}");
        let ca_secret = self.fetch_ca_secret(topo).await?;
        trace!("Got ca_secret {ca_secret:?}");

        // Get secret data
        let app_data = self.get_secret_data(&app_secret, "App")?;
        trace!("Got app_data {app_data:?}");
        let ca_data = self.get_secret_data(&ca_secret, "CA")?;
        trace!("Got ca_data {ca_data:?}");

        // Create temp directory (cleaned up when `temp_dir` is dropped, i.e.
        // after the docker run completes)
        let temp_dir = self.create_temp_dir()?;
        let temp_dir_path = temp_dir.path();
        debug!("Created temp dir {temp_dir_path:?}");

        // Write CA cert
        let ca_file = self.write_ca_cert(temp_dir_path, &ca_data)?;
        debug!("Wrote ca_file {ca_file:?}");

        // Get connection details (overrides win over secret values for host/port)
        let username = decode_secret(&app_data, "username")?;
        let password = decode_secret(&app_data, "password")?;
        let dbname = decode_secret(&app_data, "dbname")?;
        let host = self.get_host(&app_data)?;
        let port = self.get_port(&app_data)?;

        // Create test script
        // NOTE(review): `script_path` is unused afterwards — the container
        // invokes the script by its fixed in-container path /tmp/test_connection.sh.
        let script_path = self.create_test_script(temp_dir_path)?;

        // The temp dir is mounted at /tmp inside the container, so the CA
        // file must be addressed by its in-container path.
        let ca_file_in_container = Path::new("/tmp").join(ca_file.file_name().unwrap());
        let script_cmd = format!(
            "{host} {port} {username} {dbname} {}",
            ca_file_in_container.display()
        );
        debug!("Prepared test script in {}", temp_dir_path.display());
        // Run connection test
        self.run_docker_test(temp_dir_path, &script_cmd, &password)
            .await
    }
}
|
||||
|
||||
/// Shell script executed inside the `postgres:latest` container by
/// `run_docker_test`.
///
/// Positional arguments: `$1` host, `$2` port, `$3` user, `$4` dbname,
/// `$5` path to the CA certificate (used with `sslmode=verify-ca`).
///
/// NOTE(review): the function name has a typo ("scipt"); rename together with
/// its call site in `create_test_script` in a follow-up.
/// NOTE(review): the raw string begins with a newline, so `#!/bin/sh` is not
/// the first line of the written file; the script still works because it is
/// launched via `/bin/sh`, but consider trimming the leading newline.
fn postgres_scipt_content() -> &'static str {
    r#"
#!/bin/sh
# PostgreSQL connection test and metrics collection script

# Basic connectivity test
echo "=== CONNECTION TEST ==="
psql "host=$1 port=$2 user=$3 dbname=$4 sslmode=verify-ca sslrootcert=$5 sslnegotiation=direct" -c "SELECT 1" > /dev/null 2>&1
if [ $? -ne 0 ]; then
echo "ERROR: Connection failed"
exit 1
fi
echo "Connection successful"

# Database size metrics
echo -e "\n=== DATABASE SIZE METRICS ==="
echo "Total database size (MB):"
psql "host=$1 port=$2 user=$3 dbname=$4 sslmode=verify-ca sslrootcert=$5 sslnegotiation=direct" -c "SELECT pg_size_pretty(pg_database_size(current_database()))" -t -A

echo "Database size breakdown:"
psql "host=$1 port=$2 user=$3 dbname=$4 sslmode=verify-ca sslrootcert=$5 sslnegotiation=direct" -c "SELECT
schema_name,
pg_size_pretty(sum(table_size)) as total_size
FROM (
SELECT
n.nspname as schema_name,
c.relname as table_name,
pg_total_relation_size(c.oid) as table_size
FROM pg_class c
LEFT JOIN pg_namespace n ON n.oid = c.relnamespace
WHERE n.nspname NOT IN ('pg_catalog', 'information_schema')
AND c.relkind = 'r'
) t
GROUP BY schema_name
ORDER BY sum(table_size) DESC" -t

# Connection and activity metrics
echo -e "\n=== CONNECTION & ACTIVITY ==="
echo "Active connections:"
psql "host=$1 port=$2 user=$3 dbname=$4 sslmode=verify-ca sslrootcert=$5 sslnegotiation=direct" -c "SELECT count(*) FROM pg_stat_activity" -t -A

echo "Current queries (running longer than 1 second):"
psql "host=$1 port=$2 user=$3 dbname=$4 sslmode=verify-ca sslrootcert=$5 sslnegotiation=direct" -c "SELECT
pid,
usename,
query_start,
now() - query_start as duration,
state,
left(query, 50) as query_preview
FROM pg_stat_activity
WHERE state = 'active' AND now() - query_start > interval '1 second'
ORDER BY duration DESC" -t

# Performance metrics
echo -e "\n=== PERFORMANCE METRICS ==="
echo "Database load (transactions per second):"
psql "host=$1 port=$2 user=$3 dbname=$4 sslmode=verify-ca sslrootcert=$5 sslnegotiation=direct" -c "SELECT
tps,
tps_commit,
tps_rollback,
blks_read,
blks_hit,
hit_ratio
FROM (
SELECT
xact_commit as tps_commit,
xact_rollback as tps_rollback,
(xact_commit + xact_rollback) as tps,
blks_read,
blks_hit,
CASE WHEN blks_read + blks_hit = 0 THEN 0 ELSE (blks_hit * 100.0 / (blks_read + blks_hit))::numeric(5,2) END as hit_ratio
FROM pg_stat_database
WHERE datname = current_database()
) stats" -t

echo "Current locks:"
psql "host=$1 port=$2 user=$3 dbname=$4 sslmode=verify-ca sslrootcert=$5 sslnegotiation=direct" -c "SELECT
count(*) as lock_count,
string_agg(mode, ', ' ORDER BY mode) as lock_modes
FROM pg_locks" -t

# Table statistics
echo -e "\n=== TABLE STATISTICS ==="
echo "Most accessed tables:"
psql "host=$1 port=$2 user=$3 dbname=$4 sslmode=verify-ca sslrootcert=$5 sslnegotiation=direct" -c "SELECT
relname,
seq_scan,
idx_scan,
n_tup_ins,
n_tup_upd,
n_tup_del
FROM pg_stat_user_tables
ORDER BY seq_scan + idx_scan + n_tup_ins + n_tup_upd + n_tup_del DESC
LIMIT 10" -t

# Index usage
echo -e "\n=== INDEX USAGE ==="
echo "Index usage statistics:"
psql "host=$1 port=$2 user=$3 dbname=$4 sslmode=verify-ca sslrootcert=$5 sslnegotiation=direct" -c "SELECT
indexrelname as index_name,
idx_scan,
idx_tup_read,
idx_tup_fetch
FROM pg_stat_user_indexes
ORDER BY idx_scan DESC
LIMIT 5" -t

# Configuration and limits
echo -e "\n=== CONFIGURATION ==="
echo "Current database parameters:"
psql "host=$1 port=$2 user=$3 dbname=$4 sslmode=verify-ca sslrootcert=$5 sslnegotiation=direct" -c "SELECT
name,
setting,
unit
FROM pg_settings
WHERE category = 'Resource Usage'
ORDER BY name" -t

echo -e "\n=== TEST COMPLETE ==="
echo "All metrics collected successfully"
exit 0
"#
}
|
||||
80
harmony/src/modules/postgresql/score_k8s.rs
Normal file
80
harmony/src/modules/postgresql/score_k8s.rs
Normal file
@@ -0,0 +1,80 @@
|
||||
use serde::Serialize;
|
||||
|
||||
use crate::interpret::Interpret;
|
||||
use crate::modules::k8s::resource::K8sResourceScore;
|
||||
use crate::modules::postgresql::capability::PostgreSQLConfig;
|
||||
use crate::modules::postgresql::cnpg::{Bootstrap, Cluster, ClusterSpec, Initdb, Storage};
|
||||
use crate::score::Score;
|
||||
use crate::topology::{K8sclient, Topology};
|
||||
use k8s_openapi::apimachinery::pkg::apis::meta::v1::ObjectMeta;
|
||||
|
||||
/// Deploys an opinionated, highly available PostgreSQL cluster managed by CNPG.
///
/// # Usage
/// ```
/// use harmony::modules::postgresql::K8sPostgreSQLScore;
/// let score = K8sPostgreSQLScore::new("my-app-ns");
/// ```
///
/// # Limitations (Happy Path)
/// - Requires CNPG operator installed (use CloudNativePgOperatorScore).
/// - No backups, monitoring, extensions configured.
///
/// TODO : refactor this to declare a clean dependency on cnpg operator. Then cnpg operator will
/// self-deploy either using operatorhub or helm chart depending on k8s flavor. This is cnpg
/// specific behavior
#[derive(Debug, Clone, Serialize)]
pub struct K8sPostgreSQLScore {
    /// CNPG cluster configuration (cluster name, namespace, instances, storage).
    pub config: PostgreSQLConfig,
}
|
||||
|
||||
impl Default for K8sPostgreSQLScore {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
config: PostgreSQLConfig::default(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl K8sPostgreSQLScore {
|
||||
pub fn new(namespace: &str) -> Self {
|
||||
Self {
|
||||
config: PostgreSQLConfig {
|
||||
namespace: namespace.to_string(),
|
||||
..Default::default()
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: Topology + K8sclient> Score<T> for K8sPostgreSQLScore {
|
||||
fn create_interpret(&self) -> Box<dyn Interpret<T>> {
|
||||
let metadata = ObjectMeta {
|
||||
name: Some(self.config.cluster_name.clone()),
|
||||
namespace: Some(self.config.namespace.clone()),
|
||||
..ObjectMeta::default()
|
||||
};
|
||||
|
||||
let spec = ClusterSpec {
|
||||
instances: self.config.instances,
|
||||
storage: Storage {
|
||||
size: self.config.storage_size.to_string(),
|
||||
},
|
||||
bootstrap: Bootstrap {
|
||||
initdb: Initdb {
|
||||
database: "app".to_string(),
|
||||
owner: "app".to_string(),
|
||||
},
|
||||
},
|
||||
..ClusterSpec::default()
|
||||
};
|
||||
|
||||
let cluster = Cluster { metadata, spec };
|
||||
|
||||
K8sResourceScore::single(cluster, Some(self.config.namespace.clone())).create_interpret()
|
||||
}
|
||||
|
||||
fn name(&self) -> String {
|
||||
format!("PostgreSQLScore({})", self.config.namespace)
|
||||
}
|
||||
}
|
||||
104
harmony/src/modules/postgresql/score_public.rs
Normal file
104
harmony/src/modules/postgresql/score_public.rs
Normal file
@@ -0,0 +1,104 @@
|
||||
use async_trait::async_trait;
|
||||
use harmony_types::id::Id;
|
||||
use serde::Serialize;
|
||||
|
||||
use crate::data::Version;
|
||||
use crate::domain::topology::router::{TlsRoute, TlsRouter};
|
||||
use crate::interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome};
|
||||
use crate::inventory::Inventory;
|
||||
use crate::modules::postgresql::capability::{PostgreSQL, PostgreSQLConfig};
|
||||
use crate::score::Score;
|
||||
use crate::topology::Topology;
|
||||
|
||||
/// Deploys a public PostgreSQL cluster: CNPG + TLS passthrough route for RW endpoint.
/// For failover/multisite: exposes single-instance or small HA Postgres publicly.
///
/// Sequence: PostgreSQLScore → TlsRouter::install_route (RW backend).
///
/// External reachability can be verified with `PostgreSQLConnectionScore`.
///
/// # Usage
/// ```
/// use harmony::modules::postgresql::PublicPostgreSQLScore;
/// let score = PublicPostgreSQLScore::new("harmony", "pg-rw.example.com");
/// ```
#[derive(Debug, Clone, Serialize)]
pub struct PublicPostgreSQLScore {
    /// Inner non-public Postgres cluster config.
    pub config: PostgreSQLConfig,
    /// Public hostname for RW TLS passthrough (port 443 → cluster-rw:5432).
    pub hostname: String,
}
|
||||
|
||||
impl PublicPostgreSQLScore {
|
||||
pub fn new(namespace: &str, hostname: &str) -> Self {
|
||||
Self {
|
||||
config: PostgreSQLConfig::default().with_namespace(namespace),
|
||||
hostname: hostname.to_string(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: Topology + PostgreSQL + TlsRouter + Send + Sync> Score<T> for PublicPostgreSQLScore {
|
||||
fn create_interpret(&self) -> Box<dyn Interpret<T>> {
|
||||
let rw_backend = format!("{}-rw", self.config.cluster_name);
|
||||
let tls_route = TlsRoute {
|
||||
namespace: self.config.namespace.clone(),
|
||||
hostname: self.hostname.clone(),
|
||||
backend: rw_backend,
|
||||
target_port: 5432,
|
||||
};
|
||||
|
||||
Box::new(PublicPostgreSQLInterpret {
|
||||
config: self.config.clone(),
|
||||
tls_route,
|
||||
})
|
||||
}
|
||||
|
||||
fn name(&self) -> String {
|
||||
format!(
|
||||
"PublicPostgreSQLScore({}:{})",
|
||||
self.config.namespace, self.hostname
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
/// Custom interpret: deploy Postgres then install public TLS route.
#[derive(Debug, Clone)]
struct PublicPostgreSQLInterpret {
    // CNPG cluster configuration deployed first (creates the `-rw` service).
    config: PostgreSQLConfig,
    // TLS passthrough route installed once the cluster exists.
    tls_route: TlsRoute,
}
|
||||
|
||||
#[async_trait]
|
||||
impl<T: Topology + PostgreSQL + TlsRouter + Send + Sync> Interpret<T>
|
||||
for PublicPostgreSQLInterpret
|
||||
{
|
||||
fn get_name(&self) -> InterpretName {
|
||||
InterpretName::Custom("PublicPostgreSQLInterpret")
|
||||
}
|
||||
fn get_version(&self) -> Version {
|
||||
todo!()
|
||||
}
|
||||
fn get_status(&self) -> InterpretStatus {
|
||||
todo!()
|
||||
}
|
||||
fn get_children(&self) -> Vec<Id> {
|
||||
todo!()
|
||||
}
|
||||
async fn execute(&self, _inventory: &Inventory, topo: &T) -> Result<Outcome, InterpretError> {
|
||||
// Deploy CNPG cluster first (creates -rw service)
|
||||
topo.deploy(&self.config)
|
||||
.await
|
||||
.map_err(|e| InterpretError::new(e))?;
|
||||
|
||||
// Expose RW publicly via TLS passthrough
|
||||
topo.install_route(self.tls_route.clone())
|
||||
.await
|
||||
.map_err(|e| InterpretError::new(e))?;
|
||||
|
||||
Ok(Outcome::success(format!(
|
||||
"Public CNPG cluster '{}' deployed with TLS passthrough route '{}'",
|
||||
self.config.cluster_name.clone(),
|
||||
self.tls_route.hostname
|
||||
)))
|
||||
}
|
||||
}
|
||||
@@ -9,3 +9,4 @@ license.workspace = true
|
||||
serde.workspace = true
|
||||
url.workspace = true
|
||||
rand.workspace = true
|
||||
serde_json.workspace = true
|
||||
|
||||
@@ -1,3 +1,5 @@
|
||||
pub mod id;
|
||||
pub mod net;
|
||||
pub mod rfc1123;
|
||||
pub mod storage;
|
||||
pub mod switch;
|
||||
|
||||
231
harmony_types/src/rfc1123.rs
Normal file
231
harmony_types/src/rfc1123.rs
Normal file
@@ -0,0 +1,231 @@
|
||||
/// A String that can be used as a subdomain.
///
/// This means the name must:
///
/// - contain no more than 253 characters
/// - contain only lowercase alphanumeric characters, '-' or '.'
/// - start with an alphanumeric character
/// - end with an alphanumeric character
///
/// https://datatracker.ietf.org/doc/html/rfc1123
///
/// This is relevant in harmony since most k8s resource names are required to be usable as dns
/// subdomains.
///
/// See https://kubernetes.io/docs/concepts/overview/working-with-objects/names/
#[derive(Debug, Clone)]
pub struct Rfc1123Name {
    content: String,
}

impl TryFrom<&str> for Rfc1123Name {
    type Error = String;

    /// Normalizes `s` into an RFC 1123 name: lowercase, drop illegal
    /// characters, cap at 253 chars, collapse repeated dots, and strip
    /// non-alphanumeric characters from both ends. Fails only when nothing
    /// usable remains.
    fn try_from(s: &str) -> Result<Self, Self::Error> {
        // Lowercase, then keep only the characters the RFC allows anywhere.
        let mut content: String = s
            .to_lowercase()
            .chars()
            .filter(|c| c.is_ascii_alphanumeric() || *c == '-' || *c == '.')
            .collect();

        // Cap length before boundary cleanup (all chars are ASCII here, so
        // truncating at a byte index is safe).
        if content.len() > 253 {
            content.truncate(253);
        }

        // Strip boundary dots, then collapse any run of dots to one.
        content = content.trim_matches('.').to_string();
        while content.contains("..") {
            content = content.replace("..", ".");
        }

        // The name must start and end with an alphanumeric character.
        content = content
            .trim_matches(|c: char| !c.is_ascii_alphanumeric())
            .to_string();

        if content.is_empty() {
            Err(format!("Input '{}' resulted in empty string", s))
        } else {
            Ok(Self { content })
        }
    }
}
|
||||
|
||||
/// Converts an `Rfc1123Name` into a `String`.
|
||||
///
|
||||
/// This allows using `Rfc1123Name` in contexts where a `String` is expected.
|
||||
impl From<Rfc1123Name> for String {
|
||||
fn from(name: Rfc1123Name) -> Self {
|
||||
name.content
|
||||
}
|
||||
}
|
||||
|
||||
/// Serializes the `Rfc1123Name` as a string.
|
||||
///
|
||||
/// This directly serializes the inner `String` content without additional wrapping.
|
||||
impl serde::Serialize for Rfc1123Name {
|
||||
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
|
||||
where
|
||||
S: serde::Serializer,
|
||||
{
|
||||
serializer.serialize_str(&self.content)
|
||||
}
|
||||
}
|
||||
|
||||
/// Deserializes an `Rfc1123Name` from a string.
|
||||
///
|
||||
/// This directly deserializes into the inner `String` content without additional wrapping.
|
||||
impl<'de> serde::Deserialize<'de> for Rfc1123Name {
|
||||
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
|
||||
where
|
||||
D: serde::Deserializer<'de>,
|
||||
{
|
||||
let content = String::deserialize(deserializer)?;
|
||||
Ok(Self { content })
|
||||
}
|
||||
}
|
||||
|
||||
/// Displays the `Rfc1123Name` as a string.
|
||||
///
|
||||
/// This directly displays the inner `String` content without additional wrapping.
|
||||
impl std::fmt::Display for Rfc1123Name {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
write!(f, "{}", self.content)
|
||||
}
|
||||
}
|
||||
|
||||
// Unit tests for `Rfc1123Name::try_from` normalization: character filtering,
// case folding, boundary trimming, dot deduplication, and the 253-char cap.
#[cfg(test)]
mod tests {
    use super::Rfc1123Name;

    #[test]
    fn test_try_from_empty() {
        let name = Rfc1123Name::try_from("");
        assert!(name.is_err());
    }

    #[test]
    fn test_try_from_valid() {
        let name = Rfc1123Name::try_from("hello-world").unwrap();
        assert_eq!(name.content, "hello-world");
    }

    #[test]
    fn test_try_from_uppercase() {
        let name = Rfc1123Name::try_from("Hello-World").unwrap();
        assert_eq!(name.content, "hello-world");
    }

    #[test]
    fn test_try_from_invalid_chars() {
        let name = Rfc1123Name::try_from("hel@lo#w!or%ld123").unwrap();
        assert_eq!(name.content, "helloworld123");
    }

    #[test]
    fn test_try_from_leading_dot() {
        let name = Rfc1123Name::try_from(".hello").unwrap();
        assert_eq!(name.content, "hello");
    }

    #[test]
    fn test_try_from_trailing_dot() {
        let name = Rfc1123Name::try_from("hello.").unwrap();
        assert_eq!(name.content, "hello");
    }

    #[test]
    fn test_try_from_leading_hyphen() {
        let name = Rfc1123Name::try_from("-hello").unwrap();
        assert_eq!(name.content, "hello");
    }

    // Interior hyphens survive sanitization; boundary hyphens/dots and all
    // disallowed characters (punctuation, control chars) are removed.
    #[test]
    fn test_try_from_complicated_string() {
        let name = Rfc1123Name::try_from("--h--e,}{}12!$#)\np_aulbS\r\t.!@o--._--").unwrap();
        assert_eq!(name.content, "h--e12paulbs.o");
    }

    #[test]
    fn test_try_from_trailing_hyphen() {
        let name = Rfc1123Name::try_from("hello-").unwrap();
        assert_eq!(name.content, "hello");
    }

    // An input with no alphanumeric characters at all must be rejected.
    #[test]
    fn test_try_from_single_hyphen() {
        let name = Rfc1123Name::try_from("-");
        assert!(name.is_err());
    }

    #[test]
    fn test_from_str() {
        let name: Rfc1123Name = "test-name".try_into().unwrap();
        assert_eq!(name.content, "test-name");
    }

    #[test]
    fn test_into_string() {
        let name = Rfc1123Name::try_from("test").unwrap();
        let s: String = name.into();
        assert_eq!(s, "test");
    }

    // Property-style check: every accepted name satisfies the RFC 1123
    // character and boundary rules, whatever the input looked like.
    #[test]
    fn test_compliance() {
        let inputs = vec![
            "valid",
            "in-VALID",
            ".dots",
            "-hyphen",
            "hyphen-",
            "!!1@",
            "aaaaaaaaaa",
            "--abc--",
            "a.b-c",
        ];

        for input in inputs {
            let name = Rfc1123Name::try_from(input).unwrap();
            let s = &name.content;
            // Check only allowed characters
            for c in s.chars() {
                assert!(c.is_ascii_alphanumeric() || c == '-' || c == '.');
            }
            // Check starts and ends with alphanumeric
            if !s.is_empty() {
                assert!(s.chars().next().unwrap().is_ascii_alphanumeric());
                assert!(s.chars().last().unwrap().is_ascii_alphanumeric());
            }
        }
    }

    #[test]
    fn test_enforces_max_length() {
        let long_input = "a".repeat(300);
        let name = Rfc1123Name::try_from(long_input.as_str()).unwrap();
        assert_eq!(name.content.len(), 253);
        assert_eq!(name.content, "a".repeat(253));
    }

    // Truncation happens before boundary trimming, so a hyphen landing at the
    // cut point is stripped afterwards.
    #[test]
    fn test_truncate_trim_end() {
        let input = "a".repeat(252) + "-";
        let name = Rfc1123Name::try_from(input.as_str()).unwrap();
        assert_eq!(name.content.len(), 252);
        assert_eq!(name.content, "a".repeat(252));
    }

    #[test]
    fn test_dedup_dots() {
        let input = "a..b...c";
        let name = Rfc1123Name::try_from(input).unwrap();
        assert_eq!(name.content, "a.b.c");
    }
}
|
||||
171
harmony_types/src/storage.rs
Normal file
171
harmony_types/src/storage.rs
Normal file
@@ -0,0 +1,171 @@
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::fmt;
|
||||
|
||||
// A storage quantity with a canonical byte count plus optional display-only
// metadata remembering the unit it was constructed with.
//
// NOTE(review): the derived PartialEq/Eq/Hash/PartialOrd/Ord include the two
// display-only fields, so sizes with identical byte counts but different
// suffixes compare unequal (and order by suffix string) — confirm this is
// intended before relying on equality/ordering across units.
#[derive(Clone, PartialEq, Eq, Hash, Serialize, Deserialize, PartialOrd, Ord, Debug)]
pub struct StorageSize {
    // Canonical size in bytes; the only field that survives (de)serialization.
    size_bytes: u64,
    // Display-only: the numeric value as originally given (e.g. 4 for "4GB").
    // Skipped by serde, so it is lost on a serialize/deserialize round trip.
    #[serde(skip)]
    display_value: Option<u64>,
    // Display-only unit suffix ("B", "KB", "Gi", ...). Also skipped by serde.
    #[serde(skip)]
    display_suffix: Option<String>,
}
|
||||
|
||||
impl StorageSize {
    /// Creates a size from a raw byte count with no remembered display unit;
    /// `Display` falls back to the `<bytes>B` form.
    pub fn new(size_bytes: u64) -> Self {
        Self {
            size_bytes,
            display_value: None,
            display_suffix: None,
        }
    }

    /// `size` bytes, displayed as `<size>B`.
    pub fn b(size: u64) -> Self {
        Self {
            size_bytes: size,
            display_value: Some(size),
            display_suffix: Some("B".to_string()),
        }
    }

    /// `size` * 1024 bytes, displayed as `<size>KB`.
    ///
    /// NOTE(review): SI "KB" conventionally means 1000 bytes; this constructor
    /// (like `mb`/`gb`/`tb` below) uses the binary 1024 multiplier, i.e. KiB
    /// semantics under a decimal suffix — confirm intended.
    pub fn kb(size: u64) -> Self {
        Self {
            size_bytes: size * 1024,
            display_value: Some(size),
            display_suffix: Some("KB".to_string()),
        }
    }

    /// `size` * 1024^2 bytes, displayed as `<size>MB` (binary multiplier —
    /// see the note on `kb`).
    pub fn mb(size: u64) -> Self {
        Self {
            size_bytes: size * 1024 * 1024,
            display_value: Some(size),
            display_suffix: Some("MB".to_string()),
        }
    }

    /// `size` * 1024^3 bytes, displayed as `<size>GB` (binary multiplier —
    /// identical byte count to `gi`; see the note on `kb`).
    pub fn gb(size: u64) -> Self {
        Self {
            size_bytes: size * 1024 * 1024 * 1024,
            display_value: Some(size),
            display_suffix: Some("GB".to_string()),
        }
    }

    /// `size` * 1024^3 bytes, displayed as `<size>Gi` (gibibytes).
    pub fn gi(size: u64) -> Self {
        Self {
            size_bytes: size * 1024 * 1024 * 1024,
            display_value: Some(size),
            display_suffix: Some("Gi".to_string()),
        }
    }

    /// `size` * 1024^4 bytes, displayed as `<size>TB` (binary multiplier —
    /// identical byte count to `ti`; see the note on `kb`).
    ///
    /// NOTE(review): the multiplication wraps/panics on overflow for
    /// `size > u64::MAX / 1024^4` (~16M) — confirm inputs stay in range.
    pub fn tb(size: u64) -> Self {
        Self {
            size_bytes: size * 1024 * 1024 * 1024 * 1024,
            display_value: Some(size),
            display_suffix: Some("TB".to_string()),
        }
    }

    /// `size` * 1024^4 bytes, displayed as `<size>Ti` (tebibytes).
    pub fn ti(size: u64) -> Self {
        Self {
            size_bytes: size * 1024 * 1024 * 1024 * 1024,
            display_value: Some(size),
            display_suffix: Some("Ti".to_string()),
        }
    }

    /// Returns the canonical size in bytes.
    pub fn bytes(&self) -> u64 {
        self.size_bytes
    }
}
|
||||
|
||||
impl fmt::Display for StorageSize {
    /// Renders the size with its remembered unit (e.g. "4GB"); a size built
    /// without one (via `new` or deserialization) falls back to "<bytes>B".
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match &self.display_suffix {
            Some(suffix) => {
                let value = self.display_value.unwrap_or(self.size_bytes);
                write!(f, "{}{}", value, suffix)
            }
            None => write!(f, "{}B", self.size_bytes),
        }
    }
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_bytes() {
        let size = StorageSize::b(123);
        assert_eq!(size.bytes(), 123);
        assert_eq!(size.to_string(), "123B");
    }

    #[test]
    fn test_kilobytes() {
        let size = StorageSize::kb(2);
        assert_eq!(size.bytes(), 2048);
        assert_eq!(size.to_string(), "2KB");
    }

    #[test]
    fn test_megabytes() {
        let size = StorageSize::mb(3);
        assert_eq!(size.bytes(), 3 * 1024 * 1024);
        assert_eq!(size.to_string(), "3MB");
    }

    #[test]
    fn test_gigabytes() {
        let size = StorageSize::gb(4);
        assert_eq!(size.bytes(), 4 * 1024 * 1024 * 1024);
        assert_eq!(size.to_string(), "4GB");
    }

    #[test]
    fn test_gibibytes() {
        let size = StorageSize::gi(1);
        assert_eq!(size.bytes(), 1024 * 1024 * 1024);
        assert_eq!(size.to_string(), "1Gi");
    }

    #[test]
    fn test_terabytes() {
        let size = StorageSize::tb(5);
        assert_eq!(size.bytes(), 5 * 1024 * 1024 * 1024 * 1024);
        assert_eq!(size.to_string(), "5TB");
    }

    #[test]
    fn test_tebibytes() {
        let size = StorageSize::ti(1);
        assert_eq!(size.bytes(), 1024 * 1024 * 1024 * 1024);
        assert_eq!(size.to_string(), "1Ti");
    }

    #[test]
    fn test_new_without_suffix() {
        let size = StorageSize::new(999);
        assert_eq!(size.bytes(), 999);
        assert_eq!(size.to_string(), "999B");
    }

    #[test]
    fn test_serde_roundtrip() {
        let original = StorageSize::gi(1);
        let serialized = serde_json::to_string(&original).unwrap();
        let deserialized: StorageSize = serde_json::from_str(&serialized).unwrap();

        assert_eq!(original.bytes(), deserialized.bytes());
        // Note: suffix is lost during serialization/deserialization
        assert_ne!(original.to_string(), deserialized.to_string());
    }

    #[test]
    fn test_ord() {
        let one_gb = StorageSize::gb(1);
        let one_gi = StorageSize::gi(1);
        // NOTE(review): gb() and gi() both multiply by 1024^3, so the byte
        // counts here are EQUAL. This assertion holds only because the
        // derived Ord falls back to the display fields, and the suffix
        // string "GB" sorts before "Gi". The previous comment
        // ("1GB = 1000MB, 1Gi = 1024MB") did not match the implementation —
        // decide whether gb() should use decimal 1000-based multipliers.
        assert!(one_gb < one_gi);
    }
}
|
||||
Reference in New Issue
Block a user