Compare commits
81 commits
better-cli ... 0a5da43c76

Commits (SHA1):
0a5da43c76, 62fa3c2b10, ea1380f98a, 701d8cfab9, f9906cb419, cb4382fbb5, 1eca2cc1a9, 269f13ae9b, ec277bc13d, a9f8cd16ea,
c542a935e3, 0395d11e98, 05e7b8075c, b857412151, 7bb3602ab8, 78b80c2169, 0876f4e4f0, 6ac0e095a3, ff2efc0a66, f180cc4c80,
3ca31179d0, a9fe4ab267, 65cc9befeb, d456a1f9ee, 5895f867cf, 8cc7adf196, a1ab5d40fb, 6c92dd24f7, c805d7e018, b33615b969,
0f59f29ac4, 361f240762, 57c3b01e66, 94ddf027dd, 06a2be4496, e2a09efdee, d36c574590, 2618441de3, da6610c625, e956772593,
27c51e0ec5, bfca9cf163, 597dcbc848, cd3ea6fc10, a53e8552e9, 89eb88d10e, 72fb05b5cc, 6685b05cc5, 07116eb8a6, 3f34f868eb,
bc6f7336d2, 01da8631da, 67b5c2df07, 1eaf63417b, 5e7803d2ba, 9a610661c7, 70a65ed5d0, 26e8e386b9, 19cb7f73bc, 84f38974b1,
7d027bcfc4, d1a274b705, b43ca7c740, 2a6a233fb2, 610ce84280, 8bb4a9d3f6, 67f3a23071, d86970f81b, 623a3f019b, fd8f643a8f,
bd214f8fb8, f0ed548755, 1de96027a1, 0812937a67, 29a261575b, dcf8335240, f876b5e67b, 440c1bce12, 024084859e, 54990cd1a5,
06aab1f57f
@@ -9,7 +9,7 @@ jobs:
check:
runs-on: docker
container:
image: hub.nationtech.io/harmony/harmony_composer:latest@sha256:eb0406fcb95c63df9b7c4b19bc50ad7914dd8232ce98e9c9abef628e07c69386
image: hub.nationtech.io/harmony/harmony_composer:latest
steps:
- name: Checkout code
uses: actions/checkout@v4

@@ -7,7 +7,7 @@ on:
jobs:
package_harmony_composer:
container:
image: hub.nationtech.io/harmony/harmony_composer:latest@sha256:eb0406fcb95c63df9b7c4b19bc50ad7914dd8232ce98e9c9abef628e07c69386
image: hub.nationtech.io/harmony/harmony_composer:latest
runs-on: dind
steps:
- name: Checkout code
@@ -45,14 +45,14 @@ jobs:
-H "Authorization: token ${{ secrets.GITEATOKEN }}" \
"https://git.nationtech.io/api/v1/repos/nationtech/harmony/releases/tags/snapshot-latest" \
| jq -r '.id // empty')

if [ -n "$RELEASE_ID" ]; then
# Delete existing release
curl -X DELETE \
-H "Authorization: token ${{ secrets.GITEATOKEN }}" \
"https://git.nationtech.io/api/v1/repos/nationtech/harmony/releases/$RELEASE_ID"
fi

# Create new release
RESPONSE=$(curl -X POST \
-H "Authorization: token ${{ secrets.GITEATOKEN }}" \
@@ -65,7 +65,7 @@ jobs:
"prerelease": true
}' \
"https://git.nationtech.io/api/v1/repos/nationtech/harmony/releases")

echo "RELEASE_ID=$(echo $RESPONSE | jq -r '.id')" >> $GITHUB_ENV

- name: Upload Linux binary
32  .sqlx/query-934035c7ca6e064815393e4e049a7934b0a7fac04a4fe4b2a354f0443d630990.json  (generated, Normal file)
@@ -0,0 +1,32 @@
{
  "db_name": "SQLite",
  "query": "SELECT id, version_id, data as \"data: Json<PhysicalHost>\" FROM physical_hosts WHERE id = ? ORDER BY version_id DESC LIMIT 1",
  "describe": {
    "columns": [
      {
        "name": "id",
        "ordinal": 0,
        "type_info": "Text"
      },
      {
        "name": "version_id",
        "ordinal": 1,
        "type_info": "Text"
      },
      {
        "name": "data: Json<PhysicalHost>",
        "ordinal": 2,
        "type_info": "Null"
      }
    ],
    "parameters": {
      "Right": 1
    },
    "nullable": [
      false,
      false,
      false
    ]
  },
  "hash": "934035c7ca6e064815393e4e049a7934b0a7fac04a4fe4b2a354f0443d630990"
}
12  .sqlx/query-f10f615ee42129ffa293e46f2f893d65a237d31d24b74a29c6a8d8420d255ab8.json  (generated, Normal file)
@@ -0,0 +1,12 @@
{
  "db_name": "SQLite",
  "query": "INSERT INTO physical_hosts (id, version_id, data) VALUES (?, ?, ?)",
  "describe": {
    "columns": [],
    "parameters": {
      "Right": 3
    },
    "nullable": []
  },
  "hash": "f10f615ee42129ffa293e46f2f893d65a237d31d24b74a29c6a8d8420d255ab8"
}
1732  Cargo.lock  (generated) — file diff suppressed because it is too large
16  Cargo.toml
@@ -12,6 +12,9 @@ members = [
    "harmony_cli",
    "k3d",
    "harmony_composer",
    "harmony_inventory_agent",
    "harmony_secret_derive",
    "harmony_secret", "adr/agent_discovery/mdns",
]

[workspace.package]
@@ -20,7 +23,7 @@ readme = "README.md"
license = "GNU AGPL v3"

[workspace.dependencies]
log = "0.4"
log = { version = "0.4", features = ["kv"] }
env_logger = "0.11"
derive-new = "0.7"
async-trait = "0.1"
@@ -33,7 +36,7 @@ tokio = { version = "1.40", features = [
cidr = { features = ["serde"], version = "0.2" }
russh = "0.45"
russh-keys = "0.45"
rand = "0.8"
rand = "0.9"
url = "2.5"
kube = { version = "1.1.0", features = [
    "config",
@@ -53,6 +56,15 @@ chrono = "0.4"
similar = "2"
uuid = { version = "1.11", features = ["v4", "fast-rng", "macro-diagnostics"] }
pretty_assertions = "1.4.1"
tempfile = "3.20.0"
bollard = "0.19.1"
base64 = "0.22.1"
tar = "0.4.44"
lazy_static = "1.5.0"
directories = "6.0.0"
thiserror = "2.0.14"
serde = { version = "1.0.209", features = ["derive", "rc"] }
serde_json = "1.0.127"
askama = "0.14"
sqlx = { version = "0.8", features = ["runtime-tokio", "sqlite" ] }
reqwest = { version = "0.12", features = ["blocking", "stream", "rustls-tls", "http2", "json"], default-features = false }
@@ -1,4 +1,4 @@
FROM docker.io/rust:1.87.0 AS build
FROM docker.io/rust:1.89.0 AS build

WORKDIR /app

@@ -6,13 +6,14 @@ COPY . .

RUN cargo build --release --bin harmony_composer

FROM docker.io/rust:1.87.0
FROM docker.io/rust:1.89.0

WORKDIR /app

RUN rustup target add x86_64-pc-windows-gnu
RUN rustup target add x86_64-unknown-linux-gnu
RUN rustup component add rustfmt
RUN rustup component add clippy

RUN apt update

@@ -22,4 +23,4 @@ RUN apt install -y nodejs docker.io mingw-w64

COPY --from=build /app/target/release/harmony_composer .

ENTRYPOINT ["/app/harmony_composer"]
ENTRYPOINT ["/app/harmony_composer"]
73  README.md
@@ -1,5 +1,6 @@
# Harmony : Open-source infrastructure orchestration that treats your platform like first-class code.
*By [NationTech](https://nationtech.io)*
# Harmony : Open-source infrastructure orchestration that treats your platform like first-class code

_By [NationTech](https://nationtech.io)_

[](https://git.nationtech.io/nationtech/harmony)
[](LICENSE)
@@ -23,11 +24,11 @@ From a **developer laptop** to a **global production cluster**, a single **sourc

Infrastructure is essential, but it shouldn’t be your core business. Harmony is built on three guiding principles that make modern platforms reliable, repeatable, and easy to reason about.

| Principle | What it means for you |
|-----------|-----------------------|
| **Infrastructure as Resilient Code** | Replace sprawling YAML and bash scripts with type-safe Rust. Test, refactor, and version your platform just like application code. |
| **Prove It Works — Before You Deploy** | Harmony uses the compiler to verify that your application’s needs match the target environment’s capabilities at **compile-time**, eliminating an entire class of runtime outages. |
| **One Unified Model** | Software and infrastructure are a single system. Harmony models them together, enabling deep automation—from bare-metal servers to Kubernetes workloads—with zero context switching. |
| Principle | What it means for you |
| -------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
| **Infrastructure as Resilient Code** | Replace sprawling YAML and bash scripts with type-safe Rust. Test, refactor, and version your platform just like application code. |
| **Prove It Works — Before You Deploy** | Harmony uses the compiler to verify that your application’s needs match the target environment’s capabilities at **compile-time**, eliminating an entire class of runtime outages. |
| **One Unified Model** | Software and infrastructure are a single system. Harmony models them together, enabling deep automation—from bare-metal servers to Kubernetes workloads—with zero context switching. |

These principles surface as simple, ergonomic Rust APIs that let teams focus on their product while trusting the platform underneath.

@@ -63,22 +64,20 @@ async fn main() {
        },
    };

    // 2. Pick where it should run
    let mut maestro = Maestro::<K8sAnywhereTopology>::initialize(
        Inventory::autoload(), // auto-detect hardware / kube-config
        K8sAnywhereTopology::from_env(), // local k3d, CI, staging, prod…
    )
    .await
    .unwrap();

    // 3. Enhance with extra scores (monitoring, CI/CD, …)
    // 2. Enhance with extra scores (monitoring, CI/CD, …)
    let mut monitoring = MonitoringAlertingStackScore::new();
    monitoring.namespace = Some(lamp_stack.config.namespace.clone());

    maestro.register_all(vec![Box::new(lamp_stack), Box::new(monitoring)]);

    // 4. Launch an interactive CLI / TUI
    harmony_cli::init(maestro, None).await.unwrap();
    // 3. Run your scores on the desired topology & inventory
    harmony_cli::run(
        Inventory::autoload(), // auto-detect hardware / kube-config
        K8sAnywhereTopology::from_env(), // local k3d, CI, staging, prod…
        vec![
            Box::new(lamp_stack),
            Box::new(monitoring)
        ],
        None
    ).await.unwrap();
}
```

@@ -94,13 +93,13 @@ Harmony analyses the code, shows an execution plan in a TUI, and applies it once

## 3 · Core Concepts

| Term | One-liner |
|------|-----------|
| **Score<T>** | Declarative description of the desired state (e.g., `LAMPScore`). |
| **Interpret<T>** | Imperative logic that realises a `Score` on a specific environment. |
| **Topology** | An environment (local k3d, AWS, bare-metal) exposing verified *Capabilities* (Kubernetes, DNS, …). |
| **Maestro** | Orchestrator that compiles Scores + Topology, ensuring all capabilities line up **at compile-time**. |
| **Inventory** | Optional catalogue of physical assets for bare-metal and edge deployments. |
| Term | One-liner |
| ---------------- | ---------------------------------------------------------------------------------------------------- |
| **Score<T>** | Declarative description of the desired state (e.g., `LAMPScore`). |
| **Interpret<T>** | Imperative logic that realises a `Score` on a specific environment. |
| **Topology** | An environment (local k3d, AWS, bare-metal) exposing verified _Capabilities_ (Kubernetes, DNS, …). |
| **Maestro** | Orchestrator that compiles Scores + Topology, ensuring all capabilities line up **at compile-time**. |
| **Inventory** | Optional catalogue of physical assets for bare-metal and edge deployments. |

A visual overview is in the diagram below.

@@ -112,9 +111,9 @@ A visual overview is in the diagram below.

Prerequisites:

* Rust
* Docker (if you deploy locally)
* `kubectl` / `helm` for Kubernetes-based topologies
- Rust
- Docker (if you deploy locally)
- `kubectl` / `helm` for Kubernetes-based topologies

```bash
git clone https://git.nationtech.io/nationtech/harmony
@@ -126,15 +125,15 @@ cargo build --release # builds the CLI, TUI and libraries

## 5 · Learning More

* **Architectural Decision Records** – dive into the rationale
  - [ADR-001 · Why Rust](adr/001-rust.md)
  - [ADR-003 · Infrastructure Abstractions](adr/003-infrastructure-abstractions.md)
  - [ADR-006 · Secret Management](adr/006-secret-management.md)
- **Architectural Decision Records** – dive into the rationale
  - [ADR-001 · Why Rust](adr/001-rust.md)
  - [ADR-003 · Infrastructure Abstractions](adr/003-infrastructure-abstractions.md)
  - [ADR-006 · Secret Management](adr/006-secret-management.md)
  - [ADR-011 · Multi-Tenant Cluster](adr/011-multi-tenant-cluster.md)

* **Extending Harmony** – write new Scores / Interprets, add hardware like OPNsense firewalls, or embed Harmony in your own tooling (`/docs`).
- **Extending Harmony** – write new Scores / Interprets, add hardware like OPNsense firewalls, or embed Harmony in your own tooling (`/docs`).

* **Community** – discussions and roadmap live in [GitLab issues](https://git.nationtech.io/nationtech/harmony/-/issues). PRs, ideas, and feedback are welcome!
- **Community** – discussions and roadmap live in [GitLab issues](https://git.nationtech.io/nationtech/harmony/-/issues). PRs, ideas, and feedback are welcome!

---

@@ -148,4 +147,4 @@ See [LICENSE](LICENSE) for the full text.

---

*Made with ❤️ & 🦀 by the NationTech and the Harmony community*
_Made with ❤️ & 🦀 by the NationTech and the Harmony community_
17  adr/agent_discovery/mdns/Cargo.toml  (Normal file)
@@ -0,0 +1,17 @@
[package]
name = "mdns"
edition = "2024"
version.workspace = true
readme.workspace = true
license.workspace = true

[dependencies]
mdns-sd = "0.14"
tokio = { version = "1", features = ["full"] }
futures = "0.3"
dmidecode = "0.2" # For getting the motherboard ID on the agent
log.workspace=true
env_logger.workspace=true
clap = { version = "4.5.46", features = ["derive"] }
get_if_addrs = "0.5.3"
local-ip-address = "0.6.5"
60  adr/agent_discovery/mdns/src/advertise.rs  (Normal file)
@@ -0,0 +1,60 @@
// harmony-agent/src/main.rs

use log::info;
use mdns_sd::{ServiceDaemon, ServiceInfo};
use std::collections::HashMap;

use crate::SERVICE_TYPE;

// The service we are advertising.
const SERVICE_PORT: u16 = 43210; // A port for the service. It needs one, even if unused.

pub async fn advertise() {
    info!("Starting Harmony Agent...");

    // Get a unique ID for this machine.
    let motherboard_id = "some motherboard id";
    let instance_name = format!("harmony-agent-{}", motherboard_id);
    info!("This agent's instance name: {}", instance_name);
    info!("Advertising with ID: {}", motherboard_id);

    // Create a new mDNS daemon.
    let mdns = ServiceDaemon::new().expect("Failed to create mDNS daemon");

    // Create a TXT record HashMap to hold our metadata.
    let mut properties = HashMap::new();
    properties.insert("id".to_string(), motherboard_id.to_string());
    properties.insert("version".to_string(), "1.0".to_string());

    // Create the service information.
    // The instance name should be unique on the network.
    let local_ip = local_ip_address::local_ip().unwrap();
    let service_info = ServiceInfo::new(
        SERVICE_TYPE,
        &instance_name,
        "harmony-host.local.", // A hostname for the service
        local_ip,
        // "0.0.0.0",
        SERVICE_PORT,
        Some(properties),
    )
    .expect("Failed to create service info");

    // Register our service with the daemon.
    mdns.register(service_info)
        .expect("Failed to register service");

    info!(
        "Service '{}' registered and now being advertised.",
        instance_name
    );
    info!("Agent is running. Press Ctrl+C to exit.");

    for iface in get_if_addrs::get_if_addrs().unwrap() {
        println!("{:#?}", iface);
    }

    // Keep the agent running indefinitely.
    tokio::signal::ctrl_c().await.unwrap();
    info!("Shutting down agent.");
}
110  adr/agent_discovery/mdns/src/discover.rs  (Normal file)
@@ -0,0 +1,110 @@
use log::debug;
use mdns_sd::{ServiceDaemon, ServiceEvent};

use crate::SERVICE_TYPE;

pub async fn discover() {
    println!("Starting Harmony Master and browsing for agents...");

    // Create a new mDNS daemon.
    let mdns = ServiceDaemon::new().expect("Failed to create mDNS daemon");

    // Start browsing for the service type.
    // The receiver will be a stream of events.
    let receiver = mdns.browse(SERVICE_TYPE).expect("Failed to browse");

    println!(
        "Listening for mDNS events for '{}'. Press Ctrl+C to exit.",
        SERVICE_TYPE
    );

    std::thread::spawn(move || {
        while let Ok(event) = receiver.recv() {
            match event {
                ServiceEvent::ServiceData(resolved) => {
                    println!("Resolved a new service: {}", resolved.fullname);
                }
                other_event => {
                    println!("Received other event: {:?}", &other_event);
                }
            }
        }
    });

    // Gracefully shutdown the daemon.
    std::thread::sleep(std::time::Duration::from_secs(1000000));
    mdns.shutdown().unwrap();

    // Process events as they come in.
    // while let Ok(event) = receiver.recv_async().await {
    //     debug!("Received event {event:?}");
    //     // match event {
    //     //     ServiceEvent::ServiceFound(svc_type, fullname) => {
    //     //         println!("\n--- Agent Discovered ---");
    //     //         println!("  Service Name: {}", fullname());
    //     //         // You can now resolve this service to get its IP, port, and TXT records
    //     //         // The resolve operation is a separate network call.
    //     //         let receiver = mdns.browse(info.get_fullname()).unwrap();
    //     //         if let Ok(resolve_event) = receiver.recv_timeout(Duration::from_secs(2)) {
    //     //             if let ServiceEvent::ServiceResolved(info) = resolve_event {
    //     //                 let ip = info.get_addresses().iter().next().unwrap();
    //     //                 let port = info.get_port();
    //     //                 let motherboard_id = info.get_property("id").map_or("N/A", |v| v.val_str());
    //     //
    //     //                 println!("  IP: {}:{}", ip, port);
    //     //                 println!("  Motherboard ID: {}", motherboard_id);
    //     //                 println!("------------------------");
    //     //
    //     //                 // TODO: Add this agent to your central list of discovered hosts.
    //     //             }
    //     //         } else {
    //     //             println!("Could not resolve service '{}' in time.", info.get_fullname());
    //     //         }
    //     //     }
    //     //     ServiceEvent::ServiceRemoved(info) => {
    //     //         println!("\n--- Agent Removed ---");
    //     //         println!("  Service Name: {}", info.get_fullname());
    //     //         println!("---------------------");
    //     //         // TODO: Remove this agent from your list.
    //     //     }
    //     //     _ => {
    //     //         // We don't care about other event types for this example
    //     //     }
    //     // }
    // }
}

async fn discover_example() {
    use mdns_sd::{ServiceDaemon, ServiceEvent};

    // Create a daemon
    let mdns = ServiceDaemon::new().expect("Failed to create daemon");

    // Use recently added `ServiceEvent::ServiceData`.
    mdns.use_service_data(true)
        .expect("Failed to use ServiceData");

    // Browse for a service type.
    let service_type = "_mdns-sd-my-test._udp.local.";
    let receiver = mdns.browse(service_type).expect("Failed to browse");

    // Receive the browse events in sync or async. Here is
    // an example of using a thread. Users can call `receiver.recv_async().await`
    // if running in async environment.
    std::thread::spawn(move || {
        while let Ok(event) = receiver.recv() {
            match event {
                ServiceEvent::ServiceData(resolved) => {
                    println!("Resolved a new service: {}", resolved.fullname);
                }
                other_event => {
                    println!("Received other event: {:?}", &other_event);
                }
            }
        }
    });

    // Gracefully shutdown the daemon.
    std::thread::sleep(std::time::Duration::from_secs(1));
    mdns.shutdown().unwrap();
}
31  adr/agent_discovery/mdns/src/main.rs  (Normal file)
@@ -0,0 +1,31 @@
use clap::{Parser, ValueEnum};

mod advertise;
mod discover;

#[derive(Parser, Debug)]
#[command(version, about, long_about = None)]
struct Args {
    #[arg(value_enum)]
    profile: Profiles,
}

#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, ValueEnum)]
enum Profiles {
    Advertise,
    Discover,
}

// The service type we are looking for.
const SERVICE_TYPE: &str = "_harmony._tcp.local.";

#[tokio::main]
async fn main() {
    env_logger::init();
    let args = Args::parse();

    match args.profile {
        Profiles::Advertise => advertise::advertise().await,
        Profiles::Discover => discover::discover().await,
    }
}
3  check.sh
@@ -1,5 +1,8 @@
#!/bin/sh
set -e

rustc --version
cargo check --all-targets --all-features --keep-going
cargo fmt --check
cargo clippy
cargo test
8  data/pxe/okd/README.md  (Normal file)
@@ -0,0 +1,8 @@
Here lies all the data files required for an OKD cluster PXE boot setup.

This includes ISO files, binary boot files, ipxe, etc.

TODO as of August 2025:

- `harmony_inventory_agent` should be downloaded from official releases; this embedded version is practical for now though
- The cluster SSH key should be generated and handled by Harmony, with the private key saved in a secret store
9  data/pxe/okd/http_files/.gitattributes  (vendored, Normal file)
@@ -0,0 +1,9 @@
harmony_inventory_agent filter=lfs diff=lfs merge=lfs -text
os filter=lfs diff=lfs merge=lfs -text
os/centos-stream-9 filter=lfs diff=lfs merge=lfs -text
os/centos-stream-9/images filter=lfs diff=lfs merge=lfs -text
os/centos-stream-9/initrd.img filter=lfs diff=lfs merge=lfs -text
os/centos-stream-9/vmlinuz filter=lfs diff=lfs merge=lfs -text
os/centos-stream-9/images/efiboot.img filter=lfs diff=lfs merge=lfs -text
os/centos-stream-9/images/install.img filter=lfs diff=lfs merge=lfs -text
os/centos-stream-9/images/pxeboot filter=lfs diff=lfs merge=lfs -text

1  data/pxe/okd/http_files/cluster_ssh_key.pub  (Normal file)
@@ -0,0 +1 @@
ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIBx6bDylvC68cVpjKfEFtLQJ/dOFi6PVS2vsIOqPDJIc jeangab@liliane2

New binary files (binary file not shown):
- data/pxe/okd/http_files/harmony_inventory_agent (Stored with Git LFS, Executable file)
- data/pxe/okd/http_files/os/centos-stream-9/images/efiboot.img (Stored with Git LFS, Normal file)
- data/pxe/okd/http_files/os/centos-stream-9/images/install.img (Stored with Git LFS, Normal file)
- data/pxe/okd/http_files/os/centos-stream-9/images/pxeboot/vmlinuz (Executable file)
- data/pxe/okd/http_files/os/centos-stream-9/initrd.img (Stored with Git LFS, Normal file)
- data/pxe/okd/http_files/os/centos-stream-9/vmlinuz (Stored with Git LFS, Executable file)
- data/pxe/okd/tftpboot/ipxe.efi (Normal file)
- data/pxe/okd/tftpboot/undionly.kpxe (Normal file)
132  demos/cncf-k8s-quebec-meetup-september-2025/storyline.md  (Normal file)
@@ -0,0 +1,132 @@
# Harmony, an Open-Source Infrastructure Orchestrator

**Target Duration:** 25 minutes\
**Tone:** Friendly, expert-to-expert, inspiring.

---

#### **Slide 1: Title Slide**

- **Visual:** Clean and simple. Your company logo (NationTech) and the Harmony logo.

---

#### **Slide 2: The YAML Labyrinth**

**Goal:** Get every head in the room nodding in agreement. Start with their world, not yours.

- **Visual:**
  - Option A: "The Pull Request from Hell". A screenshot of a GitHub pull request for a seemingly minor change that touches dozens of YAML files across multiple directories. A sea of red and green diffs that is visually overwhelming.
  - Option B: A complex flowchart connecting dozens of logos: Terraform, Ansible, K8s, Helm, etc.
- **Narration:**\
  [...ADD SOMETHING FOR INTRODUCTION...]\
  "We love the power that tools like Kubernetes and the CNCF landscape have given us. But let's be honest... when did our infrastructure code start looking like _this_?"\
  "We have GitOps, which is great. But it often means we're managing this fragile cathedral of YAML, Helm charts, and brittle scripts. We spend more time debugging indentation and tracing variables than we do building truly resilient systems."

---

#### **Slide 3: The Real Cost of Infrastructure**

- **Visual:** "The Jenga Tower of Tools". A tall, precarious Jenga tower where each block is the logo of a different tool (Terraform, K8s, Helm, Ansible, Prometheus, ArgoCD, etc.). One block near the bottom is being nervously pulled out.
- **Narration:**
  "The real cost isn't just complexity; it's the constant need to choose, learn, integrate, and operate a dozen different tools, each with its own syntax and failure modes. It's the nagging fear that a tiny typo in a config file could bring everything down. Click-ops isn't the answer, but the current state of IaC feels like we've traded one problem for another."

---

#### **Slide 4: The Broken Promise of "Code"**

**Goal:** Introduce the core idea before introducing the product. This makes the solution feel inevitable.

- **(Initial Visual):** A two-panel slide.
  - **Left Panel Title: "The Plan"** - A terminal showing a green, successful `terraform plan` output.
  - **Right Panel Title: "The Reality"** - The _next_ screen in the terminal, showing the `terraform apply` failing with a cascade of red error text.
- **Narration:**
  "We call our discipline **Infrastructure as Code**. And we've all been here. Our 'compiler' is a `terraform plan` that says everything looks perfect. We get the green light."
  (Pause for a beat)
  "And then we `apply`, and reality hits. It fails halfway through, at runtime, when it's most expensive and painful to fix."

**(Click to transition the slide)**

- **(New Visual):** The entire slide is replaced by a clean screenshot of a code editor (like nvim 😉) showing Harmony's Rust DSL. A red squiggly line is under a config line. The error message is clear in the "Problems" panel: `error: Incompatible deployment. Production target 'gcp-prod-cluster' requires a StorageClass with 'snapshots' capability, but 'standard-sc' does not provide it.`
- **Narration (continued):**
  "In software development, we solved these problems years ago. We don't accept 'it compiled, but crashed on startup'. We have real tools, type systems, compilers, test frameworks, and IDEs that catch our mistakes before they ever reach production. **So, what if we could treat our entire infrastructure... like a modern, compiled application?**"
  "What if your infrastructure code could get compile-time checks, straight into the editor... instead of runtime panics and failures at 3 AM in production?"

---

#### **Slide 5: Introducing Harmony**

**Goal:** Introduce Harmony as the answer to the "What If?" question.

- **Visual:** The Harmony logo, large and centered.
- **Tagline:** `Infrastructure in type-safe Rust. No YAML required.`
- **Narration:**
  "This is Harmony. It's an open-source orchestrator that lets you define your entire stack — from a dev laptop to a multi-site bare-metal cluster—in a single, type-safe Rust codebase."

---

#### **Slide 6: Before & After**

- **Visual:** A side-by-side comparison. Left side: A screen full of complex, nested YAML. Right side: 10-15 lines of clean, readable Harmony Rust DSL that accomplishes the same thing.
- **Narration:**
  "This is the difference. On the left, the fragile world of strings and templates. On the right, a portable, verifiable program that describes your apps, your infra, and your operations. We unify scaffolding, provisioning, and Day-2 ops, all verified by the Rust compiler. But enough slides... let's see it in action."

---

#### **Slide 7: Live Demo: Zero to Monitored App**

**Goal:** Show, don't just tell. Make it look effortless. This is where you build the "dream."

- **Visual:** Your terminal/IDE, ready to go.
- **Narration Guide:**
  "Okay, for this demo, we're going to take a standard web app from GitHub. Nothing special about it."
  _(Show the repo)_
  "Now, let's bring it into Harmony. This is the entire definition we need to describe the application and its needs."
  _(Show the Rust DSL)_
  "First, let's run it locally on k3d. The exact same definition for dev as for prod."
  _(Deploy locally, show it works)_
  "Cool. But a real app needs monitoring. In Harmony, that's just adding a feature to our code."
  _(Uncomment one line: `.with_feature(Monitoring)` and redeploy)_
  "And just like that, we have a fully configured Prometheus and Grafana stack, scraping our app. No YAML, no extra config."
  "Finally, let's push this to our production staging cluster. We just change the target and specify our multi-site Ceph storage."
  _(Deploy to the remote cluster)_
  "And there it is. We've gone from a simple web app to a monitored, enterprise-grade service in minutes."

---

#### **Slide 8: Live Demo: Embracing Chaos**

**Goal:** Prove the "predictable" and "resilient" claims in the most dramatic way possible.

- **Visual:** A slide showing a map or diagram of your distributed infrastructure (the different data centers). Then switch back to your terminal.
- **Narration Guide:**
  "This is great when things are sunny. But production is chaos. So... let's break things. On purpose."
  "First, a network failure." _(Kill a switch/link, show app is still up)_
  "Now, let's power off a storage server." _(Force off a server, show Ceph healing and the app is unaffected)_
  "How about a control plane node?" _(Force off a k8s control plane, show the cluster is still running)_
  "Okay, for the grand finale. What if we have a cascading failure? I'm going to kill _another_ storage server. This should cause a total failure in this data center."
  _(Force off the second server, narrate what's happening)_
  "And there it is... Ceph has lost quorum in this site... and Harmony has automatically failed everything over to our other datacenter. The app is still running."

---

#### **Slide 9: The New Reality**

**Goal:** Summarize the dream and tell the audience what you want them to do.

- **Visual:** The clean, simple Harmony Rust DSL code from Slide 6. A summary of what was just accomplished is listed next to it: `✓ GitHub to Prod in minutes`, `✓ Type-Safe Validation`, `✓ Built-in Monitoring`, `✓ Automated Multi-Site Failover`.
- **Narration:**
  "So, in just a few minutes, we went from a simple web app to a multi-site, monitored, and chaos-proof production deployment. We did it with a small amount of code that is easy to read, easy to verify, and completely portable. This is our vision: to offload the complexity, and make infrastructure simple, predictable, and even fun again."

---

#### **Slide 10: Join Us**

- **Visual:** A clean, final slide with QR codes and links.
  - GitHub Repo (`github.com/nation-tech/harmony`)
  - Website (`harmony.sh` or similar)
  - Your contact info (`jg@nation.tech` / LinkedIn / Twitter)
- **Narration:**
  "Harmony is open-source, AGPLv3. We believe this is the future, but we're just getting started. We know this crowd has great infrastructure minds out there, and we need your feedback. Please, check out the project on GitHub. Star it if you like what you see. Tell us what's missing. Let's build this future together. Thank you."

**(Open for Q&A)**
108  docs/pxe_test/README.md  (Normal file)
@@ -0,0 +1,108 @@
# OPNsense PXE Lab Environment

This project contains a script to automatically set up a virtual lab environment for testing PXE boot services managed by an OPNsense firewall.

## Overview

The `pxe_vm_lab_setup.sh` script will create the following resources using libvirt/KVM:

1. **A Virtual Network**: An isolated network named `harmonylan` (`virbr1`) for the lab.
2. **Two Virtual Machines**:
   * `opnsense-pxe`: A firewall VM that will act as the gateway and PXE server.
   * `pxe-node-1`: A client VM configured to boot from the network.

## Prerequisites

Ensure you have the following software installed on your Arch Linux host:

* `libvirt`
* `qemu`
* `virt-install` (from the `virt-install` package)
* `curl`
* `bzip2`

## Usage

### 1. Create the Environment

Run the `up` command to download the necessary images and create the network and VMs.

```bash
sudo ./pxe_vm_lab_setup.sh up
```

### 2. Install and Configure OPNsense

The OPNsense VM is created but the OS needs to be installed manually via the console.

1. **Connect to the VM console**:
   ```bash
   sudo virsh console opnsense-pxe
   ```

2. **Log in as the installer**:
   * Username: `installer`
   * Password: `opnsense`

3. **Follow the on-screen installation wizard**. When prompted to assign network interfaces (`WAN` and `LAN`):
   * Find the MAC address for the `harmonylan` interface by running this command in another terminal:
     ```bash
     virsh domiflist opnsense-pxe
     # Example output:
     # Interface   Type      Source       Model    MAC
     # ---------------------------------------------------------
     # vnet18      network   default      virtio   52:54:00:b5:c4:6d
     # vnet19      network   harmonylan   virtio   52:54:00:21:f9:ba
     ```
   * Assign the interface connected to `harmonylan` (e.g., `vtnet1` with MAC `52:54:00:21:f9:ba`) as your **LAN**.
   * Assign the other interface as your **WAN**.

4. After the installation is complete, **shut down** the VM from the console menu.

5. **Detach the installation media** by editing the VM's configuration:
   ```bash
   sudo virsh edit opnsense-pxe
   ```
   Find and **delete** the entire `<disk>` block corresponding to the `.img` file (the one with `<target ... bus='usb'/>`).

6. **Start the VM** to boot into the newly installed system:
   ```bash
   sudo virsh start opnsense-pxe
   ```

### 3. Connect to OPNsense from Your Host

To configure OPNsense, you need to connect your host to the `harmonylan` network.

1. By default, OPNsense configures its LAN interface with the IP `192.168.1.1`.
2. Assign a compatible IP address to your host's `virbr1` bridge interface:
   ```bash
   sudo ip addr add 192.168.1.5/24 dev virbr1
   ```
3. You can now access the OPNsense VM from your host:
   * **SSH**: `ssh root@192.168.1.1` (password: `opnsense`)
   * **Web UI**: `https://192.168.1.1`

### 4. Configure PXE Services with Harmony

With connectivity established, you can now use Harmony to configure the OPNsense firewall for PXE booting. Point your Harmony OPNsense scores to the firewall using these details:

* **Hostname/IP**: `192.168.1.1`
* **Credentials**: `root` / `opnsense`

### 5. Boot the PXE Client

Once your Harmony configuration has been applied and OPNsense is serving DHCP/TFTP, start the client VM. It will automatically attempt to boot from the network.

```bash
sudo virsh start pxe-node-1
sudo virsh console pxe-node-1
```

## Cleanup

To destroy all VMs and networks created by the script, run the `clean` command:

```bash
sudo ./pxe_vm_lab_setup.sh clean
```
191  docs/pxe_test/pxe_vm_lab_setup.sh  (Executable file)
@@ -0,0 +1,191 @@
#!/usr/bin/env bash
set -euo pipefail

# --- Configuration ---
LAB_DIR="/var/lib/harmony_pxe_test"
IMG_DIR="${LAB_DIR}/images"
STATE_DIR="${LAB_DIR}/state"
VM_OPN="opnsense-pxe"
VM_PXE="pxe-node-1"
NET_HARMONYLAN="harmonylan"

# Network settings for the isolated LAN
VLAN_CIDR="192.168.150.0/24"
VLAN_GW="192.168.150.1"
VLAN_MASK="255.255.255.0"

# VM Specifications
RAM_OPN="2048"
VCPUS_OPN="2"
DISK_OPN_GB="10"
OS_VARIANT_OPN="freebsd14.0" # Updated to a more recent FreeBSD variant

RAM_PXE="4096"
VCPUS_PXE="2"
DISK_PXE_GB="40"
OS_VARIANT_LINUX="centos-stream9"

OPN_IMG_URL="https://mirror.ams1.nl.leaseweb.net/opnsense/releases/25.7/OPNsense-25.7-serial-amd64.img.bz2"
OPN_IMG_PATH="${IMG_DIR}/OPNsense-25.7-serial-amd64.img"
CENTOS_ISO_URL="https://mirror.stream.centos.org/9-stream/BaseOS/x86_64/os/images/boot.iso"
CENTOS_ISO_PATH="${IMG_DIR}/CentOS-Stream-9-latest-boot.iso"

CONNECT_URI="qemu:///system"

download_if_missing() {
  local url="$1"
  local dest="$2"
  if [[ ! -f "$dest" ]]; then
    echo "Downloading $url to $dest"
    mkdir -p "$(dirname "$dest")"
    local tmp
    tmp="$(mktemp)"
    curl -L --progress-bar "$url" -o "$tmp"
    case "$url" in
      *.bz2) bunzip2 -c "$tmp" > "$dest" && rm -f "$tmp" ;;
      *) mv "$tmp" "$dest" ;;
    esac
  else
    echo "Already present: $dest"
  fi
}

# Ensures a libvirt network is defined and active
ensure_network() {
  local net_name="$1"
  local net_xml_path="$2"
  if virsh --connect "${CONNECT_URI}" net-info "${net_name}" >/dev/null 2>&1; then
    echo "Network ${net_name} already exists."
  else
    echo "Defining network ${net_name} from ${net_xml_path}"
    virsh --connect "${CONNECT_URI}" net-define "${net_xml_path}"
  fi

  if ! virsh --connect "${CONNECT_URI}" net-info "${net_name}" | grep "Active: *yes"; then
    echo "Starting network ${net_name}..."
    virsh --connect "${CONNECT_URI}" net-start "${net_name}"
    virsh --connect "${CONNECT_URI}" net-autostart "${net_name}"
  fi
}

# Destroys a VM completely
destroy_vm() {
  local vm_name="$1"
  if virsh --connect "${CONNECT_URI}" dominfo "$vm_name" >/dev/null 2>&1; then
    echo "Destroying and undefining VM: ${vm_name}"
    virsh --connect "${CONNECT_URI}" destroy "$vm_name" || true
    virsh --connect "${CONNECT_URI}" undefine "$vm_name" --nvram
  fi
}

# Destroys a libvirt network
destroy_network() {
  local net_name="$1"
  if virsh --connect "${CONNECT_URI}" net-info "$net_name" >/dev/null 2>&1; then
    echo "Destroying and undefining network: ${net_name}"
    virsh --connect "${CONNECT_URI}" net-destroy "$net_name" || true
    virsh --connect "${CONNECT_URI}" net-undefine "$net_name"
  fi
}

# --- Main Logic ---
create_lab_environment() {
  # Create network definition files
  cat > "${STATE_DIR}/default.xml" <<EOF
<network>
  <name>default</name>
  <forward mode='nat'/>
  <bridge name='virbr0' stp='on' delay='0'/>
  <ip address='192.168.122.1' netmask='255.255.255.0'>
    <dhcp>
      <range start='192.168.122.100' end='192.168.122.200'/>
    </dhcp>
  </ip>
</network>
EOF

  cat > "${STATE_DIR}/${NET_HARMONYLAN}.xml" <<EOF
<network>
  <name>${NET_HARMONYLAN}</name>
  <bridge name='virbr1' stp='on' delay='0'/>
</network>
EOF

  # Ensure both networks exist and are active
  ensure_network "default" "${STATE_DIR}/default.xml"
  ensure_network "${NET_HARMONYLAN}" "${STATE_DIR}/${NET_HARMONYLAN}.xml"

  # --- Create OPNsense VM (MODIFIED SECTION) ---
  local disk_opn="${IMG_DIR}/${VM_OPN}.qcow2"
  if [[ ! -f "$disk_opn" ]]; then
    qemu-img create -f qcow2 "$disk_opn" "${DISK_OPN_GB}G"
  fi

  echo "Creating OPNsense VM using serial image..."
  virt-install \
    --connect "${CONNECT_URI}" \
    --name "${VM_OPN}" \
    --ram "${RAM_OPN}" \
    --vcpus "${VCPUS_OPN}" \
    --cpu host-passthrough \
    --os-variant "${OS_VARIANT_OPN}" \
    --graphics none \
    --noautoconsole \
    --disk path="${disk_opn}",device=disk,bus=virtio,boot.order=1 \
    --disk path="${OPN_IMG_PATH}",device=disk,bus=usb,readonly=on,boot.order=2 \
    --network network=default,model=virtio \
    --network network="${NET_HARMONYLAN}",model=virtio \
    --boot uefi,menu=on

  echo "OPNsense VM created. Connect with: sudo virsh console ${VM_OPN}"
  echo "The VM will boot from the serial installation image."
  echo "Login with user 'installer' and password 'opnsense' to start the installation."
  echo "Install onto the VirtIO disk (vtbd0)."
  echo "After installation, shutdown the VM, then run 'sudo virsh edit ${VM_OPN}' and remove the USB disk block to boot from the installed system."

  # --- Create PXE Client VM ---
  local disk_pxe="${IMG_DIR}/${VM_PXE}.qcow2"
  if [[ ! -f "$disk_pxe" ]]; then
    qemu-img create -f qcow2 "$disk_pxe" "${DISK_PXE_GB}G"
  fi

  echo "Creating PXE client VM..."
  virt-install \
    --connect "${CONNECT_URI}" \
    --name "${VM_PXE}" \
    --ram "${RAM_PXE}" \
    --vcpus "${VCPUS_PXE}" \
    --cpu host-passthrough \
    --os-variant "${OS_VARIANT_LINUX}" \
    --graphics none \
    --noautoconsole \
    --disk path="${disk_pxe}",format=qcow2,bus=virtio \
    --network network="${NET_HARMONYLAN}",model=virtio \
    --pxe \
    --boot uefi,menu=on

  echo "PXE VM created. It will attempt to netboot on ${NET_HARMONYLAN}."
}

# --- Script Entrypoint ---
case "${1:-}" in
  up)
    mkdir -p "${IMG_DIR}" "${STATE_DIR}"
    download_if_missing "$OPN_IMG_URL" "$OPN_IMG_PATH"
    download_if_missing "$CENTOS_ISO_URL" "$CENTOS_ISO_PATH"
    create_lab_environment
    echo "Lab setup complete. Use 'sudo virsh list --all' to see VMs."
    ;;
  clean)
    destroy_vm "${VM_PXE}"
    destroy_vm "${VM_OPN}"
    destroy_network "${NET_HARMONYLAN}"
    # Optionally destroy the default network if you want a full reset
    # destroy_network "default"
    echo "Cleanup complete."
    ;;
  *)
    echo "Usage: sudo $0 {up|clean}"
    exit 1
    ;;
esac
15  examples/application_monitoring_with_tenant/Cargo.toml  (Normal file)
@@ -0,0 +1,15 @@
[package]
name = "example-application-monitoring-with-tenant"
edition = "2024"
version.workspace = true
readme.workspace = true
license.workspace = true

[dependencies]
env_logger.workspace = true
harmony = { path = "../../harmony" }
harmony_cli = { path = "../../harmony_cli" }
harmony_types = { path = "../../harmony_types" }
logging = "0.1.0"
tokio.workspace = true
url.workspace = true

BIN  examples/application_monitoring_with_tenant/harmony  (Executable file) — binary file not shown
56  examples/application_monitoring_with_tenant/src/main.rs  (Normal file)
@@ -0,0 +1,56 @@
use std::{path::PathBuf, str::FromStr, sync::Arc};

use harmony::{
    inventory::Inventory,
    modules::{
        application::{ApplicationScore, RustWebFramework, RustWebapp, features::Monitoring},
        monitoring::alert_channel::webhook_receiver::WebhookReceiver,
        tenant::TenantScore,
    },
    topology::{K8sAnywhereTopology, tenant::TenantConfig},
};
use harmony_types::id::Id;
use harmony_types::net::Url;

#[tokio::main]
async fn main() {
    //TODO there is a bug where the application is deployed into the namespace matching the
    //application name and the tenant is created in the namespace matching the tenant name;
    //in order for the application to be deployed in the tenant namespace, the application.name and
    //the TenantConfig.name must match
    let tenant = TenantScore {
        config: TenantConfig {
            id: Id::from_str("test-tenant-id").unwrap(),
            name: "example-monitoring".to_string(),
            ..Default::default()
        },
    };
    let application = Arc::new(RustWebapp {
        name: "example-monitoring".to_string(),
        domain: Url::Url(url::Url::parse("https://rustapp.harmony.example.com").unwrap()),
        project_root: PathBuf::from("./examples/rust/webapp"),
        framework: Some(RustWebFramework::Leptos),
    });

    let webhook_receiver = WebhookReceiver {
        name: "sample-webhook-receiver".to_string(),
        url: Url::Url(url::Url::parse("https://webhook-doesnt-exist.com").unwrap()),
    };

    let app = ApplicationScore {
        features: vec![Box::new(Monitoring {
            alert_receiver: vec![Box::new(webhook_receiver)],
            application: application.clone(),
        })],
        application,
    };

    harmony_cli::run(
        Inventory::autoload(),
        K8sAnywhereTopology::from_env(),
        vec![Box::new(tenant), Box::new(app)],
        None,
    )
    .await
    .unwrap();
}
@@ -1,20 +1,27 @@
use harmony::{
inventory::Inventory,
maestro::Maestro,
modules::dummy::{ErrorScore, PanicScore, SuccessScore},
modules::{
dummy::{ErrorScore, PanicScore, SuccessScore},
inventory::DiscoverInventoryAgentScore,
},
topology::LocalhostTopology,
};

#[tokio::main]
async fn main() {
let inventory = Inventory::autoload();
let topology = LocalhostTopology::new();
let mut maestro = Maestro::initialize(inventory, topology).await.unwrap();

maestro.register_all(vec![
Box::new(SuccessScore {}),
Box::new(ErrorScore {}),
Box::new(PanicScore {}),
]);
harmony_cli::init(maestro, None).await.unwrap();
harmony_cli::run(
Inventory::autoload(),
LocalhostTopology::new(),
vec![
Box::new(SuccessScore {}),
Box::new(ErrorScore {}),
Box::new(PanicScore {}),
Box::new(DiscoverInventoryAgentScore {
discovery_timeout: Some(10),
}),
],
None,
)
.await
.unwrap();
}
@@ -125,40 +125,47 @@ spec:
name: nginx"#,
)
.unwrap();
return deployment;
deployment
}
fn nginx_deployment_2() -> Deployment {
let mut pod_template = PodTemplateSpec::default();
pod_template.metadata = Some(ObjectMeta {
labels: Some(BTreeMap::from([(
"app".to_string(),
"nginx-test".to_string(),
)])),
..Default::default()
});
pod_template.spec = Some(PodSpec {
containers: vec![Container {
name: "nginx".to_string(),
image: Some("nginx".to_string()),
let pod_template = PodTemplateSpec {
metadata: Some(ObjectMeta {
labels: Some(BTreeMap::from([(
"app".to_string(),
"nginx-test".to_string(),
)])),
..Default::default()
}],
..Default::default()
});
let mut spec = DeploymentSpec::default();
spec.template = pod_template;
spec.selector = LabelSelector {
match_expressions: None,
match_labels: Some(BTreeMap::from([(
"app".to_string(),
"nginx-test".to_string(),
)])),
}),
spec: Some(PodSpec {
containers: vec![Container {
name: "nginx".to_string(),
image: Some("nginx".to_string()),
..Default::default()
}],
..Default::default()
}),
};

let mut deployment = Deployment::default();
deployment.spec = Some(spec);
deployment.metadata.name = Some("nginx-test".to_string());
let spec = DeploymentSpec {
template: pod_template,
selector: LabelSelector {
match_expressions: None,
match_labels: Some(BTreeMap::from([(
"app".to_string(),
"nginx-test".to_string(),
)])),
},
..Default::default()
};

deployment
Deployment {
spec: Some(spec),
metadata: ObjectMeta {
name: Some("nginx-test".to_string()),
..Default::default()
},
..Default::default()
}
}

fn nginx_deployment() -> Deployment {
@@ -1,10 +1,10 @@
use harmony::{
data::Version,
inventory::Inventory,
maestro::Maestro,
modules::lamp::{LAMPConfig, LAMPScore},
topology::{K8sAnywhereTopology, Url},
topology::K8sAnywhereTopology,
};
use harmony_types::net::Url;

#[tokio::main]
async fn main() {
@@ -24,7 +24,7 @@ async fn main() {
// This config can be extended as needed for more complicated configurations
config: LAMPConfig {
project_root: "./php".into(),
database_size: format!("4Gi").into(),
database_size: "4Gi".to_string().into(),
..Default::default()
},
};
@@ -43,15 +43,13 @@ async fn main() {
// K8sAnywhereTopology as it is the most automatic one that enables you to easily deploy
// locally, to development environment from a CI, to staging, and to production with settings
// that automatically adapt to each environment grade.
let mut maestro = Maestro::<K8sAnywhereTopology>::initialize(
harmony_cli::run(
Inventory::autoload(),
K8sAnywhereTopology::from_env(),
vec![Box::new(lamp_stack)],
None,
)
.await
.unwrap();

maestro.register_all(vec![Box::new(lamp_stack)]);
// Here we bootstrap the CLI, this gives some nice features if you need them
harmony_cli::init(maestro, None).await.unwrap();
}
// That's it, end of the infra as code.
@@ -6,8 +6,9 @@ readme.workspace = true
license.workspace = true

[dependencies]
harmony = { version = "0.1.0", path = "../../harmony" }
harmony_cli = { version = "0.1.0", path = "../../harmony_cli" }
harmony_macros = { version = "0.1.0", path = "../../harmony_macros" }
harmony = { path = "../../harmony" }
harmony_cli = { path = "../../harmony_cli" }
harmony_macros = { path = "../../harmony_macros" }
harmony_types = { path = "../../harmony_types" }
tokio.workspace = true
url.workspace = true
@@ -2,7 +2,6 @@ use std::collections::HashMap;

use harmony::{
inventory::Inventory,
maestro::Maestro,
modules::{
monitoring::{
alert_channel::discord_alert_channel::DiscordWebhook,
@@ -23,8 +22,9 @@ use harmony::{
k8s::pvc::high_pvc_fill_rate_over_two_days,
},
},
topology::{K8sAnywhereTopology, Url},
topology::K8sAnywhereTopology,
};
use harmony_types::net::Url;

#[tokio::main]
async fn main() {
@@ -51,8 +51,8 @@ async fn main() {

let service_monitor_endpoint = ServiceMonitorEndpoint {
port: Some("80".to_string()),
path: "/metrics".to_string(),
scheme: HTTPScheme::HTTP,
path: Some("/metrics".to_string()),
scheme: Some(HTTPScheme::HTTP),
..Default::default()
};

@@ -74,13 +74,13 @@ async fn main() {
rules: vec![Box::new(additional_rules), Box::new(additional_rules2)],
service_monitors: vec![service_monitor],
};
let mut maestro = Maestro::<K8sAnywhereTopology>::initialize(

harmony_cli::run(
Inventory::autoload(),
K8sAnywhereTopology::from_env(),
vec![Box::new(alerting_score)],
None,
)
.await
.unwrap();

maestro.register_all(vec![Box::new(alerting_score)]);
harmony_cli::init(maestro, None).await.unwrap();
}

@@ -7,7 +7,8 @@ license.workspace = true

[dependencies]
cidr.workspace = true
harmony = { version = "0.1.0", path = "../../harmony" }
harmony_cli = { version = "0.1.0", path = "../../harmony_cli" }
harmony = { path = "../../harmony" }
harmony_cli = { path = "../../harmony_cli" }
harmony_types = { path = "../../harmony_types" }
tokio.workspace = true
url.workspace = true
@@ -1,9 +1,7 @@
use std::collections::HashMap;
use std::{collections::HashMap, str::FromStr};

use harmony::{
data::Id,
inventory::Inventory,
maestro::Maestro,
modules::{
monitoring::{
alert_channel::discord_alert_channel::DiscordWebhook,
@@ -20,16 +18,18 @@ use harmony::{
tenant::TenantScore,
},
topology::{
K8sAnywhereTopology, Url,
K8sAnywhereTopology,
tenant::{ResourceLimits, TenantConfig, TenantNetworkPolicy},
},
};
use harmony_types::id::Id;
use harmony_types::net::Url;

#[tokio::main]
async fn main() {
let tenant = TenantScore {
config: TenantConfig {
id: Id::from_string("1234".to_string()),
id: Id::from_str("1234").unwrap(),
name: "test-tenant".to_string(),
resource_limits: ResourceLimits {
cpu_request_cores: 6.0,
@@ -54,8 +54,8 @@ async fn main() {

let service_monitor_endpoint = ServiceMonitorEndpoint {
port: Some("80".to_string()),
path: "/metrics".to_string(),
scheme: HTTPScheme::HTTP,
path: Some("/metrics".to_string()),
scheme: Some(HTTPScheme::HTTP),
..Default::default()
};

@@ -78,13 +78,13 @@ async fn main() {
rules: vec![Box::new(additional_rules)],
service_monitors: vec![service_monitor],
};
let mut maestro = Maestro::<K8sAnywhereTopology>::initialize(

harmony_cli::run(
Inventory::autoload(),
K8sAnywhereTopology::from_env(),
vec![Box::new(tenant), Box::new(alerting_score)],
None,
)
.await
.unwrap();

maestro.register_all(vec![Box::new(tenant), Box::new(alerting_score)]);
harmony_cli::init(maestro, None).await.unwrap();
}

@@ -8,7 +8,6 @@ use harmony::{
|
||||
hardware::{FirewallGroup, HostCategory, Location, PhysicalHost, SwitchGroup},
|
||||
infra::opnsense::OPNSenseManagementInterface,
|
||||
inventory::Inventory,
|
||||
maestro::Maestro,
|
||||
modules::{
|
||||
http::StaticFilesHttpScore,
|
||||
ipxe::IpxeScore,
|
||||
@@ -19,9 +18,10 @@ use harmony::{
|
||||
},
|
||||
tftp::TftpScore,
|
||||
},
|
||||
topology::{LogicalHost, UnmanagedRouter, Url},
|
||||
topology::{LogicalHost, UnmanagedRouter},
|
||||
};
|
||||
use harmony_macros::{ip, mac_address};
|
||||
use harmony_types::net::Url;
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() {
|
||||
@@ -87,8 +87,7 @@ async fn main() {
|
||||
let inventory = Inventory {
|
||||
location: Location::new("I am mobile".to_string(), "earth".to_string()),
|
||||
switch: SwitchGroup::from([]),
|
||||
firewall: FirewallGroup::from([PhysicalHost::empty(HostCategory::Firewall)
|
||||
.management(Arc::new(OPNSenseManagementInterface::new()))]),
|
||||
firewall_mgmt: Box::new(OPNSenseManagementInterface::new()),
|
||||
storage_host: vec![],
|
||||
worker_host: vec![
|
||||
PhysicalHost::empty(HostCategory::Server)
|
||||
@@ -126,20 +125,28 @@ async fn main() {
|
||||
harmony::modules::okd::load_balancer::OKDLoadBalancerScore::new(&topology);
|
||||
|
||||
let tftp_score = TftpScore::new(Url::LocalFolder("./data/watchguard/tftpboot".to_string()));
|
||||
let http_score = StaticFilesHttpScore::new(Url::LocalFolder(
|
||||
"./data/watchguard/pxe-http-files".to_string(),
|
||||
));
|
||||
let http_score = StaticFilesHttpScore {
|
||||
folder_to_serve: Some(Url::LocalFolder(
|
||||
"./data/watchguard/pxe-http-files".to_string(),
|
||||
)),
|
||||
files: vec![],
|
||||
};
|
||||
let ipxe_score = IpxeScore::new();
|
||||
let mut maestro = Maestro::initialize(inventory, topology).await.unwrap();
|
||||
maestro.register_all(vec![
|
||||
Box::new(dns_score),
|
||||
Box::new(bootstrap_dhcp_score),
|
||||
Box::new(bootstrap_load_balancer_score),
|
||||
Box::new(load_balancer_score),
|
||||
Box::new(tftp_score),
|
||||
Box::new(http_score),
|
||||
Box::new(ipxe_score),
|
||||
Box::new(dhcp_score),
|
||||
]);
|
||||
harmony_tui::init(maestro).await.unwrap();
|
||||
|
||||
harmony_tui::run(
|
||||
inventory,
|
||||
topology,
|
||||
vec![
|
||||
Box::new(dns_score),
|
||||
Box::new(bootstrap_dhcp_score),
|
||||
Box::new(bootstrap_load_balancer_score),
|
||||
Box::new(load_balancer_score),
|
||||
Box::new(tftp_score),
|
||||
Box::new(http_score),
|
||||
Box::new(ipxe_score),
|
||||
Box::new(dhcp_score),
|
||||
],
|
||||
)
|
||||
.await
|
||||
.unwrap();
|
||||
}
|
||||
|
||||
@@ -1,20 +1,18 @@
|
||||
use harmony::{
|
||||
inventory::Inventory, maestro::Maestro, modules::monitoring::ntfy::ntfy::NtfyScore,
|
||||
topology::K8sAnywhereTopology,
|
||||
inventory::Inventory, modules::monitoring::ntfy::ntfy::NtfyScore, topology::K8sAnywhereTopology,
|
||||
};
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() {
|
||||
let mut maestro = Maestro::<K8sAnywhereTopology>::initialize(
|
||||
harmony_cli::run(
|
||||
Inventory::autoload(),
|
||||
K8sAnywhereTopology::from_env(),
|
||||
vec![Box::new(NtfyScore {
|
||||
namespace: "monitoring".to_string(),
|
||||
host: "localhost".to_string(),
|
||||
})],
|
||||
None,
|
||||
)
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
maestro.register_all(vec![Box::new(NtfyScore {
|
||||
namespace: "monitoring".to_string(),
|
||||
host: "localhost".to_string(),
|
||||
})]);
|
||||
harmony_cli::init(maestro, None).await.unwrap();
|
||||
}
|
||||
|
||||
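Because this diff view interleaves removed and added lines, here is the resulting ntfy example reassembled from the added lines above, as a readability aid only; it uses exactly the APIs shown in this change (harmony_cli::run replacing Maestro::initialize / register_all / harmony_cli::init):

use harmony::{
    inventory::Inventory, modules::monitoring::ntfy::ntfy::NtfyScore,
    topology::K8sAnywhereTopology,
};

#[tokio::main]
async fn main() {
    harmony_cli::run(
        Inventory::autoload(),
        K8sAnywhereTopology::from_env(),
        vec![Box::new(NtfyScore {
            namespace: "monitoring".to_string(),
            host: "localhost".to_string(),
        })],
        None,
    )
    .await
    .unwrap();
}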
examples/okd_pxe/Cargo.toml (new file, 21 lines)
@@ -0,0 +1,21 @@
[package]
name = "example-pxe"
edition = "2024"
version.workspace = true
readme.workspace = true
license.workspace = true
publish = false

[dependencies]
harmony = { path = "../../harmony" }
harmony_cli = { path = "../../harmony_cli" }
harmony_types = { path = "../../harmony_types" }
harmony_secret = { path = "../../harmony_secret" }
harmony_secret_derive = { path = "../../harmony_secret_derive" }
cidr = { workspace = true }
tokio = { workspace = true }
harmony_macros = { path = "../../harmony_macros" }
log = { workspace = true }
env_logger = { workspace = true }
url = { workspace = true }
serde.workspace = true
examples/okd_pxe/src/main.rs (new file, 24 lines)
@@ -0,0 +1,24 @@
mod topology;

use crate::topology::{get_inventory, get_topology};
use harmony::modules::okd::ipxe::OkdIpxeScore;

#[tokio::main]
async fn main() {
    let inventory = get_inventory();
    let topology = get_topology().await;

    let kickstart_filename = "inventory.kickstart".to_string();
    let cluster_pubkey_filename = "cluster_ssh_key.pub".to_string();
    let harmony_inventory_agent = "harmony_inventory_agent".to_string();

    let ipxe_score = OkdIpxeScore {
        kickstart_filename,
        harmony_inventory_agent,
        cluster_pubkey_filename,
    };

    harmony_cli::run(inventory, topology, vec![Box::new(ipxe_score)], None)
        .await
        .unwrap();
}
examples/okd_pxe/src/topology.rs (new file, 77 lines)
@@ -0,0 +1,77 @@
use cidr::Ipv4Cidr;
use harmony::{
    hardware::{FirewallGroup, HostCategory, Location, PhysicalHost, SwitchGroup},
    infra::opnsense::OPNSenseManagementInterface,
    inventory::Inventory,
    topology::{HAClusterTopology, LogicalHost, UnmanagedRouter},
};
use harmony_macros::{ip, ipv4};
use harmony_secret::{Secret, SecretManager};
use serde::{Deserialize, Serialize};
use std::{net::IpAddr, sync::Arc};

#[derive(Secret, Serialize, Deserialize, Debug, PartialEq)]
struct OPNSenseFirewallConfig {
    username: String,
    password: String,
}

pub async fn get_topology() -> HAClusterTopology {
    let firewall = harmony::topology::LogicalHost {
        ip: ip!("192.168.1.1"),
        name: String::from("opnsense-1"),
    };

    let config = SecretManager::get::<OPNSenseFirewallConfig>().await;
    let config = config.unwrap();

    let opnsense = Arc::new(
        harmony::infra::opnsense::OPNSenseFirewall::new(
            firewall,
            None,
            &config.username,
            &config.password,
        )
        .await,
    );
    let lan_subnet = ipv4!("192.168.1.0");
    let gateway_ipv4 = ipv4!("192.168.1.1");
    let gateway_ip = IpAddr::V4(gateway_ipv4);
    harmony::topology::HAClusterTopology {
        domain_name: "demo.harmony.mcd".to_string(),
        router: Arc::new(UnmanagedRouter::new(
            gateway_ip,
            Ipv4Cidr::new(lan_subnet, 24).unwrap(),
        )),
        load_balancer: opnsense.clone(),
        firewall: opnsense.clone(),
        tftp_server: opnsense.clone(),
        http_server: opnsense.clone(),
        dhcp_server: opnsense.clone(),
        dns_server: opnsense.clone(),
        control_plane: vec![LogicalHost {
            ip: ip!("10.100.8.20"),
            name: "cp0".to_string(),
        }],
        bootstrap_host: LogicalHost {
            ip: ip!("10.100.8.20"),
            name: "cp0".to_string(),
        },
        workers: vec![],
        switch: vec![],
    }
}

pub fn get_inventory() -> Inventory {
    Inventory {
        location: Location::new(
            "Some virtual machine or maybe a physical machine if you're cool".to_string(),
            "testopnsense".to_string(),
        ),
        switch: SwitchGroup::from([]),
        firewall_mgmt: Box::new(OPNSenseManagementInterface::new()),
        storage_host: vec![],
        worker_host: vec![],
        control_plane_host: vec![],
    }
}
examples/okd_pxe/ssh_example_key (new file, 7 lines)
@@ -0,0 +1,7 @@
-----BEGIN OPENSSH PRIVATE KEY-----
b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAAAMwAAAAtzc2gtZW
QyNTUxOQAAACAcemw8pbwuvHFaYynxBbS0Cf3ThYuj1Utr7CDqjwySHAAAAJikacCNpGnA
jQAAAAtzc2gtZWQyNTUxOQAAACAcemw8pbwuvHFaYynxBbS0Cf3ThYuj1Utr7CDqjwySHA
AAAECiiKk4V6Q5cVs6axDM4sjAzZn/QCZLQekmYQXS9XbEYxx6bDylvC68cVpjKfEFtLQJ
/dOFi6PVS2vsIOqPDJIcAAAAEGplYW5nYWJAbGlsaWFuZTIBAgMEBQ==
-----END OPENSSH PRIVATE KEY-----
examples/okd_pxe/ssh_example_key.pub (new file, 1 line)
@@ -0,0 +1 @@
ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIBx6bDylvC68cVpjKfEFtLQJ/dOFi6PVS2vsIOqPDJIc jeangab@liliane2
@@ -8,7 +8,6 @@ use harmony::{
|
||||
hardware::{FirewallGroup, HostCategory, Location, PhysicalHost, SwitchGroup},
|
||||
infra::opnsense::OPNSenseManagementInterface,
|
||||
inventory::Inventory,
|
||||
maestro::Maestro,
|
||||
modules::{
|
||||
dummy::{ErrorScore, PanicScore, SuccessScore},
|
||||
http::StaticFilesHttpScore,
|
||||
@@ -16,9 +15,10 @@ use harmony::{
|
||||
opnsense::OPNsenseShellCommandScore,
|
||||
tftp::TftpScore,
|
||||
},
|
||||
topology::{LogicalHost, UnmanagedRouter, Url},
|
||||
topology::{LogicalHost, UnmanagedRouter},
|
||||
};
|
||||
use harmony_macros::{ip, mac_address};
|
||||
use harmony_types::net::Url;
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() {
|
||||
@@ -63,8 +63,7 @@ async fn main() {
|
||||
"wk".to_string(),
|
||||
),
|
||||
switch: SwitchGroup::from([]),
|
||||
firewall: FirewallGroup::from([PhysicalHost::empty(HostCategory::Firewall)
|
||||
.management(Arc::new(OPNSenseManagementInterface::new()))]),
|
||||
firewall_mgmt: Box::new(OPNSenseManagementInterface::new()),
|
||||
storage_host: vec![],
|
||||
worker_host: vec![],
|
||||
control_plane_host: vec![
|
||||
@@ -81,23 +80,31 @@ async fn main() {
|
||||
let load_balancer_score = OKDLoadBalancerScore::new(&topology);
|
||||
|
||||
let tftp_score = TftpScore::new(Url::LocalFolder("./data/watchguard/tftpboot".to_string()));
|
||||
let http_score = StaticFilesHttpScore::new(Url::LocalFolder(
|
||||
"./data/watchguard/pxe-http-files".to_string(),
|
||||
));
|
||||
let mut maestro = Maestro::initialize(inventory, topology).await.unwrap();
|
||||
maestro.register_all(vec![
|
||||
Box::new(dns_score),
|
||||
Box::new(dhcp_score),
|
||||
Box::new(load_balancer_score),
|
||||
Box::new(tftp_score),
|
||||
Box::new(http_score),
|
||||
Box::new(OPNsenseShellCommandScore {
|
||||
opnsense: opnsense.get_opnsense_config(),
|
||||
command: "touch /tmp/helloharmonytouching".to_string(),
|
||||
}),
|
||||
Box::new(SuccessScore {}),
|
||||
Box::new(ErrorScore {}),
|
||||
Box::new(PanicScore {}),
|
||||
]);
|
||||
harmony_tui::init(maestro).await.unwrap();
|
||||
let http_score = StaticFilesHttpScore {
|
||||
folder_to_serve: Some(Url::LocalFolder(
|
||||
"./data/watchguard/pxe-http-files".to_string(),
|
||||
)),
|
||||
files: vec![],
|
||||
};
|
||||
|
||||
harmony_tui::run(
|
||||
inventory,
|
||||
topology,
|
||||
vec![
|
||||
Box::new(dns_score),
|
||||
Box::new(dhcp_score),
|
||||
Box::new(load_balancer_score),
|
||||
Box::new(tftp_score),
|
||||
Box::new(http_score),
|
||||
Box::new(OPNsenseShellCommandScore {
|
||||
opnsense: opnsense.get_opnsense_config(),
|
||||
command: "touch /tmp/helloharmonytouching".to_string(),
|
||||
}),
|
||||
Box::new(SuccessScore {}),
|
||||
Box::new(ErrorScore {}),
|
||||
Box::new(PanicScore {}),
|
||||
],
|
||||
)
|
||||
.await
|
||||
.unwrap();
|
||||
}
|
||||
|
||||
@@ -2,24 +2,21 @@ use std::{path::PathBuf, sync::Arc};
|
||||
|
||||
use harmony::{
|
||||
inventory::Inventory,
|
||||
maestro::Maestro,
|
||||
modules::application::{
|
||||
ApplicationScore, RustWebFramework, RustWebapp,
|
||||
features::{ContinuousDelivery, Monitoring},
|
||||
modules::{
|
||||
application::{
|
||||
ApplicationScore, RustWebFramework, RustWebapp,
|
||||
features::{ContinuousDelivery, Monitoring},
|
||||
},
|
||||
monitoring::alert_channel::{
|
||||
discord_alert_channel::DiscordWebhook, webhook_receiver::WebhookReceiver,
|
||||
},
|
||||
},
|
||||
topology::{K8sAnywhereTopology, Url},
|
||||
topology::K8sAnywhereTopology,
|
||||
};
|
||||
use harmony_cli::cli_logger;
|
||||
use harmony_types::net::Url;
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() {
|
||||
let cli_logger_handle = cli_logger::init();
|
||||
|
||||
let topology = K8sAnywhereTopology::from_env();
|
||||
let mut maestro = Maestro::initialize(Inventory::autoload(), topology)
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
let application = Arc::new(RustWebapp {
|
||||
name: "harmony-example-rust-webapp".to_string(),
|
||||
domain: Url::Url(url::Url::parse("https://rustapp.harmony.example.com").unwrap()),
|
||||
@@ -27,6 +24,16 @@ async fn main() {
|
||||
framework: Some(RustWebFramework::Leptos),
|
||||
});
|
||||
|
||||
let discord_receiver = DiscordWebhook {
|
||||
name: "test-discord".to_string(),
|
||||
url: Url::Url(url::Url::parse("https://discord.doesnt.exist.com").unwrap()),
|
||||
};
|
||||
|
||||
let webhook_receiver = WebhookReceiver {
|
||||
name: "sample-webhook-receiver".to_string(),
|
||||
url: Url::Url(url::Url::parse("https://webhook-doesnt-exist.com").unwrap()),
|
||||
};
|
||||
|
||||
let app = ApplicationScore {
|
||||
features: vec![
|
||||
Box::new(ContinuousDelivery {
|
||||
@@ -34,13 +41,19 @@ async fn main() {
|
||||
}),
|
||||
Box::new(Monitoring {
|
||||
application: application.clone(),
|
||||
}), // TODO: add backups, multisite ha, etc.
|
||||
alert_receiver: vec![Box::new(discord_receiver), Box::new(webhook_receiver)],
|
||||
}),
|
||||
// TODO add backups, multisite ha, etc
|
||||
],
|
||||
application,
|
||||
};
|
||||
|
||||
maestro.register_all(vec![Box::new(app)]);
|
||||
harmony_cli::init(maestro, None).await.unwrap();
|
||||
|
||||
let _ = tokio::try_join!(cli_logger_handle);
|
||||
harmony_cli::run(
|
||||
Inventory::autoload(),
|
||||
K8sAnywhereTopology::from_env(),
|
||||
vec![Box::new(app)],
|
||||
None,
|
||||
)
|
||||
.await
|
||||
.unwrap();
|
||||
}
|
||||
|
||||
@@ -1,30 +1,30 @@
|
||||
use std::str::FromStr;
|
||||
|
||||
use harmony::{
|
||||
data::Id,
|
||||
inventory::Inventory,
|
||||
maestro::Maestro,
|
||||
modules::tenant::TenantScore,
|
||||
topology::{K8sAnywhereTopology, tenant::TenantConfig},
|
||||
};
|
||||
use harmony_types::id::Id;
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() {
|
||||
let tenant = TenantScore {
|
||||
config: TenantConfig {
|
||||
id: Id::from_str("test-tenant-id"),
|
||||
id: Id::from_str("test-tenant-id").unwrap(),
|
||||
name: "testtenant".to_string(),
|
||||
..Default::default()
|
||||
},
|
||||
};
|
||||
|
||||
let mut maestro = Maestro::<K8sAnywhereTopology>::initialize(
|
||||
harmony_cli::run(
|
||||
Inventory::autoload(),
|
||||
K8sAnywhereTopology::from_env(),
|
||||
vec![Box::new(tenant)],
|
||||
None,
|
||||
)
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
maestro.register_all(vec![Box::new(tenant)]);
|
||||
harmony_cli::init(maestro, None).await.unwrap();
|
||||
}
|
||||
|
||||
// TODO write tests
|
||||
|
||||
@@ -2,7 +2,6 @@ use std::net::{SocketAddr, SocketAddrV4};
|
||||
|
||||
use harmony::{
|
||||
inventory::Inventory,
|
||||
maestro::Maestro,
|
||||
modules::{
|
||||
dns::DnsScore,
|
||||
dummy::{ErrorScore, PanicScore, SuccessScore},
|
||||
@@ -16,18 +15,19 @@ use harmony_macros::ipv4;
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() {
|
||||
let inventory = Inventory::autoload();
|
||||
let topology = DummyInfra {};
|
||||
let mut maestro = Maestro::initialize(inventory, topology).await.unwrap();
|
||||
|
||||
maestro.register_all(vec![
|
||||
Box::new(SuccessScore {}),
|
||||
Box::new(ErrorScore {}),
|
||||
Box::new(PanicScore {}),
|
||||
Box::new(DnsScore::new(vec![], None)),
|
||||
Box::new(build_large_score()),
|
||||
]);
|
||||
harmony_tui::init(maestro).await.unwrap();
|
||||
harmony_tui::run(
|
||||
Inventory::autoload(),
|
||||
DummyInfra {},
|
||||
vec![
|
||||
Box::new(SuccessScore {}),
|
||||
Box::new(ErrorScore {}),
|
||||
Box::new(PanicScore {}),
|
||||
Box::new(DnsScore::new(vec![], None)),
|
||||
Box::new(build_large_score()),
|
||||
],
|
||||
)
|
||||
.await
|
||||
.unwrap();
|
||||
}
|
||||
|
||||
fn build_large_score() -> LoadBalancerScore {
|
||||
|
||||
examples/validate_ceph_cluster_health/Cargo.toml (new file, 11 lines)
@@ -0,0 +1,11 @@
[package]
name = "example_validate_ceph_cluster_health"
edition = "2024"
version.workspace = true
readme.workspace = true
license.workspace = true

[dependencies]
harmony = { version = "0.1.0", path = "../../harmony" }
harmony_cli = { version = "0.1.0", path = "../../harmony_cli" }
tokio.workspace = true
examples/validate_ceph_cluster_health/src/main.rs (new file, 18 lines)
@@ -0,0 +1,18 @@
use harmony::{
    inventory::Inventory,
    modules::storage::ceph::ceph_validate_health_score::CephVerifyClusterHealth,
    topology::K8sAnywhereTopology,
};

#[tokio::main]
async fn main() {
    let ceph_health_score = CephVerifyClusterHealth {
        rook_ceph_namespace: "rook-ceph".to_string(),
    };

    let topology = K8sAnywhereTopology::from_env();
    let inventory = Inventory::autoload();
    harmony_cli::run(inventory, topology, vec![Box::new(ceph_health_score)], None)
        .await
        .unwrap();
}
@@ -5,16 +5,17 @@ version.workspace = true
|
||||
readme.workspace = true
|
||||
license.workspace = true
|
||||
|
||||
[features]
|
||||
testing = []
|
||||
|
||||
[dependencies]
|
||||
rand = "0.9"
|
||||
hex = "0.4"
|
||||
libredfish = "0.1.1"
|
||||
reqwest = { version = "0.11", features = ["blocking", "json"] }
|
||||
reqwest = { version = "0.11", features = ["blocking", "json", "rustls-tls"], default-features = false }
|
||||
russh = "0.45.0"
|
||||
rust-ipmi = "0.1.1"
|
||||
semver = "1.0.23"
|
||||
serde = { version = "1.0.209", features = ["derive", "rc"] }
|
||||
serde_json = "1.0.127"
|
||||
serde.workspace = true
|
||||
serde_json.workspace = true
|
||||
tokio.workspace = true
|
||||
derive-new.workspace = true
|
||||
log.workspace = true
|
||||
@@ -27,7 +28,7 @@ harmony_macros = { path = "../harmony_macros" }
|
||||
harmony_types = { path = "../harmony_types" }
|
||||
uuid.workspace = true
|
||||
url.workspace = true
|
||||
kube.workspace = true
|
||||
kube = { workspace = true, features = ["derive"] }
|
||||
k8s-openapi.workspace = true
|
||||
serde_yaml.workspace = true
|
||||
http.workspace = true
|
||||
@@ -35,8 +36,8 @@ serde-value.workspace = true
|
||||
helm-wrapper-rs = "0.4.0"
|
||||
non-blank-string-rs = "1.0.4"
|
||||
k3d-rs = { path = "../k3d" }
|
||||
directories = "6.0.0"
|
||||
lazy_static = "1.5.0"
|
||||
directories.workspace = true
|
||||
lazy_static.workspace = true
|
||||
dockerfile_builder = "0.1.5"
|
||||
temp-file = "0.1.9"
|
||||
convert_case.workspace = true
|
||||
@@ -56,12 +57,19 @@ similar.workspace = true
|
||||
futures-util = "0.3.31"
|
||||
tokio-util = "0.7.15"
|
||||
strum = { version = "0.27.1", features = ["derive"] }
|
||||
tempfile = "3.20.0"
|
||||
tempfile.workspace = true
|
||||
serde_with = "3.14.0"
|
||||
schemars = "0.8.22"
|
||||
kube-derive = "1.1.0"
|
||||
bollard.workspace = true
|
||||
tar.workspace = true
|
||||
base64.workspace = true
|
||||
thiserror.workspace = true
|
||||
once_cell = "1.21.3"
|
||||
harmony_inventory_agent = { path = "../harmony_inventory_agent" }
|
||||
harmony_secret_derive = { version = "0.1.0", path = "../harmony_secret_derive" }
|
||||
askama.workspace = true
|
||||
sqlx.workspace = true
|
||||
|
||||
[dev-dependencies]
|
||||
pretty_assertions.workspace = true
|
||||
|
||||
harmony/harmony.rlib (new binary file, not shown)
@@ -11,5 +11,13 @@ lazy_static! {
|
||||
pub static ref REGISTRY_PROJECT: String =
|
||||
std::env::var("HARMONY_REGISTRY_PROJECT").unwrap_or_else(|_| "harmony".to_string());
|
||||
pub static ref DRY_RUN: bool =
|
||||
std::env::var("HARMONY_DRY_RUN").map_or(true, |value| value.parse().unwrap_or(true));
|
||||
std::env::var("HARMONY_DRY_RUN").is_ok_and(|value| value.parse().unwrap_or(false));
|
||||
pub static ref DEFAULT_DATABASE_URL: String = "sqlite://harmony.sqlite".to_string();
|
||||
pub static ref DATABASE_URL: String = std::env::var("HARMONY_DATABASE_URL")
|
||||
.map(|value| if value.is_empty() {
|
||||
(*DEFAULT_DATABASE_URL).clone()
|
||||
} else {
|
||||
value
|
||||
})
|
||||
.unwrap_or((*DEFAULT_DATABASE_URL).clone());
|
||||
}
|
||||
|
||||
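A minimal standalone sketch of the new environment handling (not the actual constants; it simply mirrors the expressions above): HARMONY_DRY_RUN is now opt-in and defaults to false, and HARMONY_DATABASE_URL falls back to sqlite://harmony.sqlite when unset or empty.

fn main() {
    // Mirrors the DRY_RUN expression: unset or unparseable -> false.
    let dry_run: bool = std::env::var("HARMONY_DRY_RUN")
        .is_ok_and(|value| value.parse().unwrap_or(false));

    // Mirrors the DATABASE_URL expression: unset or empty -> default sqlite file.
    let database_url = std::env::var("HARMONY_DATABASE_URL")
        .ok()
        .filter(|value| !value.is_empty())
        .unwrap_or_else(|| "sqlite://harmony.sqlite".to_string());

    println!("dry_run={dry_run} database_url={database_url}");
}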
harmony/src/domain/data/file.rs (new file, 22 lines)
@@ -0,0 +1,22 @@
use serde::{Deserialize, Serialize};

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct FileContent {
    pub path: FilePath,
    pub content: String,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum FilePath {
    Relative(String),
    Absolute(String),
}

impl std::fmt::Display for FilePath {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            FilePath::Relative(path) => f.write_fmt(format_args!("./{path}")),
            FilePath::Absolute(path) => f.write_fmt(format_args!("/{path}")),
        }
    }
}
|
||||
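A small usage sketch for the new FileContent/FilePath types; the harmony::data import path is assumed from the mod/pub use change further below:

use harmony::data::{FileContent, FilePath};

fn main() {
    let kickstart = FileContent {
        path: FilePath::Relative("pxe/inventory.kickstart".to_string()),
        content: "install\nreboot\n".to_string(),
    };
    // Display prefixes relative paths with "./" and absolute paths with "/".
    assert_eq!(kickstart.path.to_string(), "./pxe/inventory.kickstart");
    assert_eq!(
        FilePath::Absolute("etc/harmony/example".to_string()).to_string(),
        "/etc/harmony/example"
    );
}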
@@ -1,5 +1,6 @@
|
||||
use rand::distr::Alphanumeric;
|
||||
use rand::distr::SampleString;
|
||||
use std::str::FromStr;
|
||||
use std::time::SystemTime;
|
||||
use std::time::UNIX_EPOCH;
|
||||
|
||||
@@ -24,12 +25,26 @@ pub struct Id {
|
||||
}
|
||||
|
||||
impl Id {
|
||||
pub fn from_string(value: String) -> Self {
|
||||
Self { value }
|
||||
pub fn empty() -> Self {
|
||||
Id {
|
||||
value: String::new(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub fn from_str(value: &str) -> Self {
|
||||
Self::from_string(value.to_string())
|
||||
impl FromStr for Id {
|
||||
type Err = ();
|
||||
|
||||
fn from_str(s: &str) -> Result<Self, Self::Err> {
|
||||
Ok(Id {
|
||||
value: s.to_string(),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
impl From<String> for Id {
|
||||
fn from(value: String) -> Self {
|
||||
Self { value }
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
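A quick sketch of how callers migrate off the removed Id::from_string/from_str helpers onto the standard traits; the harmony_types::id::Id path matches the imports used in the examples above:

use harmony_types::id::Id;
use std::str::FromStr;

fn main() {
    // Old: Id::from_string("1234".to_string()) or Id::from_str("1234")
    // New: FromStr (infallible here, Err = ()) or From<String>.
    let from_str = Id::from_str("1234").unwrap();
    let from_string: Id = "1234".to_string().into();
    let _ = (from_str, from_string);
}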
@@ -1,4 +1,4 @@
|
||||
mod id;
|
||||
mod file;
|
||||
mod version;
|
||||
pub use id::*;
|
||||
pub use file::*;
|
||||
pub use version::*;
|
||||
|
||||
@@ -47,7 +47,7 @@ impl serde::Serialize for Version {
|
||||
|
||||
impl std::fmt::Display for Version {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
return self.value.fmt(f);
|
||||
self.value.fmt(f)
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -1,8 +1,7 @@
|
||||
use std::fmt;
|
||||
|
||||
use async_trait::async_trait;
|
||||
|
||||
use super::topology::IpAddress;
|
||||
use harmony_types::net::IpAddress;
|
||||
|
||||
#[derive(Debug)]
|
||||
pub enum ExecutorError {
|
||||
|
||||
@@ -1,77 +1,174 @@
|
||||
use std::sync::Arc;
|
||||
|
||||
use derive_new::new;
|
||||
use harmony_inventory_agent::hwinfo::{CPU, MemoryModule, NetworkInterface, StorageDrive};
|
||||
use harmony_types::net::MacAddress;
|
||||
use serde::{Serialize, Serializer, ser::SerializeStruct};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use serde_value::Value;
|
||||
|
||||
pub type HostGroup = Vec<PhysicalHost>;
|
||||
pub type SwitchGroup = Vec<Switch>;
|
||||
pub type FirewallGroup = Vec<PhysicalHost>;
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
#[derive(Debug, Clone, Serialize)]
|
||||
pub struct PhysicalHost {
|
||||
pub id: Id,
|
||||
pub category: HostCategory,
|
||||
pub network: Vec<NetworkInterface>,
|
||||
pub management: Arc<dyn ManagementInterface>,
|
||||
pub storage: Vec<Storage>,
|
||||
pub storage: Vec<StorageDrive>,
|
||||
pub labels: Vec<Label>,
|
||||
pub memory_size: Option<u64>,
|
||||
pub cpu_count: Option<u64>,
|
||||
pub memory_modules: Vec<MemoryModule>,
|
||||
pub cpus: Vec<CPU>,
|
||||
}
|
||||
|
||||
impl PhysicalHost {
|
||||
pub fn empty(category: HostCategory) -> Self {
|
||||
Self {
|
||||
id: Id::empty(),
|
||||
category,
|
||||
network: vec![],
|
||||
storage: vec![],
|
||||
labels: vec![],
|
||||
management: Arc::new(ManualManagementInterface {}),
|
||||
memory_size: None,
|
||||
cpu_count: None,
|
||||
memory_modules: vec![],
|
||||
cpus: vec![],
|
||||
}
|
||||
}
|
||||
|
||||
pub fn summary(&self) -> String {
|
||||
let mut parts = Vec::new();
|
||||
|
||||
// Part 1: System Model (from labels) or Category as a fallback
|
||||
let model = self
|
||||
.labels
|
||||
.iter()
|
||||
.find(|l| l.name == "system-product-name" || l.name == "model")
|
||||
.map(|l| l.value.clone())
|
||||
.unwrap_or_else(|| self.category.to_string());
|
||||
parts.push(model);
|
||||
|
||||
// Part 2: CPU Information
|
||||
if !self.cpus.is_empty() {
|
||||
let cpu_count = self.cpus.len();
|
||||
let total_cores = self.cpus.iter().map(|c| c.cores).sum::<u32>();
|
||||
let total_threads = self.cpus.iter().map(|c| c.threads).sum::<u32>();
|
||||
let model_name = &self.cpus[0].model;
|
||||
|
||||
let cpu_summary = if cpu_count > 1 {
|
||||
format!(
|
||||
"{}x {} ({}c/{}t)",
|
||||
cpu_count, model_name, total_cores, total_threads
|
||||
)
|
||||
} else {
|
||||
format!("{} ({}c/{}t)", model_name, total_cores, total_threads)
|
||||
};
|
||||
parts.push(cpu_summary);
|
||||
}
|
||||
|
||||
// Part 3: Memory Information
|
||||
if !self.memory_modules.is_empty() {
|
||||
let total_mem_bytes = self
|
||||
.memory_modules
|
||||
.iter()
|
||||
.map(|m| m.size_bytes)
|
||||
.sum::<u64>();
|
||||
let total_mem_gb = (total_mem_bytes as f64 / (1024.0 * 1024.0 * 1024.0)).round() as u64;
|
||||
|
||||
// Find the most common speed among modules
|
||||
let mut speeds = std::collections::HashMap::new();
|
||||
for module in &self.memory_modules {
|
||||
if let Some(speed) = module.speed_mhz {
|
||||
*speeds.entry(speed).or_insert(0) += 1;
|
||||
}
|
||||
}
|
||||
let common_speed = speeds
|
||||
.into_iter()
|
||||
.max_by_key(|&(_, count)| count)
|
||||
.map(|(speed, _)| speed);
|
||||
|
||||
if let Some(speed) = common_speed {
|
||||
parts.push(format!("{} GB RAM @ {}MHz", total_mem_gb, speed));
|
||||
} else {
|
||||
parts.push(format!("{} GB RAM", total_mem_gb));
|
||||
}
|
||||
}
|
||||
|
||||
// Part 4: Storage Information
|
||||
if !self.storage.is_empty() {
|
||||
let total_storage_bytes = self.storage.iter().map(|d| d.size_bytes).sum::<u64>();
|
||||
let drive_count = self.storage.len();
|
||||
let first_drive_model = &self.storage[0].model;
|
||||
|
||||
// Helper to format bytes into TB or GB
|
||||
let format_storage = |bytes: u64| {
|
||||
let tb = bytes as f64 / (1024.0 * 1024.0 * 1024.0 * 1024.0);
|
||||
if tb >= 1.0 {
|
||||
format!("{:.2} TB", tb)
|
||||
} else {
|
||||
let gb = bytes as f64 / (1024.0 * 1024.0 * 1024.0);
|
||||
format!("{:.0} GB", gb)
|
||||
}
|
||||
};
|
||||
|
||||
let storage_summary = if drive_count > 1 {
|
||||
format!(
|
||||
"{} Storage ({}x {})",
|
||||
format_storage(total_storage_bytes),
|
||||
drive_count,
|
||||
first_drive_model
|
||||
)
|
||||
} else {
|
||||
format!(
|
||||
"{} Storage ({})",
|
||||
format_storage(total_storage_bytes),
|
||||
first_drive_model
|
||||
)
|
||||
};
|
||||
parts.push(storage_summary);
|
||||
}
|
||||
|
||||
// Part 5: Network Information
|
||||
// Prioritize an "up" interface with an IPv4 address
|
||||
let best_nic = self
|
||||
.network
|
||||
.iter()
|
||||
.find(|n| n.is_up && !n.ipv4_addresses.is_empty())
|
||||
.or_else(|| self.network.first());
|
||||
|
||||
if let Some(nic) = best_nic {
|
||||
let speed = nic
|
||||
.speed_mbps
|
||||
.map(|s| format!("{}Gbps", s / 1000))
|
||||
.unwrap_or_else(|| "N/A".to_string());
|
||||
let mac = nic.mac_address.to_string();
|
||||
let nic_summary = if let Some(ip) = nic.ipv4_addresses.first() {
|
||||
format!("NIC: {} ({}, {})", speed, ip, mac)
|
||||
} else {
|
||||
format!("NIC: {} ({})", speed, mac)
|
||||
};
|
||||
parts.push(nic_summary);
|
||||
}
|
||||
|
||||
parts.join(" | ")
|
||||
}
|
||||
|
||||
pub fn cluster_mac(&self) -> MacAddress {
|
||||
self.network
|
||||
.get(0)
|
||||
.first()
|
||||
.expect("Cluster physical host should have a network interface")
|
||||
.mac_address
|
||||
.clone()
|
||||
}
|
||||
|
||||
pub fn cpu(mut self, cpu_count: Option<u64>) -> Self {
|
||||
self.cpu_count = cpu_count;
|
||||
self
|
||||
}
|
||||
|
||||
pub fn memory_size(mut self, memory_size: Option<u64>) -> Self {
|
||||
self.memory_size = memory_size;
|
||||
self
|
||||
}
|
||||
|
||||
pub fn storage(
|
||||
mut self,
|
||||
connection: StorageConnectionType,
|
||||
kind: StorageKind,
|
||||
size: u64,
|
||||
serial: String,
|
||||
) -> Self {
|
||||
self.storage.push(Storage {
|
||||
connection,
|
||||
kind,
|
||||
size,
|
||||
serial,
|
||||
});
|
||||
self
|
||||
}
|
||||
|
||||
pub fn mac_address(mut self, mac_address: MacAddress) -> Self {
|
||||
self.network.push(NetworkInterface {
|
||||
name: None,
|
||||
name: String::new(),
|
||||
mac_address,
|
||||
speed: None,
|
||||
speed_mbps: None,
|
||||
is_up: false,
|
||||
mtu: 0,
|
||||
ipv4_addresses: vec![],
|
||||
ipv6_addresses: vec![],
|
||||
driver: String::new(),
|
||||
firmware_version: None,
|
||||
});
|
||||
self
|
||||
}
|
||||
@@ -80,52 +177,56 @@ impl PhysicalHost {
|
||||
self.labels.push(Label { name, value });
|
||||
self
|
||||
}
|
||||
|
||||
pub fn management(mut self, management: Arc<dyn ManagementInterface>) -> Self {
|
||||
self.management = management;
|
||||
self
|
||||
}
|
||||
}
|
||||
|
||||
// Custom Serialize implementation for PhysicalHost
|
||||
impl Serialize for PhysicalHost {
|
||||
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
|
||||
// impl Serialize for PhysicalHost {
|
||||
// fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
|
||||
// where
|
||||
// S: Serializer,
|
||||
// {
|
||||
// // Determine the number of fields
|
||||
// let mut num_fields = 5; // category, network, storage, labels, management
|
||||
// if self.memory_modules.is_some() {
|
||||
// num_fields += 1;
|
||||
// }
|
||||
// if self.cpus.is_some() {
|
||||
// num_fields += 1;
|
||||
// }
|
||||
//
|
||||
// // Create a serialization structure
|
||||
// let mut state = serializer.serialize_struct("PhysicalHost", num_fields)?;
|
||||
//
|
||||
// // Serialize the standard fields
|
||||
// state.serialize_field("category", &self.category)?;
|
||||
// state.serialize_field("network", &self.network)?;
|
||||
// state.serialize_field("storage", &self.storage)?;
|
||||
// state.serialize_field("labels", &self.labels)?;
|
||||
//
|
||||
// // Serialize optional fields
|
||||
// if let Some(memory) = self.memory_modules {
|
||||
// state.serialize_field("memory_size", &memory)?;
|
||||
// }
|
||||
// if let Some(cpu) = self.cpus {
|
||||
// state.serialize_field("cpu_count", &cpu)?;
|
||||
// }
|
||||
//
|
||||
// let mgmt_data = self.management.serialize_management();
|
||||
// // pub management: Arc<dyn ManagementInterface>,
|
||||
//
|
||||
// // Handle management interface - either as a field or flattened
|
||||
// state.serialize_field("management", &mgmt_data)?;
|
||||
//
|
||||
// state.end()
|
||||
// }
|
||||
// }
|
||||
|
||||
impl<'de> Deserialize<'de> for PhysicalHost {
|
||||
fn deserialize<D>(_deserializer: D) -> Result<Self, D::Error>
|
||||
where
|
||||
S: Serializer,
|
||||
D: serde::Deserializer<'de>,
|
||||
{
|
||||
// Determine the number of fields
|
||||
let mut num_fields = 5; // category, network, storage, labels, management
|
||||
if self.memory_size.is_some() {
|
||||
num_fields += 1;
|
||||
}
|
||||
if self.cpu_count.is_some() {
|
||||
num_fields += 1;
|
||||
}
|
||||
|
||||
// Create a serialization structure
|
||||
let mut state = serializer.serialize_struct("PhysicalHost", num_fields)?;
|
||||
|
||||
// Serialize the standard fields
|
||||
state.serialize_field("category", &self.category)?;
|
||||
state.serialize_field("network", &self.network)?;
|
||||
state.serialize_field("storage", &self.storage)?;
|
||||
state.serialize_field("labels", &self.labels)?;
|
||||
|
||||
// Serialize optional fields
|
||||
if let Some(memory) = self.memory_size {
|
||||
state.serialize_field("memory_size", &memory)?;
|
||||
}
|
||||
if let Some(cpu) = self.cpu_count {
|
||||
state.serialize_field("cpu_count", &cpu)?;
|
||||
}
|
||||
|
||||
let mgmt_data = self.management.serialize_management();
|
||||
// pub management: Arc<dyn ManagementInterface>,
|
||||
|
||||
// Handle management interface - either as a field or flattened
|
||||
state.serialize_field("management", &mgmt_data)?;
|
||||
|
||||
state.end()
|
||||
todo!()
|
||||
}
|
||||
}
|
||||
|
||||
@@ -179,59 +280,10 @@ pub enum HostCategory {
|
||||
Switch,
|
||||
}
|
||||
|
||||
#[derive(Debug, new, Clone, Serialize)]
|
||||
pub struct NetworkInterface {
|
||||
pub name: Option<String>,
|
||||
pub mac_address: MacAddress,
|
||||
pub speed: Option<u64>,
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
use harmony_macros::mac_address;
|
||||
#[cfg(test)]
|
||||
impl NetworkInterface {
|
||||
pub fn dummy() -> Self {
|
||||
Self {
|
||||
name: Some(String::new()),
|
||||
mac_address: mac_address!("00:00:00:00:00:00"),
|
||||
speed: Some(0),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, new, Clone, Serialize)]
|
||||
pub enum StorageConnectionType {
|
||||
Sata3g,
|
||||
Sata6g,
|
||||
Sas6g,
|
||||
Sas12g,
|
||||
PCIE,
|
||||
}
|
||||
#[derive(Debug, Clone, Serialize)]
|
||||
pub enum StorageKind {
|
||||
SSD,
|
||||
NVME,
|
||||
HDD,
|
||||
}
|
||||
#[derive(Debug, new, Clone, Serialize)]
|
||||
pub struct Storage {
|
||||
pub connection: StorageConnectionType,
|
||||
pub kind: StorageKind,
|
||||
pub size: u64,
|
||||
pub serial: String,
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
impl Storage {
|
||||
pub fn dummy() -> Self {
|
||||
Self {
|
||||
connection: StorageConnectionType::Sata3g,
|
||||
kind: StorageKind::SSD,
|
||||
size: 0,
|
||||
serial: String::new(),
|
||||
}
|
||||
}
|
||||
}
|
||||
use harmony_types::id::Id;
|
||||
|
||||
#[derive(Debug, Clone, Serialize)]
|
||||
pub struct Switch {
|
||||
@@ -262,146 +314,65 @@ impl Location {
|
||||
}
|
||||
}
|
||||
|
||||
impl std::fmt::Display for HostCategory {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
match self {
|
||||
HostCategory::Server => write!(f, "Server"),
|
||||
HostCategory::Firewall => write!(f, "Firewall"),
|
||||
HostCategory::Switch => write!(f, "Switch"),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl std::fmt::Display for Label {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
write!(f, "{}: {}", self.name, self.value)
|
||||
}
|
||||
}
|
||||
|
||||
impl std::fmt::Display for Location {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
write!(f, "Address: {}, Name: {}", self.address, self.name)
|
||||
}
|
||||
}
|
||||
|
||||
impl std::fmt::Display for PhysicalHost {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
write!(f, "{}", self.summary())
|
||||
}
|
||||
}
|
||||
|
||||
impl std::fmt::Display for Switch {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
write!(f, "Switch with {} interfaces", self._interface.len())
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::sync::Arc;
|
||||
|
||||
// Mock implementation of ManagementInterface
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
struct MockHPIlo {
|
||||
ip: String,
|
||||
username: String,
|
||||
password: String,
|
||||
firmware_version: String,
|
||||
}
|
||||
|
||||
impl ManagementInterface for MockHPIlo {
|
||||
fn boot_to_pxe(&self) {}
|
||||
|
||||
fn get_supported_protocol_names(&self) -> String {
|
||||
String::new()
|
||||
}
|
||||
}
|
||||
|
||||
// Another mock implementation
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
struct MockDellIdrac {
|
||||
hostname: String,
|
||||
port: u16,
|
||||
api_token: String,
|
||||
}
|
||||
|
||||
impl ManagementInterface for MockDellIdrac {
|
||||
fn boot_to_pxe(&self) {}
|
||||
|
||||
fn get_supported_protocol_names(&self) -> String {
|
||||
String::new()
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_serialize_physical_host_with_hp_ilo() {
|
||||
// Create a PhysicalHost with HP iLO management
|
||||
let host = PhysicalHost {
|
||||
category: HostCategory::Server,
|
||||
network: vec![NetworkInterface::dummy()],
|
||||
management: Arc::new(MockHPIlo {
|
||||
ip: "192.168.1.100".to_string(),
|
||||
username: "admin".to_string(),
|
||||
password: "password123".to_string(),
|
||||
firmware_version: "2.5.0".to_string(),
|
||||
}),
|
||||
storage: vec![Storage::dummy()],
|
||||
labels: vec![Label::new("datacenter".to_string(), "us-east".to_string())],
|
||||
memory_size: Some(64_000_000),
|
||||
cpu_count: Some(16),
|
||||
};
|
||||
|
||||
// Serialize to JSON
|
||||
let json = serde_json::to_string(&host).expect("Failed to serialize host");
|
||||
|
||||
// Check that the serialized JSON contains the HP iLO details
|
||||
assert!(json.contains("192.168.1.100"));
|
||||
assert!(json.contains("admin"));
|
||||
assert!(json.contains("password123"));
|
||||
assert!(json.contains("firmware_version"));
|
||||
assert!(json.contains("2.5.0"));
|
||||
|
||||
// Parse back to verify structure (not the exact management interface)
|
||||
let parsed: serde_json::Value = serde_json::from_str(&json).expect("Failed to parse JSON");
|
||||
|
||||
// Verify basic structure
|
||||
assert_eq!(parsed["cpu_count"], 16);
|
||||
assert_eq!(parsed["memory_size"], 64_000_000);
|
||||
assert_eq!(parsed["network"][0]["name"], "");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_serialize_physical_host_with_dell_idrac() {
|
||||
// Create a PhysicalHost with Dell iDRAC management
|
||||
let host = PhysicalHost {
|
||||
category: HostCategory::Server,
|
||||
network: vec![NetworkInterface::dummy()],
|
||||
management: Arc::new(MockDellIdrac {
|
||||
hostname: "idrac-server01".to_string(),
|
||||
port: 443,
|
||||
api_token: "abcdef123456".to_string(),
|
||||
}),
|
||||
storage: vec![Storage::dummy()],
|
||||
labels: vec![Label::new("env".to_string(), "production".to_string())],
|
||||
memory_size: Some(128_000_000),
|
||||
cpu_count: Some(32),
|
||||
};
|
||||
|
||||
// Serialize to JSON
|
||||
let json = serde_json::to_string(&host).expect("Failed to serialize host");
|
||||
|
||||
// Check that the serialized JSON contains the Dell iDRAC details
|
||||
assert!(json.contains("idrac-server01"));
|
||||
assert!(json.contains("443"));
|
||||
assert!(json.contains("abcdef123456"));
|
||||
|
||||
// Parse back to verify structure
|
||||
let parsed: serde_json::Value = serde_json::from_str(&json).expect("Failed to parse JSON");
|
||||
|
||||
// Verify basic structure
|
||||
assert_eq!(parsed["cpu_count"], 32);
|
||||
assert_eq!(parsed["memory_size"], 128_000_000);
|
||||
assert_eq!(parsed["storage"][0]["path"], serde_json::Value::Null);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_different_management_implementations_produce_valid_json() {
|
||||
// Create hosts with different management implementations
|
||||
let host1 = PhysicalHost {
|
||||
id: Id::empty(),
|
||||
category: HostCategory::Server,
|
||||
network: vec![],
|
||||
management: Arc::new(MockHPIlo {
|
||||
ip: "10.0.0.1".to_string(),
|
||||
username: "root".to_string(),
|
||||
password: "secret".to_string(),
|
||||
firmware_version: "3.0.0".to_string(),
|
||||
}),
|
||||
storage: vec![],
|
||||
labels: vec![],
|
||||
memory_size: None,
|
||||
cpu_count: None,
|
||||
memory_modules: vec![],
|
||||
cpus: vec![],
|
||||
};
|
||||
|
||||
let host2 = PhysicalHost {
|
||||
id: Id::empty(),
|
||||
category: HostCategory::Server,
|
||||
network: vec![],
|
||||
management: Arc::new(MockDellIdrac {
|
||||
hostname: "server02-idrac".to_string(),
|
||||
port: 8443,
|
||||
api_token: "token123".to_string(),
|
||||
}),
|
||||
storage: vec![],
|
||||
labels: vec![],
|
||||
memory_size: None,
|
||||
cpu_count: None,
|
||||
memory_modules: vec![],
|
||||
cpus: vec![],
|
||||
};
|
||||
|
||||
// Both should serialize successfully
|
||||
@@ -411,8 +382,5 @@ mod tests {
|
||||
// Both JSONs should be valid and parseable
|
||||
let _: serde_json::Value = serde_json::from_str(&json1).expect("Invalid JSON for host1");
|
||||
let _: serde_json::Value = serde_json::from_str(&json2).expect("Invalid JSON for host2");
|
||||
|
||||
// The JSONs should be different because they contain different management interfaces
|
||||
assert_ne!(json1, json2);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,63 +1,84 @@
|
||||
use log::debug;
|
||||
use once_cell::sync::Lazy;
|
||||
use tokio::sync::broadcast;
|
||||
use std::{collections::HashMap, sync::Mutex};
|
||||
|
||||
use super::interpret::{InterpretError, Outcome};
|
||||
use crate::modules::application::ApplicationFeatureStatus;
|
||||
|
||||
use super::{
|
||||
interpret::{InterpretError, Outcome},
|
||||
topology::TopologyStatus,
|
||||
};
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub enum HarmonyEvent {
|
||||
HarmonyStarted,
|
||||
PrepareTopologyStarted {
|
||||
topology: String,
|
||||
},
|
||||
TopologyPrepared {
|
||||
topology: String,
|
||||
outcome: Outcome,
|
||||
},
|
||||
HarmonyFinished,
|
||||
InterpretExecutionStarted {
|
||||
execution_id: String,
|
||||
topology: String,
|
||||
interpret: String,
|
||||
score: String,
|
||||
message: String,
|
||||
},
|
||||
InterpretExecutionFinished {
|
||||
execution_id: String,
|
||||
topology: String,
|
||||
interpret: String,
|
||||
score: String,
|
||||
outcome: Result<Outcome, InterpretError>,
|
||||
},
|
||||
TopologyStateChanged {
|
||||
topology: String,
|
||||
status: TopologyStatus,
|
||||
message: Option<String>,
|
||||
},
|
||||
ApplicationFeatureStateChanged {
|
||||
topology: String,
|
||||
application: String,
|
||||
feature: String,
|
||||
status: ApplicationFeatureStatus,
|
||||
},
|
||||
}
|
||||
|
||||
static HARMONY_EVENT_BUS: Lazy<broadcast::Sender<HarmonyEvent>> = Lazy::new(|| {
|
||||
// TODO: Adjust channel capacity
|
||||
let (tx, _rx) = broadcast::channel(100);
|
||||
tx
|
||||
});
|
||||
type Subscriber = Box<dyn Fn(&HarmonyEvent) + Send + Sync>;
|
||||
|
||||
pub fn instrument(event: HarmonyEvent) -> Result<(), &'static str> {
|
||||
match HARMONY_EVENT_BUS.send(event) {
|
||||
Ok(_) => Ok(()),
|
||||
Err(_) => Err("send error: no subscribers"),
|
||||
}
|
||||
}
|
||||
static SUBSCRIBERS: Lazy<Mutex<HashMap<String, Subscriber>>> =
|
||||
Lazy::new(|| Mutex::new(HashMap::new()));
|
||||
|
||||
pub async fn subscribe<F, Fut>(name: &str, mut handler: F)
|
||||
/// Subscribes a listener to all instrumentation events.
|
||||
///
|
||||
/// Simply provide a unique name and a closure to run when an event happens.
|
||||
///
|
||||
/// # Example
|
||||
/// ```
|
||||
/// use harmony::instrumentation;
|
||||
/// instrumentation::subscribe("my_logger", |event| {
|
||||
/// println!("Event occurred: {:?}", event);
|
||||
/// });
|
||||
/// ```
|
||||
pub fn subscribe<F>(name: &str, callback: F)
|
||||
where
|
||||
F: FnMut(HarmonyEvent) -> Fut + Send + 'static,
|
||||
Fut: Future<Output = bool> + Send,
|
||||
F: Fn(&HarmonyEvent) + Send + Sync + 'static,
|
||||
{
|
||||
let mut rx = HARMONY_EVENT_BUS.subscribe();
|
||||
debug!("[{name}] Service started. Listening for events...");
|
||||
loop {
|
||||
match rx.recv().await {
|
||||
Ok(event) => {
|
||||
if !handler(event).await {
|
||||
debug!("[{name}] Handler requested exit.");
|
||||
break;
|
||||
}
|
||||
}
|
||||
Err(broadcast::error::RecvError::Lagged(n)) => {
|
||||
debug!("[{name}] Lagged behind by {n} messages.");
|
||||
}
|
||||
Err(_) => break,
|
||||
}
|
||||
}
|
||||
let mut subs = SUBSCRIBERS.lock().unwrap();
|
||||
subs.insert(name.to_string(), Box::new(callback));
|
||||
}
|
||||
|
||||
/// Instruments an event, notifying all subscribers.
|
||||
///
|
||||
/// This will call every closure that was registered with `subscribe`.
|
||||
///
|
||||
/// # Example
|
||||
/// ```
|
||||
/// use harmony::instrumentation;
|
||||
/// use harmony::instrumentation::HarmonyEvent;
|
||||
/// instrumentation::instrument(HarmonyEvent::HarmonyStarted);
|
||||
/// ```
|
||||
pub fn instrument(event: HarmonyEvent) -> Result<(), &'static str> {
|
||||
let subs = SUBSCRIBERS.lock().unwrap();
|
||||
|
||||
for callback in subs.values() {
|
||||
callback(&event);
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
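A combined sketch of the reworked event bus: subscribers are now plain synchronous closures stored in a registry, and every instrument call invokes them inline on the caller's thread (module path taken from the doc examples above):

use harmony::instrumentation::{self, HarmonyEvent};

fn main() {
    instrumentation::subscribe("cli_progress", |event| match event {
        HarmonyEvent::HarmonyStarted => println!("run started"),
        HarmonyEvent::HarmonyFinished => println!("run finished"),
        other => println!("event: {other:?}"),
    });

    instrumentation::instrument(HarmonyEvent::HarmonyStarted).unwrap();
    instrumentation::instrument(HarmonyEvent::HarmonyFinished).unwrap();
}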
@@ -1,12 +1,11 @@
|
||||
use harmony_types::id::Id;
|
||||
use std::error::Error;
|
||||
|
||||
use async_trait::async_trait;
|
||||
use derive_new::new;
|
||||
|
||||
use super::{
|
||||
data::{Id, Version},
|
||||
executors::ExecutorError,
|
||||
inventory::Inventory,
|
||||
data::Version, executors::ExecutorError, inventory::Inventory, topology::PreparationError,
|
||||
};
|
||||
|
||||
pub enum InterpretName {
|
||||
@@ -23,6 +22,16 @@ pub enum InterpretName {
|
||||
TenantInterpret,
|
||||
Application,
|
||||
ArgoCD,
|
||||
Alerting,
|
||||
Ntfy,
|
||||
HelmChart,
|
||||
HelmCommand,
|
||||
K8sResource,
|
||||
Lamp,
|
||||
ApplicationMonitoring,
|
||||
K8sPrometheusCrdAlerting,
|
||||
DiscoverInventoryAgent,
|
||||
CephClusterHealth,
|
||||
}
|
||||
|
||||
impl std::fmt::Display for InterpretName {
|
||||
@@ -41,6 +50,16 @@ impl std::fmt::Display for InterpretName {
|
||||
InterpretName::TenantInterpret => f.write_str("Tenant"),
|
||||
InterpretName::Application => f.write_str("Application"),
|
||||
InterpretName::ArgoCD => f.write_str("ArgoCD"),
|
||||
InterpretName::Alerting => f.write_str("Alerting"),
|
||||
InterpretName::Ntfy => f.write_str("Ntfy"),
|
||||
InterpretName::HelmChart => f.write_str("HelmChart"),
|
||||
InterpretName::HelmCommand => f.write_str("HelmCommand"),
|
||||
InterpretName::K8sResource => f.write_str("K8sResource"),
|
||||
InterpretName::Lamp => f.write_str("LAMP"),
|
||||
InterpretName::ApplicationMonitoring => f.write_str("ApplicationMonitoring"),
|
||||
InterpretName::K8sPrometheusCrdAlerting => f.write_str("K8sPrometheusCrdAlerting"),
|
||||
InterpretName::DiscoverInventoryAgent => f.write_str("DiscoverInventoryAgent"),
|
||||
InterpretName::CephClusterHealth => f.write_str("CephClusterHealth"),
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -113,6 +132,14 @@ impl std::fmt::Display for InterpretError {
|
||||
}
|
||||
impl Error for InterpretError {}
|
||||
|
||||
impl From<PreparationError> for InterpretError {
|
||||
fn from(value: PreparationError) -> Self {
|
||||
Self {
|
||||
msg: format!("InterpretError : {value}"),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl From<ExecutorError> for InterpretError {
|
||||
fn from(value: ExecutorError) -> Self {
|
||||
Self {
|
||||
|
||||
@@ -1,3 +1,6 @@
|
||||
mod repository;
|
||||
pub use repository::*;
|
||||
|
||||
#[derive(Debug, new, Clone)]
|
||||
pub struct InventoryFilter {
|
||||
target: Vec<Filter>,
|
||||
@@ -15,6 +18,8 @@ impl InventoryFilter {
|
||||
use derive_new::new;
|
||||
use log::info;
|
||||
|
||||
use crate::hardware::{ManagementInterface, ManualManagementInterface};
|
||||
|
||||
use super::{
|
||||
filter::Filter,
|
||||
hardware::{FirewallGroup, HostGroup, Location, SwitchGroup},
|
||||
@@ -27,7 +32,7 @@ pub struct Inventory {
|
||||
// Firewall is really just a host but with somewhat specialized hardware
|
||||
// I'm not entirely sure it belongs to its own category but it helps make things easier and
|
||||
// clearer for now so let's try it this way.
|
||||
pub firewall: FirewallGroup,
|
||||
pub firewall_mgmt: Box<dyn ManagementInterface>,
|
||||
pub worker_host: HostGroup,
|
||||
pub storage_host: HostGroup,
|
||||
pub control_plane_host: HostGroup,
|
||||
@@ -38,7 +43,7 @@ impl Inventory {
|
||||
Self {
|
||||
location: Location::new("Empty".to_string(), "location".to_string()),
|
||||
switch: vec![],
|
||||
firewall: vec![],
|
||||
firewall_mgmt: Box::new(ManualManagementInterface {}),
|
||||
worker_host: vec![],
|
||||
storage_host: vec![],
|
||||
control_plane_host: vec![],
|
||||
@@ -49,7 +54,7 @@ impl Inventory {
|
||||
Self {
|
||||
location: Location::test_building(),
|
||||
switch: SwitchGroup::new(),
|
||||
firewall: FirewallGroup::new(),
|
||||
firewall_mgmt: Box::new(ManualManagementInterface {}),
|
||||
worker_host: HostGroup::new(),
|
||||
storage_host: HostGroup::new(),
|
||||
control_plane_host: HostGroup::new(),
|
||||
|
||||
harmony/src/domain/inventory/repository.rs (new file, 25 lines)
@@ -0,0 +1,25 @@
use async_trait::async_trait;

use crate::hardware::PhysicalHost;

/// Errors that can occur within the repository layer.
#[derive(thiserror::Error, Debug)]
pub enum RepoError {
    #[error("Database query failed: {0}")]
    QueryFailed(String),
    #[error("Data serialization failed: {0}")]
    Serialization(String),
    #[error("Data deserialization failed: {0}")]
    Deserialization(String),
    #[error("Could not connect to the database: {0}")]
    ConnectionFailed(String),
}

// --- Trait and Implementation ---

/// Defines the contract for inventory persistence.
#[async_trait]
pub trait InventoryRepository: Send + Sync + 'static {
    async fn save(&self, host: &PhysicalHost) -> Result<(), RepoError>;
    async fn get_latest_by_id(&self, host_id: &str) -> Result<Option<PhysicalHost>, RepoError>;
}
|
||||
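For illustration only, a hypothetical in-memory implementation of the trait as it could be used in tests; the crate import paths and the assumption that Id can be rendered as a string key (via to_string here) are mine, not part of this change. The real implementation is SQLite-backed (see the sqlx query file added at the top of this diff).

use async_trait::async_trait;
use harmony::hardware::PhysicalHost;
use harmony::inventory::{InventoryRepository, RepoError};
use std::{collections::HashMap, sync::Mutex};

#[derive(Default)]
struct InMemoryInventoryRepository {
    hosts: Mutex<HashMap<String, PhysicalHost>>,
}

#[async_trait]
impl InventoryRepository for InMemoryInventoryRepository {
    async fn save(&self, host: &PhysicalHost) -> Result<(), RepoError> {
        // Assumes the host id exposes a string form; adjust to however Id is keyed.
        self.hosts
            .lock()
            .map_err(|e| RepoError::QueryFailed(e.to_string()))?
            .insert(host.id.to_string(), host.clone());
        Ok(())
    }

    async fn get_latest_by_id(&self, host_id: &str) -> Result<Option<PhysicalHost>, RepoError> {
        Ok(self
            .hosts
            .lock()
            .map_err(|e| RepoError::QueryFailed(e.to_string()))?
            .get(host_id)
            .cloned())
    }
}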
@@ -1,14 +1,14 @@
|
||||
use std::sync::{Arc, Mutex, RwLock};
|
||||
use std::sync::{Arc, RwLock};
|
||||
|
||||
use log::{debug, info, warn};
|
||||
use log::{debug, warn};
|
||||
|
||||
use crate::instrumentation::{self, HarmonyEvent};
|
||||
use crate::topology::TopologyStatus;
|
||||
|
||||
use super::{
|
||||
interpret::{InterpretError, InterpretStatus, Outcome},
|
||||
interpret::{InterpretError, Outcome},
|
||||
inventory::Inventory,
|
||||
score::Score,
|
||||
topology::Topology,
|
||||
topology::{PreparationError, PreparationOutcome, Topology, TopologyState},
|
||||
};
|
||||
|
||||
type ScoreVec<T> = Vec<Box<dyn Score<T>>>;
|
||||
@@ -17,7 +17,7 @@ pub struct Maestro<T: Topology> {
|
||||
inventory: Inventory,
|
||||
topology: T,
|
||||
scores: Arc<RwLock<ScoreVec<T>>>,
|
||||
topology_preparation_result: Mutex<Option<Outcome>>,
|
||||
topology_state: TopologyState,
|
||||
}
|
||||
|
||||
impl<T: Topology> Maestro<T> {
|
||||
@@ -25,41 +25,46 @@ impl<T: Topology> Maestro<T> {
|
||||
///
|
||||
/// This should rarely be used. Most of the time Maestro::initialize should be used instead.
|
||||
pub fn new_without_initialization(inventory: Inventory, topology: T) -> Self {
|
||||
let topology_name = topology.name().to_string();
|
||||
|
||||
Self {
|
||||
inventory,
|
||||
topology,
|
||||
scores: Arc::new(RwLock::new(Vec::new())),
|
||||
topology_preparation_result: None.into(),
|
||||
topology_state: TopologyState::new(topology_name),
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn initialize(inventory: Inventory, topology: T) -> Result<Self, InterpretError> {
|
||||
let instance = Self::new_without_initialization(inventory, topology);
|
||||
pub async fn initialize(inventory: Inventory, topology: T) -> Result<Self, PreparationError> {
|
||||
let mut instance = Self::new_without_initialization(inventory, topology);
|
||||
instance.prepare_topology().await?;
|
||||
Ok(instance)
|
||||
}
|
||||
|
||||
/// Ensures the associated Topology is ready for operations.
|
||||
/// Delegates the readiness check and potential setup actions to the Topology.
|
||||
pub async fn prepare_topology(&self) -> Result<Outcome, InterpretError> {
|
||||
instrumentation::instrument(HarmonyEvent::PrepareTopologyStarted {
|
||||
topology: self.topology.name().to_string(),
|
||||
})
|
||||
.unwrap();
|
||||
async fn prepare_topology(&mut self) -> Result<PreparationOutcome, PreparationError> {
|
||||
self.topology_state.prepare();
|
||||
|
||||
let outcome = self.topology.ensure_ready().await?;
|
||||
let result = self.topology.ensure_ready().await;
|
||||
|
||||
instrumentation::instrument(HarmonyEvent::TopologyPrepared {
|
||||
topology: self.topology.name().to_string(),
|
||||
outcome: outcome.clone(),
|
||||
})
|
||||
.unwrap();
|
||||
|
||||
self.topology_preparation_result
|
||||
.lock()
|
||||
.unwrap()
|
||||
.replace(outcome.clone());
|
||||
Ok(outcome)
|
||||
match result {
|
||||
Ok(outcome) => {
|
||||
match outcome.clone() {
|
||||
PreparationOutcome::Success { details } => {
|
||||
self.topology_state.success(details);
|
||||
}
|
||||
PreparationOutcome::Noop => {
|
||||
self.topology_state.noop();
|
||||
}
|
||||
};
|
||||
Ok(outcome)
|
||||
}
|
||||
Err(err) => {
|
||||
self.topology_state.error(err.to_string());
|
||||
Err(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub fn register_all(&mut self, mut scores: ScoreVec<T>) {
|
||||
@@ -68,15 +73,8 @@ impl<T: Topology> Maestro<T> {
|
||||
}
|
||||
|
||||
fn is_topology_initialized(&self) -> bool {
|
||||
let result = self.topology_preparation_result.lock().unwrap();
|
||||
if let Some(outcome) = result.as_ref() {
|
||||
match outcome.status {
|
||||
InterpretStatus::SUCCESS => return true,
|
||||
_ => return false,
|
||||
}
|
||||
} else {
|
||||
false
|
||||
}
|
||||
self.topology_state.status == TopologyStatus::Success
|
||||
|| self.topology_state.status == TopologyStatus::Noop
|
||||
}
|
||||
|
||||
pub async fn interpret(&self, score: Box<dyn Score<T>>) -> Result<Outcome, InterpretError> {
|
||||
@@ -87,10 +85,8 @@ impl<T: Topology> Maestro<T> {
|
||||
self.topology.name(),
|
||||
);
|
||||
}
|
||||
debug!("Running score {score:?}");
|
||||
let interpret = score.create_interpret();
|
||||
debug!("Launching interpret {interpret:?}");
|
||||
let result = interpret.execute(&self.inventory, &self.topology).await;
|
||||
debug!("Interpreting score {score:?}");
|
||||
let result = score.interpret(&self.inventory, &self.topology).await;
|
||||
debug!("Got result {result:?}");
|
||||
result
|
||||
}
|
||||
|
||||
@@ -1,22 +1,62 @@
|
||||
use harmony_types::id::Id;
|
||||
use std::collections::BTreeMap;
|
||||
|
||||
use async_trait::async_trait;
|
||||
use serde::Serialize;
|
||||
use serde_value::Value;
|
||||
|
||||
use super::{interpret::Interpret, topology::Topology};
|
||||
use super::{
|
||||
instrumentation::{self, HarmonyEvent},
|
||||
interpret::{Interpret, InterpretError, Outcome},
|
||||
inventory::Inventory,
|
||||
topology::Topology,
|
||||
};
|
||||
|
||||
#[async_trait]
|
||||
pub trait Score<T: Topology>:
|
||||
std::fmt::Debug + ScoreToString<T> + Send + Sync + CloneBoxScore<T> + SerializeScore<T>
|
||||
{
|
||||
fn create_interpret(&self) -> Box<dyn Interpret<T>>;
|
||||
async fn interpret(
|
||||
&self,
|
||||
inventory: &Inventory,
|
||||
topology: &T,
|
||||
) -> Result<Outcome, InterpretError> {
|
||||
let id = Id::default();
|
||||
let interpret = self.create_interpret();
|
||||
|
||||
instrumentation::instrument(HarmonyEvent::InterpretExecutionStarted {
|
||||
execution_id: id.clone().to_string(),
|
||||
topology: topology.name().into(),
|
||||
interpret: interpret.get_name().to_string(),
|
||||
score: self.name(),
|
||||
message: format!("{} running...", interpret.get_name()),
|
||||
})
|
||||
.unwrap();
|
||||
let result = interpret.execute(inventory, topology).await;
|
||||
|
||||
instrumentation::instrument(HarmonyEvent::InterpretExecutionFinished {
|
||||
execution_id: id.clone().to_string(),
|
||||
topology: topology.name().into(),
|
||||
interpret: interpret.get_name().to_string(),
|
||||
score: self.name(),
|
||||
outcome: result.clone(),
|
||||
})
|
||||
.unwrap();
|
||||
|
||||
result
|
||||
}
|
||||
|
||||
fn name(&self) -> String;
|
||||
|
||||
#[doc(hidden)]
|
||||
fn create_interpret(&self) -> Box<dyn Interpret<T>>;
|
||||
}
|
||||
|
||||
pub trait SerializeScore<T: Topology> {
|
||||
fn serialize(&self) -> Value;
|
||||
}
|
||||
|
||||
impl<'de, S, T> SerializeScore<T> for S
|
||||
impl<S, T> SerializeScore<T> for S
|
||||
where
|
||||
T: Topology,
|
||||
S: Score<T> + Serialize,
|
||||
@@ -24,7 +64,7 @@ where
|
||||
fn serialize(&self) -> Value {
|
||||
// TODO not sure if this is the right place to handle the error or it should bubble
|
||||
// up?
|
||||
serde_value::to_value(&self).expect("Score should serialize successfully")
|
||||
serde_value::to_value(self).expect("Score should serialize successfully")
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -1,11 +1,13 @@
|
||||
use async_trait::async_trait;
|
||||
use harmony_macros::ip;
|
||||
use harmony_types::net::MacAddress;
|
||||
use harmony_types::net::Url;
|
||||
use log::debug;
|
||||
use log::info;
|
||||
|
||||
use crate::data::FileContent;
|
||||
use crate::executors::ExecutorError;
|
||||
use crate::interpret::InterpretError;
|
||||
use crate::interpret::Outcome;
|
||||
use crate::topology::PxeOptions;
|
||||
|
||||
use super::DHCPStaticEntry;
|
||||
use super::DhcpServer;
|
||||
@@ -19,11 +21,12 @@ use super::K8sclient;
|
||||
use super::LoadBalancer;
|
||||
use super::LoadBalancerService;
|
||||
use super::LogicalHost;
|
||||
use super::PreparationError;
|
||||
use super::PreparationOutcome;
|
||||
use super::Router;
|
||||
use super::TftpServer;
|
||||
|
||||
use super::Topology;
|
||||
use super::Url;
|
||||
use super::k8s::K8sClient;
|
||||
use std::sync::Arc;
|
||||
|
||||
@@ -48,10 +51,11 @@ impl Topology for HAClusterTopology {
|
||||
fn name(&self) -> &str {
|
||||
"HAClusterTopology"
|
||||
}
|
||||
async fn ensure_ready(&self) -> Result<Outcome, InterpretError> {
|
||||
todo!(
|
||||
async fn ensure_ready(&self) -> Result<PreparationOutcome, PreparationError> {
|
||||
debug!(
|
||||
"ensure_ready, not entirely sure what it should do here, probably something like verify that the hosts are reachable and all services are up and ready."
|
||||
)
|
||||
);
|
||||
Ok(PreparationOutcome::Noop)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -153,12 +157,10 @@ impl DhcpServer for HAClusterTopology {
|
||||
async fn list_static_mappings(&self) -> Vec<(MacAddress, IpAddress)> {
|
||||
self.dhcp_server.list_static_mappings().await
|
||||
}
|
||||
async fn set_next_server(&self, ip: IpAddress) -> Result<(), ExecutorError> {
|
||||
self.dhcp_server.set_next_server(ip).await
|
||||
}
|
||||
async fn set_boot_filename(&self, boot_filename: &str) -> Result<(), ExecutorError> {
|
||||
self.dhcp_server.set_boot_filename(boot_filename).await
|
||||
async fn set_pxe_options(&self, options: PxeOptions) -> Result<(), ExecutorError> {
|
||||
self.dhcp_server.set_pxe_options(options).await
|
||||
}
|
||||
|
||||
fn get_ip(&self) -> IpAddress {
|
||||
self.dhcp_server.get_ip()
|
||||
}
|
||||
@@ -168,16 +170,6 @@ impl DhcpServer for HAClusterTopology {
|
||||
async fn commit_config(&self) -> Result<(), ExecutorError> {
|
||||
self.dhcp_server.commit_config().await
|
||||
}
|
||||
|
||||
async fn set_filename(&self, filename: &str) -> Result<(), ExecutorError> {
|
||||
self.dhcp_server.set_filename(filename).await
|
||||
}
|
||||
async fn set_filename64(&self, filename64: &str) -> Result<(), ExecutorError> {
|
||||
self.dhcp_server.set_filename64(filename64).await
|
||||
}
|
||||
async fn set_filenameipxe(&self, filenameipxe: &str) -> Result<(), ExecutorError> {
|
||||
self.dhcp_server.set_filenameipxe(filenameipxe).await
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
@@ -221,17 +213,21 @@ impl HttpServer for HAClusterTopology {
|
||||
self.http_server.serve_files(url).await
|
||||
}
|
||||
|
||||
async fn serve_file_content(&self, file: &FileContent) -> Result<(), ExecutorError> {
|
||||
self.http_server.serve_file_content(file).await
|
||||
}
|
||||
|
||||
fn get_ip(&self) -> IpAddress {
|
||||
unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA)
|
||||
self.http_server.get_ip()
|
||||
}
|
||||
async fn ensure_initialized(&self) -> Result<(), ExecutorError> {
|
||||
unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA)
|
||||
self.http_server.ensure_initialized().await
|
||||
}
|
||||
async fn commit_config(&self) -> Result<(), ExecutorError> {
|
||||
unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA)
|
||||
self.http_server.commit_config().await
|
||||
}
|
||||
async fn reload_restart(&self) -> Result<(), ExecutorError> {
|
||||
unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA)
|
||||
self.http_server.reload_restart().await
|
||||
}
|
||||
}
|
||||
|
||||
@@ -241,13 +237,15 @@ pub struct DummyInfra;
|
||||
#[async_trait]
|
||||
impl Topology for DummyInfra {
|
||||
fn name(&self) -> &str {
|
||||
todo!()
|
||||
"DummyInfra"
|
||||
}
|
||||
|
||||
async fn ensure_ready(&self) -> Result<Outcome, InterpretError> {
|
||||
async fn ensure_ready(&self) -> Result<PreparationOutcome, PreparationError> {
|
||||
let dummy_msg = "This is a dummy infrastructure that does nothing";
|
||||
info!("{dummy_msg}");
|
||||
Ok(Outcome::success(dummy_msg.to_string()))
|
||||
Ok(PreparationOutcome::Success {
|
||||
details: dummy_msg.into(),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -297,19 +295,7 @@ impl DhcpServer for DummyInfra {
|
||||
async fn list_static_mappings(&self) -> Vec<(MacAddress, IpAddress)> {
|
||||
unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA)
|
||||
}
|
||||
async fn set_next_server(&self, _ip: IpAddress) -> Result<(), ExecutorError> {
|
||||
unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA)
|
||||
}
|
||||
async fn set_boot_filename(&self, _boot_filename: &str) -> Result<(), ExecutorError> {
|
||||
unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA)
|
||||
}
|
||||
async fn set_filename(&self, _filename: &str) -> Result<(), ExecutorError> {
|
||||
unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA)
|
||||
}
|
||||
async fn set_filename64(&self, _filename: &str) -> Result<(), ExecutorError> {
|
||||
unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA)
|
||||
}
|
||||
async fn set_filenameipxe(&self, _filenameipxe: &str) -> Result<(), ExecutorError> {
|
||||
async fn set_pxe_options(&self, _options: PxeOptions) -> Result<(), ExecutorError> {
|
||||
unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA)
|
||||
}
|
||||
fn get_ip(&self) -> IpAddress {
|
||||
@@ -379,6 +365,9 @@ impl HttpServer for DummyInfra {
|
||||
async fn serve_files(&self, _url: &Url) -> Result<(), ExecutorError> {
|
||||
unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA)
|
||||
}
|
||||
async fn serve_file_content(&self, _file: &FileContent) -> Result<(), ExecutorError> {
|
||||
unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA)
|
||||
}
|
||||
fn get_ip(&self) -> IpAddress {
|
||||
unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA)
|
||||
}
|
||||
|
||||
@@ -1,11 +1,12 @@
|
||||
use crate::executors::ExecutorError;
|
||||
use crate::{data::FileContent, executors::ExecutorError};
|
||||
use async_trait::async_trait;
|
||||
|
||||
use super::{IpAddress, Url};
|
||||
|
||||
use harmony_types::net::IpAddress;
|
||||
use harmony_types::net::Url;
|
||||
#[async_trait]
|
||||
pub trait HttpServer: Send + Sync {
|
||||
async fn serve_files(&self, url: &Url) -> Result<(), ExecutorError>;
|
||||
async fn serve_file_content(&self, file: &FileContent) -> Result<(), ExecutorError>;
|
||||
fn get_ip(&self) -> IpAddress;
|
||||
|
||||
// async fn set_ip(&self, ip: IpAddress) -> Result<(), ExecutorError>;
|
||||
|
||||
@@ -1,12 +1,11 @@
|
||||
use derive_new::new;
|
||||
use futures_util::StreamExt;
|
||||
use k8s_openapi::{
|
||||
ClusterResourceScope, NamespaceResourceScope,
|
||||
api::{apps::v1::Deployment, core::v1::Pod},
|
||||
};
|
||||
use kube::{
|
||||
Client, Config, Error, Resource,
|
||||
api::{Api, AttachParams, ListParams, Patch, PatchParams, ResourceExt},
|
||||
api::{Api, AttachParams, DeleteParams, ListParams, Patch, PatchParams, ResourceExt},
|
||||
config::{KubeConfigOptions, Kubeconfig},
|
||||
core::ErrorResponse,
|
||||
runtime::reflector::Lookup,
|
||||
@@ -17,14 +16,25 @@ use kube::{
|
||||
runtime::wait::await_condition,
|
||||
};
|
||||
use log::{debug, error, trace};
|
||||
use serde::de::DeserializeOwned;
|
||||
use similar::{DiffableStr, TextDiff};
|
||||
use serde::{Serialize, de::DeserializeOwned};
|
||||
use serde_json::json;
|
||||
use similar::TextDiff;
|
||||
use tokio::io::AsyncReadExt;
|
||||
|
||||
#[derive(new, Clone)]
|
||||
pub struct K8sClient {
|
||||
client: Client,
|
||||
}
|
||||
|
||||
impl Serialize for K8sClient {
|
||||
fn serialize<S>(&self, _serializer: S) -> Result<S::Ok, S::Error>
|
||||
where
|
||||
S: serde::Serializer,
|
||||
{
|
||||
todo!()
|
||||
}
|
||||
}
|
||||
|
||||
impl std::fmt::Debug for K8sClient {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
// This is a poor man's debug implementation for now as kube::Client does not provide much
|
||||
@@ -43,6 +53,66 @@ impl K8sClient {
|
||||
})
|
||||
}
|
||||
|
||||
pub async fn get_deployment(
|
||||
&self,
|
||||
name: &str,
|
||||
namespace: Option<&str>,
|
||||
) -> Result<Option<Deployment>, Error> {
|
||||
let deps: Api<Deployment> = if let Some(ns) = namespace {
|
||||
Api::namespaced(self.client.clone(), ns)
|
||||
} else {
|
||||
Api::default_namespaced(self.client.clone())
|
||||
};
|
||||
Ok(deps.get_opt(name).await?)
|
||||
}
|
||||
|
||||
pub async fn get_pod(&self, name: &str, namespace: Option<&str>) -> Result<Option<Pod>, Error> {
|
||||
let pods: Api<Pod> = if let Some(ns) = namespace {
|
||||
Api::namespaced(self.client.clone(), ns)
|
||||
} else {
|
||||
Api::default_namespaced(self.client.clone())
|
||||
};
|
||||
Ok(pods.get_opt(name).await?)
|
||||
}
|
||||
|
||||
pub async fn scale_deployment(
|
||||
&self,
|
||||
name: &str,
|
||||
namespace: Option<&str>,
|
||||
replicas: u32,
|
||||
) -> Result<(), Error> {
|
||||
let deployments: Api<Deployment> = if let Some(ns) = namespace {
|
||||
Api::namespaced(self.client.clone(), ns)
|
||||
} else {
|
||||
Api::default_namespaced(self.client.clone())
|
||||
};
|
||||
|
||||
let patch = json!({
|
||||
"spec": {
|
||||
"replicas": replicas
|
||||
}
|
||||
});
|
||||
let pp = PatchParams::default();
|
||||
let scale = Patch::Apply(&patch);
|
||||
deployments.patch_scale(name, &pp, &scale).await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn delete_deployment(
|
||||
&self,
|
||||
name: &str,
|
||||
namespace: Option<&str>,
|
||||
) -> Result<(), Error> {
|
||||
let deployments: Api<Deployment> = if let Some(ns) = namespace {
|
||||
Api::namespaced(self.client.clone(), ns)
|
||||
} else {
|
||||
Api::default_namespaced(self.client.clone())
|
||||
};
|
||||
let delete_params = DeleteParams::default();
|
||||
deployments.delete(name, &delete_params).await?;
|
||||
Ok(())
|
||||
}
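    // Illustrative only — not part of this changeset. A hedged sketch of how the
    // new deployment helpers above can be combined; the deployment name and
    // namespace are placeholder values.
    pub async fn recycle_example_deployment(&self) -> Result<(), Error> {
        if let Some(dep) = self.get_deployment("my-app", Some("demo")).await? {
            debug!("found deployment {:?}", dep.metadata.name);
            // Scale to zero first, then remove; both helpers fall back to the
            // client's default namespace when `None` is passed.
            self.scale_deployment("my-app", Some("demo"), 0).await?;
            self.delete_deployment("my-app", Some("demo")).await?;
        }
        Ok(())
    }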
|
||||
|
||||
pub async fn wait_until_deployment_ready(
|
||||
&self,
|
||||
name: String,
|
||||
@@ -58,13 +128,78 @@ impl K8sClient {
|
||||
}
|
||||
|
||||
let establish = await_condition(api, name.as_str(), conditions::is_deployment_completed());
|
||||
let t = if let Some(t) = timeout { t } else { 300 };
|
||||
let t = timeout.unwrap_or(300);
|
||||
let res = tokio::time::timeout(std::time::Duration::from_secs(t), establish).await;
|
||||
|
||||
if let Ok(r) = res {
|
||||
return Ok(());
|
||||
if res.is_ok() {
|
||||
Ok(())
|
||||
} else {
|
||||
return Err("timed out while waiting for deployment".to_string());
|
||||
Err("timed out while waiting for deployment".to_string())
|
||||
}
|
||||
}
|
||||
|
||||
    /// Executes a command in the first pod found that matches the label
    /// '{label}={name}'
|
||||
pub async fn exec_app_capture_output(
|
||||
&self,
|
||||
name: String,
|
||||
label: String,
|
||||
namespace: Option<&str>,
|
||||
command: Vec<&str>,
|
||||
) -> Result<String, String> {
|
||||
let api: Api<Pod>;
|
||||
|
||||
if let Some(ns) = namespace {
|
||||
api = Api::namespaced(self.client.clone(), ns);
|
||||
} else {
|
||||
api = Api::default_namespaced(self.client.clone());
|
||||
}
|
||||
let pod_list = api
|
||||
.list(&ListParams::default().labels(format!("{label}={name}").as_str()))
|
||||
.await
|
||||
.expect("couldn't get list of pods");
|
||||
|
||||
let res = api
|
||||
.exec(
|
||||
pod_list
|
||||
.items
|
||||
.first()
|
||||
.expect("couldn't get pod")
|
||||
.name()
|
||||
.expect("couldn't get pod name")
|
||||
.into_owned()
|
||||
.as_str(),
|
||||
command,
|
||||
&AttachParams::default().stdout(true).stderr(true),
|
||||
)
|
||||
.await;
|
||||
match res {
|
||||
Err(e) => Err(e.to_string()),
|
||||
Ok(mut process) => {
|
||||
let status = process
|
||||
.take_status()
|
||||
.expect("Couldn't get status")
|
||||
.await
|
||||
.expect("Couldn't unwrap status");
|
||||
|
||||
if let Some(s) = status.status {
|
||||
let mut stdout_buf = String::new();
|
||||
if let Some(mut stdout) = process.stdout().take() {
|
||||
stdout
|
||||
.read_to_string(&mut stdout_buf)
|
||||
.await
|
||||
.map_err(|e| format!("Failed to get status stdout {e}"))?;
|
||||
}
|
||||
debug!("Status: {} - {:?}", s, status.details);
|
||||
if s == "Success" {
|
||||
Ok(stdout_buf)
|
||||
} else {
|
||||
Err(s)
|
||||
}
|
||||
} else {
|
||||
Err("Couldn't get inner status of pod exec".to_string())
|
||||
}
|
||||
}
|
||||
}
|
||||
}
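    // Illustrative only — not part of this changeset. A sketch of calling
    // exec_app_capture_output; the label, namespace and command are placeholders.
    pub async fn example_read_hostname(&self) -> Result<String, String> {
        self.exec_app_capture_output(
            "grafana".to_string(), // label value, matched as "app=grafana"
            "app".to_string(),     // label key
            Some("monitoring"),
            vec!["cat", "/etc/hostname"],
        )
        .await
    }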
|
||||
|
||||
@@ -103,7 +238,7 @@ impl K8sClient {
|
||||
.await;
|
||||
|
||||
match res {
|
||||
Err(e) => return Err(e.to_string()),
|
||||
Err(e) => Err(e.to_string()),
|
||||
Ok(mut process) => {
|
||||
let status = process
|
||||
.take_status()
|
||||
@@ -112,14 +247,10 @@ impl K8sClient {
|
||||
.expect("Couldn't unwrap status");
|
||||
|
||||
if let Some(s) = status.status {
|
||||
debug!("Status: {}", s);
|
||||
if s == "Success" {
|
||||
return Ok(());
|
||||
} else {
|
||||
return Err(s);
|
||||
}
|
||||
debug!("Status: {} - {:?}", s, status.details);
|
||||
if s == "Success" { Ok(()) } else { Err(s) }
|
||||
} else {
|
||||
return Err("Couldn't get inner status of pod exec".to_string());
|
||||
Err("Couldn't get inner status of pod exec".to_string())
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -160,8 +291,9 @@ impl K8sClient {
|
||||
trace!("Received current value {current:#?}");
|
||||
// The resource exists, so we calculate and display a diff.
|
||||
println!("\nPerforming dry-run for resource: '{}'", name);
|
||||
let mut current_yaml = serde_yaml::to_value(¤t)
|
||||
.expect(&format!("Could not serialize current value : {current:#?}"));
|
||||
let mut current_yaml = serde_yaml::to_value(¤t).unwrap_or_else(|_| {
|
||||
panic!("Could not serialize current value : {current:#?}")
|
||||
});
|
||||
if current_yaml.is_mapping() && current_yaml.get("status").is_some() {
|
||||
let map = current_yaml.as_mapping_mut().unwrap();
|
||||
let removed = map.remove_entry("status");
|
||||
@@ -228,7 +360,7 @@ impl K8sClient {
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn apply_many<K>(&self, resource: &Vec<K>, ns: Option<&str>) -> Result<Vec<K>, Error>
|
||||
pub async fn apply_many<K>(&self, resource: &[K], ns: Option<&str>) -> Result<Vec<K>, Error>
|
||||
where
|
||||
K: Resource + Clone + std::fmt::Debug + DeserializeOwned + serde::Serialize,
|
||||
<K as Resource>::Scope: ApplyStrategy<K>,
|
||||
@@ -244,7 +376,7 @@ impl K8sClient {
|
||||
|
||||
pub async fn apply_yaml_many(
|
||||
&self,
|
||||
yaml: &Vec<serde_yaml::Value>,
|
||||
#[allow(clippy::ptr_arg)] yaml: &Vec<serde_yaml::Value>,
|
||||
ns: Option<&str>,
|
||||
) -> Result<(), Error> {
|
||||
for y in yaml.iter() {
|
||||
|
||||
@@ -7,22 +7,40 @@ use tokio::sync::OnceCell;
|
||||
|
||||
use crate::{
|
||||
executors::ExecutorError,
|
||||
interpret::{InterpretError, Outcome},
|
||||
interpret::InterpretStatus,
|
||||
inventory::Inventory,
|
||||
modules::k3d::K3DInstallationScore,
|
||||
modules::{
|
||||
k3d::K3DInstallationScore,
|
||||
monitoring::kube_prometheus::crd::{
|
||||
crd_alertmanager_config::CRDPrometheus,
|
||||
prometheus_operator::prometheus_operator_helm_chart_score,
|
||||
},
|
||||
prometheus::{
|
||||
k8s_prometheus_alerting_score::K8sPrometheusCRDAlertingScore,
|
||||
prometheus::PrometheusApplicationMonitoring,
|
||||
},
|
||||
},
|
||||
score::Score,
|
||||
};
|
||||
|
||||
use super::{
|
||||
DeploymentTarget, HelmCommand, K8sclient, MultiTargetTopology, Topology,
|
||||
DeploymentTarget, HelmCommand, K8sclient, MultiTargetTopology, PreparationError,
|
||||
PreparationOutcome, Topology,
|
||||
k8s::K8sClient,
|
||||
tenant::{TenantConfig, TenantManager, k8s::K8sTenantManager},
|
||||
oberservability::monitoring::AlertReceiver,
|
||||
tenant::{
|
||||
TenantConfig, TenantManager,
|
||||
k8s::K8sTenantManager,
|
||||
network_policy::{
|
||||
K3dNetworkPolicyStrategy, NetworkPolicyStrategy, NoopNetworkPolicyStrategy,
|
||||
},
|
||||
},
|
||||
};
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
struct K8sState {
|
||||
client: Arc<K8sClient>,
|
||||
_source: K8sSource,
|
||||
source: K8sSource,
|
||||
message: String,
|
||||
}
|
||||
|
||||
@@ -56,8 +74,42 @@ impl K8sclient for K8sAnywhereTopology {
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl PrometheusApplicationMonitoring<CRDPrometheus> for K8sAnywhereTopology {
|
||||
async fn install_prometheus(
|
||||
&self,
|
||||
sender: &CRDPrometheus,
|
||||
inventory: &Inventory,
|
||||
receivers: Option<Vec<Box<dyn AlertReceiver<CRDPrometheus>>>>,
|
||||
) -> Result<PreparationOutcome, PreparationError> {
|
||||
let po_result = self.ensure_prometheus_operator(sender).await?;
|
||||
|
||||
if po_result == PreparationOutcome::Noop {
|
||||
debug!("Skipping Prometheus CR installation due to missing operator.");
|
||||
return Ok(po_result);
|
||||
}
|
||||
|
||||
let result = self
|
||||
.get_k8s_prometheus_application_score(sender.clone(), receivers)
|
||||
.await
|
||||
.interpret(inventory, self)
|
||||
.await;
|
||||
|
||||
match result {
|
||||
Ok(outcome) => match outcome.status {
|
||||
InterpretStatus::SUCCESS => Ok(PreparationOutcome::Success {
|
||||
details: outcome.message,
|
||||
}),
|
||||
InterpretStatus::NOOP => Ok(PreparationOutcome::Noop),
|
||||
_ => Err(PreparationError::new(outcome.message)),
|
||||
},
|
||||
Err(err) => Err(PreparationError::new(err.to_string())),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Serialize for K8sAnywhereTopology {
|
||||
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
|
||||
fn serialize<S>(&self, _serializer: S) -> Result<S::Ok, S::Error>
|
||||
where
|
||||
S: serde::Serializer,
|
||||
{
|
||||
@@ -82,6 +134,19 @@ impl K8sAnywhereTopology {
|
||||
}
|
||||
}
|
||||
|
||||
async fn get_k8s_prometheus_application_score(
|
||||
&self,
|
||||
sender: CRDPrometheus,
|
||||
receivers: Option<Vec<Box<dyn AlertReceiver<CRDPrometheus>>>>,
|
||||
) -> K8sPrometheusCRDAlertingScore {
|
||||
K8sPrometheusCRDAlertingScore {
|
||||
sender,
|
||||
receivers: receivers.unwrap_or_default(),
|
||||
service_monitors: vec![],
|
||||
prometheus_rules: vec![],
|
||||
}
|
||||
}
|
||||
|
||||
fn is_helm_available(&self) -> Result<(), String> {
|
||||
let version_result = Command::new("helm")
|
||||
.arg("version")
|
||||
@@ -110,15 +175,23 @@ impl K8sAnywhereTopology {
|
||||
K3DInstallationScore::default()
|
||||
}
|
||||
|
||||
async fn try_install_k3d(&self) -> Result<(), InterpretError> {
|
||||
self.get_k3d_installation_score()
|
||||
.create_interpret()
|
||||
.execute(&Inventory::empty(), self)
|
||||
.await?;
|
||||
Ok(())
|
||||
async fn try_install_k3d(&self) -> Result<(), PreparationError> {
|
||||
let result = self
|
||||
.get_k3d_installation_score()
|
||||
.interpret(&Inventory::empty(), self)
|
||||
.await;
|
||||
|
||||
match result {
|
||||
Ok(outcome) => match outcome.status {
|
||||
InterpretStatus::SUCCESS => Ok(()),
|
||||
InterpretStatus::NOOP => Ok(()),
|
||||
_ => Err(PreparationError::new(outcome.message)),
|
||||
},
|
||||
Err(err) => Err(PreparationError::new(err.to_string())),
|
||||
}
|
||||
}
|
||||
|
||||
async fn try_get_or_install_k8s_client(&self) -> Result<Option<K8sState>, InterpretError> {
|
||||
async fn try_get_or_install_k8s_client(&self) -> Result<Option<K8sState>, PreparationError> {
|
||||
let k8s_anywhere_config = &self.config;
|
||||
|
||||
// TODO this deserves some refactoring, it is becoming a bit hard to figure out
|
||||
@@ -128,16 +201,16 @@ impl K8sAnywhereTopology {
|
||||
} else {
|
||||
if let Some(kubeconfig) = &k8s_anywhere_config.kubeconfig {
|
||||
debug!("Loading kubeconfig {kubeconfig}");
|
||||
match self.try_load_kubeconfig(&kubeconfig).await {
|
||||
match self.try_load_kubeconfig(kubeconfig).await {
|
||||
Some(client) => {
|
||||
return Ok(Some(K8sState {
|
||||
client: Arc::new(client),
|
||||
_source: K8sSource::Kubeconfig,
|
||||
source: K8sSource::Kubeconfig,
|
||||
message: format!("Loaded k8s client from kubeconfig {kubeconfig}"),
|
||||
}));
|
||||
}
|
||||
None => {
|
||||
return Err(InterpretError::new(format!(
|
||||
return Err(PreparationError::new(format!(
|
||||
"Failed to load kubeconfig from {kubeconfig}"
|
||||
)));
|
||||
}
|
||||
@@ -174,7 +247,7 @@ impl K8sAnywhereTopology {
|
||||
let state = match k3d.get_client().await {
|
||||
Ok(client) => K8sState {
|
||||
client: Arc::new(K8sClient::new(client)),
|
||||
_source: K8sSource::LocalK3d,
|
||||
source: K8sSource::LocalK3d,
|
||||
message: "K8s client ready".to_string(),
|
||||
},
|
||||
Err(_) => todo!(),
|
||||
@@ -183,15 +256,21 @@ impl K8sAnywhereTopology {
|
||||
Ok(Some(state))
|
||||
}
|
||||
|
||||
async fn ensure_k8s_tenant_manager(&self) -> Result<(), String> {
|
||||
if let Some(_) = self.tenant_manager.get() {
|
||||
async fn ensure_k8s_tenant_manager(&self, k8s_state: &K8sState) -> Result<(), String> {
|
||||
if self.tenant_manager.get().is_some() {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
self.tenant_manager
|
||||
.get_or_try_init(async || -> Result<K8sTenantManager, String> {
|
||||
let k8s_client = self.k8s_client().await?;
|
||||
Ok(K8sTenantManager::new(k8s_client))
|
||||
let network_policy_strategy: Box<dyn NetworkPolicyStrategy> = match k8s_state.source
|
||||
{
|
||||
K8sSource::LocalK3d => Box::new(K3dNetworkPolicyStrategy::new()),
|
||||
K8sSource::Kubeconfig => Box::new(NoopNetworkPolicyStrategy::new()),
|
||||
};
|
||||
|
||||
Ok(K8sTenantManager::new(k8s_client, network_policy_strategy))
|
||||
})
|
||||
.await?;
|
||||
|
||||
@@ -206,6 +285,55 @@ impl K8sAnywhereTopology {
|
||||
)),
|
||||
}
|
||||
}
|
||||
|
||||
async fn ensure_prometheus_operator(
|
||||
&self,
|
||||
sender: &CRDPrometheus,
|
||||
) -> Result<PreparationOutcome, PreparationError> {
|
||||
let status = Command::new("sh")
|
||||
.args(["-c", "kubectl get crd -A | grep -i prometheuses"])
|
||||
.status()
|
||||
.map_err(|e| PreparationError::new(format!("could not connect to cluster: {}", e)))?;
|
||||
|
||||
if !status.success() {
|
||||
if let Some(Some(k8s_state)) = self.k8s_state.get() {
|
||||
match k8s_state.source {
|
||||
K8sSource::LocalK3d => {
|
||||
debug!("installing prometheus operator");
|
||||
let op_score =
|
||||
prometheus_operator_helm_chart_score(sender.namespace.clone());
|
||||
let result = op_score.interpret(&Inventory::empty(), self).await;
|
||||
|
||||
return match result {
|
||||
Ok(outcome) => match outcome.status {
|
||||
InterpretStatus::SUCCESS => Ok(PreparationOutcome::Success {
|
||||
details: "installed prometheus operator".into(),
|
||||
}),
|
||||
InterpretStatus::NOOP => Ok(PreparationOutcome::Noop),
|
||||
_ => Err(PreparationError::new(
|
||||
"failed to install prometheus operator (unknown error)".into(),
|
||||
)),
|
||||
},
|
||||
Err(err) => Err(PreparationError::new(err.to_string())),
|
||||
};
|
||||
}
|
||||
K8sSource::Kubeconfig => {
|
||||
debug!("unable to install prometheus operator, contact cluster admin");
|
||||
return Ok(PreparationOutcome::Noop);
|
||||
}
|
||||
}
|
||||
} else {
|
||||
warn!("Unable to detect k8s_state. Skipping Prometheus Operator install.");
|
||||
return Ok(PreparationOutcome::Noop);
|
||||
}
|
||||
}
|
||||
|
||||
debug!("Prometheus operator is already present, skipping install");
|
||||
|
||||
Ok(PreparationOutcome::Success {
|
||||
details: "prometheus operator present in cluster".into(),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
@@ -263,26 +391,25 @@ impl Topology for K8sAnywhereTopology {
|
||||
"K8sAnywhereTopology"
|
||||
}
|
||||
|
||||
async fn ensure_ready(&self) -> Result<Outcome, InterpretError> {
|
||||
async fn ensure_ready(&self) -> Result<PreparationOutcome, PreparationError> {
|
||||
let k8s_state = self
|
||||
.k8s_state
|
||||
.get_or_try_init(|| self.try_get_or_install_k8s_client())
|
||||
.await?;
|
||||
|
||||
let k8s_state: &K8sState = k8s_state.as_ref().ok_or(InterpretError::new(
|
||||
"No K8s client could be found or installed".to_string(),
|
||||
let k8s_state: &K8sState = k8s_state.as_ref().ok_or(PreparationError::new(
|
||||
"no K8s client could be found or installed".to_string(),
|
||||
))?;
|
||||
|
||||
self.ensure_k8s_tenant_manager()
|
||||
self.ensure_k8s_tenant_manager(k8s_state)
|
||||
.await
|
||||
.map_err(|e| InterpretError::new(e))?;
|
||||
.map_err(PreparationError::new)?;
|
||||
|
||||
match self.is_helm_available() {
|
||||
Ok(()) => Ok(Outcome::success(format!(
|
||||
"{} + helm available",
|
||||
k8s_state.message.clone()
|
||||
))),
|
||||
Err(e) => Err(InterpretError::new(format!("helm unavailable: {}", e))),
|
||||
Ok(()) => Ok(PreparationOutcome::Success {
|
||||
details: format!("{} + helm available", k8s_state.message.clone()),
|
||||
}),
|
||||
Err(e) => Err(PreparationError::new(format!("helm unavailable: {}", e))),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -4,8 +4,9 @@ use async_trait::async_trait;
|
||||
use log::debug;
|
||||
use serde::Serialize;
|
||||
|
||||
use super::{IpAddress, LogicalHost};
|
||||
use super::LogicalHost;
|
||||
use crate::executors::ExecutorError;
|
||||
use harmony_types::net::IpAddress;
|
||||
|
||||
impl std::fmt::Debug for dyn LoadBalancer {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
|
||||
@@ -1,9 +1,7 @@
|
||||
use async_trait::async_trait;
|
||||
use derive_new::new;
|
||||
|
||||
use crate::interpret::{InterpretError, Outcome};
|
||||
|
||||
use super::{HelmCommand, Topology};
|
||||
use super::{HelmCommand, PreparationError, PreparationOutcome, Topology};
|
||||
|
||||
#[derive(new)]
|
||||
pub struct LocalhostTopology;
|
||||
@@ -14,10 +12,10 @@ impl Topology for LocalhostTopology {
|
||||
"LocalHostTopology"
|
||||
}
|
||||
|
||||
async fn ensure_ready(&self) -> Result<Outcome, InterpretError> {
|
||||
Ok(Outcome::success(
|
||||
"Localhost is Chuck Norris, always ready.".to_string(),
|
||||
))
|
||||
async fn ensure_ready(&self) -> Result<PreparationOutcome, PreparationError> {
|
||||
Ok(PreparationOutcome::Success {
|
||||
details: "Localhost is Chuck Norris, always ready.".into(),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
mod ha_cluster;
|
||||
use harmony_types::net::IpAddress;
|
||||
mod host_binding;
|
||||
mod http;
|
||||
pub mod installable;
|
||||
@@ -6,6 +7,7 @@ mod k8s_anywhere;
|
||||
mod localhost;
|
||||
pub mod oberservability;
|
||||
pub mod tenant;
|
||||
use derive_new::new;
|
||||
pub use k8s_anywhere::*;
|
||||
pub use localhost::*;
|
||||
pub mod k8s;
|
||||
@@ -26,9 +28,11 @@ pub use tftp::*;
|
||||
mod helm_command;
|
||||
pub use helm_command::*;
|
||||
|
||||
use std::net::IpAddr;
|
||||
|
||||
use super::interpret::{InterpretError, Outcome};
|
||||
use super::{
|
||||
executors::ExecutorError,
|
||||
instrumentation::{self, HarmonyEvent},
|
||||
};
|
||||
use std::error::Error;
|
||||
|
||||
/// Represents a logical view of an infrastructure environment providing specific capabilities.
|
||||
///
|
||||
@@ -57,9 +61,128 @@ pub trait Topology: Send + Sync {
|
||||
/// * **Internal Orchestration:** For complex topologies, this method might manage dependencies on other sub-topologies, ensuring *their* `ensure_ready` is called first. Using nested `Maestros` to run setup `Scores` against these sub-topologies is the recommended pattern for non-trivial bootstrapping, allowing reuse of Harmony's core orchestration logic.
|
||||
///
|
||||
/// # Returns
|
||||
/// - `Ok(Outcome)`: Indicates the topology is now ready. The `Outcome` status might be `SUCCESS` if actions were taken, or `NOOP` if it was already ready. The message should provide context.
|
||||
/// - `Err(TopologyError)`: Indicates the topology could not reach a ready state due to configuration issues, discovery failures, bootstrap errors, or unsupported environments.
|
||||
async fn ensure_ready(&self) -> Result<Outcome, InterpretError>;
|
||||
/// - `Ok(PreparationOutcome)`: Indicates the topology is now ready. The `Outcome` status might be `SUCCESS` if actions were taken, or `NOOP` if it was already ready. The message should provide context.
|
||||
/// - `Err(PreparationError)`: Indicates the topology could not reach a ready state due to configuration issues, discovery failures, bootstrap errors, or unsupported environments.
|
||||
async fn ensure_ready(&self) -> Result<PreparationOutcome, PreparationError>;
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, PartialEq, Eq)]
|
||||
pub enum PreparationOutcome {
|
||||
Success { details: String },
|
||||
Noop,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, new)]
|
||||
pub struct PreparationError {
|
||||
msg: String,
|
||||
}
|
||||
|
||||
impl std::fmt::Display for PreparationError {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
f.write_str(&self.msg)
|
||||
}
|
||||
}
|
||||
|
||||
impl Error for PreparationError {}
|
||||
|
||||
impl From<ExecutorError> for PreparationError {
|
||||
fn from(value: ExecutorError) -> Self {
|
||||
Self {
|
||||
msg: format!("InterpretError : {value}"),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl From<kube::Error> for PreparationError {
|
||||
fn from(value: kube::Error) -> Self {
|
||||
Self {
|
||||
msg: format!("PreparationError : {value}"),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl From<String> for PreparationError {
|
||||
fn from(value: String) -> Self {
|
||||
Self {
|
||||
msg: format!("PreparationError : {value}"),
|
||||
}
|
||||
}
|
||||
}
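// Illustrative only — a minimal Topology implementation using the new
// PreparationOutcome/PreparationError types. `NullTopology` is a hypothetical
// name, and `async_trait` is assumed to be in scope as it is for the other
// topologies in this changeset.
#[derive(Debug)]
pub struct NullTopology;

#[async_trait]
impl Topology for NullTopology {
    fn name(&self) -> &str {
        "NullTopology"
    }

    async fn ensure_ready(&self) -> Result<PreparationOutcome, PreparationError> {
        // Nothing to bootstrap, so report that no action was taken.
        Ok(PreparationOutcome::Noop)
    }
}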
|
||||
|
||||
#[derive(Clone, Debug, PartialEq)]
|
||||
pub enum TopologyStatus {
|
||||
Queued,
|
||||
Preparing,
|
||||
Success,
|
||||
Noop,
|
||||
Error,
|
||||
}
|
||||
|
||||
pub struct TopologyState {
|
||||
pub topology: String,
|
||||
pub status: TopologyStatus,
|
||||
}
|
||||
|
||||
impl TopologyState {
|
||||
pub fn new(topology: String) -> Self {
|
||||
let instance = Self {
|
||||
topology,
|
||||
status: TopologyStatus::Queued,
|
||||
};
|
||||
|
||||
instrumentation::instrument(HarmonyEvent::TopologyStateChanged {
|
||||
topology: instance.topology.clone(),
|
||||
status: instance.status.clone(),
|
||||
message: None,
|
||||
})
|
||||
.unwrap();
|
||||
|
||||
instance
|
||||
}
|
||||
|
||||
pub fn prepare(&mut self) {
|
||||
self.status = TopologyStatus::Preparing;
|
||||
|
||||
instrumentation::instrument(HarmonyEvent::TopologyStateChanged {
|
||||
topology: self.topology.clone(),
|
||||
status: self.status.clone(),
|
||||
message: None,
|
||||
})
|
||||
.unwrap();
|
||||
}
|
||||
|
||||
pub fn success(&mut self, message: String) {
|
||||
self.status = TopologyStatus::Success;
|
||||
|
||||
instrumentation::instrument(HarmonyEvent::TopologyStateChanged {
|
||||
topology: self.topology.clone(),
|
||||
status: self.status.clone(),
|
||||
message: Some(message),
|
||||
})
|
||||
.unwrap();
|
||||
}
|
||||
|
||||
pub fn noop(&mut self) {
|
||||
self.status = TopologyStatus::Noop;
|
||||
|
||||
instrumentation::instrument(HarmonyEvent::TopologyStateChanged {
|
||||
topology: self.topology.clone(),
|
||||
status: self.status.clone(),
|
||||
message: None,
|
||||
})
|
||||
.unwrap();
|
||||
}
|
||||
|
||||
pub fn error(&mut self, message: String) {
|
||||
self.status = TopologyStatus::Error;
|
||||
|
||||
instrumentation::instrument(HarmonyEvent::TopologyStateChanged {
|
||||
topology: self.topology.clone(),
|
||||
status: self.status.clone(),
|
||||
message: Some(message),
|
||||
})
|
||||
.unwrap();
|
||||
}
|
||||
}
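// Illustrative only — the expected TopologyState lifecycle, mirroring the
// transitions driven by Maestro::prepare_topology above. Assumes the
// instrumentation backend is initialized so the emitted events do not panic.
fn topology_state_lifecycle() {
    let mut state = TopologyState::new("ExampleTopology".to_string()); // Queued
    state.prepare();                                                   // Preparing
    state.success("cluster reachable".to_string());                    // Success
    assert_eq!(state.status, TopologyStatus::Success);
}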
|
||||
|
||||
#[derive(Debug)]
|
||||
@@ -73,35 +196,6 @@ pub trait MultiTargetTopology: Topology {
|
||||
fn current_target(&self) -> DeploymentTarget;
|
||||
}
|
||||
|
||||
pub type IpAddress = IpAddr;
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub enum Url {
|
||||
LocalFolder(String),
|
||||
Url(url::Url),
|
||||
}
|
||||
|
||||
impl Serialize for Url {
|
||||
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
|
||||
where
|
||||
S: serde::Serializer,
|
||||
{
|
||||
match self {
|
||||
Url::LocalFolder(path) => serializer.serialize_str(path),
|
||||
Url::Url(url) => serializer.serialize_str(&url.as_str()),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl std::fmt::Display for Url {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
match self {
|
||||
Url::LocalFolder(path) => write!(f, "{}", path),
|
||||
Url::Url(url) => write!(f, "{}", url),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Represents a logical member of a cluster that provides one or more services.
|
||||
///
|
||||
/// A LogicalHost can represent various roles within the infrastructure, such as:
|
||||
@@ -140,7 +234,8 @@ impl LogicalHost {
|
||||
///
|
||||
/// ```
|
||||
/// use std::str::FromStr;
|
||||
/// use harmony::topology::{IpAddress, LogicalHost};
|
||||
/// use harmony::topology::{LogicalHost};
|
||||
/// use harmony_types::net::IpAddress;
|
||||
///
|
||||
/// let start_ip = IpAddress::from_str("192.168.0.20").unwrap();
|
||||
/// let hosts = LogicalHost::create_hosts(3, start_ip, "worker");
|
||||
@@ -196,7 +291,7 @@ fn increment_ip(ip: IpAddress, increment: u32) -> Option<IpAddress> {
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use harmony_types::net::Url;
|
||||
use serde_json;
|
||||
|
||||
#[test]
|
||||
|
||||
@@ -1,12 +1,12 @@
|
||||
use std::{net::Ipv4Addr, str::FromStr, sync::Arc};
|
||||
|
||||
use async_trait::async_trait;
|
||||
use harmony_types::net::MacAddress;
|
||||
use harmony_types::net::{IpAddress, MacAddress};
|
||||
use serde::Serialize;
|
||||
|
||||
use crate::executors::ExecutorError;
|
||||
|
||||
use super::{IpAddress, LogicalHost, k8s::K8sClient};
|
||||
use super::{LogicalHost, k8s::K8sClient};
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct DHCPStaticEntry {
|
||||
@@ -46,16 +46,19 @@ pub trait K8sclient: Send + Sync {
|
||||
async fn k8s_client(&self) -> Result<Arc<K8sClient>, String>;
|
||||
}
|
||||
|
||||
pub struct PxeOptions {
|
||||
pub ipxe_filename: String,
|
||||
pub bios_filename: String,
|
||||
pub efi_filename: String,
|
||||
pub tftp_ip: Option<IpAddress>,
|
||||
}
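// Illustrative only — building the consolidated PxeOptions that replace the
// separate set_boot_filename/set_filename* calls; the filenames and TFTP address
// below are made-up placeholder values.
fn example_pxe_options() -> PxeOptions {
    PxeOptions {
        ipxe_filename: "undionly.kpxe".to_string(),
        bios_filename: "pxelinux.0".to_string(),
        efi_filename: "grubx64.efi".to_string(),
        // IpAddress is an alias for std::net::IpAddr, so str::parse applies.
        tftp_ip: "192.168.1.2".parse().ok(),
    }
}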
|
||||
|
||||
#[async_trait]
|
||||
pub trait DhcpServer: Send + Sync + std::fmt::Debug {
|
||||
async fn add_static_mapping(&self, entry: &DHCPStaticEntry) -> Result<(), ExecutorError>;
|
||||
async fn remove_static_mapping(&self, mac: &MacAddress) -> Result<(), ExecutorError>;
|
||||
async fn list_static_mappings(&self) -> Vec<(MacAddress, IpAddress)>;
|
||||
async fn set_next_server(&self, ip: IpAddress) -> Result<(), ExecutorError>;
|
||||
async fn set_boot_filename(&self, boot_filename: &str) -> Result<(), ExecutorError>;
|
||||
async fn set_filename(&self, filename: &str) -> Result<(), ExecutorError>;
|
||||
async fn set_filename64(&self, filename64: &str) -> Result<(), ExecutorError>;
|
||||
async fn set_filenameipxe(&self, filenameipxe: &str) -> Result<(), ExecutorError>;
|
||||
async fn set_pxe_options(&self, pxe_options: PxeOptions) -> Result<(), ExecutorError>;
|
||||
fn get_ip(&self) -> IpAddress;
|
||||
fn get_host(&self) -> LogicalHost;
|
||||
async fn commit_config(&self) -> Result<(), ExecutorError>;
|
||||
|
||||
@@ -1,12 +1,15 @@
|
||||
use std::any::Any;
|
||||
|
||||
use async_trait::async_trait;
|
||||
use log::debug;
|
||||
|
||||
use crate::{
|
||||
data::{Id, Version},
|
||||
data::Version,
|
||||
interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
|
||||
inventory::Inventory,
|
||||
topology::{Topology, installable::Installable},
|
||||
};
|
||||
use harmony_types::id::Id;
|
||||
|
||||
#[async_trait]
|
||||
pub trait AlertSender: Send + Sync + std::fmt::Debug {
|
||||
@@ -43,7 +46,7 @@ impl<S: AlertSender + Installable<T>, T: Topology> Interpret<T> for AlertingInte
|
||||
}
|
||||
|
||||
fn get_name(&self) -> InterpretName {
|
||||
todo!()
|
||||
InterpretName::Alerting
|
||||
}
|
||||
|
||||
fn get_version(&self) -> Version {
|
||||
@@ -62,7 +65,9 @@ impl<S: AlertSender + Installable<T>, T: Topology> Interpret<T> for AlertingInte
|
||||
#[async_trait]
|
||||
pub trait AlertReceiver<S: AlertSender>: std::fmt::Debug + Send + Sync {
|
||||
async fn install(&self, sender: &S) -> Result<Outcome, InterpretError>;
|
||||
fn name(&self) -> String;
|
||||
fn clone_box(&self) -> Box<dyn AlertReceiver<S>>;
|
||||
fn as_any(&self) -> &dyn Any;
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
@@ -72,6 +77,6 @@ pub trait AlertRule<S: AlertSender>: std::fmt::Debug + Send + Sync {
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
pub trait ScrapeTarger<S: AlertSender> {
|
||||
pub trait ScrapeTarget<S: AlertSender> {
|
||||
async fn install(&self, sender: &S) -> Result<(), InterpretError>;
|
||||
}
|
||||
|
||||
@@ -27,11 +27,11 @@ pub struct UnmanagedRouter {
|
||||
|
||||
impl Router for UnmanagedRouter {
|
||||
fn get_gateway(&self) -> IpAddress {
|
||||
self.gateway.clone()
|
||||
self.gateway
|
||||
}
|
||||
|
||||
fn get_cidr(&self) -> Ipv4Cidr {
|
||||
self.cidr.clone()
|
||||
self.cidr
|
||||
}
|
||||
|
||||
fn get_host(&self) -> LogicalHost {
|
||||
|
||||
@@ -15,36 +15,38 @@ use k8s_openapi::{
|
||||
apimachinery::pkg::util::intstr::IntOrString,
|
||||
};
|
||||
use kube::Resource;
|
||||
use log::{debug, info, warn};
|
||||
use log::debug;
|
||||
use serde::de::DeserializeOwned;
|
||||
use serde_json::json;
|
||||
use tokio::sync::OnceCell;
|
||||
|
||||
use super::{TenantConfig, TenantManager};
|
||||
use super::{TenantConfig, TenantManager, network_policy::NetworkPolicyStrategy};
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
#[derive(Debug)]
|
||||
pub struct K8sTenantManager {
|
||||
k8s_client: Arc<K8sClient>,
|
||||
k8s_tenant_config: Arc<OnceCell<TenantConfig>>,
|
||||
network_policy_strategy: Box<dyn NetworkPolicyStrategy>,
|
||||
}
|
||||
|
||||
impl K8sTenantManager {
|
||||
pub fn new(client: Arc<K8sClient>) -> Self {
|
||||
pub fn new(
|
||||
client: Arc<K8sClient>,
|
||||
network_policy_strategy: Box<dyn NetworkPolicyStrategy>,
|
||||
) -> Self {
|
||||
Self {
|
||||
k8s_client: client,
|
||||
k8s_tenant_config: Arc::new(OnceCell::new()),
|
||||
network_policy_strategy,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl K8sTenantManager {
|
||||
fn get_namespace_name(&self, config: &TenantConfig) -> String {
|
||||
config.name.clone()
|
||||
}
|
||||
|
||||
fn ensure_constraints(&self, _namespace: &Namespace) -> Result<(), ExecutorError> {
|
||||
warn!("Validate that when tenant already exists (by id) that name has not changed");
|
||||
warn!("Make sure other Tenant constraints are respected by this k8s implementation");
|
||||
// TODO: Ensure constraints are applied to namespace (https://git.nationtech.io/NationTech/harmony/issues/98)
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@@ -219,24 +221,6 @@ impl K8sTenantManager {
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"to": [
|
||||
{
|
||||
"ipBlock": {
|
||||
"cidr": "10.43.0.1/32",
|
||||
}
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"to": [
|
||||
{
|
||||
"ipBlock": {
|
||||
"cidr": "172.23.0.0/16",
|
||||
}
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"to": [
|
||||
{
|
||||
@@ -304,19 +288,19 @@ impl K8sTenantManager {
|
||||
let ports: Option<Vec<NetworkPolicyPort>> =
|
||||
c.1.as_ref().map(|spec| match &spec.data {
|
||||
super::PortSpecData::SinglePort(port) => vec![NetworkPolicyPort {
|
||||
port: Some(IntOrString::Int(port.clone().into())),
|
||||
port: Some(IntOrString::Int((*port).into())),
|
||||
..Default::default()
|
||||
}],
|
||||
super::PortSpecData::PortRange(start, end) => vec![NetworkPolicyPort {
|
||||
port: Some(IntOrString::Int(start.clone().into())),
|
||||
end_port: Some(end.clone().into()),
|
||||
port: Some(IntOrString::Int((*start).into())),
|
||||
end_port: Some((*end).into()),
|
||||
protocol: None, // Not currently supported by Harmony
|
||||
}],
|
||||
|
||||
super::PortSpecData::ListOfPorts(items) => items
|
||||
.iter()
|
||||
.map(|i| NetworkPolicyPort {
|
||||
port: Some(IntOrString::Int(i.clone().into())),
|
||||
port: Some(IntOrString::Int((*i).into())),
|
||||
..Default::default()
|
||||
})
|
||||
.collect(),
|
||||
@@ -361,19 +345,19 @@ impl K8sTenantManager {
|
||||
let ports: Option<Vec<NetworkPolicyPort>> =
|
||||
c.1.as_ref().map(|spec| match &spec.data {
|
||||
super::PortSpecData::SinglePort(port) => vec![NetworkPolicyPort {
|
||||
port: Some(IntOrString::Int(port.clone().into())),
|
||||
port: Some(IntOrString::Int((*port).into())),
|
||||
..Default::default()
|
||||
}],
|
||||
super::PortSpecData::PortRange(start, end) => vec![NetworkPolicyPort {
|
||||
port: Some(IntOrString::Int(start.clone().into())),
|
||||
end_port: Some(end.clone().into()),
|
||||
port: Some(IntOrString::Int((*start).into())),
|
||||
end_port: Some((*end).into()),
|
||||
protocol: None, // Not currently supported by Harmony
|
||||
}],
|
||||
|
||||
super::PortSpecData::ListOfPorts(items) => items
|
||||
.iter()
|
||||
.map(|i| NetworkPolicyPort {
|
||||
port: Some(IntOrString::Int(i.clone().into())),
|
||||
port: Some(IntOrString::Int((*i).into())),
|
||||
..Default::default()
|
||||
})
|
||||
.collect(),
|
||||
@@ -406,12 +390,27 @@ impl K8sTenantManager {
|
||||
}
|
||||
}
|
||||
|
||||
impl Clone for K8sTenantManager {
|
||||
fn clone(&self) -> Self {
|
||||
Self {
|
||||
k8s_client: self.k8s_client.clone(),
|
||||
k8s_tenant_config: self.k8s_tenant_config.clone(),
|
||||
network_policy_strategy: self.network_policy_strategy.clone_box(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl TenantManager for K8sTenantManager {
|
||||
async fn provision_tenant(&self, config: &TenantConfig) -> Result<(), ExecutorError> {
|
||||
let namespace = self.build_namespace(config)?;
|
||||
let resource_quota = self.build_resource_quota(config)?;
|
||||
|
||||
let network_policy = self.build_network_policy(config)?;
|
||||
let network_policy = self
|
||||
.network_policy_strategy
|
||||
.adjust_policy(network_policy, config);
|
||||
|
||||
let resource_limit_range = self.build_limit_range(config)?;
|
||||
|
||||
self.ensure_constraints(&namespace)?;
|
||||
@@ -428,13 +427,14 @@ impl TenantManager for K8sTenantManager {
|
||||
debug!("Creating network_policy for tenant {}", config.name);
|
||||
self.apply_resource(network_policy, config).await?;
|
||||
|
||||
info!(
|
||||
debug!(
|
||||
"Success provisionning K8s tenant id {} name {}",
|
||||
config.id, config.name
|
||||
);
|
||||
self.store_config(config);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn get_tenant_config(&self) -> Option<TenantConfig> {
|
||||
self.k8s_tenant_config.get().cloned()
|
||||
}
|
||||
|
||||
@@ -1,11 +1,11 @@
|
||||
pub mod k8s;
|
||||
mod manager;
|
||||
use std::str::FromStr;
|
||||
pub mod network_policy;
|
||||
|
||||
use harmony_types::id::Id;
|
||||
pub use manager::*;
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use crate::data::Id;
|
||||
use std::str::FromStr;
|
||||
|
||||
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] // Assuming serde for Scores
|
||||
pub struct TenantConfig {
|
||||
|
||||
harmony/src/domain/topology/tenant/network_policy.rs (new file, 120 lines)
@@ -0,0 +1,120 @@
|
||||
use k8s_openapi::api::networking::v1::{
|
||||
IPBlock, NetworkPolicy, NetworkPolicyEgressRule, NetworkPolicyPeer, NetworkPolicySpec,
|
||||
};
|
||||
|
||||
use super::TenantConfig;
|
||||
|
||||
pub trait NetworkPolicyStrategy: Send + Sync + std::fmt::Debug {
|
||||
fn clone_box(&self) -> Box<dyn NetworkPolicyStrategy>;
|
||||
|
||||
fn adjust_policy(&self, policy: NetworkPolicy, config: &TenantConfig) -> NetworkPolicy;
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct NoopNetworkPolicyStrategy {}
|
||||
|
||||
impl NoopNetworkPolicyStrategy {
|
||||
pub fn new() -> Self {
|
||||
Self {}
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for NoopNetworkPolicyStrategy {
|
||||
fn default() -> Self {
|
||||
Self::new()
|
||||
}
|
||||
}
|
||||
|
||||
impl NetworkPolicyStrategy for NoopNetworkPolicyStrategy {
|
||||
fn clone_box(&self) -> Box<dyn NetworkPolicyStrategy> {
|
||||
Box::new(self.clone())
|
||||
}
|
||||
|
||||
fn adjust_policy(&self, policy: NetworkPolicy, _config: &TenantConfig) -> NetworkPolicy {
|
||||
policy
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct K3dNetworkPolicyStrategy {}
|
||||
|
||||
impl K3dNetworkPolicyStrategy {
|
||||
pub fn new() -> Self {
|
||||
Self {}
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for K3dNetworkPolicyStrategy {
|
||||
fn default() -> Self {
|
||||
Self::new()
|
||||
}
|
||||
}
|
||||
|
||||
impl NetworkPolicyStrategy for K3dNetworkPolicyStrategy {
|
||||
fn clone_box(&self) -> Box<dyn NetworkPolicyStrategy> {
|
||||
Box::new(self.clone())
|
||||
}
|
||||
|
||||
fn adjust_policy(&self, policy: NetworkPolicy, _config: &TenantConfig) -> NetworkPolicy {
|
||||
let mut egress = policy
|
||||
.spec
|
||||
.clone()
|
||||
.unwrap_or_default()
|
||||
.egress
|
||||
.clone()
|
||||
.unwrap_or_default();
|
||||
egress.push(NetworkPolicyEgressRule {
|
||||
to: Some(vec![NetworkPolicyPeer {
|
||||
ip_block: Some(IPBlock {
|
||||
cidr: "172.18.0.0/16".into(), // TODO: query the IP range https://git.nationtech.io/NationTech/harmony/issues/108
|
||||
..Default::default()
|
||||
}),
|
||||
..Default::default()
|
||||
}]),
|
||||
..Default::default()
|
||||
});
|
||||
|
||||
NetworkPolicy {
|
||||
spec: Some(NetworkPolicySpec {
|
||||
egress: Some(egress),
|
||||
..policy.spec.unwrap_or_default()
|
||||
}),
|
||||
..policy
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use k8s_openapi::api::networking::v1::{
|
||||
IPBlock, NetworkPolicy, NetworkPolicyEgressRule, NetworkPolicyPeer, NetworkPolicySpec,
|
||||
};
|
||||
|
||||
use super::{K3dNetworkPolicyStrategy, NetworkPolicyStrategy};
|
||||
|
||||
#[test]
|
||||
pub fn should_add_ip_block_for_k3d_harmony_server() {
|
||||
let strategy = K3dNetworkPolicyStrategy::new();
|
||||
|
||||
let policy =
|
||||
strategy.adjust_policy(NetworkPolicy::default(), &super::TenantConfig::default());
|
||||
|
||||
let expected_policy = NetworkPolicy {
|
||||
spec: Some(NetworkPolicySpec {
|
||||
egress: Some(vec![NetworkPolicyEgressRule {
|
||||
to: Some(vec![NetworkPolicyPeer {
|
||||
ip_block: Some(IPBlock {
|
||||
cidr: "172.18.0.0/16".into(),
|
||||
..Default::default()
|
||||
}),
|
||||
..Default::default()
|
||||
}]),
|
||||
..Default::default()
|
||||
}]),
|
||||
..Default::default()
|
||||
}),
|
||||
..Default::default()
|
||||
};
|
||||
assert_eq!(expected_policy, policy);
|
||||
}
|
||||
}
|
||||
@@ -1,7 +1,7 @@
|
||||
use crate::executors::ExecutorError;
|
||||
use async_trait::async_trait;
|
||||
|
||||
use super::{IpAddress, Url};
|
||||
use harmony_types::net::{IpAddress, Url};
|
||||
|
||||
#[async_trait]
|
||||
pub trait TftpServer: Send + Sync {
|
||||
|
||||
@@ -3,11 +3,9 @@ use std::sync::Arc;
|
||||
|
||||
use russh::{client, keys::key};
|
||||
|
||||
use crate::{
|
||||
domain::executors::{ExecutorError, SshClient},
|
||||
topology::IpAddress,
|
||||
};
|
||||
use crate::domain::executors::{ExecutorError, SshClient};
|
||||
|
||||
use harmony_types::net::IpAddress;
|
||||
pub struct RusshClient;
|
||||
|
||||
#[async_trait]
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
use crate::hardware::ManagementInterface;
|
||||
use crate::topology::IpAddress;
|
||||
use derive_new::new;
|
||||
use harmony_types::net::IpAddress;
|
||||
use harmony_types::net::MacAddress;
|
||||
use log::info;
|
||||
use serde::Serialize;
|
||||
|
||||
harmony/src/infra/inventory/mod.rs (new file, 17 lines)
@@ -0,0 +1,17 @@
|
||||
use crate::{
|
||||
config::DATABASE_URL,
|
||||
infra::inventory::sqlite::SqliteInventoryRepository,
|
||||
inventory::{InventoryRepository, RepoError},
|
||||
};
|
||||
|
||||
pub mod sqlite;
|
||||
|
||||
pub struct InventoryRepositoryFactory;
|
||||
|
||||
impl InventoryRepositoryFactory {
|
||||
pub async fn build() -> Result<Box<dyn InventoryRepository>, RepoError> {
|
||||
Ok(Box::new(
|
||||
SqliteInventoryRepository::new(&(*DATABASE_URL)).await?,
|
||||
))
|
||||
}
|
||||
}
|
||||
harmony/src/infra/inventory/sqlite.rs (new file, 65 lines)
@@ -0,0 +1,65 @@
|
||||
use crate::{
|
||||
hardware::PhysicalHost,
|
||||
inventory::{InventoryRepository, RepoError},
|
||||
};
|
||||
use async_trait::async_trait;
|
||||
use harmony_types::id::Id;
|
||||
use log::info;
|
||||
use sqlx::{Pool, Sqlite, SqlitePool};
|
||||
|
||||
/// A thread-safe, connection-pooled repository using SQLite.
|
||||
#[derive(Debug)]
|
||||
pub struct SqliteInventoryRepository {
|
||||
pool: Pool<Sqlite>,
|
||||
}
|
||||
|
||||
impl SqliteInventoryRepository {
|
||||
pub async fn new(database_url: &str) -> Result<Self, RepoError> {
|
||||
let pool = SqlitePool::connect(database_url)
|
||||
.await
|
||||
.map_err(|e| RepoError::ConnectionFailed(e.to_string()))?;
|
||||
|
||||
info!("SQLite inventory repository initialized at '{database_url}'");
|
||||
Ok(Self { pool })
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl InventoryRepository for SqliteInventoryRepository {
|
||||
async fn save(&self, host: &PhysicalHost) -> Result<(), RepoError> {
|
||||
let data = serde_json::to_vec(host).map_err(|e| RepoError::Serialization(e.to_string()))?;
|
||||
|
||||
let id = Id::default().to_string();
|
||||
let host_id = host.id.to_string();
|
||||
|
||||
sqlx::query!(
|
||||
"INSERT INTO physical_hosts (id, version_id, data) VALUES (?, ?, ?)",
|
||||
host_id,
|
||||
id,
|
||||
data,
|
||||
)
|
||||
.execute(&self.pool)
|
||||
.await?;
|
||||
|
||||
info!("Saved new inventory version for host '{}'", host.id);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn get_latest_by_id(&self, host_id: &str) -> Result<Option<PhysicalHost>, RepoError> {
|
||||
let _row = sqlx::query_as!(
|
||||
DbHost,
|
||||
r#"SELECT id, version_id, data as "data: Json<PhysicalHost>" FROM physical_hosts WHERE id = ? ORDER BY version_id DESC LIMIT 1"#,
|
||||
host_id
|
||||
)
|
||||
.fetch_optional(&self.pool)
|
||||
.await?;
|
||||
todo!()
|
||||
}
|
||||
}
|
||||
|
||||
use sqlx::types::Json;
|
||||
struct DbHost {
|
||||
data: Json<PhysicalHost>,
|
||||
id: Id,
|
||||
version_id: Id,
|
||||
}
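// Illustrative only — not part of this changeset. A sketch of saving and reading
// a host through the repository above; `host` is assumed to be built elsewhere
// and `database_url` to point at a SQLite database with the physical_hosts table.
async fn record_host_example(
    database_url: &str,
    host: &PhysicalHost,
) -> Result<Option<PhysicalHost>, RepoError> {
    let repo = SqliteInventoryRepository::new(database_url).await?;
    // Every save inserts a new (id, version_id) row, so the most recent version
    // is what get_latest_by_id returns via ORDER BY version_id DESC LIMIT 1.
    repo.save(host).await?;
    repo.get_latest_by_id(&host.id.to_string()).await
}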
|
||||
@@ -1,4 +1,6 @@
|
||||
pub mod executors;
|
||||
pub mod hp_ilo;
|
||||
pub mod intel_amt;
|
||||
pub mod inventory;
|
||||
pub mod opnsense;
|
||||
mod sqlx;
|
||||
|
||||
@@ -1,13 +1,14 @@
|
||||
use async_trait::async_trait;
|
||||
use harmony_types::net::MacAddress;
|
||||
use log::debug;
|
||||
use log::info;
|
||||
|
||||
use crate::{
|
||||
executors::ExecutorError,
|
||||
topology::{DHCPStaticEntry, DhcpServer, IpAddress, LogicalHost},
|
||||
topology::{DHCPStaticEntry, DhcpServer, LogicalHost, PxeOptions},
|
||||
};
|
||||
|
||||
use super::OPNSenseFirewall;
|
||||
use harmony_types::net::IpAddress;
|
||||
|
||||
#[async_trait]
|
||||
impl DhcpServer for OPNSenseFirewall {
|
||||
@@ -26,7 +27,7 @@ impl DhcpServer for OPNSenseFirewall {
|
||||
.unwrap();
|
||||
}
|
||||
|
||||
debug!("Registered {:?}", entry);
|
||||
info!("Registered {:?}", entry);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@@ -46,57 +47,25 @@ impl DhcpServer for OPNSenseFirewall {
|
||||
self.host.clone()
|
||||
}
|
||||
|
||||
async fn set_next_server(&self, ip: IpAddress) -> Result<(), ExecutorError> {
|
||||
let ipv4 = match ip {
|
||||
std::net::IpAddr::V4(ipv4_addr) => ipv4_addr,
|
||||
std::net::IpAddr::V6(_) => todo!("ipv6 not supported yet"),
|
||||
};
|
||||
{
|
||||
let mut writable_opnsense = self.opnsense_config.write().await;
|
||||
writable_opnsense.dhcp().set_next_server(ipv4);
|
||||
debug!("OPNsense dhcp server set next server {ipv4}");
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn set_boot_filename(&self, boot_filename: &str) -> Result<(), ExecutorError> {
|
||||
{
|
||||
let mut writable_opnsense = self.opnsense_config.write().await;
|
||||
writable_opnsense.dhcp().set_boot_filename(boot_filename);
|
||||
debug!("OPNsense dhcp server set boot filename {boot_filename}");
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn set_filename(&self, filename: &str) -> Result<(), ExecutorError> {
|
||||
{
|
||||
let mut writable_opnsense = self.opnsense_config.write().await;
|
||||
writable_opnsense.dhcp().set_filename(filename);
|
||||
debug!("OPNsense dhcp server set filename {filename}");
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn set_filename64(&self, filename: &str) -> Result<(), ExecutorError> {
|
||||
{
|
||||
let mut writable_opnsense = self.opnsense_config.write().await;
|
||||
writable_opnsense.dhcp().set_filename64(filename);
|
||||
debug!("OPNsense dhcp server set filename {filename}");
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn set_filenameipxe(&self, filenameipxe: &str) -> Result<(), ExecutorError> {
|
||||
{
|
||||
let mut writable_opnsense = self.opnsense_config.write().await;
|
||||
writable_opnsense.dhcp().set_filenameipxe(filenameipxe);
|
||||
debug!("OPNsense dhcp server set filenameipxe {filenameipxe}");
|
||||
}
|
||||
|
||||
Ok(())
|
||||
async fn set_pxe_options(&self, options: PxeOptions) -> Result<(), ExecutorError> {
|
||||
let mut writable_opnsense = self.opnsense_config.write().await;
|
||||
let PxeOptions {
|
||||
ipxe_filename,
|
||||
bios_filename,
|
||||
efi_filename,
|
||||
tftp_ip,
|
||||
} = options;
|
||||
writable_opnsense
|
||||
.dhcp()
|
||||
.set_pxe_options(
|
||||
tftp_ip.map(|i| i.to_string()),
|
||||
bios_filename,
|
||||
efi_filename,
|
||||
ipxe_filename,
|
||||
)
|
||||
.await
|
||||
.map_err(|dhcp_error| {
|
||||
ExecutorError::UnexpectedError(format!("Failed to set_pxe_options : {dhcp_error}"))
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,11 +1,11 @@
|
||||
use crate::infra::opnsense::Host;
|
||||
use crate::infra::opnsense::IpAddress;
|
||||
use crate::infra::opnsense::LogicalHost;
|
||||
use crate::{
|
||||
executors::ExecutorError,
|
||||
topology::{DnsRecord, DnsServer},
|
||||
};
|
||||
use async_trait::async_trait;
|
||||
use harmony_types::net::IpAddress;
|
||||
|
||||
use super::OPNSenseFirewall;
|
||||
|
||||
@@ -60,7 +60,7 @@ impl DnsServer for OPNSenseFirewall {
|
||||
}
|
||||
|
||||
fn get_ip(&self) -> IpAddress {
|
||||
OPNSenseFirewall::get_ip(&self)
|
||||
OPNSenseFirewall::get_ip(self)
|
||||
}
|
||||
|
||||
fn get_host(&self) -> LogicalHost {
|
||||
|
||||
@@ -1,9 +1,10 @@
|
||||
use crate::{
|
||||
executors::ExecutorError,
|
||||
topology::{Firewall, FirewallRule, IpAddress, LogicalHost},
|
||||
topology::{Firewall, FirewallRule, LogicalHost},
|
||||
};
|
||||
|
||||
use super::OPNSenseFirewall;
|
||||
use harmony_types::net::IpAddress;
|
||||
|
||||
impl Firewall for OPNSenseFirewall {
|
||||
fn add_rule(&mut self, _rule: FirewallRule) -> Result<(), ExecutorError> {
|
||||
|
||||
@@ -1,24 +1,22 @@
use async_trait::async_trait;
use log::info;

use crate::{
executors::ExecutorError,
topology::{HttpServer, IpAddress, Url},
};
use crate::{data::FileContent, executors::ExecutorError, topology::HttpServer};

use super::OPNSenseFirewall;
use harmony_types::net::IpAddress;
use harmony_types::net::Url;
const OPNSENSE_HTTP_ROOT_PATH: &str = "/usr/local/http";

#[async_trait]
impl HttpServer for OPNSenseFirewall {
async fn serve_files(&self, url: &Url) -> Result<(), ExecutorError> {
let http_root_path = "/usr/local/http";

let config = self.opnsense_config.read().await;
info!("Uploading files from url {url} to {http_root_path}");
info!("Uploading files from url {url} to {OPNSENSE_HTTP_ROOT_PATH}");
match url {
Url::LocalFolder(path) => {
config
.upload_files(path, http_root_path)
.upload_files(path, OPNSENSE_HTTP_ROOT_PATH)
.await
.map_err(|e| ExecutorError::UnexpectedError(e.to_string()))?;
}
@@ -27,8 +25,29 @@ impl HttpServer for OPNSenseFirewall {
Ok(())
}

async fn serve_file_content(&self, file: &FileContent) -> Result<(), ExecutorError> {
let path = match &file.path {
crate::data::FilePath::Relative(path) => {
format!("{OPNSENSE_HTTP_ROOT_PATH}/{}", path.to_string())
}
crate::data::FilePath::Absolute(path) => {
return Err(ExecutorError::ConfigurationError(format!(
"Cannot serve file from http server with absolute path : {path}"
)));
}
};

let config = self.opnsense_config.read().await;
info!("Uploading file content to {}", path);
config
.upload_file_content(&path, &file.content)
.await
.map_err(|e| ExecutorError::UnexpectedError(e.to_string()))?;
Ok(())
}

fn get_ip(&self) -> IpAddress {
todo!();
OPNSenseFirewall::get_ip(self)
}

async fn commit_config(&self) -> Result<(), ExecutorError> {
@@ -48,7 +67,7 @@ impl HttpServer for OPNSenseFirewall {
async fn ensure_initialized(&self) -> Result<(), ExecutorError> {
let mut config = self.opnsense_config.write().await;
let caddy = config.caddy();
if let None = caddy.get_full_config() {
if caddy.get_full_config().is_none() {
info!("Http config not available in opnsense config, installing package");
config.install_package("os-caddy").await.map_err(|e| {
ExecutorError::UnexpectedError(format!(
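The new serve_file_content path only accepts relative paths and anchors them under OPNSENSE_HTTP_ROOT_PATH, rejecting absolute paths up front. Here is a condensed sketch of that dispatch using simplified stand-ins for crate::data::{FilePath, FileContent}; these are not the crate's real types.

// Simplified stand-ins for crate::data::{FilePath, FileContent}.
enum FilePath {
    Relative(String),
    Absolute(String),
}

struct FileContent {
    path: FilePath,
    content: String,
}

const OPNSENSE_HTTP_ROOT_PATH: &str = "/usr/local/http";

// Mirrors the match in serve_file_content: relative paths are joined to the
// HTTP root, absolute paths are rejected as a configuration error.
fn resolve_upload_path(file: &FileContent) -> Result<String, String> {
    match &file.path {
        FilePath::Relative(p) => Ok(format!("{OPNSENSE_HTTP_ROOT_PATH}/{p}")),
        FilePath::Absolute(p) => Err(format!(
            "Cannot serve file from http server with absolute path: {p}"
        )),
    }
}

fn main() {
    let ok = FileContent {
        path: FilePath::Relative("boot/boot.ipxe".to_string()),
        content: "#!ipxe".to_string(),
    };
    let bad = FileContent {
        path: FilePath::Absolute("/etc/passwd".to_string()),
        content: String::new(),
    };
    println!("{:?} ({} bytes)", resolve_upload_path(&ok), ok.content.len());
    println!("{:?}", resolve_upload_path(&bad));
}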
@@ -6,10 +6,11 @@ use uuid::Uuid;
use crate::{
executors::ExecutorError,
topology::{
BackendServer, HealthCheck, HttpMethod, HttpStatusCode, IpAddress, LoadBalancer,
LoadBalancerService, LogicalHost,
BackendServer, HealthCheck, HttpMethod, HttpStatusCode, LoadBalancer, LoadBalancerService,
LogicalHost,
},
};
use harmony_types::net::IpAddress;

use super::OPNSenseFirewall;

@@ -121,10 +122,12 @@ pub(crate) fn haproxy_xml_config_to_harmony_loadbalancer(

LoadBalancerService {
backend_servers,
listening_port: frontend.bind.parse().expect(&format!(
"HAProxy frontend address should be a valid SocketAddr, got {}",
frontend.bind
)),
listening_port: frontend.bind.parse().unwrap_or_else(|_| {
panic!(
"HAProxy frontend address should be a valid SocketAddr, got {}",
frontend.bind
)
}),
health_check,
}
})
@@ -167,28 +170,28 @@ pub(crate) fn get_health_check_for_backend(
None => return None,
};

let haproxy_health_check = match haproxy
let haproxy_health_check = haproxy
.healthchecks
.healthchecks
.iter()
.find(|h| &h.uuid == health_check_uuid)
{
Some(health_check) => health_check,
None => return None,
};
.find(|h| &h.uuid == health_check_uuid)?;

let binding = haproxy_health_check.health_check_type.to_uppercase();
let uppercase = binding.as_str();
match uppercase {
"TCP" => {
if let Some(checkport) = haproxy_health_check.checkport.content.as_ref() {
if checkport.len() > 0 {
return Some(HealthCheck::TCP(Some(checkport.parse().expect(&format!(
"HAProxy check port should be a valid port number, got {checkport}"
)))));
if !checkport.is_empty() {
return Some(HealthCheck::TCP(Some(checkport.parse().unwrap_or_else(
|_| {
panic!(
"HAProxy check port should be a valid port number, got {checkport}"
)
},
))));
}
}
return Some(HealthCheck::TCP(None));
Some(HealthCheck::TCP(None))
}
"HTTP" => {
let path: String = haproxy_health_check
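The expect(&format!(...)) to unwrap_or_else(|_| panic!(...)) rewrites above follow clippy's expect_fun_call lint: with expect the panic message is formatted even on the success path, while the closure is only evaluated if parsing fails. A standalone illustration, not project code:

fn main() {
    let checkport = "8443";

    // Eager: the message String is built even though the parse succeeds.
    let eager: u16 = checkport
        .parse()
        .expect(&format!("HAProxy check port should be a valid port number, got {checkport}"));

    // Lazy: the message is only formatted on the error path.
    let lazy: u16 = checkport.parse().unwrap_or_else(|_| {
        panic!("HAProxy check port should be a valid port number, got {checkport}")
    });

    assert_eq!(eager, lazy);
}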
@@ -355,16 +358,13 @@ mod tests {

// Create an HAProxy instance with servers
let mut haproxy = HAProxy::default();
let mut server = HAProxyServer::default();
server.uuid = "server1".to_string();
server.address = "192.168.1.1".to_string();
server.port = 80;

let server = HAProxyServer {
uuid: "server1".to_string(),
address: "192.168.1.1".to_string(),
port: 80,
..Default::default()
};
haproxy.servers.servers.push(server);
let mut server = HAProxyServer::default();
server.uuid = "server3".to_string();
server.address = "192.168.1.3".to_string();
server.port = 8080;

// Call the function
let result = get_servers_for_backend(&backend, &haproxy);
@@ -384,10 +384,12 @@ mod tests {
let backend = HAProxyBackend::default();
// Create an HAProxy instance with servers
let mut haproxy = HAProxy::default();
let mut server = HAProxyServer::default();
server.uuid = "server1".to_string();
server.address = "192.168.1.1".to_string();
server.port = 80;
let server = HAProxyServer {
uuid: "server1".to_string(),
address: "192.168.1.1".to_string(),
port: 80,
..Default::default()
};
haproxy.servers.servers.push(server);
// Call the function
let result = get_servers_for_backend(&backend, &haproxy);
@@ -402,10 +404,12 @@ mod tests {
backend.linked_servers.content = Some("server4,server5".to_string());
// Create an HAProxy instance with servers
let mut haproxy = HAProxy::default();
let mut server = HAProxyServer::default();
server.uuid = "server1".to_string();
server.address = "192.168.1.1".to_string();
server.port = 80;
let server = HAProxyServer {
uuid: "server1".to_string(),
address: "192.168.1.1".to_string(),
port: 80,
..Default::default()
};
haproxy.servers.servers.push(server);
// Call the function
let result = get_servers_for_backend(&backend, &haproxy);
@@ -416,20 +420,28 @@ mod tests {
#[test]
fn test_get_servers_for_backend_multiple_linked_servers() {
// Create a backend with multiple linked servers
#[allow(clippy::field_reassign_with_default)]
let mut backend = HAProxyBackend::default();
backend.linked_servers.content = Some("server1,server2".to_string());
//
// Create an HAProxy instance with matching servers
let mut haproxy = HAProxy::default();
let mut server = HAProxyServer::default();
server.uuid = "server1".to_string();
server.address = "some-hostname.test.mcd".to_string();
server.port = 80;
let server = HAProxyServer {
uuid: "server1".to_string(),
address: "some-hostname.test.mcd".to_string(),
port: 80,
..Default::default()
};
haproxy.servers.servers.push(server);
let mut server = HAProxyServer::default();
server.uuid = "server2".to_string();
server.address = "192.168.1.2".to_string();
server.port = 8080;

let server = HAProxyServer {
uuid: "server2".to_string(),
address: "192.168.1.2".to_string(),
port: 8080,
..Default::default()
};
haproxy.servers.servers.push(server);

// Call the function
let result = get_servers_for_backend(&backend, &haproxy);
// Check the result

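The test rewrites above replace field-by-field mutation of HAProxyServer::default() with struct-update syntax, which is what clippy::field_reassign_with_default asks for; any fields not listed still come from Default. The same pattern on a stand-in struct:

#[derive(Debug, Default, PartialEq)]
struct Server {
    uuid: String,
    address: String,
    port: u16,
    weight: u32, // stays at its default in both variants
}

fn main() {
    // Before: default value mutated field by field.
    let mut before = Server::default();
    before.uuid = "server1".to_string();
    before.address = "192.168.1.1".to_string();
    before.port = 80;

    // After: one struct literal; unlisted fields come from Default.
    let after = Server {
        uuid: "server1".to_string(),
        address: "192.168.1.1".to_string(),
        port: 80,
        ..Default::default()
    };

    assert_eq!(before, after);
}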
@@ -11,10 +11,8 @@ pub use management::*;
use opnsense_config_xml::Host;
use tokio::sync::RwLock;

use crate::{
executors::ExecutorError,
topology::{IpAddress, LogicalHost},
};
use crate::{executors::ExecutorError, topology::LogicalHost};
use harmony_types::net::IpAddress;

#[derive(Debug, Clone)]
pub struct OPNSenseFirewall {

@@ -1,10 +1,9 @@
use async_trait::async_trait;
use log::info;

use crate::{
executors::ExecutorError,
topology::{IpAddress, TftpServer, Url},
};
use crate::{executors::ExecutorError, topology::TftpServer};
use harmony_types::net::IpAddress;
use harmony_types::net::Url;

use super::OPNSenseFirewall;

@@ -28,7 +27,7 @@ impl TftpServer for OPNSenseFirewall {
}

fn get_ip(&self) -> IpAddress {
todo!()
OPNSenseFirewall::get_ip(self)
}

async fn set_ip(&self, ip: IpAddress) -> Result<(), ExecutorError> {
@@ -58,7 +57,7 @@ impl TftpServer for OPNSenseFirewall {
async fn ensure_initialized(&self) -> Result<(), ExecutorError> {
let mut config = self.opnsense_config.write().await;
let tftp = config.tftp();
if let None = tftp.get_full_config() {
if tftp.get_full_config().is_none() {
info!("Tftp config not available in opnsense config, installing package");
config.install_package("os-tftp").await.map_err(|e| {
ExecutorError::UnexpectedError(format!(

36  harmony/src/infra/sqlx.rs  (new file)
@@ -0,0 +1,36 @@
use crate::inventory::RepoError;

impl From<sqlx::Error> for RepoError {
fn from(value: sqlx::Error) -> Self {
match value {
sqlx::Error::Configuration(_)
| sqlx::Error::Io(_)
| sqlx::Error::Tls(_)
| sqlx::Error::Protocol(_)
| sqlx::Error::PoolTimedOut
| sqlx::Error::PoolClosed
| sqlx::Error::WorkerCrashed => RepoError::ConnectionFailed(value.to_string()),
sqlx::Error::InvalidArgument(_)
| sqlx::Error::Database(_)
| sqlx::Error::RowNotFound
| sqlx::Error::TypeNotFound { .. }
| sqlx::Error::ColumnIndexOutOfBounds { .. }
| sqlx::Error::ColumnNotFound(_)
| sqlx::Error::AnyDriverError(_)
| sqlx::Error::Migrate(_)
| sqlx::Error::InvalidSavePointStatement
| sqlx::Error::BeginFailed => RepoError::QueryFailed(value.to_string()),
sqlx::Error::Encode(_) => RepoError::Serialization(value.to_string()),
sqlx::Error::Decode(_) | sqlx::Error::ColumnDecode { .. } => {
RepoError::Deserialization(value.to_string())
}
_ => RepoError::QueryFailed(value.to_string()),
}
}
}

impl From<serde_json::Error> for RepoError {
fn from(value: serde_json::Error) -> Self {
RepoError::Serialization(value.to_string())
}
}
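These From impls let repository code turn sqlx and serde_json failures into RepoError with just the ? operator. A minimal sketch of that mechanic with a stand-in error type and a std error source; the real RepoError lives in crate::inventory and carries more variants.

// Stand-in for crate::inventory::RepoError; variant names assumed from the impl above.
#[derive(Debug)]
enum RepoError {
    QueryFailed(String),
    Deserialization(String),
}

// With a From impl in place, `?` converts the source error automatically,
// just like the sqlx::Error and serde_json::Error conversions above.
impl From<std::num::ParseIntError> for RepoError {
    fn from(value: std::num::ParseIntError) -> Self {
        RepoError::Deserialization(value.to_string())
    }
}

fn load_version(raw: &str) -> Result<u32, RepoError> {
    if raw.is_empty() {
        return Err(RepoError::QueryFailed("empty row".to_string()));
    }
    let version = raw.trim().parse::<u32>()?; // ParseIntError -> RepoError via From
    Ok(version)
}

fn main() {
    println!("{:?}", load_version("42"));
    println!("{:?}", load_version("not-a-number"));
}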
@@ -13,7 +13,7 @@ pub trait ApplicationFeature<T: Topology>:
fn name(&self) -> String;
}

trait ApplicationFeatureClone<T: Topology> {
pub trait ApplicationFeatureClone<T: Topology> {
fn clone_box(&self) -> Box<dyn ApplicationFeature<T>>;
}

@@ -27,7 +27,7 @@ where
}

impl<T: Topology> Serialize for Box<dyn ApplicationFeature<T>> {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
fn serialize<S>(&self, _serializer: S) -> Result<S::Ok, S::Error>
where
S: serde::Serializer,
{

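Making ApplicationFeatureClone public exposes the usual clone-through-a-trait-object pattern: Box<dyn Trait> cannot derive Clone, so a clone_box method dispatches to the concrete type. A self-contained, simplified version of that pattern, with the generic topology parameter dropped for brevity:

trait Feature: FeatureClone {
    fn name(&self) -> String;
}

// Object-safe cloning: every concrete Feature knows how to box-clone itself.
trait FeatureClone {
    fn clone_box(&self) -> Box<dyn Feature>;
}

// Blanket impl: any Clone + 'static Feature gets clone_box for free.
impl<T> FeatureClone for T
where
    T: Feature + Clone + 'static,
{
    fn clone_box(&self) -> Box<dyn Feature> {
        Box::new(self.clone())
    }
}

impl Clone for Box<dyn Feature> {
    fn clone(&self) -> Self {
        self.clone_box()
    }
}

#[derive(Clone)]
struct Monitoring;

impl Feature for Monitoring {
    fn name(&self) -> String {
        "monitoring".to_string()
    }
}

fn main() {
    let feature: Box<dyn Feature> = Box::new(Monitoring);
    let copy = feature.clone();
    println!("{}", copy.name());
}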
@@ -184,12 +184,11 @@ impl ArgoApplication {
pub fn to_yaml(&self) -> serde_yaml::Value {
let name = &self.name;
let namespace = if let Some(ns) = self.namespace.as_ref() {
&ns
ns
} else {
"argocd"
};
let project = &self.project;
let source = &self.source;

let yaml_str = format!(
r#"
@@ -228,7 +227,7 @@ spec:
serde_yaml::to_value(&self.source).expect("couldn't serialize source to value");
let sync_policy = serde_yaml::to_value(&self.sync_policy)
.expect("couldn't serialize sync_policy to value");
let revision_history_limit = serde_yaml::to_value(&self.revision_history_limit)
let revision_history_limit = serde_yaml::to_value(self.revision_history_limit)
.expect("couldn't serialize revision_history_limit to value");

spec.insert(

@@ -1,7 +1,7 @@
use std::{io::Write, process::Command, sync::Arc};

use async_trait::async_trait;
use log::{debug, error};
use log::info;
use serde_yaml::Value;
use tempfile::NamedTempFile;

@@ -10,7 +10,7 @@ use crate::{
data::Version,
inventory::Inventory,
modules::application::{
Application, ApplicationFeature, HelmPackage, OCICompliant,
ApplicationFeature, HelmPackage, OCICompliant,
features::{ArgoApplication, ArgoHelmScore},
},
score::Score,
@@ -56,14 +56,11 @@ impl<A: OCICompliant + HelmPackage> ContinuousDelivery<A> {
chart_url: String,
image_name: String,
) -> Result<(), String> {
error!(
"FIXME This works only with local k3d installations, which is fine only for current demo purposes. We assume usage of K8sAnywhereTopology"
);

error!("TODO hardcoded k3d bin path is wrong");
// TODO: This works only with local k3d installations, which is fine only for current demo purposes. We assume usage of K8sAnywhereTopology"
// https://git.nationtech.io/NationTech/harmony/issues/106
let k3d_bin_path = (*HARMONY_DATA_DIR).join("k3d").join("k3d");
// --- 1. Import the container image into the k3d cluster ---
debug!(
info!(
"Importing image '{}' into k3d cluster 'harmony'",
image_name
);
@@ -80,7 +77,7 @@ impl<A: OCICompliant + HelmPackage> ContinuousDelivery<A> {
}

// --- 2. Get the kubeconfig for the k3d cluster and write it to a temp file ---
debug!("Retrieving kubeconfig for k3d cluster 'harmony'");
info!("Retrieving kubeconfig for k3d cluster 'harmony'");
let kubeconfig_output = Command::new(&k3d_bin_path)
.args(["kubeconfig", "get", "harmony"])
.output()
@@ -101,7 +98,7 @@ impl<A: OCICompliant + HelmPackage> ContinuousDelivery<A> {
let kubeconfig_path = temp_kubeconfig.path().to_str().unwrap();

// --- 3. Install or upgrade the Helm chart in the cluster ---
debug!(
info!(
"Deploying Helm chart '{}' to namespace '{}'",
chart_url, app_name
);
@@ -131,7 +128,7 @@ impl<A: OCICompliant + HelmPackage> ContinuousDelivery<A> {
));
}

debug!("Successfully deployed '{}' to local k3d cluster.", app_name);
info!("Successfully deployed '{}' to local k3d cluster.", app_name);
Ok(())
}
}
@@ -151,14 +148,12 @@ impl<
// Or ask for it when unknown

let helm_chart = self.application.build_push_helm_package(&image).await?;
debug!("Pushed new helm chart {helm_chart}");

error!("TODO Make building image configurable/skippable if image already exists (prompt)");
// TODO: Make building image configurable/skippable if image already exists (prompt)")
// https://git.nationtech.io/NationTech/harmony/issues/104
let image = self.application.build_push_oci_image().await?;
debug!("Pushed new docker image {image}");

debug!("Installing ContinuousDelivery feature");
// TODO this is a temporary hack for demo purposes, the deployment target should be driven
// TODO: this is a temporary hack for demo purposes, the deployment target should be driven
// by the topology only and we should not have to know how to perform tasks like this for
// which the topology should be responsible.
//
@@ -171,17 +166,20 @@ impl<
// access it. This forces every Topology to understand the concept of targets though... So
// instead I'll create a new Capability which is MultiTargetTopology and we'll see how it
// goes. It still does not feel right though.
//
// https://git.nationtech.io/NationTech/harmony/issues/106
match topology.current_target() {
DeploymentTarget::LocalDev => {
info!("Deploying {} locally...", self.application.name());
self.deploy_to_local_k3d(self.application.name(), helm_chart, image)
.await?;
}
target => {
debug!("Deploying to target {target:?}");
info!("Deploying {} to target {target:?}", self.application.name());
let score = ArgoHelmScore {
namespace: "harmonydemo-staging".to_string(),
openshift: false,
domain: "argo.harmonydemo.apps.st.mcd".to_string(),
namespace: "harmony-example-rust-webapp".to_string(),
openshift: true,
domain: "argo.harmonydemo.apps.ncd0.harmony.mcd".to_string(),
argo_apps: vec![ArgoApplication::from(CDApplicationConfig {
// helm pull oci://hub.nationtech.io/harmony/harmony-example-rust-webapp-chart --version 0.1.0
version: Version::from("0.1.0").unwrap(),
@@ -189,12 +187,11 @@ impl<
helm_chart_name: "harmony-example-rust-webapp-chart".to_string(),
values_overrides: None,
name: "harmony-demo-rust-webapp".to_string(),
namespace: "harmonydemo-staging".to_string(),
namespace: "harmony-example-rust-webapp".to_string(),
})],
};
score
.create_interpret()
.execute(&Inventory::empty(), topology)
.interpret(&Inventory::empty(), topology)
.await
.unwrap();
}

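The deployment dispatch above keys off topology.current_target(): a LocalDev target takes the direct k3d path, anything else falls through to the Argo/Helm score. A reduced sketch of that branching; only LocalDev appears in the hunk, so the second variant here is illustrative.

// Assumed shape of the target enum; only LocalDev is shown in the diff.
#[derive(Debug)]
enum DeploymentTarget {
    LocalDev,
    Staging,
}

fn deploy(target: DeploymentTarget, app_name: &str) {
    match target {
        DeploymentTarget::LocalDev => {
            // Local path: import the image into k3d and helm-install directly.
            println!("Deploying {app_name} locally...");
        }
        target => {
            // Everything else goes through the ArgoCD application score.
            println!("Deploying {app_name} to target {target:?}");
        }
    }
}

fn main() {
    deploy(DeploymentTarget::LocalDev, "rust-webapp");
    deploy(DeploymentTarget::Staging, "rust-webapp");
}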
@@ -1,17 +1,17 @@
use async_trait::async_trait;
use log::error;
use non_blank_string_rs::NonBlankString;
use serde::Serialize;
use std::str::FromStr;

use crate::{
data::{Id, Version},
data::Version,
interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
inventory::Inventory,
modules::helm::chart::{HelmChartScore, HelmRepository},
score::Score,
topology::{HelmCommand, K8sclient, Topology},
};
use harmony_types::id::Id;

use super::ArgoApplication;

@@ -50,20 +50,21 @@ impl<T: Topology + K8sclient + HelmCommand> Interpret<T> for ArgoInterpret {
inventory: &Inventory,
topology: &T,
) -> Result<Outcome, InterpretError> {
error!("Uncomment below, only disabled for debugging");
self.score
.create_interpret()
.execute(inventory, topology)
.await?;
self.score.interpret(inventory, topology).await?;

let k8s_client = topology.k8s_client().await?;
k8s_client
.apply_yaml_many(&self.argo_apps.iter().map(|a| a.to_yaml()).collect(), None)
.await
.unwrap();

Ok(Outcome::success(format!(
"Successfully installed ArgoCD and {} Applications",
self.argo_apps.len()
"ArgoCD installed with {} {}",
self.argo_apps.len(),
match self.argo_apps.len() {
1 => "application",
_ => "applications",
}
)))
}

@@ -986,7 +987,7 @@ commitServer:
);

HelmChartScore {
namespace: Some(NonBlankString::from_str(&namespace).unwrap()),
namespace: Some(NonBlankString::from_str(namespace).unwrap()),
release_name: NonBlankString::from_str("argo-cd").unwrap(),
chart_name: NonBlankString::from_str("argo/argo-cd").unwrap(),
chart_version: Some(NonBlankString::from_str("8.1.2").unwrap()),

@@ -1,51 +1,68 @@
use std::sync::Arc;

use async_trait::async_trait;
use base64::{Engine as _, engine::general_purpose};
use log::{debug, info};
use crate::modules::application::{Application, ApplicationFeature};
use crate::modules::monitoring::application_monitoring::application_monitoring_score::ApplicationMonitoringScore;
use crate::modules::monitoring::kube_prometheus::crd::crd_alertmanager_config::CRDPrometheus;

use crate::topology::MultiTargetTopology;
use crate::{
inventory::Inventory,
modules::{
application::{ApplicationFeature, OCICompliant},
monitoring::{
alert_channel::webhook_receiver::WebhookReceiver,
kube_prometheus::{
helm_prometheus_alert_score::HelmPrometheusAlertingScore,
types::{NamespaceSelector, ServiceMonitor},
},
ntfy::ntfy::NtfyScore,
},
modules::monitoring::{
alert_channel::webhook_receiver::WebhookReceiver, ntfy::ntfy::NtfyScore,
},
score::Score,
topology::{HelmCommand, K8sclient, Topology, Url, tenant::TenantManager},
topology::{HelmCommand, K8sclient, Topology, tenant::TenantManager},
};
use crate::{
modules::prometheus::prometheus::PrometheusApplicationMonitoring,
topology::oberservability::monitoring::AlertReceiver,
};
use async_trait::async_trait;
use base64::{Engine as _, engine::general_purpose};
use harmony_types::net::Url;
use log::{debug, info};

#[derive(Debug, Clone)]
pub struct Monitoring {
pub application: Arc<dyn OCICompliant>,
pub application: Arc<dyn Application>,
pub alert_receiver: Vec<Box<dyn AlertReceiver<CRDPrometheus>>>,
}

#[async_trait]
impl<T: Topology + HelmCommand + K8sclient + 'static + TenantManager> ApplicationFeature<T>
for Monitoring
impl<
T: Topology
+ HelmCommand
+ 'static
+ TenantManager
+ K8sclient
+ MultiTargetTopology
+ std::fmt::Debug
+ PrometheusApplicationMonitoring<CRDPrometheus>,
> ApplicationFeature<T> for Monitoring
{
async fn ensure_installed(&self, topology: &T) -> Result<(), String> {
info!("Ensuring monitoring is available for application");

let ntfy = NtfyScore {
// namespace: topology
// .get_tenant_config()
// .await
// .expect("couldn't get tenant config")
// .name,
namespace: self.application.name(),
host: "localhost".to_string(),
};
ntfy.create_interpret()
.execute(&Inventory::empty(), topology)
let namespace = topology
.get_tenant_config()
.await
.expect("couldn't create interpret for ntfy");
.map(|ns| ns.name.clone())
.unwrap_or_else(|| self.application.name());

let mut alerting_score = ApplicationMonitoringScore {
sender: CRDPrometheus {
namespace: namespace.clone(),
client: topology.k8s_client().await.unwrap(),
},
application: self.application.clone(),
receivers: self.alert_receiver.clone(),
};
let ntfy = NtfyScore {
namespace: namespace.clone(),
host: "ntfy.harmonydemo.apps.ncd0.harmony.mcd".to_string(),
};
ntfy.interpret(&Inventory::empty(), topology)
.await
.map_err(|e| e.to_string())?;

let ntfy_default_auth_username = "harmony";
let ntfy_default_auth_password = "harmony";
@@ -70,7 +87,7 @@ impl<T: Topology + HelmCommand + K8sclient + 'static + TenantManager> Applicatio
url::Url::parse(
format!(
"http://ntfy.{}.svc.cluster.local/rust-web-app?auth={ntfy_default_auth_param}",
self.application.name()
namespace.clone()
)
.as_str(),
)
@@ -78,31 +95,11 @@ impl<T: Topology + HelmCommand + K8sclient + 'static + TenantManager> Applicatio
),
};

let mut service_monitor = ServiceMonitor::default();
service_monitor.namespace_selector = Some(NamespaceSelector {
any: true,
match_names: vec![],
});

service_monitor.name = "rust-webapp".to_string();

// let alerting_score = ApplicationPrometheusMonitoringScore {
// receivers: vec![Box::new(ntfy_receiver)],
// rules: vec![],
// service_monitors: vec![service_monitor],
// };

let alerting_score = HelmPrometheusAlertingScore {
receivers: vec![Box::new(ntfy_receiver)],
rules: vec![],
service_monitors: vec![service_monitor],
};

alerting_score.receivers.push(Box::new(ntfy_receiver));
alerting_score
.create_interpret()
.execute(&Inventory::empty(), topology)
.interpret(&Inventory::empty(), topology)
.await
.unwrap();
.map_err(|e| e.to_string())?;
Ok(())
}
fn name(&self) -> String {

@@ -10,13 +10,23 @@ pub use oci::*;
pub use rust::*;

use async_trait::async_trait;
use serde::Serialize;

use crate::{
data::{Id, Version},
data::Version,
instrumentation::{self, HarmonyEvent},
interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
inventory::Inventory,
topology::Topology,
};
use harmony_types::id::Id;

#[derive(Clone, Debug)]
pub enum ApplicationFeatureStatus {
Installing,
Installed,
Failed { details: String },
}

pub trait Application: std::fmt::Debug + Send + Sync {
fn name(&self) -> String;
@@ -46,20 +56,41 @@ impl<A: Application, T: Topology + std::fmt::Debug> Interpret<T> for Application
.join(", ")
);
for feature in self.features.iter() {
debug!(
"Installing feature {} for application {app_name}",
feature.name()
);
instrumentation::instrument(HarmonyEvent::ApplicationFeatureStateChanged {
topology: topology.name().into(),
application: self.application.name(),
feature: feature.name(),
status: ApplicationFeatureStatus::Installing,
})
.unwrap();

let _ = match feature.ensure_installed(topology).await {
Ok(()) => (),
Ok(()) => {
instrumentation::instrument(HarmonyEvent::ApplicationFeatureStateChanged {
topology: topology.name().into(),
application: self.application.name(),
feature: feature.name(),
status: ApplicationFeatureStatus::Installed,
})
.unwrap();
}
Err(msg) => {
instrumentation::instrument(HarmonyEvent::ApplicationFeatureStateChanged {
topology: topology.name().into(),
application: self.application.name(),
feature: feature.name(),
status: ApplicationFeatureStatus::Failed {
details: msg.clone(),
},
})
.unwrap();
return Err(InterpretError::new(format!(
"Application Interpret failed to install feature : {msg}"
)));
}
};
}
Ok(Outcome::success("successfully created app".to_string()))
Ok(Outcome::success("Application created".to_string()))
}

fn get_name(&self) -> InterpretName {
@@ -78,3 +109,12 @@ impl<A: Application, T: Topology + std::fmt::Debug> Interpret<T> for Application
todo!()
}
}

impl Serialize for dyn Application {
fn serialize<S>(&self, _serializer: S) -> Result<S::Ok, S::Error>
where
S: serde::Serializer,
{
todo!()
}
}

@@ -1,5 +1,5 @@
use std::fs;
use std::path::PathBuf;
use std::path::{Path, PathBuf};
use std::process;
use std::sync::Arc;

@@ -10,15 +10,13 @@ use dockerfile_builder::Dockerfile;
use dockerfile_builder::instruction::{CMD, COPY, ENV, EXPOSE, FROM, RUN, USER, WORKDIR};
use dockerfile_builder::instruction_builder::CopyBuilder;
use futures_util::StreamExt;
use log::{debug, error, log_enabled};
use log::{debug, info, log_enabled};
use serde::Serialize;
use tar::Archive;

use crate::config::{REGISTRY_PROJECT, REGISTRY_URL};
use crate::{
score::Score,
topology::{Topology, Url},
};
use crate::{score::Score, topology::Topology};
use harmony_types::net::Url;

use super::{Application, ApplicationFeature, ApplicationInterpret, HelmPackage, OCICompliant};

@@ -46,7 +44,7 @@ where
}

fn name(&self) -> String {
format!("Application: {}", self.application.name())
format!("{} [ApplicationScore]", self.application.name())
}
}

@@ -73,19 +71,19 @@ impl Application for RustWebapp {
#[async_trait]
impl HelmPackage for RustWebapp {
async fn build_push_helm_package(&self, image_url: &str) -> Result<String, String> {
debug!("Starting Helm chart build and push for '{}'", self.name);
info!("Starting Helm chart build and push for '{}'", self.name);

// 1. Create the Helm chart files on disk.
let chart_dir = self
.create_helm_chart_files(image_url)
.map_err(|e| format!("Failed to create Helm chart files: {}", e))?;
debug!("Successfully created Helm chart files in {:?}", chart_dir);
info!("Successfully created Helm chart files in {:?}", chart_dir);

// 2. Package the chart into a .tgz archive.
let packaged_chart_path = self
.package_helm_chart(&chart_dir)
.map_err(|e| format!("Failed to package Helm chart: {}", e))?;
debug!(
info!(
"Successfully packaged Helm chart: {}",
packaged_chart_path.to_string_lossy()
);
@@ -94,7 +92,7 @@ impl HelmPackage for RustWebapp {
let oci_chart_url = self
.push_helm_chart(&packaged_chart_path)
.map_err(|e| format!("Failed to push Helm chart: {}", e))?;
debug!("Successfully pushed Helm chart to: {}", oci_chart_url);
info!("Successfully pushed Helm chart to: {}", oci_chart_url);

Ok(oci_chart_url)
}
@@ -107,20 +105,20 @@ impl OCICompliant for RustWebapp {
async fn build_push_oci_image(&self) -> Result<String, String> {
// This function orchestrates the build and push process.
// It's async to match the trait definition, though the underlying docker commands are blocking.
debug!("Starting OCI image build and push for '{}'", self.name);
info!("Starting OCI image build and push for '{}'", self.name);

// 1. Build the image by calling the synchronous helper function.
let image_tag = self.image_name();
self.build_docker_image(&image_tag)
.await
.map_err(|e| format!("Failed to build Docker image: {}", e))?;
debug!("Successfully built Docker image: {}", image_tag);
info!("Successfully built Docker image: {}", image_tag);

// 2. Push the image to the registry.
self.push_docker_image(&image_tag)
.await
.map_err(|e| format!("Failed to push Docker image: {}", e))?;
debug!("Successfully pushed Docker image to: {}", image_tag);
info!("Successfully pushed Docker image to: {}", image_tag);

Ok(image_tag)
}
@@ -174,7 +172,7 @@ impl RustWebapp {
.platform("linux/x86_64");

let mut temp_tar_builder = tar::Builder::new(Vec::new());
let _ = temp_tar_builder
temp_tar_builder
.append_dir_all("", self.project_root.clone())
.unwrap();
let archive = temp_tar_builder
@@ -195,7 +193,7 @@ impl RustWebapp {
);

while let Some(msg) = image_build_stream.next().await {
println!("Message: {msg:?}");
debug!("Message: {msg:?}");
}

Ok(image_name.to_string())
@@ -219,7 +217,7 @@ impl RustWebapp {
);

while let Some(msg) = push_image_stream.next().await {
println!("Message: {msg:?}");
debug!("Message: {msg:?}");
}

Ok(image_tag.to_string())
@@ -288,9 +286,8 @@ impl RustWebapp {
.unwrap(),
);
// Copy the compiled binary from the builder stage.
error!(
"FIXME Should not be using score name here, instead should use name from Cargo.toml"
);
// TODO: Should not be using score name here, instead should use name from Cargo.toml
// https://git.nationtech.io/NationTech/harmony/issues/105
let binary_path_in_builder = format!("/app/target/release/{}", self.name);
let binary_path_in_final = format!("/home/appuser/{}", self.name);
dockerfile.push(
@@ -328,9 +325,8 @@ impl RustWebapp {
));

// Copy only the compiled binary from the builder stage.
error!(
"FIXME Should not be using score name here, instead should use name from Cargo.toml"
);
// TODO: Should not be using score name here, instead should use name from Cargo.toml
// https://git.nationtech.io/NationTech/harmony/issues/105
let binary_path_in_builder = format!("/app/target/release/{}", self.name);
let binary_path_in_final = format!("/usr/local/bin/{}", self.name);
dockerfile.push(
@@ -530,10 +526,7 @@ spec:
}

/// Packages a Helm chart directory into a .tgz file.
fn package_helm_chart(
&self,
chart_dir: &PathBuf,
) -> Result<PathBuf, Box<dyn std::error::Error>> {
fn package_helm_chart(&self, chart_dir: &Path) -> Result<PathBuf, Box<dyn std::error::Error>> {
let chart_dirname = chart_dir.file_name().expect("Should find a chart dirname");
debug!(
"Launching `helm package {}` cli with CWD {}",
@@ -546,14 +539,13 @@ spec:
);
let output = process::Command::new("helm")
.args(["package", chart_dirname.to_str().unwrap()])
.current_dir(&self.project_root.join(".harmony_generated").join("helm")) // Run package from the parent dir
.current_dir(self.project_root.join(".harmony_generated").join("helm")) // Run package from the parent dir
.output()?;

self.check_output(&output, "Failed to package Helm chart")?;

// Helm prints the path of the created chart to stdout.
let tgz_name = String::from_utf8(output.stdout)?
.trim()
.split_whitespace()
.last()
.unwrap_or_default()
@@ -573,7 +565,7 @@ spec:
/// Pushes a packaged Helm chart to an OCI registry.
fn push_helm_chart(
&self,
packaged_chart_path: &PathBuf,
packaged_chart_path: &Path,
) -> Result<String, Box<dyn std::error::Error>> {
// The chart name is the file stem of the .tgz file
let chart_file_name = packaged_chart_path.file_stem().unwrap().to_str().unwrap();
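Changing &PathBuf parameters to &Path (as in package_helm_chart and push_helm_chart above) is the usual clippy::ptr_arg fix: &Path accepts both owned and borrowed paths through deref coercion without forcing callers to hold a PathBuf. A small, standalone illustration:

use std::path::{Path, PathBuf};

// Borrowing &Path instead of &PathBuf lets callers pass either form.
fn chart_file_name(packaged_chart_path: &Path) -> Option<&str> {
    packaged_chart_path.file_stem()?.to_str()
}

fn main() {
    let owned: PathBuf = PathBuf::from(".harmony_generated/helm/example-chart-0.1.0.tgz");
    let borrowed: &Path = Path::new("example-chart-0.1.0.tgz");

    // &PathBuf coerces to &Path automatically; &Path is passed as-is.
    println!("{:?}", chart_file_name(&owned));
    println!("{:?}", chart_file_name(borrowed));
}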
Some files were not shown because too many files have changed in this diff.