Compare commits


3 Commits

Author    SHA1        Message                                                 Date
tahahawa  670b701f6a  use figment and try to make an "upgradeable firewall"  2025-07-16 02:12:58 -04:00
tahahawa  1eaae2016a  WIP: Create Upgradeable trait                           2025-07-15 00:16:57 -04:00
tahahawa  c4f4a58dcf  Add new fields for OPNSense 25.1                        2025-07-12 01:05:21 -04:00
249 changed files with 2422 additions and 19875 deletions

View File

@@ -9,7 +9,7 @@ jobs:
check:
runs-on: docker
container:
image: hub.nationtech.io/harmony/harmony_composer:latest
image: hub.nationtech.io/harmony/harmony_composer:latest@sha256:eb0406fcb95c63df9b7c4b19bc50ad7914dd8232ce98e9c9abef628e07c69386
steps:
- name: Checkout code
uses: actions/checkout@v4

View File

@@ -7,7 +7,7 @@ on:
jobs:
package_harmony_composer:
container:
image: hub.nationtech.io/harmony/harmony_composer:latest
image: hub.nationtech.io/harmony/harmony_composer:latest@sha256:eb0406fcb95c63df9b7c4b19bc50ad7914dd8232ce98e9c9abef628e07c69386
runs-on: dind
steps:
- name: Checkout code
@@ -45,14 +45,14 @@ jobs:
-H "Authorization: token ${{ secrets.GITEATOKEN }}" \
"https://git.nationtech.io/api/v1/repos/nationtech/harmony/releases/tags/snapshot-latest" \
| jq -r '.id // empty')
if [ -n "$RELEASE_ID" ]; then
# Delete existing release
curl -X DELETE \
-H "Authorization: token ${{ secrets.GITEATOKEN }}" \
"https://git.nationtech.io/api/v1/repos/nationtech/harmony/releases/$RELEASE_ID"
fi
# Create new release
RESPONSE=$(curl -X POST \
-H "Authorization: token ${{ secrets.GITEATOKEN }}" \
@@ -65,7 +65,7 @@ jobs:
"prerelease": true
}' \
"https://git.nationtech.io/api/v1/repos/nationtech/harmony/releases")
echo "RELEASE_ID=$(echo $RESPONSE | jq -r '.id')" >> $GITHUB_ENV
- name: Upload Linux binary
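
The release steps above replace the `snapshot-latest` release through the Gitea API: fetch the release id for the tag, delete that release if it exists, then create a fresh prerelease. A minimal sketch of the same flow in Rust with `reqwest`; the function name, error handling, and use of the `json` feature are illustrative only, not code from this repository:

```rust
use reqwest::Client;
use serde_json::json;

async fn replace_snapshot_release(token: &str) -> Result<(), reqwest::Error> {
    let base = "https://git.nationtech.io/api/v1/repos/nationtech/harmony/releases";
    let client = Client::new();
    let auth = format!("token {token}");

    // Look up an existing release for the tag (mirrors the curl + jq step above).
    let existing: serde_json::Value = client
        .get(format!("{base}/tags/snapshot-latest"))
        .header("Authorization", auth.as_str())
        .send()
        .await?
        .json()
        .await
        .unwrap_or(serde_json::Value::Null);

    // Delete the old release if one exists.
    if let Some(id) = existing.get("id").and_then(|v| v.as_i64()) {
        client
            .delete(format!("{base}/{id}"))
            .header("Authorization", auth.as_str())
            .send()
            .await?;
    }

    // Recreate it as a prerelease.
    client
        .post(base)
        .header("Authorization", auth.as_str())
        .json(&json!({ "tag_name": "snapshot-latest", "prerelease": true }))
        .send()
        .await?;

    Ok(())
}
```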

.gitignore vendored (30 lines changed)
View File

@@ -1,25 +1,5 @@
### General ###
private_repos/
### Harmony ###
harmony.log
### Helm ###
# Chart dependencies
**/charts/*.tgz
### Rust ###
# Generated by Cargo
# will have compiled files and executables
debug/
target/
# Remove Cargo.lock from gitignore if creating an executable, leave it for libraries
# More information here https://doc.rust-lang.org/cargo/guide/cargo-toml-vs-cargo-lock.html
Cargo.lock
# These are backup files generated by rustfmt
**/*.rs.bk
# MSVC Windows builds of rustc generate these, which store debugging information
*.pdb
target
private_repos
log/
*.tgz
.gitignore

View File

@@ -1,20 +0,0 @@
{
"db_name": "SQLite",
"query": "SELECT host_id FROM host_role_mapping WHERE role = ?",
"describe": {
"columns": [
{
"name": "host_id",
"ordinal": 0,
"type_info": "Text"
}
],
"parameters": {
"Right": 1
},
"nullable": [
false
]
},
"hash": "2ea29df2326f7c84bd4100ad510a3fd4878dc2e217dc83f9bf45a402dfd62a91"
}
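
The deleted `.sqlx` files in this range are sqlx's offline query cache: each entry records the column types, parameter count, and nullability for one compile-time-checked query. As a rough illustration, the `host_role_mapping` entry above corresponds to a `sqlx::query!` call along these lines; the function name and pool handling are assumptions, and `query!` needs either a live `DATABASE_URL` or this offline data to compile:

```rust
use sqlx::SqlitePool;

/// Illustrative only: return every host_id that carries the given role.
async fn hosts_with_role(pool: &SqlitePool, role: &str) -> Result<Vec<String>, sqlx::Error> {
    let rows = sqlx::query!("SELECT host_id FROM host_role_mapping WHERE role = ?", role)
        .fetch_all(pool)
        .await?;
    Ok(rows.into_iter().map(|r| r.host_id).collect())
}
```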

View File

@@ -1,32 +0,0 @@
{
"db_name": "SQLite",
"query": "\n SELECT\n p1.id,\n p1.version_id,\n p1.data as \"data: Json<PhysicalHost>\"\n FROM\n physical_hosts p1\n INNER JOIN (\n SELECT\n id,\n MAX(version_id) AS max_version\n FROM\n physical_hosts\n GROUP BY\n id\n ) p2 ON p1.id = p2.id AND p1.version_id = p2.max_version\n ",
"describe": {
"columns": [
{
"name": "id",
"ordinal": 0,
"type_info": "Text"
},
{
"name": "version_id",
"ordinal": 1,
"type_info": "Text"
},
{
"name": "data: Json<PhysicalHost>",
"ordinal": 2,
"type_info": "Blob"
}
],
"parameters": {
"Right": 0
},
"nullable": [
false,
false,
false
]
},
"hash": "8d247918eca10a88b784ee353db090c94a222115c543231f2140cba27bd0f067"
}

View File

@@ -1,32 +0,0 @@
{
"db_name": "SQLite",
"query": "SELECT id, version_id, data as \"data: Json<PhysicalHost>\" FROM physical_hosts WHERE id = ? ORDER BY version_id DESC LIMIT 1",
"describe": {
"columns": [
{
"name": "id",
"ordinal": 0,
"type_info": "Text"
},
{
"name": "version_id",
"ordinal": 1,
"type_info": "Text"
},
{
"name": "data: Json<PhysicalHost>",
"ordinal": 2,
"type_info": "Null"
}
],
"parameters": {
"Right": 1
},
"nullable": [
false,
false,
false
]
},
"hash": "934035c7ca6e064815393e4e049a7934b0a7fac04a4fe4b2a354f0443d630990"
}

View File

@@ -1,12 +0,0 @@
{
"db_name": "SQLite",
"query": "\n INSERT INTO host_role_mapping (host_id, role)\n VALUES (?, ?)\n ",
"describe": {
"columns": [],
"parameters": {
"Right": 2
},
"nullable": []
},
"hash": "df7a7c9cfdd0972e2e0ce7ea444ba8bc9d708a4fb89d5593a0be2bbebde62aff"
}

View File

@@ -1,12 +0,0 @@
{
"db_name": "SQLite",
"query": "INSERT INTO physical_hosts (id, version_id, data) VALUES (?, ?, ?)",
"describe": {
"columns": [],
"parameters": {
"Right": 3
},
"nullable": []
},
"hash": "f10f615ee42129ffa293e46f2f893d65a237d31d24b74a29c6a8d8420d255ab8"
}

Cargo.lock generated (1873 lines changed)

File diff suppressed because it is too large.

View File

@@ -12,9 +12,6 @@ members = [
"harmony_cli",
"k3d",
"harmony_composer",
"harmony_inventory_agent",
"harmony_secret_derive",
"harmony_secret", "adr/agent_discovery/mdns",
]
[workspace.package]
@@ -23,7 +20,7 @@ readme = "README.md"
license = "GNU AGPL v3"
[workspace.dependencies]
log = { version = "0.4", features = ["kv"] }
log = "0.4"
env_logger = "0.11"
derive-new = "0.7"
async-trait = "0.1"
@@ -36,7 +33,7 @@ tokio = { version = "1.40", features = [
cidr = { features = ["serde"], version = "0.2" }
russh = "0.45"
russh-keys = "0.45"
rand = "0.9"
rand = "0.8"
url = "2.5"
kube = { version = "1.1.0", features = [
"config",
@@ -56,15 +53,7 @@ chrono = "0.4"
similar = "2"
uuid = { version = "1.11", features = ["v4", "fast-rng", "macro-diagnostics"] }
pretty_assertions = "1.4.1"
tempfile = "3.20.0"
bollard = "0.19.1"
base64 = "0.22.1"
tar = "0.4.44"
lazy_static = "1.5.0"
directories = "6.0.0"
thiserror = "2.0.14"
serde = { version = "1.0.209", features = ["derive", "rc"] }
serde_json = "1.0.127"
askama = "0.14"
sqlx = { version = "0.8", features = ["runtime-tokio", "sqlite" ] }
reqwest = { version = "0.12", features = ["blocking", "stream", "rustls-tls", "http2", "json"], default-features = false }
figment = { version = "0.10.19", features = ["env"] }
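
The `figment = { version = "0.10.19", features = ["env"] }` line above ties to the "use figment" commit in this range. A minimal sketch of env-based configuration with figment; the `HARMONY_` prefix and `FirewallAuth` struct are assumptions, not taken from the repository:

```rust
use figment::{Figment, providers::Env};
use serde::Deserialize;

// Hypothetical config struct; field names are not from the repo.
#[derive(Debug, Deserialize)]
struct FirewallAuth {
    username: String,
    password: String,
}

fn load_firewall_auth() -> Result<FirewallAuth, figment::Error> {
    // Reads HARMONY_USERNAME / HARMONY_PASSWORD from the environment.
    Figment::new()
        .merge(Env::prefixed("HARMONY_"))
        .extract()
}
```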

View File

@@ -1,4 +1,4 @@
FROM docker.io/rust:1.89.0 AS build
FROM docker.io/rust:1.87.0 AS build
WORKDIR /app
@@ -6,14 +6,13 @@ COPY . .
RUN cargo build --release --bin harmony_composer
FROM docker.io/rust:1.89.0
FROM docker.io/rust:1.87.0
WORKDIR /app
RUN rustup target add x86_64-pc-windows-gnu
RUN rustup target add x86_64-unknown-linux-gnu
RUN rustup component add rustfmt
RUN rustup component add clippy
RUN apt update
@@ -23,4 +22,4 @@ RUN apt install -y nodejs docker.io mingw-w64
COPY --from=build /app/target/release/harmony_composer .
ENTRYPOINT ["/app/harmony_composer"]
ENTRYPOINT ["/app/harmony_composer"]

View File

@@ -1,6 +1,5 @@
# Harmony : Open-source infrastructure orchestration that treats your platform like first-class code
_By [NationTech](https://nationtech.io)_
# Harmony : Open-source infrastructure orchestration that treats your platform like first-class code.
*By [NationTech](https://nationtech.io)*
[![Build](https://git.nationtech.io/NationTech/harmony/actions/workflows/check.yml/badge.svg)](https://git.nationtech.io/nationtech/harmony)
[![License](https://img.shields.io/badge/license-AGPLv3-blue?style=flat-square)](LICENSE)
@@ -24,11 +23,11 @@ From a **developer laptop** to a **global production cluster**, a single **sourc
Infrastructure is essential, but it shouldn't be your core business. Harmony is built on three guiding principles that make modern platforms reliable, repeatable, and easy to reason about.
| Principle | What it means for you |
| -------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
| **Infrastructure as Resilient Code** | Replace sprawling YAML and bash scripts with type-safe Rust. Test, refactor, and version your platform just like application code. |
| **Prove It Works — Before You Deploy** | Harmony uses the compiler to verify that your application's needs match the target environment's capabilities at **compile-time**, eliminating an entire class of runtime outages. |
| **One Unified Model** | Software and infrastructure are a single system. Harmony models them together, enabling deep automation—from bare-metal servers to Kubernetes workloads—with zero context switching. |
| Principle | What it means for you |
|-----------|-----------------------|
| **Infrastructure as Resilient Code** | Replace sprawling YAML and bash scripts with type-safe Rust. Test, refactor, and version your platform just like application code. |
| **Prove It Works — Before You Deploy** | Harmony uses the compiler to verify that your application's needs match the target environment's capabilities at **compile-time**, eliminating an entire class of runtime outages. |
| **One Unified Model** | Software and infrastructure are a single system. Harmony models them together, enabling deep automation—from bare-metal servers to Kubernetes workloads—with zero context switching. |
These principles surface as simple, ergonomic Rust APIs that let teams focus on their product while trusting the platform underneath.
@@ -64,20 +63,22 @@ async fn main() {
},
};
// 2. Enhance with extra scores (monitoring, CI/CD, …)
// 2. Pick where it should run
let mut maestro = Maestro::<K8sAnywhereTopology>::initialize(
Inventory::autoload(), // auto-detect hardware / kube-config
K8sAnywhereTopology::from_env(), // local k3d, CI, staging, prod…
)
.await
.unwrap();
// 3. Enhance with extra scores (monitoring, CI/CD, …)
let mut monitoring = MonitoringAlertingStackScore::new();
monitoring.namespace = Some(lamp_stack.config.namespace.clone());
// 3. Run your scores on the desired topology & inventory
harmony_cli::run(
Inventory::autoload(), // auto-detect hardware / kube-config
K8sAnywhereTopology::from_env(), // local k3d, CI, staging, prod…
vec![
Box::new(lamp_stack),
Box::new(monitoring)
],
None
).await.unwrap();
maestro.register_all(vec![Box::new(lamp_stack), Box::new(monitoring)]);
// 4. Launch an interactive CLI / TUI
harmony_cli::init(maestro, None).await.unwrap();
}
```
@@ -93,13 +94,13 @@ Harmony analyses the code, shows an execution plan in a TUI, and applies it once
## 3 · Core Concepts
| Term | One-liner |
| ---------------- | ---------------------------------------------------------------------------------------------------- |
| **Score<T>** | Declarative description of the desired state (e.g., `LAMPScore`). |
| **Interpret<T>** | Imperative logic that realises a `Score` on a specific environment. |
| **Topology** | An environment (local k3d, AWS, bare-metal) exposing verified _Capabilities_ (Kubernetes, DNS, …). |
| **Maestro** | Orchestrator that compiles Scores + Topology, ensuring all capabilities line up **at compile-time**. |
| **Inventory** | Optional catalogue of physical assets for bare-metal and edge deployments. |
| Term | One-liner |
|------|-----------|
| **Score<T>** | Declarative description of the desired state (e.g., `LAMPScore`). |
| **Interpret<T>** | Imperative logic that realises a `Score` on a specific environment. |
| **Topology** | An environment (local k3d, AWS, bare-metal) exposing verified *Capabilities* (Kubernetes, DNS, …). |
| **Maestro** | Orchestrator that compiles Scores + Topology, ensuring all capabilities line up **at compile-time**. |
| **Inventory** | Optional catalogue of physical assets for bare-metal and edge deployments. |
A visual overview is in the diagram below.
@@ -111,9 +112,9 @@ A visual overview is in the diagram below.
Prerequisites:
- Rust
- Docker (if you deploy locally)
- `kubectl` / `helm` for Kubernetes-based topologies
* Rust
* Docker (if you deploy locally)
* `kubectl` / `helm` for Kubernetes-based topologies
```bash
git clone https://git.nationtech.io/nationtech/harmony
@@ -125,15 +126,15 @@ cargo build --release # builds the CLI, TUI and libraries
## 5 · Learning More
- **Architectural Decision Records** dive into the rationale
- [ADR-001 · Why Rust](adr/001-rust.md)
- [ADR-003 · Infrastructure Abstractions](adr/003-infrastructure-abstractions.md)
- [ADR-006 · Secret Management](adr/006-secret-management.md)
* **Architectural Decision Records** dive into the rationale
- [ADR-001 · Why Rust](adr/001-rust.md)
- [ADR-003 · Infrastructure Abstractions](adr/003-infrastructure-abstractions.md)
- [ADR-006 · Secret Management](adr/006-secret-management.md)
- [ADR-011 · Multi-Tenant Cluster](adr/011-multi-tenant-cluster.md)
- **Extending Harmony** write new Scores / Interprets, add hardware like OPNsense firewalls, or embed Harmony in your own tooling (`/docs`).
* **Extending Harmony** write new Scores / Interprets, add hardware like OPNsense firewalls, or embed Harmony in your own tooling (`/docs`).
- **Community** discussions and roadmap live in [GitLab issues](https://git.nationtech.io/nationtech/harmony/-/issues). PRs, ideas, and feedback are welcome!
* **Community** discussions and roadmap live in [GitLab issues](https://git.nationtech.io/nationtech/harmony/-/issues). PRs, ideas, and feedback are welcome!
---
@@ -147,4 +148,4 @@ See [LICENSE](LICENSE) for the full text.
---
_Made with ❤️ & 🦀 by the NationTech and the Harmony community_
*Made with ❤️ & 🦀 by the NationTech and the Harmony community*

View File

@@ -1,17 +0,0 @@
[package]
name = "mdns"
edition = "2024"
version.workspace = true
readme.workspace = true
license.workspace = true
[dependencies]
mdns-sd = "0.14"
tokio = { version = "1", features = ["full"] }
futures = "0.3"
dmidecode = "0.2" # For getting the motherboard ID on the agent
log.workspace=true
env_logger.workspace=true
clap = { version = "4.5.46", features = ["derive"] }
get_if_addrs = "0.5.3"
local-ip-address = "0.6.5"

View File

@@ -1,60 +0,0 @@
// harmony-agent/src/main.rs
use log::info;
use mdns_sd::{ServiceDaemon, ServiceInfo};
use std::collections::HashMap;
use crate::SERVICE_TYPE;
// The service we are advertising.
const SERVICE_PORT: u16 = 43210; // A port for the service. It needs one, even if unused.
pub async fn advertise() {
info!("Starting Harmony Agent...");
// Get a unique ID for this machine.
let motherboard_id = "some motherboard id";
let instance_name = format!("harmony-agent-{}", motherboard_id);
info!("This agent's instance name: {}", instance_name);
info!("Advertising with ID: {}", motherboard_id);
// Create a new mDNS daemon.
let mdns = ServiceDaemon::new().expect("Failed to create mDNS daemon");
// Create a TXT record HashMap to hold our metadata.
let mut properties = HashMap::new();
properties.insert("id".to_string(), motherboard_id.to_string());
properties.insert("version".to_string(), "1.0".to_string());
// Create the service information.
// The instance name should be unique on the network.
let local_ip = local_ip_address::local_ip().unwrap();
let service_info = ServiceInfo::new(
SERVICE_TYPE,
&instance_name,
"harmony-host.local.", // A hostname for the service
local_ip,
// "0.0.0.0",
SERVICE_PORT,
Some(properties),
)
.expect("Failed to create service info");
// Register our service with the daemon.
mdns.register(service_info)
.expect("Failed to register service");
info!(
"Service '{}' registered and now being advertised.",
instance_name
);
info!("Agent is running. Press Ctrl+C to exit.");
for iface in get_if_addrs::get_if_addrs().unwrap() {
println!("{:#?}", iface);
}
// Keep the agent running indefinitely.
tokio::signal::ctrl_c().await.unwrap();
info!("Shutting down agent.");
}

View File

@@ -1,110 +0,0 @@
use log::debug;
use mdns_sd::{ServiceDaemon, ServiceEvent};
use crate::SERVICE_TYPE;
pub async fn discover() {
println!("Starting Harmony Master and browsing for agents...");
// Create a new mDNS daemon.
let mdns = ServiceDaemon::new().expect("Failed to create mDNS daemon");
// Start browsing for the service type.
// The receiver will be a stream of events.
let receiver = mdns.browse(SERVICE_TYPE).expect("Failed to browse");
println!(
"Listening for mDNS events for '{}'. Press Ctrl+C to exit.",
SERVICE_TYPE
);
std::thread::spawn(move || {
while let Ok(event) = receiver.recv() {
match event {
ServiceEvent::ServiceData(resolved) => {
println!("Resolved a new service: {}", resolved.fullname);
}
other_event => {
println!("Received other event: {:?}", &other_event);
}
}
}
});
// Gracefully shutdown the daemon.
std::thread::sleep(std::time::Duration::from_secs(1000000));
mdns.shutdown().unwrap();
// Process events as they come in.
// while let Ok(event) = receiver.recv_async().await {
// debug!("Received event {event:?}");
// // match event {
// // ServiceEvent::ServiceFound(svc_type, fullname) => {
// // println!("\n--- Agent Discovered ---");
// // println!(" Service Name: {}", fullname());
// // // You can now resolve this service to get its IP, port, and TXT records
// // // The resolve operation is a separate network call.
// // let receiver = mdns.browse(info.get_fullname()).unwrap();
// // if let Ok(resolve_event) = receiver.recv_timeout(Duration::from_secs(2)) {
// // if let ServiceEvent::ServiceResolved(info) = resolve_event {
// // let ip = info.get_addresses().iter().next().unwrap();
// // let port = info.get_port();
// // let motherboard_id = info.get_property("id").map_or("N/A", |v| v.val_str());
// //
// // println!(" IP: {}:{}", ip, port);
// // println!(" Motherboard ID: {}", motherboard_id);
// // println!("------------------------");
// //
// // // TODO: Add this agent to your central list of discovered hosts.
// // }
// // } else {
// // println!("Could not resolve service '{}' in time.", info.get_fullname());
// // }
// // }
// // ServiceEvent::ServiceRemoved(info) => {
// // println!("\n--- Agent Removed ---");
// // println!(" Service Name: {}", info.get_fullname());
// // println!("---------------------");
// // // TODO: Remove this agent from your list.
// // }
// // _ => {
// // // We don't care about other event types for this example
// // }
// // }
// }
}
async fn discover_example() {
use mdns_sd::{ServiceDaemon, ServiceEvent};
// Create a daemon
let mdns = ServiceDaemon::new().expect("Failed to create daemon");
// Use recently added `ServiceEvent::ServiceData`.
mdns.use_service_data(true)
.expect("Failed to use ServiceData");
// Browse for a service type.
let service_type = "_mdns-sd-my-test._udp.local.";
let receiver = mdns.browse(service_type).expect("Failed to browse");
// Receive the browse events in sync or async. Here is
// an example of using a thread. Users can call `receiver.recv_async().await`
// if running in async environment.
std::thread::spawn(move || {
while let Ok(event) = receiver.recv() {
match event {
ServiceEvent::ServiceData(resolved) => {
println!("Resolved a new service: {}", resolved.fullname);
}
other_event => {
println!("Received other event: {:?}", &other_event);
}
}
}
});
// Gracefully shutdown the daemon.
std::thread::sleep(std::time::Duration::from_secs(1));
mdns.shutdown().unwrap();
}

View File

@@ -1,31 +0,0 @@
use clap::{Parser, ValueEnum};
mod advertise;
mod discover;
#[derive(Parser, Debug)]
#[command(version, about, long_about = None)]
struct Args {
#[arg(value_enum)]
profile: Profiles,
}
#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, ValueEnum)]
enum Profiles {
Advertise,
Discover,
}
// The service type we are looking for.
const SERVICE_TYPE: &str = "_harmony._tcp.local.";
#[tokio::main]
async fn main() {
env_logger::init();
let args = Args::parse();
match args.profile {
Profiles::Advertise => advertise::advertise().await,
Profiles::Discover => discover::discover().await,
}
}

View File

@@ -1,8 +1,5 @@
#!/bin/sh
set -e
rustc --version
cargo check --all-targets --all-features --keep-going
cargo fmt --check
cargo clippy
cargo test

View File

@@ -1,8 +0,0 @@
Here lies all the data files required for an OKD cluster PXE boot setup.
This includes ISO files, binary boot files, ipxe, etc.
TODO as of august 2025 :
- `harmony_inventory_agent` should be downloaded from official releases, this embedded version is practical for now though
- The cluster ssh key should be generated and handled by harmony with the private key saved in a secret store

View File

@@ -1,9 +0,0 @@
harmony_inventory_agent filter=lfs diff=lfs merge=lfs -text
os filter=lfs diff=lfs merge=lfs -text
os/centos-stream-9 filter=lfs diff=lfs merge=lfs -text
os/centos-stream-9/images filter=lfs diff=lfs merge=lfs -text
os/centos-stream-9/initrd.img filter=lfs diff=lfs merge=lfs -text
os/centos-stream-9/vmlinuz filter=lfs diff=lfs merge=lfs -text
os/centos-stream-9/images/efiboot.img filter=lfs diff=lfs merge=lfs -text
os/centos-stream-9/images/install.img filter=lfs diff=lfs merge=lfs -text
os/centos-stream-9/images/pxeboot filter=lfs diff=lfs merge=lfs -text

View File

@@ -1 +0,0 @@
ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIBx6bDylvC68cVpjKfEFtLQJ/dOFi6PVS2vsIOqPDJIc jeangab@liliane2

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

View File

@@ -1,108 +0,0 @@
# OPNsense PXE Lab Environment
This project contains a script to automatically set up a virtual lab environment for testing PXE boot services managed by an OPNsense firewall.
## Overview
The `pxe_vm_lab_setup.sh` script will create the following resources using libvirt/KVM:
1. **A Virtual Network**: An isolated network named `harmonylan` (`virbr1`) for the lab.
2. **Two Virtual Machines**:
* `opnsense-pxe`: A firewall VM that will act as the gateway and PXE server.
* `pxe-node-1`: A client VM configured to boot from the network.
## Prerequisites
Ensure you have the following software installed on your Arch Linux host:
* `libvirt`
* `qemu`
* `virt-install` (from the `virt-install` package)
* `curl`
* `bzip2`
## Usage
### 1. Create the Environment
Run the `up` command to download the necessary images and create the network and VMs.
```bash
sudo ./pxe_vm_lab_setup.sh up
```
### 2. Install and Configure OPNsense
The OPNsense VM is created but the OS needs to be installed manually via the console.
1. **Connect to the VM console**:
```bash
sudo virsh console opnsense-pxe
```
2. **Log in as the installer**:
* Username: `installer`
* Password: `opnsense`
3. **Follow the on-screen installation wizard**. When prompted to assign network interfaces (`WAN` and `LAN`):
* Find the MAC address for the `harmonylan` interface by running this command in another terminal:
```bash
virsh domiflist opnsense-pxe
# Example output:
# Interface Type Source Model MAC
# ---------------------------------------------------------
# vnet18 network default virtio 52:54:00:b5:c4:6d
# vnet19 network harmonylan virtio 52:54:00:21:f9:ba
```
* Assign the interface connected to `harmonylan` (e.g., `vtnet1` with MAC `52:54:00:21:f9:ba`) as your **LAN**.
* Assign the other interface as your **WAN**.
4. After the installation is complete, **shut down** the VM from the console menu.
5. **Detach the installation media** by editing the VM's configuration:
```bash
sudo virsh edit opnsense-pxe
```
Find and **delete** the entire `<disk>` block corresponding to the `.img` file (the one with `<target ... bus='usb'/>`).
6. **Start the VM** to boot into the newly installed system:
```bash
sudo virsh start opnsense-pxe
```
### 3. Connect to OPNsense from Your Host
To configure OPNsense, you need to connect your host to the `harmonylan` network.
1. By default, OPNsense configures its LAN interface with the IP `192.168.1.1`.
2. Assign a compatible IP address to your host's `virbr1` bridge interface:
```bash
sudo ip addr add 192.168.1.5/24 dev virbr1
```
3. You can now access the OPNsense VM from your host:
* **SSH**: `ssh root@192.168.1.1` (password: `opnsense`)
* **Web UI**: `https://192.168.1.1`
### 4. Configure PXE Services with Harmony
With connectivity established, you can now use Harmony to configure the OPNsense firewall for PXE booting. Point your Harmony OPNsense scores to the firewall using these details:
* **Hostname/IP**: `192.168.1.1`
* **Credentials**: `root` / `opnsense`
### 5. Boot the PXE Client
Once your Harmony configuration has been applied and OPNsense is serving DHCP/TFTP, start the client VM. It will automatically attempt to boot from the network.
```bash
sudo virsh start pxe-node-1
sudo virsh console pxe-node-1
```
## Cleanup
To destroy all VMs and networks created by the script, run the `clean` command:
```bash
sudo ./pxe_vm_lab_setup.sh clean
```
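
For step 4, a minimal sketch of what pointing Harmony at this lab firewall can look like, mirroring the `get_topology()` helpers that appear later in this comparison; the hard-coded `root` / `opnsense` credentials are only the lab defaults described above (real setups load them through `SecretManager` instead):

```rust
use std::sync::Arc;

use harmony::{infra::opnsense::OPNSenseFirewall, topology::LogicalHost};
use harmony_macros::ip;

// Lab-only credentials; production code should pull these from a secret store.
async fn lab_firewall() -> Arc<OPNSenseFirewall> {
    let host = LogicalHost {
        ip: ip!("192.168.1.1"),
        name: String::from("opnsense-pxe"),
    };
    Arc::new(OPNSenseFirewall::new(host, None, "root", "opnsense").await)
}
```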

View File

@@ -1,191 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
# --- Configuration ---
LAB_DIR="/var/lib/harmony_pxe_test"
IMG_DIR="${LAB_DIR}/images"
STATE_DIR="${LAB_DIR}/state"
VM_OPN="opnsense-pxe"
VM_PXE="pxe-node-1"
NET_HARMONYLAN="harmonylan"
# Network settings for the isolated LAN
VLAN_CIDR="192.168.150.0/24"
VLAN_GW="192.168.150.1"
VLAN_MASK="255.255.255.0"
# VM Specifications
RAM_OPN="2048"
VCPUS_OPN="2"
DISK_OPN_GB="10"
OS_VARIANT_OPN="freebsd14.0" # Updated to a more recent FreeBSD variant
RAM_PXE="4096"
VCPUS_PXE="2"
DISK_PXE_GB="40"
OS_VARIANT_LINUX="centos-stream9"
OPN_IMG_URL="https://mirror.ams1.nl.leaseweb.net/opnsense/releases/25.7/OPNsense-25.7-serial-amd64.img.bz2"
OPN_IMG_PATH="${IMG_DIR}/OPNsense-25.7-serial-amd64.img"
CENTOS_ISO_URL="https://mirror.stream.centos.org/9-stream/BaseOS/x86_64/os/images/boot.iso"
CENTOS_ISO_PATH="${IMG_DIR}/CentOS-Stream-9-latest-boot.iso"
CONNECT_URI="qemu:///system"
download_if_missing() {
local url="$1"
local dest="$2"
if [[ ! -f "$dest" ]]; then
echo "Downloading $url to $dest"
mkdir -p "$(dirname "$dest")"
local tmp
tmp="$(mktemp)"
curl -L --progress-bar "$url" -o "$tmp"
case "$url" in
*.bz2) bunzip2 -c "$tmp" > "$dest" && rm -f "$tmp" ;;
*) mv "$tmp" "$dest" ;;
esac
else
echo "Already present: $dest"
fi
}
# Ensures a libvirt network is defined and active
ensure_network() {
local net_name="$1"
local net_xml_path="$2"
if virsh --connect "${CONNECT_URI}" net-info "${net_name}" >/dev/null 2>&1; then
echo "Network ${net_name} already exists."
else
echo "Defining network ${net_name} from ${net_xml_path}"
virsh --connect "${CONNECT_URI}" net-define "${net_xml_path}"
fi
if ! virsh --connect "${CONNECT_URI}" net-info "${net_name}" | grep "Active: *yes"; then
echo "Starting network ${net_name}..."
virsh --connect "${CONNECT_URI}" net-start "${net_name}"
virsh --connect "${CONNECT_URI}" net-autostart "${net_name}"
fi
}
# Destroys a VM completely
destroy_vm() {
local vm_name="$1"
if virsh --connect "${CONNECT_URI}" dominfo "$vm_name" >/dev/null 2>&1; then
echo "Destroying and undefining VM: ${vm_name}"
virsh --connect "${CONNECT_URI}" destroy "$vm_name" || true
virsh --connect "${CONNECT_URI}" undefine "$vm_name" --nvram
fi
}
# Destroys a libvirt network
destroy_network() {
local net_name="$1"
if virsh --connect "${CONNECT_URI}" net-info "$net_name" >/dev/null 2>&1; then
echo "Destroying and undefining network: ${net_name}"
virsh --connect "${CONNECT_URI}" net-destroy "$net_name" || true
virsh --connect "${CONNECT_URI}" net-undefine "$net_name"
fi
}
# --- Main Logic ---
create_lab_environment() {
# Create network definition files
cat > "${STATE_DIR}/default.xml" <<EOF
<network>
<name>default</name>
<forward mode='nat'/>
<bridge name='virbr0' stp='on' delay='0'/>
<ip address='192.168.122.1' netmask='255.255.255.0'>
<dhcp>
<range start='192.168.122.100' end='192.168.122.200'/>
</dhcp>
</ip>
</network>
EOF
cat > "${STATE_DIR}/${NET_HARMONYLAN}.xml" <<EOF
<network>
<name>${NET_HARMONYLAN}</name>
<bridge name='virbr1' stp='on' delay='0'/>
</network>
EOF
# Ensure both networks exist and are active
ensure_network "default" "${STATE_DIR}/default.xml"
ensure_network "${NET_HARMONYLAN}" "${STATE_DIR}/${NET_HARMONYLAN}.xml"
# --- Create OPNsense VM (MODIFIED SECTION) ---
local disk_opn="${IMG_DIR}/${VM_OPN}.qcow2"
if [[ ! -f "$disk_opn" ]]; then
qemu-img create -f qcow2 "$disk_opn" "${DISK_OPN_GB}G"
fi
echo "Creating OPNsense VM using serial image..."
virt-install \
--connect "${CONNECT_URI}" \
--name "${VM_OPN}" \
--ram "${RAM_OPN}" \
--vcpus "${VCPUS_OPN}" \
--cpu host-passthrough \
--os-variant "${OS_VARIANT_OPN}" \
--graphics none \
--noautoconsole \
--disk path="${disk_opn}",device=disk,bus=virtio,boot.order=1 \
--disk path="${OPN_IMG_PATH}",device=disk,bus=usb,readonly=on,boot.order=2 \
--network network=default,model=virtio \
--network network="${NET_HARMONYLAN}",model=virtio \
--boot uefi,menu=on
echo "OPNsense VM created. Connect with: sudo virsh console ${VM_OPN}"
echo "The VM will boot from the serial installation image."
echo "Login with user 'installer' and password 'opnsense' to start the installation."
echo "Install onto the VirtIO disk (vtbd0)."
echo "After installation, shutdown the VM, then run 'sudo virsh edit ${VM_OPN}' and remove the USB disk block to boot from the installed system."
# --- Create PXE Client VM ---
local disk_pxe="${IMG_DIR}/${VM_PXE}.qcow2"
if [[ ! -f "$disk_pxe" ]]; then
qemu-img create -f qcow2 "$disk_pxe" "${DISK_PXE_GB}G"
fi
echo "Creating PXE client VM..."
virt-install \
--connect "${CONNECT_URI}" \
--name "${VM_PXE}" \
--ram "${RAM_PXE}" \
--vcpus "${VCPUS_PXE}" \
--cpu host-passthrough \
--os-variant "${OS_VARIANT_LINUX}" \
--graphics none \
--noautoconsole \
--disk path="${disk_pxe}",format=qcow2,bus=virtio \
--network network="${NET_HARMONYLAN}",model=virtio \
--pxe \
--boot uefi,menu=on
echo "PXE VM created. It will attempt to netboot on ${NET_HARMONYLAN}."
}
# --- Script Entrypoint ---
case "${1:-}" in
up)
mkdir -p "${IMG_DIR}" "${STATE_DIR}"
download_if_missing "$OPN_IMG_URL" "$OPN_IMG_PATH"
download_if_missing "$CENTOS_ISO_URL" "$CENTOS_ISO_PATH"
create_lab_environment
echo "Lab setup complete. Use 'sudo virsh list --all' to see VMs."
;;
clean)
destroy_vm "${VM_PXE}"
destroy_vm "${VM_OPN}"
destroy_network "${NET_HARMONYLAN}"
# Optionally destroy the default network if you want a full reset
# destroy_network "default"
echo "Cleanup complete."
;;
*)
echo "Usage: sudo $0 {up|clean}"
exit 1
;;
esac

View File

@@ -1,15 +0,0 @@
[package]
name = "example-application-monitoring-with-tenant"
edition = "2024"
version.workspace = true
readme.workspace = true
license.workspace = true
[dependencies]
env_logger.workspace = true
harmony = { path = "../../harmony" }
harmony_cli = { path = "../../harmony_cli" }
harmony_types = { path = "../../harmony_types" }
logging = "0.1.0"
tokio.workspace = true
url.workspace = true

View File

@@ -1,56 +0,0 @@
use std::{path::PathBuf, str::FromStr, sync::Arc};
use harmony::{
inventory::Inventory,
modules::{
application::{ApplicationScore, RustWebFramework, RustWebapp, features::Monitoring},
monitoring::alert_channel::webhook_receiver::WebhookReceiver,
tenant::TenantScore,
},
topology::{K8sAnywhereTopology, tenant::TenantConfig},
};
use harmony_types::id::Id;
use harmony_types::net::Url;
#[tokio::main]
async fn main() {
//TODO there is a bug where the application is deployed into the namespace matching the
//application name and the tenant is created in the namespace matching the tenant name.
//In order for the application to be deployed in the tenant namespace, the application.name and
//the TenantConfig.name must match.
let tenant = TenantScore {
config: TenantConfig {
id: Id::from_str("test-tenant-id").unwrap(),
name: "example-monitoring".to_string(),
..Default::default()
},
};
let application = Arc::new(RustWebapp {
name: "example-monitoring".to_string(),
domain: Url::Url(url::Url::parse("https://rustapp.harmony.example.com").unwrap()),
project_root: PathBuf::from("./examples/rust/webapp"),
framework: Some(RustWebFramework::Leptos),
});
let webhook_receiver = WebhookReceiver {
name: "sample-webhook-receiver".to_string(),
url: Url::Url(url::Url::parse("https://webhook-doesnt-exist.com").unwrap()),
};
let app = ApplicationScore {
features: vec![Box::new(Monitoring {
alert_receiver: vec![Box::new(webhook_receiver)],
application: application.clone(),
})],
application,
};
harmony_cli::run(
Inventory::autoload(),
K8sAnywhereTopology::from_env(),
vec![Box::new(tenant), Box::new(app)],
None,
)
.await
.unwrap();
}

View File

@@ -1,27 +1,20 @@
use harmony::{
inventory::Inventory,
modules::{
dummy::{ErrorScore, PanicScore, SuccessScore},
inventory::LaunchDiscoverInventoryAgentScore,
},
maestro::Maestro,
modules::dummy::{ErrorScore, PanicScore, SuccessScore},
topology::LocalhostTopology,
};
#[tokio::main]
async fn main() {
harmony_cli::run(
Inventory::autoload(),
LocalhostTopology::new(),
vec![
Box::new(SuccessScore {}),
Box::new(ErrorScore {}),
Box::new(PanicScore {}),
Box::new(LaunchDiscoverInventoryAgentScore {
discovery_timeout: Some(10),
}),
],
None,
)
.await
.unwrap();
let inventory = Inventory::autoload();
let topology = LocalhostTopology::new();
let mut maestro = Maestro::initialize(inventory, topology).await.unwrap();
maestro.register_all(vec![
Box::new(SuccessScore {}),
Box::new(ErrorScore {}),
Box::new(PanicScore {}),
]);
harmony_cli::init(maestro, None).await.unwrap();
}

View File

@@ -125,47 +125,40 @@ spec:
name: nginx"#,
)
.unwrap();
deployment
return deployment;
}
fn nginx_deployment_2() -> Deployment {
let pod_template = PodTemplateSpec {
metadata: Some(ObjectMeta {
labels: Some(BTreeMap::from([(
"app".to_string(),
"nginx-test".to_string(),
)])),
let mut pod_template = PodTemplateSpec::default();
pod_template.metadata = Some(ObjectMeta {
labels: Some(BTreeMap::from([(
"app".to_string(),
"nginx-test".to_string(),
)])),
..Default::default()
});
pod_template.spec = Some(PodSpec {
containers: vec![Container {
name: "nginx".to_string(),
image: Some("nginx".to_string()),
..Default::default()
}),
spec: Some(PodSpec {
containers: vec![Container {
name: "nginx".to_string(),
image: Some("nginx".to_string()),
..Default::default()
}],
..Default::default()
}),
}],
..Default::default()
});
let mut spec = DeploymentSpec::default();
spec.template = pod_template;
spec.selector = LabelSelector {
match_expressions: None,
match_labels: Some(BTreeMap::from([(
"app".to_string(),
"nginx-test".to_string(),
)])),
};
let spec = DeploymentSpec {
template: pod_template,
selector: LabelSelector {
match_expressions: None,
match_labels: Some(BTreeMap::from([(
"app".to_string(),
"nginx-test".to_string(),
)])),
},
..Default::default()
};
let mut deployment = Deployment::default();
deployment.spec = Some(spec);
deployment.metadata.name = Some("nginx-test".to_string());
Deployment {
spec: Some(spec),
metadata: ObjectMeta {
name: Some("nginx-test".to_string()),
..Default::default()
},
..Default::default()
}
deployment
}
fn nginx_deployment() -> Deployment {

View File

@@ -1,10 +1,10 @@
use harmony::{
data::Version,
inventory::Inventory,
maestro::Maestro,
modules::lamp::{LAMPConfig, LAMPScore},
topology::K8sAnywhereTopology,
topology::{K8sAnywhereTopology, Url},
};
use harmony_types::net::Url;
#[tokio::main]
async fn main() {
@@ -24,7 +24,7 @@ async fn main() {
// This config can be extended as needed for more complicated configurations
config: LAMPConfig {
project_root: "./php".into(),
database_size: "4Gi".to_string().into(),
database_size: format!("4Gi").into(),
..Default::default()
},
};
@@ -43,13 +43,15 @@ async fn main() {
// K8sAnywhereTopology as it is the most automatic one that enables you to easily deploy
// locally, to development environment from a CI, to staging, and to production with settings
// that automatically adapt to each environment grade.
harmony_cli::run(
let mut maestro = Maestro::<K8sAnywhereTopology>::initialize(
Inventory::autoload(),
K8sAnywhereTopology::from_env(),
vec![Box::new(lamp_stack)],
None,
)
.await
.unwrap();
maestro.register_all(vec![Box::new(lamp_stack)]);
// Here we bootstrap the CLI, this gives some nice features if you need them
harmony_cli::init(maestro, None).await.unwrap();
}
// That's it, end of the infra as code.

View File

@@ -6,9 +6,8 @@ readme.workspace = true
license.workspace = true
[dependencies]
harmony = { path = "../../harmony" }
harmony_cli = { path = "../../harmony_cli" }
harmony_macros = { path = "../../harmony_macros" }
harmony_types = { path = "../../harmony_types" }
harmony = { version = "0.1.0", path = "../../harmony" }
harmony_cli = { version = "0.1.0", path = "../../harmony_cli" }
harmony_macros = { version = "0.1.0", path = "../../harmony_macros" }
tokio.workspace = true
url.workspace = true

View File

@@ -2,6 +2,7 @@ use std::collections::HashMap;
use harmony::{
inventory::Inventory,
maestro::Maestro,
modules::{
monitoring::{
alert_channel::discord_alert_channel::DiscordWebhook,
@@ -22,9 +23,8 @@ use harmony::{
k8s::pvc::high_pvc_fill_rate_over_two_days,
},
},
topology::K8sAnywhereTopology,
topology::{K8sAnywhereTopology, Url},
};
use harmony_types::net::Url;
#[tokio::main]
async fn main() {
@@ -51,8 +51,8 @@ async fn main() {
let service_monitor_endpoint = ServiceMonitorEndpoint {
port: Some("80".to_string()),
path: Some("/metrics".to_string()),
scheme: Some(HTTPScheme::HTTP),
path: "/metrics".to_string(),
scheme: HTTPScheme::HTTP,
..Default::default()
};
@@ -74,13 +74,13 @@ async fn main() {
rules: vec![Box::new(additional_rules), Box::new(additional_rules2)],
service_monitors: vec![service_monitor],
};
harmony_cli::run(
let mut maestro = Maestro::<K8sAnywhereTopology>::initialize(
Inventory::autoload(),
K8sAnywhereTopology::from_env(),
vec![Box::new(alerting_score)],
None,
)
.await
.unwrap();
maestro.register_all(vec![Box::new(alerting_score)]);
harmony_cli::init(maestro, None).await.unwrap();
}

View File

@@ -7,8 +7,7 @@ license.workspace = true
[dependencies]
cidr.workspace = true
harmony = { path = "../../harmony" }
harmony_cli = { path = "../../harmony_cli" }
harmony_types = { path = "../../harmony_types" }
harmony = { version = "0.1.0", path = "../../harmony" }
harmony_cli = { version = "0.1.0", path = "../../harmony_cli" }
tokio.workspace = true
url.workspace = true

View File

@@ -1,7 +1,9 @@
use std::{collections::HashMap, str::FromStr};
use std::collections::HashMap;
use harmony::{
data::Id,
inventory::Inventory,
maestro::Maestro,
modules::{
monitoring::{
alert_channel::discord_alert_channel::DiscordWebhook,
@@ -18,18 +20,16 @@ use harmony::{
tenant::TenantScore,
},
topology::{
K8sAnywhereTopology,
K8sAnywhereTopology, Url,
tenant::{ResourceLimits, TenantConfig, TenantNetworkPolicy},
},
};
use harmony_types::id::Id;
use harmony_types::net::Url;
#[tokio::main]
async fn main() {
let tenant = TenantScore {
config: TenantConfig {
id: Id::from_str("1234").unwrap(),
id: Id::from_string("1234".to_string()),
name: "test-tenant".to_string(),
resource_limits: ResourceLimits {
cpu_request_cores: 6.0,
@@ -54,8 +54,8 @@ async fn main() {
let service_monitor_endpoint = ServiceMonitorEndpoint {
port: Some("80".to_string()),
path: Some("/metrics".to_string()),
scheme: Some(HTTPScheme::HTTP),
path: "/metrics".to_string(),
scheme: HTTPScheme::HTTP,
..Default::default()
};
@@ -78,13 +78,13 @@ async fn main() {
rules: vec![Box::new(additional_rules)],
service_monitors: vec![service_monitor],
};
harmony_cli::run(
let mut maestro = Maestro::<K8sAnywhereTopology>::initialize(
Inventory::autoload(),
K8sAnywhereTopology::from_env(),
vec![Box::new(tenant), Box::new(alerting_score)],
None,
)
.await
.unwrap();
maestro.register_all(vec![Box::new(tenant), Box::new(alerting_score)]);
harmony_cli::init(maestro, None).await.unwrap();
}

View File

@@ -5,22 +5,23 @@ use std::{
use cidr::Ipv4Cidr;
use harmony::{
hardware::{HostCategory, Location, PhysicalHost, SwitchGroup},
hardware::{FirewallGroup, HostCategory, Location, PhysicalHost, SwitchGroup},
infra::opnsense::OPNSenseManagementInterface,
inventory::Inventory,
maestro::Maestro,
modules::{
http::StaticFilesHttpScore,
ipxe::IpxeScore,
okd::{
bootstrap_dhcp::OKDBootstrapDhcpScore,
bootstrap_load_balancer::OKDBootstrapLoadBalancerScore, dhcp::OKDDhcpScore,
dns::OKDDnsScore, ipxe::OkdIpxeScore,
dns::OKDDnsScore,
},
tftp::TftpScore,
},
topology::{LogicalHost, UnmanagedRouter},
topology::{LogicalHost, UnmanagedRouter, Url},
};
use harmony_macros::{ip, mac_address};
use harmony_types::net::Url;
#[tokio::main]
async fn main() {
@@ -86,7 +87,8 @@ async fn main() {
let inventory = Inventory {
location: Location::new("I am mobile".to_string(), "earth".to_string()),
switch: SwitchGroup::from([]),
firewall_mgmt: Box::new(OPNSenseManagementInterface::new()),
firewall: FirewallGroup::from([PhysicalHost::empty(HostCategory::Firewall)
.management(Arc::new(OPNSenseManagementInterface::new()))]),
storage_host: vec![],
worker_host: vec![
PhysicalHost::empty(HostCategory::Server)
@@ -124,37 +126,20 @@ async fn main() {
harmony::modules::okd::load_balancer::OKDLoadBalancerScore::new(&topology);
let tftp_score = TftpScore::new(Url::LocalFolder("./data/watchguard/tftpboot".to_string()));
let http_score = StaticFilesHttpScore {
folder_to_serve: Some(Url::LocalFolder(
"./data/watchguard/pxe-http-files".to_string(),
)),
files: vec![],
};
let kickstart_filename = "inventory.kickstart".to_string();
let cluster_pubkey_filename = "cluster_ssh_key.pub".to_string();
let harmony_inventory_agent = "harmony_inventory_agent".to_string();
let ipxe_score = OkdIpxeScore {
kickstart_filename,
harmony_inventory_agent,
cluster_pubkey_filename,
};
harmony_tui::run(
inventory,
topology,
vec![
Box::new(dns_score),
Box::new(bootstrap_dhcp_score),
Box::new(bootstrap_load_balancer_score),
Box::new(load_balancer_score),
Box::new(tftp_score),
Box::new(http_score),
Box::new(ipxe_score),
Box::new(dhcp_score),
],
)
.await
.unwrap();
let http_score = StaticFilesHttpScore::new(Url::LocalFolder(
"./data/watchguard/pxe-http-files".to_string(),
));
let ipxe_score = IpxeScore::new();
let mut maestro = Maestro::initialize(inventory, topology).await.unwrap();
maestro.register_all(vec![
Box::new(dns_score),
Box::new(bootstrap_dhcp_score),
Box::new(bootstrap_load_balancer_score),
Box::new(load_balancer_score),
Box::new(tftp_score),
Box::new(http_score),
Box::new(ipxe_score),
Box::new(dhcp_score),
]);
harmony_tui::init(maestro).await.unwrap();
}

View File

@@ -1,18 +1,20 @@
use harmony::{
inventory::Inventory, modules::monitoring::ntfy::ntfy::NtfyScore, topology::K8sAnywhereTopology,
inventory::Inventory, maestro::Maestro, modules::monitoring::ntfy::ntfy::NtfyScore,
topology::K8sAnywhereTopology,
};
#[tokio::main]
async fn main() {
harmony_cli::run(
let mut maestro = Maestro::<K8sAnywhereTopology>::initialize(
Inventory::autoload(),
K8sAnywhereTopology::from_env(),
vec![Box::new(NtfyScore {
namespace: "monitoring".to_string(),
host: "localhost".to_string(),
})],
None,
)
.await
.unwrap();
maestro.register_all(vec![Box::new(NtfyScore {
namespace: "monitoring".to_string(),
host: "localhost".to_string(),
})]);
harmony_cli::init(maestro, None).await.unwrap();
}

View File

@@ -1,21 +0,0 @@
[package]
name = "example-okd-install"
edition = "2024"
version.workspace = true
readme.workspace = true
license.workspace = true
publish = false
[dependencies]
harmony = { path = "../../harmony" }
harmony_cli = { path = "../../harmony_cli" }
harmony_types = { path = "../../harmony_types" }
harmony_secret = { path = "../../harmony_secret" }
harmony_secret_derive = { path = "../../harmony_secret_derive" }
cidr = { workspace = true }
tokio = { workspace = true }
harmony_macros = { path = "../../harmony_macros" }
log = { workspace = true }
env_logger = { workspace = true }
url = { workspace = true }
serde.workspace = true

View File

@@ -1,20 +0,0 @@
mod topology;
use crate::topology::{get_inventory, get_topology};
use harmony::modules::okd::{installation::OKDInstallationScore, ipxe::OkdIpxeScore};
#[tokio::main]
async fn main() {
let inventory = get_inventory();
let topology = get_topology().await;
let kickstart_filename = "inventory.kickstart".to_string();
let cluster_pubkey_filename = "cluster_ssh_key.pub".to_string();
let harmony_inventory_agent = "harmony_inventory_agent".to_string();
let okd_install = Box::new(OKDInstallationScore {});
harmony_cli::run(inventory, topology, vec![okd_install], None)
.await
.unwrap();
}

View File

@@ -1,77 +0,0 @@
use cidr::Ipv4Cidr;
use harmony::{
hardware::{FirewallGroup, HostCategory, Location, PhysicalHost, SwitchGroup},
infra::opnsense::OPNSenseManagementInterface,
inventory::Inventory,
topology::{HAClusterTopology, LogicalHost, UnmanagedRouter},
};
use harmony_macros::{ip, ipv4};
use harmony_secret::{Secret, SecretManager};
use serde::{Deserialize, Serialize};
use std::{net::IpAddr, sync::Arc};
#[derive(Secret, Serialize, Deserialize, Debug, PartialEq)]
struct OPNSenseFirewallConfig {
username: String,
password: String,
}
pub async fn get_topology() -> HAClusterTopology {
let firewall = harmony::topology::LogicalHost {
ip: ip!("192.168.1.1"),
name: String::from("opnsense-1"),
};
let config = SecretManager::get::<OPNSenseFirewallConfig>().await;
let config = config.unwrap();
let opnsense = Arc::new(
harmony::infra::opnsense::OPNSenseFirewall::new(
firewall,
None,
&config.username,
&config.password,
)
.await,
);
let lan_subnet = ipv4!("192.168.1.0");
let gateway_ipv4 = ipv4!("192.168.1.1");
let gateway_ip = IpAddr::V4(gateway_ipv4);
harmony::topology::HAClusterTopology {
domain_name: "demo.harmony.mcd".to_string(),
router: Arc::new(UnmanagedRouter::new(
gateway_ip,
Ipv4Cidr::new(lan_subnet, 24).unwrap(),
)),
load_balancer: opnsense.clone(),
firewall: opnsense.clone(),
tftp_server: opnsense.clone(),
http_server: opnsense.clone(),
dhcp_server: opnsense.clone(),
dns_server: opnsense.clone(),
control_plane: vec![LogicalHost {
ip: ip!("192.168.1.20"),
name: "cp0".to_string(),
}],
bootstrap_host: LogicalHost {
ip: ip!("192.168.1.20"),
name: "bootstrap".to_string(),
},
workers: vec![],
switch: vec![],
}
}
pub fn get_inventory() -> Inventory {
Inventory {
location: Location::new(
"Some virtual machine or maybe a physical machine if you're cool".to_string(),
"testopnsense".to_string(),
),
switch: SwitchGroup::from([]),
firewall_mgmt: Box::new(OPNSenseManagementInterface::new()),
storage_host: vec![],
worker_host: vec![],
control_plane_host: vec![],
}
}

View File

@@ -1,7 +0,0 @@
-----BEGIN OPENSSH PRIVATE KEY-----
b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAAAMwAAAAtzc2gtZW
QyNTUxOQAAACAcemw8pbwuvHFaYynxBbS0Cf3ThYuj1Utr7CDqjwySHAAAAJikacCNpGnA
jQAAAAtzc2gtZWQyNTUxOQAAACAcemw8pbwuvHFaYynxBbS0Cf3ThYuj1Utr7CDqjwySHA
AAAECiiKk4V6Q5cVs6axDM4sjAzZn/QCZLQekmYQXS9XbEYxx6bDylvC68cVpjKfEFtLQJ
/dOFi6PVS2vsIOqPDJIcAAAAEGplYW5nYWJAbGlsaWFuZTIBAgMEBQ==
-----END OPENSSH PRIVATE KEY-----

View File

@@ -1 +0,0 @@
ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIBx6bDylvC68cVpjKfEFtLQJ/dOFi6PVS2vsIOqPDJIc jeangab@liliane2

View File

@@ -1,21 +0,0 @@
[package]
name = "example-pxe"
edition = "2024"
version.workspace = true
readme.workspace = true
license.workspace = true
publish = false
[dependencies]
harmony = { path = "../../harmony" }
harmony_cli = { path = "../../harmony_cli" }
harmony_types = { path = "../../harmony_types" }
harmony_secret = { path = "../../harmony_secret" }
harmony_secret_derive = { path = "../../harmony_secret_derive" }
cidr = { workspace = true }
tokio = { workspace = true }
harmony_macros = { path = "../../harmony_macros" }
log = { workspace = true }
env_logger = { workspace = true }
url = { workspace = true }
serde.workspace = true

View File

@@ -1,24 +0,0 @@
mod topology;
use crate::topology::{get_inventory, get_topology};
use harmony::modules::okd::ipxe::OkdIpxeScore;
#[tokio::main]
async fn main() {
let inventory = get_inventory();
let topology = get_topology().await;
let kickstart_filename = "inventory.kickstart".to_string();
let cluster_pubkey_filename = "cluster_ssh_key.pub".to_string();
let harmony_inventory_agent = "harmony_inventory_agent".to_string();
let ipxe_score = OkdIpxeScore {
kickstart_filename,
harmony_inventory_agent,
cluster_pubkey_filename,
};
harmony_cli::run(inventory, topology, vec![Box::new(ipxe_score)], None)
.await
.unwrap();
}

View File

@@ -1,77 +0,0 @@
use cidr::Ipv4Cidr;
use harmony::{
hardware::{FirewallGroup, HostCategory, Location, PhysicalHost, SwitchGroup},
infra::opnsense::OPNSenseManagementInterface,
inventory::Inventory,
topology::{HAClusterTopology, LogicalHost, UnmanagedRouter},
};
use harmony_macros::{ip, ipv4};
use harmony_secret::{Secret, SecretManager};
use serde::{Deserialize, Serialize};
use std::{net::IpAddr, sync::Arc};
#[derive(Secret, Serialize, Deserialize, Debug, PartialEq)]
struct OPNSenseFirewallConfig {
username: String,
password: String,
}
pub async fn get_topology() -> HAClusterTopology {
let firewall = harmony::topology::LogicalHost {
ip: ip!("192.168.1.1"),
name: String::from("opnsense-1"),
};
let config = SecretManager::get::<OPNSenseFirewallConfig>().await;
let config = config.unwrap();
let opnsense = Arc::new(
harmony::infra::opnsense::OPNSenseFirewall::new(
firewall,
None,
&config.username,
&config.password,
)
.await,
);
let lan_subnet = ipv4!("192.168.1.0");
let gateway_ipv4 = ipv4!("192.168.1.1");
let gateway_ip = IpAddr::V4(gateway_ipv4);
harmony::topology::HAClusterTopology {
domain_name: "demo.harmony.mcd".to_string(),
router: Arc::new(UnmanagedRouter::new(
gateway_ip,
Ipv4Cidr::new(lan_subnet, 24).unwrap(),
)),
load_balancer: opnsense.clone(),
firewall: opnsense.clone(),
tftp_server: opnsense.clone(),
http_server: opnsense.clone(),
dhcp_server: opnsense.clone(),
dns_server: opnsense.clone(),
control_plane: vec![LogicalHost {
ip: ip!("10.100.8.20"),
name: "cp0".to_string(),
}],
bootstrap_host: LogicalHost {
ip: ip!("10.100.8.20"),
name: "cp0".to_string(),
},
workers: vec![],
switch: vec![],
}
}
pub fn get_inventory() -> Inventory {
Inventory {
location: Location::new(
"Some virtual machine or maybe a physical machine if you're cool".to_string(),
"testopnsense".to_string(),
),
switch: SwitchGroup::from([]),
firewall_mgmt: Box::new(OPNSenseManagementInterface::new()),
storage_host: vec![],
worker_host: vec![],
control_plane_host: vec![],
}
}

View File

@@ -1,7 +0,0 @@
-----BEGIN OPENSSH PRIVATE KEY-----
b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAAAMwAAAAtzc2gtZW
QyNTUxOQAAACAcemw8pbwuvHFaYynxBbS0Cf3ThYuj1Utr7CDqjwySHAAAAJikacCNpGnA
jQAAAAtzc2gtZWQyNTUxOQAAACAcemw8pbwuvHFaYynxBbS0Cf3ThYuj1Utr7CDqjwySHA
AAAECiiKk4V6Q5cVs6axDM4sjAzZn/QCZLQekmYQXS9XbEYxx6bDylvC68cVpjKfEFtLQJ
/dOFi6PVS2vsIOqPDJIcAAAAEGplYW5nYWJAbGlsaWFuZTIBAgMEBQ==
-----END OPENSSH PRIVATE KEY-----

View File

@@ -1 +0,0 @@
ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIBx6bDylvC68cVpjKfEFtLQJ/dOFi6PVS2vsIOqPDJIc jeangab@liliane2

View File

@@ -16,3 +16,4 @@ harmony_macros = { path = "../../harmony_macros" }
log = { workspace = true }
env_logger = { workspace = true }
url = { workspace = true }
harmony_cli = { version = "0.1.0", path = "../../harmony_cli" }

View File

@@ -8,22 +8,24 @@ use harmony::{
hardware::{FirewallGroup, HostCategory, Location, PhysicalHost, SwitchGroup},
infra::opnsense::OPNSenseManagementInterface,
inventory::Inventory,
maestro::Maestro,
modules::{
dummy::{ErrorScore, PanicScore, SuccessScore},
http::StaticFilesHttpScore,
okd::{dhcp::OKDDhcpScore, dns::OKDDnsScore, load_balancer::OKDLoadBalancerScore},
opnsense::OPNsenseShellCommandScore,
opnsense::{OPNSenseLaunchUpgrade, OPNsenseShellCommandScore},
tftp::TftpScore,
},
topology::{LogicalHost, UnmanagedRouter},
topology::{LogicalHost, UnmanagedRouter, Url},
};
use harmony_macros::{ip, mac_address};
use harmony_types::net::Url;
#[tokio::main]
async fn main() {
env_logger::init();
let firewall = harmony::topology::LogicalHost {
ip: ip!("192.168.5.229"),
ip: ip!("192.168.122.106"),
name: String::from("opnsense-1"),
};
@@ -63,7 +65,8 @@ async fn main() {
"wk".to_string(),
),
switch: SwitchGroup::from([]),
firewall_mgmt: Box::new(OPNSenseManagementInterface::new()),
firewall: FirewallGroup::from([PhysicalHost::empty(HostCategory::Firewall)
.management(Arc::new(OPNSenseManagementInterface::new()))]),
storage_host: vec![],
worker_host: vec![],
control_plane_host: vec![
@@ -80,31 +83,26 @@ async fn main() {
let load_balancer_score = OKDLoadBalancerScore::new(&topology);
let tftp_score = TftpScore::new(Url::LocalFolder("./data/watchguard/tftpboot".to_string()));
let http_score = StaticFilesHttpScore {
folder_to_serve: Some(Url::LocalFolder(
"./data/watchguard/pxe-http-files".to_string(),
)),
files: vec![],
};
harmony_tui::run(
inventory,
topology,
vec![
Box::new(dns_score),
Box::new(dhcp_score),
Box::new(load_balancer_score),
Box::new(tftp_score),
Box::new(http_score),
Box::new(OPNsenseShellCommandScore {
opnsense: opnsense.get_opnsense_config(),
command: "touch /tmp/helloharmonytouching".to_string(),
}),
Box::new(SuccessScore {}),
Box::new(ErrorScore {}),
Box::new(PanicScore {}),
],
)
.await
.unwrap();
let http_score = StaticFilesHttpScore::new(Url::LocalFolder(
"./data/watchguard/pxe-http-files".to_string(),
));
let mut maestro = Maestro::initialize(inventory, topology).await.unwrap();
maestro.register_all(vec![
Box::new(dns_score),
Box::new(dhcp_score),
Box::new(load_balancer_score),
Box::new(tftp_score),
Box::new(http_score),
Box::new(OPNsenseShellCommandScore {
opnsense: opnsense.get_opnsense_config(),
command: "touch /tmp/helloharmonytouching".to_string(),
}),
// Box::new(OPNSenseLaunchUpgrade {
// opnsense: opnsense.get_opnsense_config(),
// }),
Box::new(SuccessScore {}),
Box::new(ErrorScore {}),
Box::new(PanicScore {}),
]);
harmony_cli::init(maestro, None).await.unwrap();
}

View File

@@ -1,3 +0,0 @@
Dockerfile.harmony
.harmony_generated
harmony

View File

@@ -2,58 +2,42 @@ use std::{path::PathBuf, sync::Arc};
use harmony::{
inventory::Inventory,
modules::{
application::{
ApplicationScore, RustWebFramework, RustWebapp,
features::{ContinuousDelivery, Monitoring},
},
monitoring::alert_channel::{
discord_alert_channel::DiscordWebhook, webhook_receiver::WebhookReceiver,
},
maestro::Maestro,
modules::application::{
ApplicationScore, RustWebFramework, RustWebapp,
features::{ContinuousDelivery, Monitoring},
},
topology::K8sAnywhereTopology,
topology::{K8sAnywhereTopology, Url},
};
use harmony_types::net::Url;
#[tokio::main]
async fn main() {
env_logger::init();
let topology = K8sAnywhereTopology::from_env();
let mut maestro = Maestro::initialize(Inventory::autoload(), topology)
.await
.unwrap();
let application = Arc::new(RustWebapp {
name: "harmony-example-rust-webapp".to_string(),
domain: Url::Url(url::Url::parse("https://rustapp.harmony.example.com").unwrap()),
project_root: PathBuf::from("./webapp"), // Relative from 'harmony-path' param
project_root: PathBuf::from("./examples/rust/webapp"),
framework: Some(RustWebFramework::Leptos),
});
let discord_receiver = DiscordWebhook {
name: "test-discord".to_string(),
url: Url::Url(url::Url::parse("https://discord.doesnt.exist.com").unwrap()),
};
let webhook_receiver = WebhookReceiver {
name: "sample-webhook-receiver".to_string(),
url: Url::Url(url::Url::parse("https://webhook-doesnt-exist.com").unwrap()),
};
let app = ApplicationScore {
features: vec![
Box::new(ContinuousDelivery {
application: application.clone(),
}),
}), // TODO add monitoring, backups, multisite ha, etc
Box::new(Monitoring {
application: application.clone(),
alert_receiver: vec![Box::new(discord_receiver), Box::new(webhook_receiver)],
}),
// TODO add backups, multisite ha, etc
],
application,
};
harmony_cli::run(
Inventory::autoload(),
K8sAnywhereTopology::from_env(),
vec![Box::new(app)],
None,
)
.await
.unwrap();
maestro.register_all(vec![Box::new(app)]);
harmony_cli::init(maestro, None).await.unwrap();
}

View File

@@ -0,0 +1,16 @@
FROM rust:bookworm as builder
RUN apt-get update && apt-get install -y --no-install-recommends clang wget && wget https://github.com/cargo-bins/cargo-binstall/releases/latest/download/cargo-binstall-x86_64-unknown-linux-musl.tgz && tar -xvf cargo-binstall-x86_64-unknown-linux-musl.tgz && cp cargo-binstall /usr/local/cargo/bin && rm cargo-binstall-x86_64-unknown-linux-musl.tgz cargo-binstall && apt-get clean && rm -rf /var/lib/apt/lists/*
RUN cargo binstall cargo-leptos -y
RUN rustup target add wasm32-unknown-unknown
WORKDIR /app
COPY . .
RUN cargo leptos build --release -vv
FROM debian:bookworm-slim
RUN groupadd -r appgroup && useradd -r -s /bin/false -g appgroup appuser
ENV LEPTOS_SITE_ADDR=0.0.0.0:3000
EXPOSE 3000/tcp
WORKDIR /home/appuser
COPY --from=builder /app/target/site/pkg /home/appuser/pkg
COPY --from=builder /app/target/release/harmony-example-rust-webapp /home/appuser/harmony-example-rust-webapp
USER appuser
CMD /home/appuser/harmony-example-rust-webapp

View File

@@ -1,30 +1,30 @@
use std::str::FromStr;
use harmony::{
data::Id,
inventory::Inventory,
maestro::Maestro,
modules::tenant::TenantScore,
topology::{K8sAnywhereTopology, tenant::TenantConfig},
};
use harmony_types::id::Id;
#[tokio::main]
async fn main() {
let tenant = TenantScore {
config: TenantConfig {
id: Id::from_str("test-tenant-id").unwrap(),
id: Id::from_str("test-tenant-id"),
name: "testtenant".to_string(),
..Default::default()
},
};
harmony_cli::run(
let mut maestro = Maestro::<K8sAnywhereTopology>::initialize(
Inventory::autoload(),
K8sAnywhereTopology::from_env(),
vec![Box::new(tenant)],
None,
)
.await
.unwrap();
maestro.register_all(vec![Box::new(tenant)]);
harmony_cli::init(maestro, None).await.unwrap();
}
// TODO write tests

View File

@@ -2,6 +2,7 @@ use std::net::{SocketAddr, SocketAddrV4};
use harmony::{
inventory::Inventory,
maestro::Maestro,
modules::{
dns::DnsScore,
dummy::{ErrorScore, PanicScore, SuccessScore},
@@ -15,19 +16,18 @@ use harmony_macros::ipv4;
#[tokio::main]
async fn main() {
harmony_tui::run(
Inventory::autoload(),
DummyInfra {},
vec![
Box::new(SuccessScore {}),
Box::new(ErrorScore {}),
Box::new(PanicScore {}),
Box::new(DnsScore::new(vec![], None)),
Box::new(build_large_score()),
],
)
.await
.unwrap();
let inventory = Inventory::autoload();
let topology = DummyInfra {};
let mut maestro = Maestro::initialize(inventory, topology).await.unwrap();
maestro.register_all(vec![
Box::new(SuccessScore {}),
Box::new(ErrorScore {}),
Box::new(PanicScore {}),
Box::new(DnsScore::new(vec![], None)),
Box::new(build_large_score()),
]);
harmony_tui::init(maestro).await.unwrap();
}
fn build_large_score() -> LoadBalancerScore {

View File

@@ -1,11 +0,0 @@
[package]
name = "example_validate_ceph_cluster_health"
edition = "2024"
version.workspace = true
readme.workspace = true
license.workspace = true
[dependencies]
harmony = { version = "0.1.0", path = "../../harmony" }
harmony_cli = { version = "0.1.0", path = "../../harmony_cli" }
tokio.workspace = true

View File

@@ -1,18 +0,0 @@
use harmony::{
inventory::Inventory,
modules::storage::ceph::ceph_validate_health_score::CephVerifyClusterHealth,
topology::K8sAnywhereTopology,
};
#[tokio::main]
async fn main() {
let ceph_health_score = CephVerifyClusterHealth {
rook_ceph_namespace: "rook-ceph".to_string(),
};
let topology = K8sAnywhereTopology::from_env();
let inventory = Inventory::autoload();
harmony_cli::run(inventory, topology, vec![Box::new(ceph_health_score)], None)
.await
.unwrap();
}

View File

@@ -5,17 +5,16 @@ version.workspace = true
readme.workspace = true
license.workspace = true
[features]
testing = []
[dependencies]
rand = "0.9"
hex = "0.4"
reqwest = { version = "0.11", features = ["blocking", "json", "rustls-tls"], default-features = false }
libredfish = "0.1.1"
reqwest = { version = "0.11", features = ["blocking", "json"] }
russh = "0.45.0"
rust-ipmi = "0.1.1"
semver = "1.0.23"
serde.workspace = true
serde_json.workspace = true
serde = { version = "1.0.209", features = ["derive", "rc"] }
serde_json = "1.0.127"
tokio.workspace = true
derive-new.workspace = true
log.workspace = true
@@ -28,28 +27,29 @@ harmony_macros = { path = "../harmony_macros" }
harmony_types = { path = "../harmony_types" }
uuid.workspace = true
url.workspace = true
kube = { workspace = true, features = ["derive"] }
kube.workspace = true
k8s-openapi.workspace = true
serde_yaml.workspace = true
http.workspace = true
serde-value.workspace = true
inquire.workspace = true
helm-wrapper-rs = "0.4.0"
non-blank-string-rs = "1.0.4"
k3d-rs = { path = "../k3d" }
directories.workspace = true
lazy_static.workspace = true
directories = "6.0.0"
lazy_static = "1.5.0"
dockerfile_builder = "0.1.5"
temp-file = "0.1.9"
convert_case.workspace = true
email_address = "0.2.9"
chrono.workspace = true
fqdn = { version = "0.4.6", features = [
"domain-label-cannot-start-or-end-with-hyphen",
"domain-label-length-limited-to-63",
"domain-name-without-special-chars",
"domain-name-length-limited-to-255",
"punycode",
"serde",
"domain-label-cannot-start-or-end-with-hyphen",
"domain-label-length-limited-to-63",
"domain-name-without-special-chars",
"domain-name-length-limited-to-255",
"punycode",
"serde",
] }
temp-dir = "0.1.14"
dyn-clone = "1.0.19"
@@ -57,20 +57,12 @@ similar.workspace = true
futures-util = "0.3.31"
tokio-util = "0.7.15"
strum = { version = "0.27.1", features = ["derive"] }
tempfile.workspace = true
tempfile = "3.20.0"
serde_with = "3.14.0"
schemars = "0.8.22"
kube-derive = "1.1.0"
bollard.workspace = true
tar.workspace = true
base64.workspace = true
thiserror.workspace = true
once_cell = "1.21.3"
harmony_inventory_agent = { path = "../harmony_inventory_agent" }
harmony_secret_derive = { version = "0.1.0", path = "../harmony_secret_derive" }
askama.workspace = true
sqlx.workspace = true
inquire.workspace = true
figment.workspace = true
[dev-dependencies]
pretty_assertions.workspace = true

Binary file not shown.

View File

@@ -1,23 +1,66 @@
use figment::{
Error, Figment, Metadata, Profile, Provider,
providers::{Env, Format},
value::{Dict, Map},
};
use lazy_static::lazy_static;
use serde::{Deserialize, Serialize};
use std::path::PathBuf;
lazy_static! {
pub static ref HARMONY_DATA_DIR: PathBuf = directories::BaseDirs::new()
.unwrap()
.data_dir()
.join("harmony");
pub static ref REGISTRY_URL: String =
std::env::var("HARMONY_REGISTRY_URL").unwrap_or_else(|_| "hub.nationtech.io".to_string());
pub static ref REGISTRY_PROJECT: String =
std::env::var("HARMONY_REGISTRY_PROJECT").unwrap_or_else(|_| "harmony".to_string());
pub static ref DRY_RUN: bool =
std::env::var("HARMONY_DRY_RUN").is_ok_and(|value| value.parse().unwrap_or(false));
pub static ref DEFAULT_DATABASE_URL: String = "sqlite://harmony.sqlite".to_string();
pub static ref DATABASE_URL: String = std::env::var("HARMONY_DATABASE_URL")
.map(|value| if value.is_empty() {
(*DEFAULT_DATABASE_URL).clone()
} else {
value
})
.unwrap_or((*DEFAULT_DATABASE_URL).clone());
#[derive(Debug, Deserialize, Serialize)]
pub struct Config {
pub data_dir: PathBuf,
pub registry_url: String,
pub registry_project: String,
pub dry_run: bool,
pub run_upgrades: bool,
}
impl Default for Config {
fn default() -> Self {
Config {
data_dir: directories::BaseDirs::new()
.unwrap()
.data_dir()
.join("harmony"),
registry_url: "hub.nationtech.io".to_string(),
registry_project: "harmony".to_string(),
dry_run: true,
run_upgrades: false,
}
}
}
impl Config {
pub fn load() -> Result<Self, figment::Error> {
Figment::from(Config::default())
.merge(Env::prefixed("HARMONY_"))
.extract()
}
fn from<T: Provider>(provider: T) -> Result<Config, Error> {
Figment::from(provider).extract()
}
fn figment() -> Figment {
use figment::providers::Env;
        // Defaults from Config::default(), overridden by any HARMONY_-prefixed env vars.
Figment::from(Config::default()).merge(Env::prefixed("HARMONY_"))
}
}
impl Provider for Config {
fn metadata(&self) -> Metadata {
Metadata::named("Harmony Config")
}
fn data(&self) -> Result<Map<Profile, Dict>, Error> {
figment::providers::Serialized::defaults(Config::default()).data()
}
fn profile(&self) -> Option<Profile> {
// Optionally, a profile that's selected by default.
Some(Profile::Default)
}
}
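A rough usage sketch of the new loader (not part of this change; the environment values are illustrative): inside the harmony crate, the defaults from Config::default() are overlaid with whatever HARMONY_-prefixed variables are exported.
use crate::config::Config;
fn print_effective_config() {
    // Assumes e.g. HARMONY_DRY_RUN=false and HARMONY_REGISTRY_URL=registry.example.com
    // are exported; figment merges them over the defaults defined above.
    let config = Config::load().expect("defaults merged with HARMONY_* env should extract");
    println!(
        "registry {}/{}, dry_run={}, run_upgrades={}",
        config.registry_url, config.registry_project, config.dry_run, config.run_upgrades
    );
}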

View File

@@ -1,22 +0,0 @@
use serde::{Deserialize, Serialize};
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct FileContent {
pub path: FilePath,
pub content: String,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum FilePath {
Relative(String),
Absolute(String),
}
impl std::fmt::Display for FilePath {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
FilePath::Relative(path) => f.write_fmt(format_args!("./{path}")),
FilePath::Absolute(path) => f.write_fmt(format_args!("/{path}")),
}
}
}

View File

@@ -1,6 +1,5 @@
use rand::distr::Alphanumeric;
use rand::distr::SampleString;
use std::str::FromStr;
use std::time::SystemTime;
use std::time::UNIX_EPOCH;
@@ -25,27 +24,13 @@ pub struct Id {
}
impl Id {
pub fn empty() -> Self {
Id {
value: String::new(),
}
}
}
impl FromStr for Id {
type Err = ();
fn from_str(s: &str) -> Result<Self, Self::Err> {
Ok(Id {
value: s.to_string(),
})
}
}
impl From<String> for Id {
fn from(value: String) -> Self {
pub fn from_string(value: String) -> Self {
Self { value }
}
pub fn from_str(value: &str) -> Self {
Self::from_string(value.to_string())
}
}
impl std::fmt::Display for Id {

View File

@@ -1,4 +1,4 @@
mod file;
mod id;
mod version;
pub use file::*;
pub use id::*;
pub use version::*;

View File

@@ -47,7 +47,7 @@ impl serde::Serialize for Version {
impl std::fmt::Display for Version {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
self.value.fmt(f)
return self.value.fmt(f);
}
}

View File

@@ -1,7 +1,8 @@
use std::fmt;
use async_trait::async_trait;
use harmony_types::net::IpAddress;
use super::topology::IpAddress;
#[derive(Debug)]
pub enum ExecutorError {

View File

@@ -1,230 +1,133 @@
use std::sync::Arc;
use derive_new::new;
use harmony_inventory_agent::hwinfo::{CPU, MemoryModule, NetworkInterface, StorageDrive};
use harmony_types::net::MacAddress;
use serde::{Deserialize, Serialize};
use serde::{Serialize, Serializer, ser::SerializeStruct};
use serde_value::Value;
pub type HostGroup = Vec<PhysicalHost>;
pub type SwitchGroup = Vec<Switch>;
pub type FirewallGroup = Vec<PhysicalHost>;
#[derive(Debug, Clone, Serialize, Deserialize)]
#[derive(Debug, Clone)]
pub struct PhysicalHost {
pub id: Id,
pub category: HostCategory,
pub network: Vec<NetworkInterface>,
pub storage: Vec<StorageDrive>,
pub management: Arc<dyn ManagementInterface>,
pub storage: Vec<Storage>,
pub labels: Vec<Label>,
pub memory_modules: Vec<MemoryModule>,
pub cpus: Vec<CPU>,
pub memory_size: Option<u64>,
pub cpu_count: Option<u64>,
}
impl PhysicalHost {
pub fn empty(category: HostCategory) -> Self {
Self {
id: Id::empty(),
category,
network: vec![],
storage: vec![],
labels: vec![],
memory_modules: vec![],
cpus: vec![],
management: Arc::new(ManualManagementInterface {}),
memory_size: None,
cpu_count: None,
}
}
pub fn summary(&self) -> String {
let mut parts = Vec::new();
// Part 1: System Model (from labels) or Category as a fallback
let model = self
.labels
.iter()
.find(|l| l.name == "system-product-name" || l.name == "model")
.map(|l| l.value.clone())
.unwrap_or_else(|| self.category.to_string());
parts.push(model);
// Part 2: CPU Information
if !self.cpus.is_empty() {
let cpu_count = self.cpus.len();
let total_cores = self.cpus.iter().map(|c| c.cores).sum::<u32>();
let total_threads = self.cpus.iter().map(|c| c.threads).sum::<u32>();
let model_name = &self.cpus[0].model;
let cpu_summary = if cpu_count > 1 {
format!(
"{}x {} ({}c/{}t)",
cpu_count, model_name, total_cores, total_threads
)
} else {
format!("{} ({}c/{}t)", model_name, total_cores, total_threads)
};
parts.push(cpu_summary);
}
// Part 3: Memory Information
if !self.memory_modules.is_empty() {
let total_mem_bytes = self
.memory_modules
.iter()
.map(|m| m.size_bytes)
.sum::<u64>();
let total_mem_gb = (total_mem_bytes as f64 / (1024.0 * 1024.0 * 1024.0)).round() as u64;
// Find the most common speed among modules
let mut speeds = std::collections::HashMap::new();
for module in &self.memory_modules {
if let Some(speed) = module.speed_mhz {
*speeds.entry(speed).or_insert(0) += 1;
}
}
let common_speed = speeds
.into_iter()
.max_by_key(|&(_, count)| count)
.map(|(speed, _)| speed);
if let Some(speed) = common_speed {
parts.push(format!("{} GB RAM @ {}MHz", total_mem_gb, speed));
} else {
parts.push(format!("{} GB RAM", total_mem_gb));
}
}
// Part 4: Storage Information
if !self.storage.is_empty() {
let total_storage_bytes = self.storage.iter().map(|d| d.size_bytes).sum::<u64>();
let drive_count = self.storage.len();
let first_drive_model = &self.storage[0].model;
// Helper to format bytes into TB or GB
let format_storage = |bytes: u64| {
let tb = bytes as f64 / (1024.0 * 1024.0 * 1024.0 * 1024.0);
if tb >= 1.0 {
format!("{:.2} TB", tb)
} else {
let gb = bytes as f64 / (1024.0 * 1024.0 * 1024.0);
format!("{:.0} GB", gb)
}
};
let storage_summary = if drive_count > 1 {
format!(
"{} Storage ({}x {})",
format_storage(total_storage_bytes),
drive_count,
first_drive_model
)
} else {
format!(
"{} Storage ({})",
format_storage(total_storage_bytes),
first_drive_model
)
};
parts.push(storage_summary);
}
// Part 5: Network Information
// Prioritize an "up" interface with an IPv4 address
let best_nic = self
.network
.iter()
.find(|n| n.is_up && !n.ipv4_addresses.is_empty())
.or_else(|| self.network.first());
if let Some(nic) = best_nic {
let speed = nic
.speed_mbps
.map(|s| format!("{}Gbps", s / 1000))
.unwrap_or_else(|| "N/A".to_string());
let mac = nic.mac_address.to_string();
let nic_summary = if let Some(ip) = nic.ipv4_addresses.first() {
format!("NIC: {} ({}, {})", speed, ip, mac)
} else {
format!("NIC: {} ({})", speed, mac)
};
parts.push(nic_summary);
}
parts.join(" | ")
}
pub fn cluster_mac(&self) -> MacAddress {
self.network
.first()
.get(0)
.expect("Cluster physical host should have a network interface")
.mac_address
.clone()
}
pub fn mac_address(mut self, mac_address: MacAddress) -> Self {
self.network.push(NetworkInterface {
name: String::new(),
mac_address,
speed_mbps: None,
is_up: false,
mtu: 0,
ipv4_addresses: vec![],
ipv6_addresses: vec![],
driver: String::new(),
firmware_version: None,
pub fn cpu(mut self, cpu_count: Option<u64>) -> Self {
self.cpu_count = cpu_count;
self
}
pub fn memory_size(mut self, memory_size: Option<u64>) -> Self {
self.memory_size = memory_size;
self
}
pub fn storage(
mut self,
connection: StorageConnectionType,
kind: StorageKind,
size: u64,
serial: String,
) -> Self {
self.storage.push(Storage {
connection,
kind,
size,
serial,
});
self
}
pub fn get_mac_address(&self) -> Vec<MacAddress> {
self.network.iter().map(|nic| nic.mac_address).collect()
pub fn mac_address(mut self, mac_address: MacAddress) -> Self {
self.network.push(NetworkInterface {
name: None,
mac_address,
speed: None,
});
self
}
pub fn label(mut self, name: String, value: String) -> Self {
self.labels.push(Label { name, value });
self
}
pub fn management(mut self, management: Arc<dyn ManagementInterface>) -> Self {
self.management = management;
self
}
}
// Custom Serialize implementation for PhysicalHost
// impl Serialize for PhysicalHost {
// fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
// where
// S: Serializer,
// {
// // Determine the number of fields
// let mut num_fields = 5; // category, network, storage, labels, management
// if self.memory_modules.is_some() {
// num_fields += 1;
// }
// if self.cpus.is_some() {
// num_fields += 1;
// }
//
// // Create a serialization structure
// let mut state = serializer.serialize_struct("PhysicalHost", num_fields)?;
//
// // Serialize the standard fields
// state.serialize_field("category", &self.category)?;
// state.serialize_field("network", &self.network)?;
// state.serialize_field("storage", &self.storage)?;
// state.serialize_field("labels", &self.labels)?;
//
// // Serialize optional fields
// if let Some(memory) = self.memory_modules {
// state.serialize_field("memory_size", &memory)?;
// }
// if let Some(cpu) = self.cpus {
// state.serialize_field("cpu_count", &cpu)?;
// }
//
// let mgmt_data = self.management.serialize_management();
// // pub management: Arc<dyn ManagementInterface>,
//
// // Handle management interface - either as a field or flattened
// state.serialize_field("management", &mgmt_data)?;
//
// state.end()
// }
// }
impl Serialize for PhysicalHost {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
// Determine the number of fields
let mut num_fields = 5; // category, network, storage, labels, management
if self.memory_size.is_some() {
num_fields += 1;
}
if self.cpu_count.is_some() {
num_fields += 1;
}
// Create a serialization structure
let mut state = serializer.serialize_struct("PhysicalHost", num_fields)?;
// Serialize the standard fields
state.serialize_field("category", &self.category)?;
state.serialize_field("network", &self.network)?;
state.serialize_field("storage", &self.storage)?;
state.serialize_field("labels", &self.labels)?;
// Serialize optional fields
if let Some(memory) = self.memory_size {
state.serialize_field("memory_size", &memory)?;
}
if let Some(cpu) = self.cpu_count {
state.serialize_field("cpu_count", &cpu)?;
}
let mgmt_data = self.management.serialize_management();
// pub management: Arc<dyn ManagementInterface>,
// Handle management interface - either as a field or flattened
state.serialize_field("management", &mgmt_data)?;
state.end()
}
}
#[derive(new, Serialize)]
pub struct ManualManagementInterface;
@@ -269,17 +172,66 @@ where
}
}
#[derive(Debug, Clone, Serialize, Deserialize)]
#[derive(Debug, Clone, Serialize)]
pub enum HostCategory {
Server,
Firewall,
Switch,
}
#[derive(Debug, new, Clone, Serialize)]
pub struct NetworkInterface {
pub name: Option<String>,
pub mac_address: MacAddress,
pub speed: Option<u64>,
}
#[cfg(test)]
use harmony_macros::mac_address;
#[cfg(test)]
impl NetworkInterface {
pub fn dummy() -> Self {
Self {
name: Some(String::new()),
mac_address: mac_address!("00:00:00:00:00:00"),
speed: Some(0),
}
}
}
use harmony_types::id::Id;
#[derive(Debug, new, Clone, Serialize)]
pub enum StorageConnectionType {
Sata3g,
Sata6g,
Sas6g,
Sas12g,
PCIE,
}
#[derive(Debug, Clone, Serialize)]
pub enum StorageKind {
SSD,
NVME,
HDD,
}
#[derive(Debug, new, Clone, Serialize)]
pub struct Storage {
pub connection: StorageConnectionType,
pub kind: StorageKind,
pub size: u64,
pub serial: String,
}
#[cfg(test)]
impl Storage {
pub fn dummy() -> Self {
Self {
connection: StorageConnectionType::Sata3g,
kind: StorageKind::SSD,
size: 0,
serial: String::new(),
}
}
}
#[derive(Debug, Clone, Serialize)]
pub struct Switch {
@@ -287,7 +239,7 @@ pub struct Switch {
_management_interface: NetworkInterface,
}
#[derive(Debug, new, Clone, Serialize, Deserialize)]
#[derive(Debug, new, Clone, Serialize)]
pub struct Label {
pub name: String,
pub value: String,
@@ -310,65 +262,146 @@ impl Location {
}
}
impl std::fmt::Display for HostCategory {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
HostCategory::Server => write!(f, "Server"),
HostCategory::Firewall => write!(f, "Firewall"),
HostCategory::Switch => write!(f, "Switch"),
}
}
}
impl std::fmt::Display for Label {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "{}: {}", self.name, self.value)
}
}
impl std::fmt::Display for Location {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "Address: {}, Name: {}", self.address, self.name)
}
}
impl std::fmt::Display for PhysicalHost {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "{}", self.summary())
}
}
impl std::fmt::Display for Switch {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "Switch with {} interfaces", self._interface.len())
}
}
#[cfg(test)]
mod tests {
use super::*;
use serde::{Deserialize, Serialize};
use std::sync::Arc;
// Mock implementation of ManagementInterface
#[derive(Debug, Clone, Serialize, Deserialize)]
struct MockHPIlo {
ip: String,
username: String,
password: String,
firmware_version: String,
}
impl ManagementInterface for MockHPIlo {
fn boot_to_pxe(&self) {}
fn get_supported_protocol_names(&self) -> String {
String::new()
}
}
// Another mock implementation
#[derive(Debug, Clone, Serialize, Deserialize)]
struct MockDellIdrac {
hostname: String,
port: u16,
api_token: String,
}
impl ManagementInterface for MockDellIdrac {
fn boot_to_pxe(&self) {}
fn get_supported_protocol_names(&self) -> String {
String::new()
}
}
#[test]
fn test_serialize_physical_host_with_hp_ilo() {
// Create a PhysicalHost with HP iLO management
let host = PhysicalHost {
category: HostCategory::Server,
network: vec![NetworkInterface::dummy()],
management: Arc::new(MockHPIlo {
ip: "192.168.1.100".to_string(),
username: "admin".to_string(),
password: "password123".to_string(),
firmware_version: "2.5.0".to_string(),
}),
storage: vec![Storage::dummy()],
labels: vec![Label::new("datacenter".to_string(), "us-east".to_string())],
memory_size: Some(64_000_000),
cpu_count: Some(16),
};
// Serialize to JSON
let json = serde_json::to_string(&host).expect("Failed to serialize host");
// Check that the serialized JSON contains the HP iLO details
assert!(json.contains("192.168.1.100"));
assert!(json.contains("admin"));
assert!(json.contains("password123"));
assert!(json.contains("firmware_version"));
assert!(json.contains("2.5.0"));
// Parse back to verify structure (not the exact management interface)
let parsed: serde_json::Value = serde_json::from_str(&json).expect("Failed to parse JSON");
// Verify basic structure
assert_eq!(parsed["cpu_count"], 16);
assert_eq!(parsed["memory_size"], 64_000_000);
assert_eq!(parsed["network"][0]["name"], "");
}
#[test]
fn test_serialize_physical_host_with_dell_idrac() {
// Create a PhysicalHost with Dell iDRAC management
let host = PhysicalHost {
category: HostCategory::Server,
network: vec![NetworkInterface::dummy()],
management: Arc::new(MockDellIdrac {
hostname: "idrac-server01".to_string(),
port: 443,
api_token: "abcdef123456".to_string(),
}),
storage: vec![Storage::dummy()],
labels: vec![Label::new("env".to_string(), "production".to_string())],
memory_size: Some(128_000_000),
cpu_count: Some(32),
};
// Serialize to JSON
let json = serde_json::to_string(&host).expect("Failed to serialize host");
// Check that the serialized JSON contains the Dell iDRAC details
assert!(json.contains("idrac-server01"));
assert!(json.contains("443"));
assert!(json.contains("abcdef123456"));
// Parse back to verify structure
let parsed: serde_json::Value = serde_json::from_str(&json).expect("Failed to parse JSON");
// Verify basic structure
assert_eq!(parsed["cpu_count"], 32);
assert_eq!(parsed["memory_size"], 128_000_000);
assert_eq!(parsed["storage"][0]["path"], serde_json::Value::Null);
}
#[test]
fn test_different_management_implementations_produce_valid_json() {
// Create hosts with different management implementations
let host1 = PhysicalHost {
id: Id::empty(),
category: HostCategory::Server,
network: vec![],
management: Arc::new(MockHPIlo {
ip: "10.0.0.1".to_string(),
username: "root".to_string(),
password: "secret".to_string(),
firmware_version: "3.0.0".to_string(),
}),
storage: vec![],
labels: vec![],
memory_modules: vec![],
cpus: vec![],
memory_size: None,
cpu_count: None,
};
let host2 = PhysicalHost {
id: Id::empty(),
category: HostCategory::Server,
network: vec![],
management: Arc::new(MockDellIdrac {
hostname: "server02-idrac".to_string(),
port: 8443,
api_token: "token123".to_string(),
}),
storage: vec![],
labels: vec![],
memory_modules: vec![],
cpus: vec![],
memory_size: None,
cpu_count: None,
};
// Both should serialize successfully
@@ -378,5 +411,8 @@ mod tests {
// Both JSONs should be valid and parseable
let _: serde_json::Value = serde_json::from_str(&json1).expect("Invalid JSON for host1");
let _: serde_json::Value = serde_json::from_str(&json2).expect("Invalid JSON for host2");
// The JSONs should be different because they contain different management interfaces
assert_ne!(json1, json2);
}
}

View File

@@ -1,84 +0,0 @@
use once_cell::sync::Lazy;
use std::{collections::HashMap, sync::Mutex};
use crate::modules::application::ApplicationFeatureStatus;
use super::{
interpret::{InterpretError, Outcome},
topology::TopologyStatus,
};
#[derive(Debug, Clone)]
pub enum HarmonyEvent {
HarmonyStarted,
HarmonyFinished,
InterpretExecutionStarted {
execution_id: String,
topology: String,
interpret: String,
score: String,
message: String,
},
InterpretExecutionFinished {
execution_id: String,
topology: String,
interpret: String,
score: String,
outcome: Result<Outcome, InterpretError>,
},
TopologyStateChanged {
topology: String,
status: TopologyStatus,
message: Option<String>,
},
ApplicationFeatureStateChanged {
topology: String,
application: String,
feature: String,
status: ApplicationFeatureStatus,
},
}
type Subscriber = Box<dyn Fn(&HarmonyEvent) + Send + Sync>;
static SUBSCRIBERS: Lazy<Mutex<HashMap<String, Subscriber>>> =
Lazy::new(|| Mutex::new(HashMap::new()));
/// Subscribes a listener to all instrumentation events.
///
/// Simply provide a unique name and a closure to run when an event happens.
///
/// # Example
/// ```
/// use harmony::instrumentation;
/// instrumentation::subscribe("my_logger", |event| {
/// println!("Event occurred: {:?}", event);
/// });
/// ```
pub fn subscribe<F>(name: &str, callback: F)
where
F: Fn(&HarmonyEvent) + Send + Sync + 'static,
{
let mut subs = SUBSCRIBERS.lock().unwrap();
subs.insert(name.to_string(), Box::new(callback));
}
/// Instruments an event, notifying all subscribers.
///
/// This will call every closure that was registered with `subscribe`.
///
/// # Example
/// ```
/// use harmony::instrumentation;
/// use harmony::instrumentation::HarmonyEvent;
/// instrumentation::instrument(HarmonyEvent::HarmonyStarted);
/// ```
pub fn instrument(event: HarmonyEvent) -> Result<(), &'static str> {
let subs = SUBSCRIBERS.lock().unwrap();
for callback in subs.values() {
callback(&event);
}
Ok(())
}

View File

@@ -1,11 +1,12 @@
use harmony_types::id::Id;
use std::error::Error;
use async_trait::async_trait;
use derive_new::new;
use super::{
data::Version, executors::ExecutorError, inventory::Inventory, topology::PreparationError,
data::{Id, Version},
executors::ExecutorError,
inventory::Inventory,
};
pub enum InterpretName {
@@ -22,17 +23,6 @@ pub enum InterpretName {
TenantInterpret,
Application,
ArgoCD,
Alerting,
Ntfy,
HelmChart,
HelmCommand,
K8sResource,
Lamp,
ApplicationMonitoring,
K8sPrometheusCrdAlerting,
DiscoverInventoryAgent,
CephClusterHealth,
Custom(&'static str),
}
impl std::fmt::Display for InterpretName {
@@ -51,17 +41,6 @@ impl std::fmt::Display for InterpretName {
InterpretName::TenantInterpret => f.write_str("Tenant"),
InterpretName::Application => f.write_str("Application"),
InterpretName::ArgoCD => f.write_str("ArgoCD"),
InterpretName::Alerting => f.write_str("Alerting"),
InterpretName::Ntfy => f.write_str("Ntfy"),
InterpretName::HelmChart => f.write_str("HelmChart"),
InterpretName::HelmCommand => f.write_str("HelmCommand"),
InterpretName::K8sResource => f.write_str("K8sResource"),
InterpretName::Lamp => f.write_str("LAMP"),
InterpretName::ApplicationMonitoring => f.write_str("ApplicationMonitoring"),
InterpretName::K8sPrometheusCrdAlerting => f.write_str("K8sPrometheusCrdAlerting"),
InterpretName::DiscoverInventoryAgent => f.write_str("DiscoverInventoryAgent"),
InterpretName::CephClusterHealth => f.write_str("CephClusterHealth"),
InterpretName::Custom(name) => f.write_str(name),
}
}
}
@@ -134,14 +113,6 @@ impl std::fmt::Display for InterpretError {
}
impl Error for InterpretError {}
impl From<PreparationError> for InterpretError {
fn from(value: PreparationError) -> Self {
Self {
msg: format!("InterpretError : {value}"),
}
}
}
impl From<ExecutorError> for InterpretError {
fn from(value: ExecutorError) -> Self {
Self {

View File

@@ -1,6 +1,3 @@
mod repository;
pub use repository::*;
#[derive(Debug, new, Clone)]
pub struct InventoryFilter {
target: Vec<Filter>,
@@ -17,9 +14,6 @@ impl InventoryFilter {
use derive_new::new;
use log::info;
use serde::{Deserialize, Serialize};
use crate::hardware::{ManagementInterface, ManualManagementInterface};
use super::{
filter::Filter,
@@ -33,7 +27,7 @@ pub struct Inventory {
// Firewall is really just a host but with somewhat specialized hardware
// I'm not entirely sure it belongs to its own category but it helps make things easier and
// clearer for now so let's try it this way.
pub firewall_mgmt: Box<dyn ManagementInterface>,
pub firewall: FirewallGroup,
pub worker_host: HostGroup,
pub storage_host: HostGroup,
pub control_plane_host: HostGroup,
@@ -44,7 +38,7 @@ impl Inventory {
Self {
location: Location::new("Empty".to_string(), "location".to_string()),
switch: vec![],
firewall_mgmt: Box::new(ManualManagementInterface {}),
firewall: vec![],
worker_host: vec![],
storage_host: vec![],
control_plane_host: vec![],
@@ -55,18 +49,10 @@ impl Inventory {
Self {
location: Location::test_building(),
switch: SwitchGroup::new(),
firewall_mgmt: Box::new(ManualManagementInterface {}),
firewall: FirewallGroup::new(),
worker_host: HostGroup::new(),
storage_host: HostGroup::new(),
control_plane_host: HostGroup::new(),
}
}
}
#[derive(Debug, Serialize, Deserialize, sqlx::Type)]
pub enum HostRole {
Bootstrap,
ControlPlane,
Worker,
Storage,
}

View File

@@ -1,38 +0,0 @@
use async_trait::async_trait;
use crate::{hardware::PhysicalHost, interpret::InterpretError, inventory::HostRole};
/// Errors that can occur within the repository layer.
#[derive(thiserror::Error, Debug)]
pub enum RepoError {
#[error("Database query failed: {0}")]
QueryFailed(String),
#[error("Data serialization failed: {0}")]
Serialization(String),
#[error("Data deserialization failed: {0}")]
Deserialization(String),
#[error("Could not connect to the database: {0}")]
ConnectionFailed(String),
}
impl From<RepoError> for InterpretError {
fn from(value: RepoError) -> Self {
InterpretError::new(format!("Interpret error : {value}"))
}
}
// --- Trait and Implementation ---
/// Defines the contract for inventory persistence.
#[async_trait]
pub trait InventoryRepository: Send + Sync + 'static {
async fn save(&self, host: &PhysicalHost) -> Result<(), RepoError>;
async fn get_latest_by_id(&self, host_id: &str) -> Result<Option<PhysicalHost>, RepoError>;
async fn get_all_hosts(&self) -> Result<Vec<PhysicalHost>, RepoError>;
async fn get_host_for_role(&self, role: HostRole) -> Result<Vec<PhysicalHost>, RepoError>;
async fn save_role_mapping(
&self,
role: &HostRole,
host: &PhysicalHost,
) -> Result<(), RepoError>;
}

View File

@@ -1,14 +1,12 @@
use std::sync::{Arc, RwLock};
use std::sync::{Arc, Mutex, RwLock};
use log::{debug, warn};
use crate::topology::TopologyStatus;
use log::{info, warn};
use super::{
interpret::{InterpretError, Outcome},
interpret::{InterpretError, InterpretStatus, Outcome},
inventory::Inventory,
score::Score,
topology::{PreparationError, PreparationOutcome, Topology, TopologyState},
topology::Topology,
};
type ScoreVec<T> = Vec<Box<dyn Score<T>>>;
@@ -17,7 +15,7 @@ pub struct Maestro<T: Topology> {
inventory: Inventory,
topology: T,
scores: Arc<RwLock<ScoreVec<T>>>,
topology_state: TopologyState,
topology_preparation_result: Mutex<Option<Outcome>>,
}
impl<T: Topology> Maestro<T> {
@@ -25,46 +23,36 @@ impl<T: Topology> Maestro<T> {
///
/// This should rarely be used. Most of the time Maestro::initialize should be used instead.
pub fn new_without_initialization(inventory: Inventory, topology: T) -> Self {
let topology_name = topology.name().to_string();
Self {
inventory,
topology,
scores: Arc::new(RwLock::new(Vec::new())),
topology_state: TopologyState::new(topology_name),
topology_preparation_result: None.into(),
}
}
pub async fn initialize(inventory: Inventory, topology: T) -> Result<Self, PreparationError> {
let mut instance = Self::new_without_initialization(inventory, topology);
pub async fn initialize(inventory: Inventory, topology: T) -> Result<Self, InterpretError> {
let instance = Self::new_without_initialization(inventory, topology);
instance.prepare_topology().await?;
Ok(instance)
}
/// Ensures the associated Topology is ready for operations.
/// Delegates the readiness check and potential setup actions to the Topology.
async fn prepare_topology(&mut self) -> Result<PreparationOutcome, PreparationError> {
self.topology_state.prepare();
pub async fn prepare_topology(&self) -> Result<Outcome, InterpretError> {
info!("Ensuring topology '{}' is ready...", self.topology.name());
let outcome = self.topology.ensure_ready().await?;
info!(
"Topology '{}' readiness check complete: {}",
self.topology.name(),
outcome.status
);
let result = self.topology.ensure_ready().await;
match result {
Ok(outcome) => {
match outcome.clone() {
PreparationOutcome::Success { details } => {
self.topology_state.success(details);
}
PreparationOutcome::Noop => {
self.topology_state.noop();
}
};
Ok(outcome)
}
Err(err) => {
self.topology_state.error(err.to_string());
Err(err)
}
}
self.topology_preparation_result
.lock()
.unwrap()
.replace(outcome.clone());
Ok(outcome)
}
pub fn register_all(&mut self, mut scores: ScoreVec<T>) {
@@ -73,8 +61,15 @@ impl<T: Topology> Maestro<T> {
}
fn is_topology_initialized(&self) -> bool {
self.topology_state.status == TopologyStatus::Success
|| self.topology_state.status == TopologyStatus::Noop
let result = self.topology_preparation_result.lock().unwrap();
if let Some(outcome) = result.as_ref() {
match outcome.status {
InterpretStatus::SUCCESS => return true,
_ => return false,
}
} else {
false
}
}
pub async fn interpret(&self, score: Box<dyn Score<T>>) -> Result<Outcome, InterpretError> {
@@ -85,9 +80,11 @@ impl<T: Topology> Maestro<T> {
self.topology.name(),
);
}
debug!("Interpreting score {score:?}");
let result = score.interpret(&self.inventory, &self.topology).await;
debug!("Got result {result:?}");
info!("Running score {score:?}");
let interpret = score.create_interpret();
info!("Launching interpret {interpret:?}");
let result = interpret.execute(&self.inventory, &self.topology).await;
info!("Got result {result:?}");
result
}
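Condensed from the updated examples earlier in this diff, the new entry-point flow is: initialize a Maestro (which prepares the topology up front), register scores, then hand off to the CLI. Sketch only, with an illustrative tenant id and name:
use harmony::{
    inventory::Inventory,
    maestro::Maestro,
    modules::tenant::TenantScore,
    topology::{K8sAnywhereTopology, tenant::TenantConfig},
};
use harmony_types::id::Id;
#[tokio::main]
async fn main() {
    // 1. initialize() runs prepare_topology() once, before any score is interpreted.
    let mut maestro = Maestro::initialize(Inventory::autoload(), K8sAnywhereTopology::from_env())
        .await
        .unwrap();
    // 2. Queue the scores to run.
    maestro.register_all(vec![Box::new(TenantScore {
        config: TenantConfig {
            id: Id::from_str("demo-tenant"),
            name: "demo".to_string(),
            ..Default::default()
        },
    })]);
    // 3. The CLI drives interpret() for each registered score.
    harmony_cli::init(maestro, None).await.unwrap();
}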

View File

@@ -3,7 +3,6 @@ pub mod data;
pub mod executors;
pub mod filter;
pub mod hardware;
pub mod instrumentation;
pub mod interpret;
pub mod inventory;
pub mod maestro;

View File

@@ -1,62 +1,22 @@
use harmony_types::id::Id;
use std::collections::BTreeMap;
use async_trait::async_trait;
use serde::Serialize;
use serde_value::Value;
use super::{
instrumentation::{self, HarmonyEvent},
interpret::{Interpret, InterpretError, Outcome},
inventory::Inventory,
topology::Topology,
};
use super::{interpret::Interpret, topology::Topology};
#[async_trait]
pub trait Score<T: Topology>:
std::fmt::Debug + ScoreToString<T> + Send + Sync + CloneBoxScore<T> + SerializeScore<T>
{
async fn interpret(
&self,
inventory: &Inventory,
topology: &T,
) -> Result<Outcome, InterpretError> {
let id = Id::default();
let interpret = self.create_interpret();
instrumentation::instrument(HarmonyEvent::InterpretExecutionStarted {
execution_id: id.clone().to_string(),
topology: topology.name().into(),
interpret: interpret.get_name().to_string(),
score: self.name(),
message: format!("{} running...", interpret.get_name()),
})
.unwrap();
let result = interpret.execute(inventory, topology).await;
instrumentation::instrument(HarmonyEvent::InterpretExecutionFinished {
execution_id: id.clone().to_string(),
topology: topology.name().into(),
interpret: interpret.get_name().to_string(),
score: self.name(),
outcome: result.clone(),
})
.unwrap();
result
}
fn name(&self) -> String;
#[doc(hidden)]
fn create_interpret(&self) -> Box<dyn Interpret<T>>;
fn name(&self) -> String;
}
pub trait SerializeScore<T: Topology> {
fn serialize(&self) -> Value;
}
impl<S, T> SerializeScore<T> for S
impl<'de, S, T> SerializeScore<T> for S
where
T: Topology,
S: Score<T> + Serialize,
@@ -64,7 +24,7 @@ where
fn serialize(&self) -> Value {
// TODO not sure if this is the right place to handle the error or it should bubble
// up?
serde_value::to_value(self).expect("Score should serialize successfully")
serde_value::to_value(&self).expect("Score should serialize successfully")
}
}

View File

@@ -1,13 +1,15 @@
use async_trait::async_trait;
use harmony_macros::ip;
use harmony_types::net::MacAddress;
use harmony_types::net::Url;
use log::debug;
use log::error;
use log::info;
use crate::data::FileContent;
use crate::config::Config;
use crate::executors::ExecutorError;
use crate::topology::PxeOptions;
use crate::interpret::InterpretError;
use crate::interpret::Outcome;
use crate::inventory::Inventory;
use crate::topology::upgradeable::Upgradeable;
use super::DHCPStaticEntry;
use super::DhcpServer;
@@ -21,16 +23,18 @@ use super::K8sclient;
use super::LoadBalancer;
use super::LoadBalancerService;
use super::LogicalHost;
use super::PreparationError;
use super::PreparationOutcome;
use super::Router;
use super::TftpServer;
use super::Topology;
use super::Url;
use super::k8s::K8sClient;
use std::fmt::Debug;
use std::net::IpAddr;
use std::str::FromStr;
use std::sync::Arc;
#[derive(Debug, Clone)]
#[derive(Clone, Debug)]
pub struct HAClusterTopology {
pub domain_name: String,
pub router: Arc<dyn Router>,
@@ -51,11 +55,16 @@ impl Topology for HAClusterTopology {
fn name(&self) -> &str {
"HAClusterTopology"
}
async fn ensure_ready(&self) -> Result<PreparationOutcome, PreparationError> {
debug!(
async fn ensure_ready(&self) -> Result<Outcome, InterpretError> {
error!(
"ensure_ready, not entirely sure what it should do here, probably something like verify that the hosts are reachable and all services are up and ready."
);
Ok(PreparationOutcome::Noop)
let config = Config::load().expect("couldn't load config");
if config.run_upgrades {
self.upgrade(&Inventory::empty(), self).await?;
}
Ok(Outcome::success("for now do nothing".to_string()))
}
}
@@ -157,18 +166,12 @@ impl DhcpServer for HAClusterTopology {
async fn list_static_mappings(&self) -> Vec<(MacAddress, IpAddress)> {
self.dhcp_server.list_static_mappings().await
}
async fn set_pxe_options(&self, options: PxeOptions) -> Result<(), ExecutorError> {
self.dhcp_server.set_pxe_options(options).await
async fn set_next_server(&self, ip: IpAddress) -> Result<(), ExecutorError> {
self.dhcp_server.set_next_server(ip).await
}
async fn set_dhcp_range(
&self,
start: &IpAddress,
end: &IpAddress,
) -> Result<(), ExecutorError> {
self.dhcp_server.set_dhcp_range(start, end).await
async fn set_boot_filename(&self, boot_filename: &str) -> Result<(), ExecutorError> {
self.dhcp_server.set_boot_filename(boot_filename).await
}
fn get_ip(&self) -> IpAddress {
self.dhcp_server.get_ip()
}
@@ -178,6 +181,16 @@ impl DhcpServer for HAClusterTopology {
async fn commit_config(&self) -> Result<(), ExecutorError> {
self.dhcp_server.commit_config().await
}
async fn set_filename(&self, filename: &str) -> Result<(), ExecutorError> {
self.dhcp_server.set_filename(filename).await
}
async fn set_filename64(&self, filename64: &str) -> Result<(), ExecutorError> {
self.dhcp_server.set_filename64(filename64).await
}
async fn set_filenameipxe(&self, filenameipxe: &str) -> Result<(), ExecutorError> {
self.dhcp_server.set_filenameipxe(filenameipxe).await
}
}
#[async_trait]
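The three new setters mirror the per-boot-mode filename fields added for OPNSense 25.1. A hypothetical provisioning sequence, assuming a dhcp value implementing DhcpServer is in scope and using the ip macro imported at the top of this file (file names and addresses are placeholders):
// Hypothetical call sequence, not part of this diff.
dhcp.set_next_server(ip!("192.168.1.10")).await?;
dhcp.set_filename("undionly.kpxe").await?; // presumably legacy BIOS PXE clients
dhcp.set_filename64("ipxe.efi").await?; // presumably 64-bit UEFI clients
dhcp.set_filenameipxe("http://192.168.1.10/boot.ipxe").await?; // clients already running iPXE
dhcp.commit_config().await?;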
@@ -221,21 +234,17 @@ impl HttpServer for HAClusterTopology {
self.http_server.serve_files(url).await
}
async fn serve_file_content(&self, file: &FileContent) -> Result<(), ExecutorError> {
self.http_server.serve_file_content(file).await
}
fn get_ip(&self) -> IpAddress {
self.http_server.get_ip()
unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA)
}
async fn ensure_initialized(&self) -> Result<(), ExecutorError> {
self.http_server.ensure_initialized().await
unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA)
}
async fn commit_config(&self) -> Result<(), ExecutorError> {
self.http_server.commit_config().await
unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA)
}
async fn reload_restart(&self) -> Result<(), ExecutorError> {
self.http_server.reload_restart().await
unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA)
}
}
@@ -245,15 +254,20 @@ pub struct DummyInfra;
#[async_trait]
impl Topology for DummyInfra {
fn name(&self) -> &str {
"DummyInfra"
todo!()
}
async fn ensure_ready(&self) -> Result<PreparationOutcome, PreparationError> {
async fn ensure_ready(&self) -> Result<Outcome, InterpretError> {
let dummy_msg = "This is a dummy infrastructure that does nothing";
info!("{dummy_msg}");
Ok(PreparationOutcome::Success {
details: dummy_msg.into(),
})
Ok(Outcome::success(dummy_msg.to_string()))
}
}
#[async_trait]
impl<T: Topology> Upgradeable<T> for DummyInfra {
async fn upgrade(&self, _inventory: &Inventory, _topology: &T) -> Result<(), InterpretError> {
unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA)
}
}
@@ -303,14 +317,19 @@ impl DhcpServer for DummyInfra {
async fn list_static_mappings(&self) -> Vec<(MacAddress, IpAddress)> {
unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA)
}
async fn set_pxe_options(&self, _options: PxeOptions) -> Result<(), ExecutorError> {
async fn set_next_server(&self, _ip: IpAddress) -> Result<(), ExecutorError> {
unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA)
}
async fn set_dhcp_range(
&self,
start: &IpAddress,
end: &IpAddress,
) -> Result<(), ExecutorError> {
async fn set_boot_filename(&self, _boot_filename: &str) -> Result<(), ExecutorError> {
unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA)
}
async fn set_filename(&self, _filename: &str) -> Result<(), ExecutorError> {
unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA)
}
async fn set_filename64(&self, _filename: &str) -> Result<(), ExecutorError> {
unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA)
}
async fn set_filenameipxe(&self, _filenameipxe: &str) -> Result<(), ExecutorError> {
unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA)
}
fn get_ip(&self) -> IpAddress {
@@ -380,9 +399,6 @@ impl HttpServer for DummyInfra {
async fn serve_files(&self, _url: &Url) -> Result<(), ExecutorError> {
unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA)
}
async fn serve_file_content(&self, _file: &FileContent) -> Result<(), ExecutorError> {
unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA)
}
fn get_ip(&self) -> IpAddress {
unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA)
}
@@ -421,3 +437,12 @@ impl DnsServer for DummyInfra {
unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA)
}
}
#[async_trait]
impl<T: Topology> Upgradeable<T> for HAClusterTopology {
async fn upgrade(&self, inventory: &Inventory, topology: &T) -> Result<(), InterpretError> {
error!("TODO implement upgrades for all parts of the cluster");
self.firewall.upgrade(inventory, topology).await?;
Ok(())
}
}
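The topology::upgradeable module itself is not part of this diff; judging from the two impl blocks above, the WIP trait presumably has roughly this shape (bounds may differ in the real definition):
// Inferred sketch only, reusing the imports already present in this file.
#[async_trait]
pub trait Upgradeable<T: Topology> {
    async fn upgrade(&self, inventory: &Inventory, topology: &T) -> Result<(), InterpretError>;
}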

View File

@@ -1,12 +1,11 @@
use crate::{data::FileContent, executors::ExecutorError};
use crate::executors::ExecutorError;
use async_trait::async_trait;
use harmony_types::net::IpAddress;
use harmony_types::net::Url;
use super::{IpAddress, Url};
#[async_trait]
pub trait HttpServer: Send + Sync {
async fn serve_files(&self, url: &Url) -> Result<(), ExecutorError>;
async fn serve_file_content(&self, file: &FileContent) -> Result<(), ExecutorError>;
fn get_ip(&self) -> IpAddress;
// async fn set_ip(&self, ip: IpAddress) -> Result<(), ExecutorError>;

View File

@@ -1,11 +1,12 @@
use derive_new::new;
use futures_util::StreamExt;
use k8s_openapi::{
ClusterResourceScope, NamespaceResourceScope,
api::{apps::v1::Deployment, core::v1::Pod},
};
use kube::{
Client, Config, Error, Resource,
api::{Api, AttachParams, DeleteParams, ListParams, Patch, PatchParams, ResourceExt},
api::{Api, AttachParams, ListParams, Patch, PatchParams, ResourceExt},
config::{KubeConfigOptions, Kubeconfig},
core::ErrorResponse,
runtime::reflector::Lookup,
@@ -16,25 +17,16 @@ use kube::{
runtime::wait::await_condition,
};
use log::{debug, error, trace};
use serde::{Serialize, de::DeserializeOwned};
use serde_json::json;
use similar::TextDiff;
use tokio::io::AsyncReadExt;
use serde::de::DeserializeOwned;
use similar::{DiffableStr, TextDiff};
use crate::config::Config as HarmonyConfig;
#[derive(new, Clone)]
pub struct K8sClient {
client: Client,
}
impl Serialize for K8sClient {
fn serialize<S>(&self, _serializer: S) -> Result<S::Ok, S::Error>
where
S: serde::Serializer,
{
todo!()
}
}
impl std::fmt::Debug for K8sClient {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
// This is a poor man's debug implementation for now as kube::Client does not provide much
@@ -53,66 +45,6 @@ impl K8sClient {
})
}
pub async fn get_deployment(
&self,
name: &str,
namespace: Option<&str>,
) -> Result<Option<Deployment>, Error> {
let deps: Api<Deployment> = if let Some(ns) = namespace {
Api::namespaced(self.client.clone(), ns)
} else {
Api::default_namespaced(self.client.clone())
};
Ok(deps.get_opt(name).await?)
}
pub async fn get_pod(&self, name: &str, namespace: Option<&str>) -> Result<Option<Pod>, Error> {
let pods: Api<Pod> = if let Some(ns) = namespace {
Api::namespaced(self.client.clone(), ns)
} else {
Api::default_namespaced(self.client.clone())
};
Ok(pods.get_opt(name).await?)
}
pub async fn scale_deployment(
&self,
name: &str,
namespace: Option<&str>,
replicas: u32,
) -> Result<(), Error> {
let deployments: Api<Deployment> = if let Some(ns) = namespace {
Api::namespaced(self.client.clone(), ns)
} else {
Api::default_namespaced(self.client.clone())
};
let patch = json!({
"spec": {
"replicas": replicas
}
});
let pp = PatchParams::default();
let scale = Patch::Apply(&patch);
deployments.patch_scale(name, &pp, &scale).await?;
Ok(())
}
pub async fn delete_deployment(
&self,
name: &str,
namespace: Option<&str>,
) -> Result<(), Error> {
let deployments: Api<Deployment> = if let Some(ns) = namespace {
Api::namespaced(self.client.clone(), ns)
} else {
Api::default_namespaced(self.client.clone())
};
let delete_params = DeleteParams::default();
deployments.delete(name, &delete_params).await?;
Ok(())
}
pub async fn wait_until_deployment_ready(
&self,
name: String,
@@ -128,78 +60,13 @@ impl K8sClient {
}
let establish = await_condition(api, name.as_str(), conditions::is_deployment_completed());
let t = timeout.unwrap_or(300);
let t = if let Some(t) = timeout { t } else { 300 };
let res = tokio::time::timeout(std::time::Duration::from_secs(t), establish).await;
if res.is_ok() {
Ok(())
if let Ok(r) = res {
return Ok(());
} else {
Err("timed out while waiting for deployment".to_string())
}
}
/// Will execute a commond in the first pod found that matches the specified label
/// '{label}={name}'
pub async fn exec_app_capture_output(
&self,
name: String,
label: String,
namespace: Option<&str>,
command: Vec<&str>,
) -> Result<String, String> {
let api: Api<Pod>;
if let Some(ns) = namespace {
api = Api::namespaced(self.client.clone(), ns);
} else {
api = Api::default_namespaced(self.client.clone());
}
let pod_list = api
.list(&ListParams::default().labels(format!("{label}={name}").as_str()))
.await
.expect("couldn't get list of pods");
let res = api
.exec(
pod_list
.items
.first()
.expect("couldn't get pod")
.name()
.expect("couldn't get pod name")
.into_owned()
.as_str(),
command,
&AttachParams::default().stdout(true).stderr(true),
)
.await;
match res {
Err(e) => Err(e.to_string()),
Ok(mut process) => {
let status = process
.take_status()
.expect("Couldn't get status")
.await
.expect("Couldn't unwrap status");
if let Some(s) = status.status {
let mut stdout_buf = String::new();
if let Some(mut stdout) = process.stdout().take() {
stdout
.read_to_string(&mut stdout_buf)
.await
.map_err(|e| format!("Failed to get status stdout {e}"))?;
}
debug!("Status: {} - {:?}", s, status.details);
if s == "Success" {
Ok(stdout_buf)
} else {
Err(s)
}
} else {
Err("Couldn't get inner status of pod exec".to_string())
}
}
return Err("timed out while waiting for deployment".to_string());
}
}
@@ -238,7 +105,7 @@ impl K8sClient {
.await;
match res {
Err(e) => Err(e.to_string()),
Err(e) => return Err(e.to_string()),
Ok(mut process) => {
let status = process
.take_status()
@@ -247,10 +114,14 @@ impl K8sClient {
.expect("Couldn't unwrap status");
if let Some(s) = status.status {
debug!("Status: {} - {:?}", s, status.details);
if s == "Success" { Ok(()) } else { Err(s) }
debug!("Status: {}", s);
if s == "Success" {
return Ok(());
} else {
return Err(s);
}
} else {
Err("Couldn't get inner status of pod exec".to_string())
return Err("Couldn't get inner status of pod exec".to_string());
}
}
}
@@ -285,15 +156,16 @@ impl K8sClient {
.as_ref()
.expect("K8s Resource should have a name");
if *crate::config::DRY_RUN {
let config = HarmonyConfig::load().expect("couldn't load config");
if config.dry_run {
match api.get(name).await {
Ok(current) => {
trace!("Received current value {current:#?}");
// The resource exists, so we calculate and display a diff.
println!("\nPerforming dry-run for resource: '{}'", name);
let mut current_yaml = serde_yaml::to_value(&current).unwrap_or_else(|_| {
panic!("Could not serialize current value : {current:#?}")
});
let mut current_yaml = serde_yaml::to_value(&current)
.expect(&format!("Could not serialize current value : {current:#?}"));
if current_yaml.is_mapping() && current_yaml.get("status").is_some() {
let map = current_yaml.as_mapping_mut().unwrap();
let removed = map.remove_entry("status");
@@ -360,7 +232,7 @@ impl K8sClient {
}
}
pub async fn apply_many<K>(&self, resource: &[K], ns: Option<&str>) -> Result<Vec<K>, Error>
pub async fn apply_many<K>(&self, resource: &Vec<K>, ns: Option<&str>) -> Result<Vec<K>, Error>
where
K: Resource + Clone + std::fmt::Debug + DeserializeOwned + serde::Serialize,
<K as Resource>::Scope: ApplyStrategy<K>,
@@ -376,7 +248,7 @@ impl K8sClient {
pub async fn apply_yaml_many(
&self,
#[allow(clippy::ptr_arg)] yaml: &Vec<serde_yaml::Value>,
yaml: &Vec<serde_yaml::Value>,
ns: Option<&str>,
) -> Result<(), Error> {
for y in yaml.iter() {

View File

@@ -1,46 +1,31 @@
use std::{process::Command, sync::Arc};
use async_trait::async_trait;
use figment::{Figment, providers::Env};
use inquire::Confirm;
use log::{debug, info, warn};
use serde::Serialize;
use serde::{Deserialize, Serialize};
use tokio::sync::OnceCell;
use crate::{
executors::ExecutorError,
interpret::InterpretStatus,
interpret::{InterpretError, Outcome},
inventory::Inventory,
modules::{
k3d::K3DInstallationScore,
monitoring::kube_prometheus::crd::{
crd_alertmanager_config::CRDPrometheus,
prometheus_operator::prometheus_operator_helm_chart_score,
},
prometheus::{
k8s_prometheus_alerting_score::K8sPrometheusCRDAlertingScore,
prometheus::PrometheusApplicationMonitoring,
},
},
score::Score,
maestro::Maestro,
modules::k3d::K3DInstallationScore,
topology::LocalhostTopology,
};
use super::{
DeploymentTarget, HelmCommand, K8sclient, MultiTargetTopology, PreparationError,
PreparationOutcome, Topology,
DeploymentTarget, HelmCommand, K8sclient, MultiTargetTopology, Topology,
k8s::K8sClient,
oberservability::monitoring::AlertReceiver,
tenant::{
TenantConfig, TenantManager,
k8s::K8sTenantManager,
network_policy::{
K3dNetworkPolicyStrategy, NetworkPolicyStrategy, NoopNetworkPolicyStrategy,
},
},
tenant::{TenantConfig, TenantManager, k8s::K8sTenantManager},
};
#[derive(Clone, Debug)]
struct K8sState {
client: Arc<K8sClient>,
source: K8sSource,
_source: K8sSource,
message: String,
}
@@ -74,42 +59,8 @@ impl K8sclient for K8sAnywhereTopology {
}
}
#[async_trait]
impl PrometheusApplicationMonitoring<CRDPrometheus> for K8sAnywhereTopology {
async fn install_prometheus(
&self,
sender: &CRDPrometheus,
inventory: &Inventory,
receivers: Option<Vec<Box<dyn AlertReceiver<CRDPrometheus>>>>,
) -> Result<PreparationOutcome, PreparationError> {
let po_result = self.ensure_prometheus_operator(sender).await?;
if po_result == PreparationOutcome::Noop {
debug!("Skipping Prometheus CR installation due to missing operator.");
return Ok(po_result);
}
let result = self
.get_k8s_prometheus_application_score(sender.clone(), receivers)
.await
.interpret(inventory, self)
.await;
match result {
Ok(outcome) => match outcome.status {
InterpretStatus::SUCCESS => Ok(PreparationOutcome::Success {
details: outcome.message,
}),
InterpretStatus::NOOP => Ok(PreparationOutcome::Noop),
_ => Err(PreparationError::new(outcome.message)),
},
Err(err) => Err(PreparationError::new(err.to_string())),
}
}
}
impl Serialize for K8sAnywhereTopology {
fn serialize<S>(&self, _serializer: S) -> Result<S::Ok, S::Error>
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: serde::Serializer,
{
@@ -134,19 +85,6 @@ impl K8sAnywhereTopology {
}
}
async fn get_k8s_prometheus_application_score(
&self,
sender: CRDPrometheus,
receivers: Option<Vec<Box<dyn AlertReceiver<CRDPrometheus>>>>,
) -> K8sPrometheusCRDAlertingScore {
K8sPrometheusCRDAlertingScore {
sender,
receivers: receivers.unwrap_or_default(),
service_monitors: vec![],
prometheus_rules: vec![],
}
}
fn is_helm_available(&self) -> Result<(), String> {
let version_result = Command::new("helm")
.arg("version")
@@ -157,8 +95,9 @@ impl K8sAnywhereTopology {
return Err("Failed to run 'helm -version'".to_string());
}
// Print the version output
let version_output = String::from_utf8_lossy(&version_result.stdout);
debug!("Helm version: {}", version_output.trim());
println!("Helm version: {}", version_output.trim());
Ok(())
}
@@ -175,42 +114,33 @@ impl K8sAnywhereTopology {
K3DInstallationScore::default()
}
async fn try_install_k3d(&self) -> Result<(), PreparationError> {
let result = self
.get_k3d_installation_score()
.interpret(&Inventory::empty(), self)
.await;
match result {
Ok(outcome) => match outcome.status {
InterpretStatus::SUCCESS => Ok(()),
InterpretStatus::NOOP => Ok(()),
_ => Err(PreparationError::new(outcome.message)),
},
Err(err) => Err(PreparationError::new(err.to_string())),
}
async fn try_install_k3d(&self) -> Result<(), InterpretError> {
let maestro = Maestro::initialize(Inventory::autoload(), LocalhostTopology::new()).await?;
let k3d_score = self.get_k3d_installation_score();
maestro.interpret(Box::new(k3d_score)).await?;
Ok(())
}
async fn try_get_or_install_k8s_client(&self) -> Result<Option<K8sState>, PreparationError> {
async fn try_get_or_install_k8s_client(&self) -> Result<Option<K8sState>, InterpretError> {
let k8s_anywhere_config = &self.config;
// TODO this deserves some refactoring, it is becoming a bit hard to figure out
// be careful when making modifications here
if k8s_anywhere_config.use_local_k3d {
debug!("Using local k3d cluster because of use_local_k3d set to true");
info!("Using local k3d cluster because of use_local_k3d set to true");
} else {
if let Some(kubeconfig) = &k8s_anywhere_config.kubeconfig {
debug!("Loading kubeconfig {kubeconfig}");
match self.try_load_kubeconfig(kubeconfig).await {
match self.try_load_kubeconfig(&kubeconfig).await {
Some(client) => {
return Ok(Some(K8sState {
client: Arc::new(client),
source: K8sSource::Kubeconfig,
_source: K8sSource::Kubeconfig,
message: format!("Loaded k8s client from kubeconfig {kubeconfig}"),
}));
}
None => {
return Err(PreparationError::new(format!(
return Err(InterpretError::new(format!(
"Failed to load kubeconfig from {kubeconfig}"
)));
}
@@ -229,13 +159,22 @@ impl K8sAnywhereTopology {
}
if !k8s_anywhere_config.autoinstall {
warn!(
"Installation cancelled, K8sAnywhere could not initialize a valid Kubernetes client"
);
return Ok(None);
debug!("Autoinstall confirmation prompt");
let confirmation = Confirm::new( "Harmony autoinstallation is not activated, do you wish to launch autoinstallation? : ")
.with_default(false)
.prompt()
.expect("Unexpected prompt error");
debug!("Autoinstall confirmation {confirmation}");
if !confirmation {
warn!(
"Installation cancelled, K8sAnywhere could not initialize a valid Kubernetes client"
);
return Ok(None);
}
}
debug!("Starting K8sAnywhere installation");
info!("Starting K8sAnywhere installation");
self.try_install_k3d().await?;
let k3d_score = self.get_k3d_installation_score();
// I feel like having to rely on the k3d_rs crate here is a smell
@@ -247,8 +186,8 @@ impl K8sAnywhereTopology {
let state = match k3d.get_client().await {
Ok(client) => K8sState {
client: Arc::new(K8sClient::new(client)),
source: K8sSource::LocalK3d,
message: "K8s client ready".to_string(),
_source: K8sSource::LocalK3d,
message: "Successfully installed K3D cluster and acquired client".to_string(),
},
Err(_) => todo!(),
};
@@ -256,21 +195,15 @@ impl K8sAnywhereTopology {
Ok(Some(state))
}
async fn ensure_k8s_tenant_manager(&self, k8s_state: &K8sState) -> Result<(), String> {
if self.tenant_manager.get().is_some() {
async fn ensure_k8s_tenant_manager(&self) -> Result<(), String> {
if let Some(_) = self.tenant_manager.get() {
return Ok(());
}
self.tenant_manager
.get_or_try_init(async || -> Result<K8sTenantManager, String> {
let k8s_client = self.k8s_client().await?;
let network_policy_strategy: Box<dyn NetworkPolicyStrategy> = match k8s_state.source
{
K8sSource::LocalK3d => Box::new(K3dNetworkPolicyStrategy::new()),
K8sSource::Kubeconfig => Box::new(NoopNetworkPolicyStrategy::new()),
};
Ok(K8sTenantManager::new(k8s_client, network_policy_strategy))
Ok(K8sTenantManager::new(k8s_client))
})
.await?;
@@ -285,58 +218,9 @@ impl K8sAnywhereTopology {
)),
}
}
async fn ensure_prometheus_operator(
&self,
sender: &CRDPrometheus,
) -> Result<PreparationOutcome, PreparationError> {
let status = Command::new("sh")
.args(["-c", "kubectl get crd -A | grep -i prometheuses"])
.status()
.map_err(|e| PreparationError::new(format!("could not connect to cluster: {}", e)))?;
if !status.success() {
if let Some(Some(k8s_state)) = self.k8s_state.get() {
match k8s_state.source {
K8sSource::LocalK3d => {
debug!("installing prometheus operator");
let op_score =
prometheus_operator_helm_chart_score(sender.namespace.clone());
let result = op_score.interpret(&Inventory::empty(), self).await;
return match result {
Ok(outcome) => match outcome.status {
InterpretStatus::SUCCESS => Ok(PreparationOutcome::Success {
details: "installed prometheus operator".into(),
}),
InterpretStatus::NOOP => Ok(PreparationOutcome::Noop),
_ => Err(PreparationError::new(
"failed to install prometheus operator (unknown error)".into(),
)),
},
Err(err) => Err(PreparationError::new(err.to_string())),
};
}
K8sSource::Kubeconfig => {
debug!("unable to install prometheus operator, contact cluster admin");
return Ok(PreparationOutcome::Noop);
}
}
} else {
warn!("Unable to detect k8s_state. Skipping Prometheus Operator install.");
return Ok(PreparationOutcome::Noop);
}
}
debug!("Prometheus operator is already present, skipping install");
Ok(PreparationOutcome::Success {
details: "prometheus operator present in cluster".into(),
})
}
}
#[derive(Clone, Debug)]
#[derive(Clone, Debug, Deserialize)]
pub struct K8sAnywhereConfig {
/// The path of the KUBECONFIG file that Harmony should use to interact with the Kubernetes
/// cluster
@@ -354,7 +238,7 @@ pub struct K8sAnywhereConfig {
///
/// When enabled, autoinstall will setup a K3D cluster on the localhost. https://k3d.io/stable/
///
/// Default: true
/// Default: false
pub autoinstall: bool,
/// Whether to use local k3d cluster.
@@ -363,25 +247,29 @@ pub struct K8sAnywhereConfig {
///
/// default: true
pub use_local_k3d: bool,
pub harmony_profile: String,
pub profile: String,
}
impl Default for K8sAnywhereConfig {
fn default() -> Self {
Self {
kubeconfig: None,
use_system_kubeconfig: false,
autoinstall: false,
// TODO harmony_profile should be managed at a more core level than this
profile: "dev".to_string(),
use_local_k3d: true,
}
}
}
impl K8sAnywhereConfig {
fn from_env() -> Self {
Self {
kubeconfig: std::env::var("KUBECONFIG").ok().map(|v| v.to_string()),
use_system_kubeconfig: std::env::var("HARMONY_USE_SYSTEM_KUBECONFIG")
.map_or_else(|_| false, |v| v.parse().ok().unwrap_or(false)),
autoinstall: std::env::var("HARMONY_AUTOINSTALL")
.map_or_else(|_| true, |v| v.parse().ok().unwrap_or(false)),
// TODO harmony_profile should be managed at a more core level than this
harmony_profile: std::env::var("HARMONY_PROFILE").map_or_else(
|_| "dev".to_string(),
|v| v.parse().ok().unwrap_or("dev".to_string()),
),
use_local_k3d: std::env::var("HARMONY_USE_LOCAL_K3D")
.map_or_else(|_| true, |v| v.parse().ok().unwrap_or(true)),
}
Figment::new()
.merge(Env::prefixed("HARMONY_"))
.merge(Env::raw().only(&["KUBECONFIG"]))
.extract()
.expect("couldn't load config from env")
}
}
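For readers unfamiliar with the Figment call chain above, here is a minimal, self-contained sketch of the same env-extraction pattern (illustrative only, not part of this changeset). It assumes the `figment` crate with its `env` feature plus `serde`; `DemoConfig` and its fields are hypothetical stand-ins, not the real `K8sAnywhereConfig`.

```rust
use figment::{Figment, providers::Env};
use serde::Deserialize;

// Hypothetical stand-in for K8sAnywhereConfig.
#[derive(Debug, Deserialize)]
struct DemoConfig {
    autoinstall: Option<bool>,  // read from HARMONY_AUTOINSTALL
    profile: Option<String>,    // read from HARMONY_PROFILE
    kubeconfig: Option<String>, // read from the unprefixed KUBECONFIG
}

fn main() {
    let config: DemoConfig = Figment::new()
        // Strips the prefix and lowercases the key: HARMONY_PROFILE -> "profile".
        .merge(Env::prefixed("HARMONY_"))
        // Pulls KUBECONFIG through as-is, without requiring the HARMONY_ prefix.
        .merge(Env::raw().only(&["KUBECONFIG"]))
        .extract()
        .expect("couldn't load config from env");
    println!("{config:?}");
}
```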
@@ -391,25 +279,26 @@ impl Topology for K8sAnywhereTopology {
"K8sAnywhereTopology"
}
async fn ensure_ready(&self) -> Result<PreparationOutcome, PreparationError> {
async fn ensure_ready(&self) -> Result<Outcome, InterpretError> {
let k8s_state = self
.k8s_state
.get_or_try_init(|| self.try_get_or_install_k8s_client())
.await?;
let k8s_state: &K8sState = k8s_state.as_ref().ok_or(PreparationError::new(
"no K8s client could be found or installed".to_string(),
let k8s_state: &K8sState = k8s_state.as_ref().ok_or(InterpretError::new(
"No K8s client could be found or installed".to_string(),
))?;
self.ensure_k8s_tenant_manager(k8s_state)
self.ensure_k8s_tenant_manager()
.await
.map_err(PreparationError::new)?;
.map_err(|e| InterpretError::new(e))?;
match self.is_helm_available() {
Ok(()) => Ok(PreparationOutcome::Success {
details: format!("{} + helm available", k8s_state.message.clone()),
}),
Err(e) => Err(PreparationError::new(format!("helm unavailable: {}", e))),
Ok(()) => Ok(Outcome::success(format!(
"{} + helm available",
k8s_state.message.clone()
))),
Err(e) => Err(InterpretError::new(format!("helm unavailable: {}", e))),
}
}
}
@@ -420,7 +309,7 @@ impl MultiTargetTopology for K8sAnywhereTopology {
return DeploymentTarget::LocalDev;
}
match self.config.harmony_profile.to_lowercase().as_str() {
match self.config.profile.to_lowercase().as_str() {
"staging" => DeploymentTarget::Staging,
"production" => DeploymentTarget::Production,
_ => todo!("HARMONY_PROFILE must be set when use_local_k3d is not set"),

View File

@@ -4,9 +4,8 @@ use async_trait::async_trait;
use log::debug;
use serde::Serialize;
use super::LogicalHost;
use super::{IpAddress, LogicalHost};
use crate::executors::ExecutorError;
use harmony_types::net::IpAddress;
impl std::fmt::Debug for dyn LoadBalancer {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {

View File

@@ -1,7 +1,9 @@
use async_trait::async_trait;
use derive_new::new;
use super::{HelmCommand, PreparationError, PreparationOutcome, Topology};
use crate::interpret::{InterpretError, Outcome};
use super::{HelmCommand, Topology};
#[derive(new)]
pub struct LocalhostTopology;
@@ -12,10 +14,10 @@ impl Topology for LocalhostTopology {
"LocalHostTopology"
}
async fn ensure_ready(&self) -> Result<PreparationOutcome, PreparationError> {
Ok(PreparationOutcome::Success {
details: "Localhost is Chuck Norris, always ready.".into(),
})
async fn ensure_ready(&self) -> Result<Outcome, InterpretError> {
Ok(Outcome::success(
"Localhost is Chuck Norris, always ready.".to_string(),
))
}
}

View File

@@ -1,5 +1,4 @@
mod ha_cluster;
use harmony_types::net::IpAddress;
mod host_binding;
mod http;
pub mod installable;
@@ -7,7 +6,7 @@ mod k8s_anywhere;
mod localhost;
pub mod oberservability;
pub mod tenant;
use derive_new::new;
pub mod upgradeable;
pub use k8s_anywhere::*;
pub use localhost::*;
pub mod k8s;
@@ -28,11 +27,9 @@ pub use tftp::*;
mod helm_command;
pub use helm_command::*;
use super::{
executors::ExecutorError,
instrumentation::{self, HarmonyEvent},
};
use std::error::Error;
use std::net::IpAddr;
use super::interpret::{InterpretError, Outcome};
/// Represents a logical view of an infrastructure environment providing specific capabilities.
///
@@ -61,128 +58,9 @@ pub trait Topology: Send + Sync {
/// * **Internal Orchestration:** For complex topologies, this method might manage dependencies on other sub-topologies, ensuring *their* `ensure_ready` is called first. Using nested `Maestros` to run setup `Scores` against these sub-topologies is the recommended pattern for non-trivial bootstrapping, allowing reuse of Harmony's core orchestration logic.
///
/// # Returns
/// - `Ok(PreparationOutcome)`: Indicates the topology is now ready. The `Outcome` status might be `SUCCESS` if actions were taken, or `NOOP` if it was already ready. The message should provide context.
/// - `Err(PreparationError)`: Indicates the topology could not reach a ready state due to configuration issues, discovery failures, bootstrap errors, or unsupported environments.
async fn ensure_ready(&self) -> Result<PreparationOutcome, PreparationError>;
}
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum PreparationOutcome {
Success { details: String },
Noop,
}
#[derive(Debug, Clone, new)]
pub struct PreparationError {
msg: String,
}
impl std::fmt::Display for PreparationError {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.write_str(&self.msg)
}
}
impl Error for PreparationError {}
impl From<ExecutorError> for PreparationError {
fn from(value: ExecutorError) -> Self {
Self {
msg: format!("InterpretError : {value}"),
}
}
}
impl From<kube::Error> for PreparationError {
fn from(value: kube::Error) -> Self {
Self {
msg: format!("PreparationError : {value}"),
}
}
}
impl From<String> for PreparationError {
fn from(value: String) -> Self {
Self {
msg: format!("PreparationError : {value}"),
}
}
}
#[derive(Clone, Debug, PartialEq)]
pub enum TopologyStatus {
Queued,
Preparing,
Success,
Noop,
Error,
}
pub struct TopologyState {
pub topology: String,
pub status: TopologyStatus,
}
impl TopologyState {
pub fn new(topology: String) -> Self {
let instance = Self {
topology,
status: TopologyStatus::Queued,
};
instrumentation::instrument(HarmonyEvent::TopologyStateChanged {
topology: instance.topology.clone(),
status: instance.status.clone(),
message: None,
})
.unwrap();
instance
}
pub fn prepare(&mut self) {
self.status = TopologyStatus::Preparing;
instrumentation::instrument(HarmonyEvent::TopologyStateChanged {
topology: self.topology.clone(),
status: self.status.clone(),
message: None,
})
.unwrap();
}
pub fn success(&mut self, message: String) {
self.status = TopologyStatus::Success;
instrumentation::instrument(HarmonyEvent::TopologyStateChanged {
topology: self.topology.clone(),
status: self.status.clone(),
message: Some(message),
})
.unwrap();
}
pub fn noop(&mut self) {
self.status = TopologyStatus::Noop;
instrumentation::instrument(HarmonyEvent::TopologyStateChanged {
topology: self.topology.clone(),
status: self.status.clone(),
message: None,
})
.unwrap();
}
pub fn error(&mut self, message: String) {
self.status = TopologyStatus::Error;
instrumentation::instrument(HarmonyEvent::TopologyStateChanged {
topology: self.topology.clone(),
status: self.status.clone(),
message: Some(message),
})
.unwrap();
}
/// - `Ok(Outcome)`: Indicates the topology is now ready. The `Outcome` status might be `SUCCESS` if actions were taken, or `NOOP` if it was already ready. The message should provide context.
/// - `Err(InterpretError)`: Indicates the topology could not reach a ready state due to configuration issues, discovery failures, bootstrap errors, or unsupported environments.
async fn ensure_ready(&self) -> Result<Outcome, InterpretError>;
}
#[derive(Debug)]
@@ -196,6 +74,35 @@ pub trait MultiTargetTopology: Topology {
fn current_target(&self) -> DeploymentTarget;
}
pub type IpAddress = IpAddr;
#[derive(Debug, Clone)]
pub enum Url {
LocalFolder(String),
Url(url::Url),
}
impl Serialize for Url {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: serde::Serializer,
{
match self {
Url::LocalFolder(path) => serializer.serialize_str(path),
Url::Url(url) => serializer.serialize_str(&url.as_str()),
}
}
}
impl std::fmt::Display for Url {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
Url::LocalFolder(path) => write!(f, "{}", path),
Url::Url(url) => write!(f, "{}", url),
}
}
}
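A quick, illustrative exercise of the two `Url` variants defined above (the path and URL are placeholders); both `Display` and the manual `Serialize` impl render the inner value as a plain string:

```rust
use crate::topology::Url;

fn demo_urls() {
    let local = Url::LocalFolder("/srv/tftp".to_string());
    let remote = Url::Url(url::Url::parse("https://example.invalid/boot.ipxe").unwrap());
    // Display renders the inner value directly...
    println!("{local} {remote}");
    // ...and Serialize does the same, producing a JSON string.
    println!("{}", serde_json::to_string(&local).unwrap());
}
```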
/// Represents a logical member of a cluster that provides one or more services.
///
/// A LogicalHost can represent various roles within the infrastructure, such as:
@@ -234,8 +141,7 @@ impl LogicalHost {
///
/// ```
/// use std::str::FromStr;
/// use harmony::topology::{LogicalHost};
/// use harmony_types::net::IpAddress;
/// use harmony::topology::{IpAddress, LogicalHost};
///
/// let start_ip = IpAddress::from_str("192.168.0.20").unwrap();
/// let hosts = LogicalHost::create_hosts(3, start_ip, "worker");
@@ -291,7 +197,7 @@ fn increment_ip(ip: IpAddress, increment: u32) -> Option<IpAddress> {
#[cfg(test)]
mod tests {
use harmony_types::net::Url;
use super::*;
use serde_json;
#[test]

View File

@@ -1,12 +1,18 @@
use std::{net::Ipv4Addr, str::FromStr, sync::Arc};
use async_trait::async_trait;
use harmony_types::net::{IpAddress, MacAddress};
use harmony_types::net::MacAddress;
use log::debug;
use serde::Serialize;
use crate::executors::ExecutorError;
use crate::{
executors::ExecutorError,
interpret::InterpretError,
inventory::Inventory,
topology::{Topology, upgradeable::Upgradeable},
};
use super::{LogicalHost, k8s::K8sClient};
use super::{IpAddress, LogicalHost, k8s::K8sClient};
#[derive(Debug)]
pub struct DHCPStaticEntry {
@@ -38,6 +44,15 @@ impl std::fmt::Debug for dyn Firewall {
}
}
// #[async_trait]
// impl<T: Topology> Upgradeable<T> for dyn Firewall {
// async fn upgrade(&self, inventory: &Inventory, topology: &T) -> Result<(), InterpretError> {
// debug!("upgrading");
// self.upgrade(inventory, topology).await?;
// Ok(())
// }
// }
pub struct NetworkDomain {
pub name: String,
}
@@ -46,21 +61,16 @@ pub trait K8sclient: Send + Sync {
async fn k8s_client(&self) -> Result<Arc<K8sClient>, String>;
}
pub struct PxeOptions {
pub ipxe_filename: String,
pub bios_filename: String,
pub efi_filename: String,
pub tftp_ip: Option<IpAddress>,
}
#[async_trait]
pub trait DhcpServer: Send + Sync + std::fmt::Debug {
async fn add_static_mapping(&self, entry: &DHCPStaticEntry) -> Result<(), ExecutorError>;
async fn remove_static_mapping(&self, mac: &MacAddress) -> Result<(), ExecutorError>;
async fn list_static_mappings(&self) -> Vec<(MacAddress, IpAddress)>;
async fn set_pxe_options(&self, pxe_options: PxeOptions) -> Result<(), ExecutorError>;
async fn set_dhcp_range(&self, start: &IpAddress, end: &IpAddress)
-> Result<(), ExecutorError>;
async fn set_next_server(&self, ip: IpAddress) -> Result<(), ExecutorError>;
async fn set_boot_filename(&self, boot_filename: &str) -> Result<(), ExecutorError>;
async fn set_filename(&self, filename: &str) -> Result<(), ExecutorError>;
async fn set_filename64(&self, filename64: &str) -> Result<(), ExecutorError>;
async fn set_filenameipxe(&self, filenameipxe: &str) -> Result<(), ExecutorError>;
fn get_ip(&self) -> IpAddress;
fn get_host(&self) -> LogicalHost;
async fn commit_config(&self) -> Result<(), ExecutorError>;

View File

@@ -1,15 +1,12 @@
use std::any::Any;
use async_trait::async_trait;
use log::debug;
use crate::{
data::Version,
data::{Id, Version},
interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
inventory::Inventory,
topology::{Topology, installable::Installable},
};
use harmony_types::id::Id;
#[async_trait]
pub trait AlertSender: Send + Sync + std::fmt::Debug {
@@ -46,7 +43,7 @@ impl<S: AlertSender + Installable<T>, T: Topology> Interpret<T> for AlertingInte
}
fn get_name(&self) -> InterpretName {
InterpretName::Alerting
todo!()
}
fn get_version(&self) -> Version {
@@ -65,9 +62,7 @@ impl<S: AlertSender + Installable<T>, T: Topology> Interpret<T> for AlertingInte
#[async_trait]
pub trait AlertReceiver<S: AlertSender>: std::fmt::Debug + Send + Sync {
async fn install(&self, sender: &S) -> Result<Outcome, InterpretError>;
fn name(&self) -> String;
fn clone_box(&self) -> Box<dyn AlertReceiver<S>>;
fn as_any(&self) -> &dyn Any;
}
#[async_trait]
@@ -77,6 +72,6 @@ pub trait AlertRule<S: AlertSender>: std::fmt::Debug + Send + Sync {
}
#[async_trait]
pub trait ScrapeTarget<S: AlertSender> {
pub trait ScrapeTarger<S: AlertSender> {
async fn install(&self, sender: &S) -> Result<(), InterpretError>;
}

View File

@@ -27,11 +27,11 @@ pub struct UnmanagedRouter {
impl Router for UnmanagedRouter {
fn get_gateway(&self) -> IpAddress {
self.gateway
self.gateway.clone()
}
fn get_cidr(&self) -> Ipv4Cidr {
self.cidr
self.cidr.clone()
}
fn get_host(&self) -> LogicalHost {

View File

@@ -15,38 +15,36 @@ use k8s_openapi::{
apimachinery::pkg::util::intstr::IntOrString,
};
use kube::Resource;
use log::debug;
use log::{debug, info, warn};
use serde::de::DeserializeOwned;
use serde_json::json;
use tokio::sync::OnceCell;
use super::{TenantConfig, TenantManager, network_policy::NetworkPolicyStrategy};
use super::{TenantConfig, TenantManager};
#[derive(Debug)]
#[derive(Clone, Debug)]
pub struct K8sTenantManager {
k8s_client: Arc<K8sClient>,
k8s_tenant_config: Arc<OnceCell<TenantConfig>>,
network_policy_strategy: Box<dyn NetworkPolicyStrategy>,
}
impl K8sTenantManager {
pub fn new(
client: Arc<K8sClient>,
network_policy_strategy: Box<dyn NetworkPolicyStrategy>,
) -> Self {
pub fn new(client: Arc<K8sClient>) -> Self {
Self {
k8s_client: client,
k8s_tenant_config: Arc::new(OnceCell::new()),
network_policy_strategy,
}
}
}
impl K8sTenantManager {
fn get_namespace_name(&self, config: &TenantConfig) -> String {
config.name.clone()
}
fn ensure_constraints(&self, _namespace: &Namespace) -> Result<(), ExecutorError> {
// TODO: Ensure constraints are applied to namespace (https://git.nationtech.io/NationTech/harmony/issues/98)
warn!("Validate that when tenant already exists (by id) that name has not changed");
warn!("Make sure other Tenant constraints are respected by this k8s implementation");
Ok(())
}
@@ -221,6 +219,24 @@ impl K8sTenantManager {
}
]
},
{
"to": [
{
"ipBlock": {
"cidr": "10.43.0.1/32",
}
}
]
},
{
"to": [
{
"ipBlock": {
"cidr": "172.23.0.0/16",
}
}
]
},
{
"to": [
{
@@ -288,19 +304,19 @@ impl K8sTenantManager {
let ports: Option<Vec<NetworkPolicyPort>> =
c.1.as_ref().map(|spec| match &spec.data {
super::PortSpecData::SinglePort(port) => vec![NetworkPolicyPort {
port: Some(IntOrString::Int((*port).into())),
port: Some(IntOrString::Int(port.clone().into())),
..Default::default()
}],
super::PortSpecData::PortRange(start, end) => vec![NetworkPolicyPort {
port: Some(IntOrString::Int((*start).into())),
end_port: Some((*end).into()),
port: Some(IntOrString::Int(start.clone().into())),
end_port: Some(end.clone().into()),
protocol: None, // Not currently supported by Harmony
}],
super::PortSpecData::ListOfPorts(items) => items
.iter()
.map(|i| NetworkPolicyPort {
port: Some(IntOrString::Int((*i).into())),
port: Some(IntOrString::Int(i.clone().into())),
..Default::default()
})
.collect(),
@@ -345,19 +361,19 @@ impl K8sTenantManager {
let ports: Option<Vec<NetworkPolicyPort>> =
c.1.as_ref().map(|spec| match &spec.data {
super::PortSpecData::SinglePort(port) => vec![NetworkPolicyPort {
port: Some(IntOrString::Int((*port).into())),
port: Some(IntOrString::Int(port.clone().into())),
..Default::default()
}],
super::PortSpecData::PortRange(start, end) => vec![NetworkPolicyPort {
port: Some(IntOrString::Int((*start).into())),
end_port: Some((*end).into()),
port: Some(IntOrString::Int(start.clone().into())),
end_port: Some(end.clone().into()),
protocol: None, // Not currently supported by Harmony
}],
super::PortSpecData::ListOfPorts(items) => items
.iter()
.map(|i| NetworkPolicyPort {
port: Some(IntOrString::Int((*i).into())),
port: Some(IntOrString::Int(i.clone().into())),
..Default::default()
})
.collect(),
@@ -390,27 +406,12 @@ impl K8sTenantManager {
}
}
impl Clone for K8sTenantManager {
fn clone(&self) -> Self {
Self {
k8s_client: self.k8s_client.clone(),
k8s_tenant_config: self.k8s_tenant_config.clone(),
network_policy_strategy: self.network_policy_strategy.clone_box(),
}
}
}
#[async_trait]
impl TenantManager for K8sTenantManager {
async fn provision_tenant(&self, config: &TenantConfig) -> Result<(), ExecutorError> {
let namespace = self.build_namespace(config)?;
let resource_quota = self.build_resource_quota(config)?;
let network_policy = self.build_network_policy(config)?;
let network_policy = self
.network_policy_strategy
.adjust_policy(network_policy, config);
let resource_limit_range = self.build_limit_range(config)?;
self.ensure_constraints(&namespace)?;
@@ -427,14 +428,13 @@ impl TenantManager for K8sTenantManager {
debug!("Creating network_policy for tenant {}", config.name);
self.apply_resource(network_policy, config).await?;
debug!(
info!(
"Success provisionning K8s tenant id {} name {}",
config.id, config.name
);
self.store_config(config);
Ok(())
}
async fn get_tenant_config(&self) -> Option<TenantConfig> {
self.k8s_tenant_config.get().cloned()
}

View File

@@ -1,11 +1,11 @@
pub mod k8s;
mod manager;
pub mod network_policy;
use std::str::FromStr;
use harmony_types::id::Id;
pub use manager::*;
use serde::{Deserialize, Serialize};
use std::str::FromStr;
use crate::data::Id;
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] // Assuming serde for Scores
pub struct TenantConfig {

View File

@@ -1,120 +0,0 @@
use k8s_openapi::api::networking::v1::{
IPBlock, NetworkPolicy, NetworkPolicyEgressRule, NetworkPolicyPeer, NetworkPolicySpec,
};
use super::TenantConfig;
pub trait NetworkPolicyStrategy: Send + Sync + std::fmt::Debug {
fn clone_box(&self) -> Box<dyn NetworkPolicyStrategy>;
fn adjust_policy(&self, policy: NetworkPolicy, config: &TenantConfig) -> NetworkPolicy;
}
#[derive(Clone, Debug)]
pub struct NoopNetworkPolicyStrategy {}
impl NoopNetworkPolicyStrategy {
pub fn new() -> Self {
Self {}
}
}
impl Default for NoopNetworkPolicyStrategy {
fn default() -> Self {
Self::new()
}
}
impl NetworkPolicyStrategy for NoopNetworkPolicyStrategy {
fn clone_box(&self) -> Box<dyn NetworkPolicyStrategy> {
Box::new(self.clone())
}
fn adjust_policy(&self, policy: NetworkPolicy, _config: &TenantConfig) -> NetworkPolicy {
policy
}
}
#[derive(Clone, Debug)]
pub struct K3dNetworkPolicyStrategy {}
impl K3dNetworkPolicyStrategy {
pub fn new() -> Self {
Self {}
}
}
impl Default for K3dNetworkPolicyStrategy {
fn default() -> Self {
Self::new()
}
}
impl NetworkPolicyStrategy for K3dNetworkPolicyStrategy {
fn clone_box(&self) -> Box<dyn NetworkPolicyStrategy> {
Box::new(self.clone())
}
fn adjust_policy(&self, policy: NetworkPolicy, _config: &TenantConfig) -> NetworkPolicy {
let mut egress = policy
.spec
.clone()
.unwrap_or_default()
.egress
.clone()
.unwrap_or_default();
egress.push(NetworkPolicyEgressRule {
to: Some(vec![NetworkPolicyPeer {
ip_block: Some(IPBlock {
cidr: "172.18.0.0/16".into(), // TODO: query the IP range https://git.nationtech.io/NationTech/harmony/issues/108
..Default::default()
}),
..Default::default()
}]),
..Default::default()
});
NetworkPolicy {
spec: Some(NetworkPolicySpec {
egress: Some(egress),
..policy.spec.unwrap_or_default()
}),
..policy
}
}
}
#[cfg(test)]
mod tests {
use k8s_openapi::api::networking::v1::{
IPBlock, NetworkPolicy, NetworkPolicyEgressRule, NetworkPolicyPeer, NetworkPolicySpec,
};
use super::{K3dNetworkPolicyStrategy, NetworkPolicyStrategy};
#[test]
pub fn should_add_ip_block_for_k3d_harmony_server() {
let strategy = K3dNetworkPolicyStrategy::new();
let policy =
strategy.adjust_policy(NetworkPolicy::default(), &super::TenantConfig::default());
let expected_policy = NetworkPolicy {
spec: Some(NetworkPolicySpec {
egress: Some(vec![NetworkPolicyEgressRule {
to: Some(vec![NetworkPolicyPeer {
ip_block: Some(IPBlock {
cidr: "172.18.0.0/16".into(),
..Default::default()
}),
..Default::default()
}]),
..Default::default()
}]),
..Default::default()
}),
..Default::default()
};
assert_eq!(expected_policy, policy);
}
}

View File

@@ -1,7 +1,7 @@
use crate::executors::ExecutorError;
use async_trait::async_trait;
use harmony_types::net::{IpAddress, Url};
use super::{IpAddress, Url};
#[async_trait]
pub trait TftpServer: Send + Sync {

View File

@@ -0,0 +1,8 @@
use async_trait::async_trait;
use crate::{interpret::InterpretError, inventory::Inventory};
#[async_trait]
pub trait Upgradeable<T>: Send + Sync {
async fn upgrade(&self, inventory: &Inventory, topology: &T) -> Result<(), InterpretError>;
}
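To make the intent of this one-method trait concrete, here is a rough sketch of an implementor as it might look inside this crate (illustrative only, not part of the changeset). `DummyFirewall` is a hypothetical type, and the `T: Sync` bound is only there so the `async_trait`-boxed future remains `Send`.

```rust
use async_trait::async_trait;

use crate::{
    interpret::InterpretError,
    inventory::Inventory,
    topology::upgradeable::Upgradeable,
};

/// Hypothetical component used only to illustrate the trait shape.
struct DummyFirewall;

#[async_trait]
impl<T: Sync> Upgradeable<T> for DummyFirewall {
    async fn upgrade(&self, _inventory: &Inventory, _topology: &T) -> Result<(), InterpretError> {
        // A real implementation would run the vendor-specific upgrade procedure here,
        // e.g. the firewall upgrade flow this branch appears to be working toward.
        log::debug!("upgrading DummyFirewall");
        Ok(())
    }
}
```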

View File

@@ -3,9 +3,11 @@ use std::sync::Arc;
use russh::{client, keys::key};
use crate::domain::executors::{ExecutorError, SshClient};
use crate::{
domain::executors::{ExecutorError, SshClient},
topology::IpAddress,
};
use harmony_types::net::IpAddress;
pub struct RusshClient;
#[async_trait]

View File

@@ -1,6 +1,6 @@
use crate::hardware::ManagementInterface;
use crate::topology::IpAddress;
use derive_new::new;
use harmony_types::net::IpAddress;
use harmony_types::net::MacAddress;
use log::info;
use serde::Serialize;

View File

@@ -1,17 +0,0 @@
use crate::{
config::DATABASE_URL,
infra::inventory::sqlite::SqliteInventoryRepository,
inventory::{InventoryRepository, RepoError},
};
pub mod sqlite;
pub struct InventoryRepositoryFactory;
impl InventoryRepositoryFactory {
pub async fn build() -> Result<Box<dyn InventoryRepository>, RepoError> {
Ok(Box::new(
SqliteInventoryRepository::new(&(*DATABASE_URL)).await?,
))
}
}

View File

@@ -1,148 +0,0 @@
use crate::{
hardware::PhysicalHost,
inventory::{HostRole, InventoryRepository, RepoError},
};
use async_trait::async_trait;
use harmony_types::id::Id;
use log::info;
use sqlx::{Pool, Sqlite, SqlitePool};
/// A thread-safe, connection-pooled repository using SQLite.
#[derive(Debug)]
pub struct SqliteInventoryRepository {
pool: Pool<Sqlite>,
}
impl SqliteInventoryRepository {
pub async fn new(database_url: &str) -> Result<Self, RepoError> {
let pool = SqlitePool::connect(database_url)
.await
.map_err(|e| RepoError::ConnectionFailed(e.to_string()))?;
info!("SQLite inventory repository initialized at '{database_url}'");
Ok(Self { pool })
}
}
#[async_trait]
impl InventoryRepository for SqliteInventoryRepository {
async fn save(&self, host: &PhysicalHost) -> Result<(), RepoError> {
let data = serde_json::to_vec(host).map_err(|e| RepoError::Serialization(e.to_string()))?;
let id = Id::default().to_string();
let host_id = host.id.to_string();
sqlx::query!(
"INSERT INTO physical_hosts (id, version_id, data) VALUES (?, ?, ?)",
host_id,
id,
data,
)
.execute(&self.pool)
.await?;
info!("Saved new inventory version for host '{}'", host.id);
Ok(())
}
async fn get_latest_by_id(&self, host_id: &str) -> Result<Option<PhysicalHost>, RepoError> {
let row = sqlx::query_as!(
DbHost,
r#"SELECT id, version_id, data as "data: Json<PhysicalHost>" FROM physical_hosts WHERE id = ? ORDER BY version_id DESC LIMIT 1"#,
host_id
)
.fetch_optional(&self.pool)
.await?;
Ok(row.map(|r| r.data.0))
}
async fn get_all_hosts(&self) -> Result<Vec<PhysicalHost>, RepoError> {
let db_hosts = sqlx::query_as!(
DbHost,
r#"
SELECT
p1.id,
p1.version_id,
p1.data as "data: Json<PhysicalHost>"
FROM
physical_hosts p1
INNER JOIN (
SELECT
id,
MAX(version_id) AS max_version
FROM
physical_hosts
GROUP BY
id
) p2 ON p1.id = p2.id AND p1.version_id = p2.max_version
"#
)
.fetch_all(&self.pool)
.await?;
let hosts = db_hosts.into_iter().map(|row| row.data.0).collect();
Ok(hosts)
}
async fn save_role_mapping(
&self,
role: &HostRole,
host: &PhysicalHost,
) -> Result<(), RepoError> {
let host_id = host.id.to_string();
sqlx::query!(
r#"
INSERT INTO host_role_mapping (host_id, role)
VALUES (?, ?)
"#,
host_id,
role
)
.execute(&self.pool)
.await?;
info!("Saved role mapping for host '{}' as '{:?}'", host.id, role);
Ok(())
}
async fn get_host_for_role(&self, role: HostRole) -> Result<Vec<PhysicalHost>, RepoError> {
struct HostIdRow {
host_id: String,
}
let role_str = format!("{:?}", role);
let host_id_rows = sqlx::query_as!(
HostIdRow,
"SELECT host_id FROM host_role_mapping WHERE role = ?",
role_str
)
.fetch_all(&self.pool)
.await?;
let mut hosts = Vec::with_capacity(host_id_rows.len());
for row in host_id_rows {
match self.get_latest_by_id(&row.host_id).await? {
Some(host) => hosts.push(host),
None => {
log::warn!(
"Found a role mapping for host_id '{}', but the host does not exist in the physical_hosts table. This may indicate a data integrity issue.",
row.host_id
);
}
}
}
Ok(hosts)
}
}
use sqlx::types::Json;
struct DbHost {
data: Json<PhysicalHost>,
id: String,
version_id: String,
}

View File

@@ -1,6 +1,4 @@
pub mod executors;
pub mod hp_ilo;
pub mod intel_amt;
pub mod inventory;
pub mod opnsense;
mod sqlx;

View File

@@ -1,14 +1,13 @@
use async_trait::async_trait;
use harmony_types::net::MacAddress;
use log::info;
use log::debug;
use crate::{
executors::ExecutorError,
topology::{DHCPStaticEntry, DhcpServer, LogicalHost, PxeOptions},
topology::{DHCPStaticEntry, DhcpServer, IpAddress, LogicalHost},
};
use super::OPNSenseFirewall;
use harmony_types::net::IpAddress;
#[async_trait]
impl DhcpServer for OPNSenseFirewall {
@@ -27,7 +26,7 @@ impl DhcpServer for OPNSenseFirewall {
.unwrap();
}
info!("Registered {:?}", entry);
debug!("Registered {:?}", entry);
Ok(())
}
@@ -47,40 +46,57 @@ impl DhcpServer for OPNSenseFirewall {
self.host.clone()
}
async fn set_pxe_options(&self, options: PxeOptions) -> Result<(), ExecutorError> {
let mut writable_opnsense = self.opnsense_config.write().await;
let PxeOptions {
ipxe_filename,
bios_filename,
efi_filename,
tftp_ip,
} = options;
writable_opnsense
.dhcp()
.set_pxe_options(
tftp_ip.map(|i| i.to_string()),
bios_filename,
efi_filename,
ipxe_filename,
)
.await
.map_err(|dhcp_error| {
ExecutorError::UnexpectedError(format!("Failed to set_pxe_options : {dhcp_error}"))
})
async fn set_next_server(&self, ip: IpAddress) -> Result<(), ExecutorError> {
let ipv4 = match ip {
std::net::IpAddr::V4(ipv4_addr) => ipv4_addr,
std::net::IpAddr::V6(_) => todo!("ipv6 not supported yet"),
};
{
let mut writable_opnsense = self.opnsense_config.write().await;
writable_opnsense.dhcp().set_next_server(ipv4);
debug!("OPNsense dhcp server set next server {ipv4}");
}
Ok(())
}
async fn set_dhcp_range(
&self,
start: &IpAddress,
end: &IpAddress,
) -> Result<(), ExecutorError> {
let mut writable_opnsense = self.opnsense_config.write().await;
writable_opnsense
.dhcp()
.set_dhcp_range(&start.to_string(), &end.to_string())
.await
.map_err(|dhcp_error| {
ExecutorError::UnexpectedError(format!("Failed to set_dhcp_range : {dhcp_error}"))
})
async fn set_boot_filename(&self, boot_filename: &str) -> Result<(), ExecutorError> {
{
let mut writable_opnsense = self.opnsense_config.write().await;
writable_opnsense.dhcp().set_boot_filename(boot_filename);
debug!("OPNsense dhcp server set boot filename {boot_filename}");
}
Ok(())
}
async fn set_filename(&self, filename: &str) -> Result<(), ExecutorError> {
{
let mut writable_opnsense = self.opnsense_config.write().await;
writable_opnsense.dhcp().set_filename(filename);
debug!("OPNsense dhcp server set filename {filename}");
}
Ok(())
}
async fn set_filename64(&self, filename: &str) -> Result<(), ExecutorError> {
{
let mut writable_opnsense = self.opnsense_config.write().await;
writable_opnsense.dhcp().set_filename64(filename);
debug!("OPNsense dhcp server set filename {filename}");
}
Ok(())
}
async fn set_filenameipxe(&self, filenameipxe: &str) -> Result<(), ExecutorError> {
{
let mut writable_opnsense = self.opnsense_config.write().await;
writable_opnsense.dhcp().set_filenameipxe(filenameipxe);
debug!("OPNsense dhcp server set filenameipxe {filenameipxe}");
}
Ok(())
}
}
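The per-field setters in this hunk are the counterpart of the consolidated `set_pxe_options` call on the other side of the diff. A hedged sketch of how a caller might drive them against any `DhcpServer` implementation such as `OPNSenseFirewall` (the boot file names and TFTP address below are placeholders, not values taken from this diff):

```rust
use std::net::{IpAddr, Ipv4Addr};

use crate::{executors::ExecutorError, topology::DhcpServer};

/// Configure PXE boot via the granular setters, then persist the change.
async fn configure_pxe(dhcp: &dyn DhcpServer, tftp_ip: Ipv4Addr) -> Result<(), ExecutorError> {
    dhcp.set_next_server(IpAddr::V4(tftp_ip)).await?;
    dhcp.set_boot_filename("undionly.kpxe").await?; // legacy BIOS clients
    dhcp.set_filename("undionly.kpxe").await?;
    dhcp.set_filename64("ipxe.efi").await?; // UEFI clients
    dhcp.set_filenameipxe("http://example.invalid/boot.ipxe").await?;
    dhcp.commit_config().await
}
```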

View File

@@ -1,11 +1,11 @@
use crate::infra::opnsense::Host;
use crate::infra::opnsense::IpAddress;
use crate::infra::opnsense::LogicalHost;
use crate::{
executors::ExecutorError,
topology::{DnsRecord, DnsServer},
};
use async_trait::async_trait;
use harmony_types::net::IpAddress;
use super::OPNSenseFirewall;
@@ -60,7 +60,7 @@ impl DnsServer for OPNSenseFirewall {
}
fn get_ip(&self) -> IpAddress {
OPNSenseFirewall::get_ip(self)
OPNSenseFirewall::get_ip(&self)
}
fn get_host(&self) -> LogicalHost {

Some files were not shown because too many files have changed in this diff.