Compare commits

..

8 Commits

Author SHA1 Message Date
39208c5e86 Merge branch 'master' into better-cli
All checks were successful
Run Check Script / check (pull_request) Successful in 44s
2025-07-31 13:24:34 +00:00
Ian Letourneau
6b36b1c7e9 rename some event attributes
All checks were successful
Run Check Script / check (pull_request) Successful in 38s
2025-07-31 09:24:05 -04:00
Ian Letourneau
507556969a add k3d todo
All checks were successful
Run Check Script / check (pull_request) Successful in 35s
2025-07-31 08:05:06 -04:00
Ian Letourneau
68fde23f2c remove unused inquire dependency for Harmony
All checks were successful
Run Check Script / check (pull_request) Successful in 36s
2025-07-30 21:42:09 -04:00
Ian Letourneau
49f1e56599 add event to track progress of interprets, change a bunch of info! to debug!
All checks were successful
Run Check Script / check (pull_request) Successful in 34s
2025-07-30 21:34:27 -04:00
Ian Letourneau
ff7801a7c1 ensure event handlers are properly subscribed on init, extract duplicated progress functions, cleanup duplication
All checks were successful
Run Check Script / check (pull_request) Successful in 31s
2025-07-30 12:22:04 -04:00
Ian Letourneau
8fae9cf8c8 split instrumentation in 2 different places: harmony domain (for domain observability) & harmony composer (for build/commands observability)
All checks were successful
Run Check Script / check (pull_request) Successful in 34s
2025-07-27 20:52:24 -04:00
Ian Letourneau
6f7e1640c1 fix(cli): reduce noise & better track progress within Harmony
All checks were successful
Run Check Script / check (pull_request) Successful in 35s
2025-07-27 17:41:43 -04:00
177 changed files with 1552 additions and 13645 deletions

View File

@@ -9,7 +9,7 @@ jobs:
check:
runs-on: docker
container:
image: hub.nationtech.io/harmony/harmony_composer:latest
image: hub.nationtech.io/harmony/harmony_composer:latest@sha256:eb0406fcb95c63df9b7c4b19bc50ad7914dd8232ce98e9c9abef628e07c69386
steps:
- name: Checkout code
uses: actions/checkout@v4

View File

@@ -7,7 +7,7 @@ on:
jobs:
package_harmony_composer:
container:
image: hub.nationtech.io/harmony/harmony_composer:latest
image: hub.nationtech.io/harmony/harmony_composer:latest@sha256:eb0406fcb95c63df9b7c4b19bc50ad7914dd8232ce98e9c9abef628e07c69386
runs-on: dind
steps:
- name: Checkout code
@@ -45,14 +45,14 @@ jobs:
-H "Authorization: token ${{ secrets.GITEATOKEN }}" \
"https://git.nationtech.io/api/v1/repos/nationtech/harmony/releases/tags/snapshot-latest" \
| jq -r '.id // empty')
if [ -n "$RELEASE_ID" ]; then
# Delete existing release
curl -X DELETE \
-H "Authorization: token ${{ secrets.GITEATOKEN }}" \
"https://git.nationtech.io/api/v1/repos/nationtech/harmony/releases/$RELEASE_ID"
fi
# Create new release
RESPONSE=$(curl -X POST \
-H "Authorization: token ${{ secrets.GITEATOKEN }}" \
@@ -65,7 +65,7 @@ jobs:
"prerelease": true
}' \
"https://git.nationtech.io/api/v1/repos/nationtech/harmony/releases")
echo "RELEASE_ID=$(echo $RESPONSE | jq -r '.id')" >> $GITHUB_ENV
- name: Upload Linux binary

Cargo.lock generated

File diff suppressed because it is too large

View File

@@ -12,9 +12,6 @@ members = [
"harmony_cli",
"k3d",
"harmony_composer",
"harmony_inventory_agent",
"harmony_secret_derive",
"harmony_secret",
]
[workspace.package]
@@ -23,7 +20,7 @@ readme = "README.md"
license = "GNU AGPL v3"
[workspace.dependencies]
log = { version = "0.4", features = ["kv"] }
log = "0.4"
env_logger = "0.11"
derive-new = "0.7"
async-trait = "0.1"
@@ -56,12 +53,6 @@ chrono = "0.4"
similar = "2"
uuid = { version = "1.11", features = ["v4", "fast-rng", "macro-diagnostics"] }
pretty_assertions = "1.4.1"
tempfile = "3.20.0"
bollard = "0.19.1"
base64 = "0.22.1"
tar = "0.4.44"
lazy_static = "1.5.0"
directories = "6.0.0"
thiserror = "2.0.14"
serde = { version = "1.0.209", features = ["derive", "rc"] }
serde_json = "1.0.127"

View File

@@ -13,7 +13,6 @@ WORKDIR /app
RUN rustup target add x86_64-pc-windows-gnu
RUN rustup target add x86_64-unknown-linux-gnu
RUN rustup component add rustfmt
RUN rustup component add clippy
RUN apt update
@@ -23,4 +22,4 @@ RUN apt install -y nodejs docker.io mingw-w64
COPY --from=build /app/target/release/harmony_composer .
ENTRYPOINT ["/app/harmony_composer"]
ENTRYPOINT ["/app/harmony_composer"]

View File

@@ -1,6 +1,5 @@
# Harmony : Open-source infrastructure orchestration that treats your platform like first-class code
_By [NationTech](https://nationtech.io)_
# Harmony : Open-source infrastructure orchestration that treats your platform like first-class code.
*By [NationTech](https://nationtech.io)*
[![Build](https://git.nationtech.io/NationTech/harmony/actions/workflows/check.yml/badge.svg)](https://git.nationtech.io/nationtech/harmony)
[![License](https://img.shields.io/badge/license-AGPLv3-blue?style=flat-square)](LICENSE)
@@ -24,11 +23,11 @@ From a **developer laptop** to a **global production cluster**, a single **sourc
Infrastructure is essential, but it shouldn't be your core business. Harmony is built on three guiding principles that make modern platforms reliable, repeatable, and easy to reason about.
| Principle | What it means for you |
| -------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
| **Infrastructure as Resilient Code** | Replace sprawling YAML and bash scripts with type-safe Rust. Test, refactor, and version your platform just like application code. |
| **Prove It Works — Before You Deploy** | Harmony uses the compiler to verify that your application's needs match the target environment's capabilities at **compile-time**, eliminating an entire class of runtime outages. |
| **One Unified Model** | Software and infrastructure are a single system. Harmony models them together, enabling deep automation—from bare-metal servers to Kubernetes workloads—with zero context switching. |
| Principle | What it means for you |
|-----------|-----------------------|
| **Infrastructure as Resilient Code** | Replace sprawling YAML and bash scripts with type-safe Rust. Test, refactor, and version your platform just like application code. |
| **Prove It Works — Before You Deploy** | Harmony uses the compiler to verify that your application's needs match the target environment's capabilities at **compile-time**, eliminating an entire class of runtime outages. |
| **One Unified Model** | Software and infrastructure are a single system. Harmony models them together, enabling deep automation—from bare-metal servers to Kubernetes workloads—with zero context switching. |
These principles surface as simple, ergonomic Rust APIs that let teams focus on their product while trusting the platform underneath.
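As a hypothetical illustration of the second principle (none of these names come from the Harmony codebase), capability checks via trait bounds look roughly like this:

```rust
// Hypothetical sketch: a deployment function only accepts topologies
// that implement the capabilities the workload needs, so targeting an
// environment without Kubernetes or DNS is a compile error, not an outage.
trait Kubernetes {}
trait Dns {}

struct LampStack;

fn deploy<T: Kubernetes + Dns>(_topology: &T, _workload: LampStack) {
    // provisioning logic would go here
}
```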
@@ -64,20 +63,22 @@ async fn main() {
},
};
// 2. Enhance with extra scores (monitoring, CI/CD, …)
// 2. Pick where it should run
let mut maestro = Maestro::<K8sAnywhereTopology>::initialize(
Inventory::autoload(), // auto-detect hardware / kube-config
K8sAnywhereTopology::from_env(), // local k3d, CI, staging, prod…
)
.await
.unwrap();
// 3. Enhance with extra scores (monitoring, CI/CD, …)
let mut monitoring = MonitoringAlertingStackScore::new();
monitoring.namespace = Some(lamp_stack.config.namespace.clone());
// 3. Run your scores on the desired topology & inventory
harmony_cli::run(
Inventory::autoload(), // auto-detect hardware / kube-config
K8sAnywhereTopology::from_env(), // local k3d, CI, staging, prod…
vec![
Box::new(lamp_stack),
Box::new(monitoring)
],
None
).await.unwrap();
maestro.register_all(vec![Box::new(lamp_stack), Box::new(monitoring)]);
// 4. Launch an interactive CLI / TUI
harmony_cli::init(maestro, None).await.unwrap();
}
```
@@ -93,13 +94,13 @@ Harmony analyses the code, shows an execution plan in a TUI, and applies it once
## 3 · Core Concepts
| Term | One-liner |
| ---------------- | ---------------------------------------------------------------------------------------------------- |
| **Score<T>** | Declarative description of the desired state (e.g., `LAMPScore`). |
| **Interpret<T>** | Imperative logic that realises a `Score` on a specific environment. |
| **Topology** | An environment (local k3d, AWS, bare-metal) exposing verified _Capabilities_ (Kubernetes, DNS, …). |
| **Maestro** | Orchestrator that compiles Scores + Topology, ensuring all capabilities line up **at compile-time**. |
| **Inventory** | Optional catalogue of physical assets for bare-metal and edge deployments. |
| Term | One-liner |
|------|-----------|
| **Score<T>** | Declarative description of the desired state (e.g., `LAMPScore`). |
| **Interpret<T>** | Imperative logic that realises a `Score` on a specific environment. |
| **Topology** | An environment (local k3d, AWS, bare-metal) exposing verified *Capabilities* (Kubernetes, DNS, …). |
| **Maestro** | Orchestrator that compiles Scores + Topology, ensuring all capabilities line up **at compile-time**. |
| **Inventory** | Optional catalogue of physical assets for bare-metal and edge deployments. |
A visual overview is in the diagram below.
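In code, the first two concepts are a pair of traits; here is a trimmed-down sketch (the real definitions, visible in the `score.rs` diff further down, add serialization, cloning, and async bounds):

```rust
// Trimmed-down sketch of the Score/Interpret split: a Score is a
// declarative description that knows how to build the Interpret
// which realises it on a concrete Topology.
pub trait Topology {
    fn name(&self) -> &str;
}

pub trait Interpret<T: Topology> {
    fn execute(&self, topology: &T); // async + Result in the real crate
}

pub trait Score<T: Topology>: std::fmt::Debug {
    fn create_interpret(&self) -> Box<dyn Interpret<T>>;
    fn name(&self) -> String;
}
```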
@@ -111,9 +112,9 @@ A visual overview is in the diagram below.
Prerequisites:
- Rust
- Docker (if you deploy locally)
- `kubectl` / `helm` for Kubernetes-based topologies
* Rust
* Docker (if you deploy locally)
* `kubectl` / `helm` for Kubernetes-based topologies
```bash
git clone https://git.nationtech.io/nationtech/harmony
@@ -125,15 +126,15 @@ cargo build --release # builds the CLI, TUI and libraries
## 5 · Learning More
- **Architectural Decision Records** dive into the rationale
- [ADR-001 · Why Rust](adr/001-rust.md)
- [ADR-003 · Infrastructure Abstractions](adr/003-infrastructure-abstractions.md)
- [ADR-006 · Secret Management](adr/006-secret-management.md)
* **Architectural Decision Records** dive into the rationale
- [ADR-001 · Why Rust](adr/001-rust.md)
- [ADR-003 · Infrastructure Abstractions](adr/003-infrastructure-abstractions.md)
- [ADR-006 · Secret Management](adr/006-secret-management.md)
- [ADR-011 · Multi-Tenant Cluster](adr/011-multi-tenant-cluster.md)
- **Extending Harmony**: write new Scores / Interprets, add hardware like OPNsense firewalls, or embed Harmony in your own tooling (`/docs`).
* **Extending Harmony**: write new Scores / Interprets, add hardware like OPNsense firewalls, or embed Harmony in your own tooling (`/docs`).
- **Community** discussions and roadmap live in [GitLab issues](https://git.nationtech.io/nationtech/harmony/-/issues). PRs, ideas, and feedback are welcome!
* **Community** discussions and roadmap live in [GitLab issues](https://git.nationtech.io/nationtech/harmony/-/issues). PRs, ideas, and feedback are welcome!
---
@@ -147,4 +148,4 @@ See [LICENSE](LICENSE) for the full text.
---
_Made with ❤️ & 🦀 by the NationTech and the Harmony community_
*Made with ❤️ & 🦀 by the NationTech and the Harmony community*

View File

@@ -1,7 +1,5 @@
#!/bin/sh
set -e
cargo check --all-targets --all-features --keep-going
cargo fmt --check
cargo clippy
cargo test

View File

@@ -1,3 +0,0 @@
Here lie all the data files required for an OKD cluster PXE boot setup.
This includes ISO files, binary boot files, ipxe, etc.

Binary file not shown.

Binary file not shown.

View File

@@ -1,108 +0,0 @@
# OPNsense PXE Lab Environment
This project contains a script to automatically set up a virtual lab environment for testing PXE boot services managed by an OPNsense firewall.
## Overview
The `pxe_vm_lab_setup.sh` script will create the following resources using libvirt/KVM:
1. **A Virtual Network**: An isolated network named `harmonylan` (`virbr1`) for the lab.
2. **Two Virtual Machines**:
* `opnsense-pxe`: A firewall VM that will act as the gateway and PXE server.
* `pxe-node-1`: A client VM configured to boot from the network.
## Prerequisites
Ensure you have the following software installed on your Arch Linux host:
* `libvirt`
* `qemu`
* `virt-install` (from the `virt-install` package)
* `curl`
* `bzip2`
## Usage
### 1. Create the Environment
Run the `up` command to download the necessary images and create the network and VMs.
```bash
sudo ./pxe_vm_lab_setup.sh up
```
### 2. Install and Configure OPNsense
The OPNsense VM is created but the OS needs to be installed manually via the console.
1. **Connect to the VM console**:
```bash
sudo virsh console opnsense-pxe
```
2. **Log in as the installer**:
* Username: `installer`
* Password: `opnsense`
3. **Follow the on-screen installation wizard**. When prompted to assign network interfaces (`WAN` and `LAN`):
* Find the MAC address for the `harmonylan` interface by running this command in another terminal:
```bash
virsh domiflist opnsense-pxe
# Example output:
# Interface Type Source Model MAC
# ---------------------------------------------------------
# vnet18 network default virtio 52:54:00:b5:c4:6d
# vnet19 network harmonylan virtio 52:54:00:21:f9:ba
```
* Assign the interface connected to `harmonylan` (e.g., `vtnet1` with MAC `52:54:00:21:f9:ba`) as your **LAN**.
* Assign the other interface as your **WAN**.
4. After the installation is complete, **shut down** the VM from the console menu.
5. **Detach the installation media** by editing the VM's configuration:
```bash
sudo virsh edit opnsense-pxe
```
Find and **delete** the entire `<disk>` block corresponding to the `.img` file (the one with `<target ... bus='usb'/>`).
6. **Start the VM** to boot into the newly installed system:
```bash
sudo virsh start opnsense-pxe
```
### 3. Connect to OPNsense from Your Host
To configure OPNsense, you need to connect your host to the `harmonylan` network.
1. By default, OPNsense configures its LAN interface with the IP `192.168.1.1`.
2. Assign a compatible IP address to your host's `virbr1` bridge interface:
```bash
sudo ip addr add 192.168.1.5/24 dev virbr1
```
3. You can now access the OPNsense VM from your host:
* **SSH**: `ssh root@192.168.1.1` (password: `opnsense`)
* **Web UI**: `https://192.168.1.1`
### 4. Configure PXE Services with Harmony
With connectivity established, you can now use Harmony to configure the OPNsense firewall for PXE booting. Point your Harmony OPNsense scores to the firewall using these details:
* **Hostname/IP**: `192.168.1.1`
* **Credentials**: `root` / `opnsense`
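For orientation, here is a sketch of the Rust side, borrowing names from `examples/pxe/topology.rs` elsewhere in this compare (illustrative, not a guaranteed API):

```rust
use std::sync::Arc;

use harmony::topology::LogicalHost;
use harmony_macros::ip;

// Illustrative: point Harmony's OPNsense integration at the lab
// firewall created above (192.168.1.1, root/opnsense).
async fn lab_firewall() -> Arc<harmony::infra::opnsense::OPNSenseFirewall> {
    let host = LogicalHost {
        ip: ip!("192.168.1.1"),
        name: String::from("opnsense-pxe"),
    };
    Arc::new(
        harmony::infra::opnsense::OPNSenseFirewall::new(host, None, "root", "opnsense").await,
    )
}
```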
### 5. Boot the PXE Client
Once your Harmony configuration has been applied and OPNsense is serving DHCP/TFTP, start the client VM. It will automatically attempt to boot from the network.
```bash
sudo virsh start pxe-node-1
sudo virsh console pxe-node-1
```
## Cleanup
To destroy all VMs and networks created by the script, run the `clean` command:
```bash
sudo ./pxe_vm_lab_setup.sh clean
```

View File

@@ -1,191 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
# --- Configuration ---
LAB_DIR="/var/lib/harmony_pxe_test"
IMG_DIR="${LAB_DIR}/images"
STATE_DIR="${LAB_DIR}/state"
VM_OPN="opnsense-pxe"
VM_PXE="pxe-node-1"
NET_HARMONYLAN="harmonylan"
# Network settings for the isolated LAN
VLAN_CIDR="192.168.150.0/24"
VLAN_GW="192.168.150.1"
VLAN_MASK="255.255.255.0"
# VM Specifications
RAM_OPN="2048"
VCPUS_OPN="2"
DISK_OPN_GB="10"
OS_VARIANT_OPN="freebsd14.0" # Updated to a more recent FreeBSD variant
RAM_PXE="4096"
VCPUS_PXE="2"
DISK_PXE_GB="40"
OS_VARIANT_LINUX="centos-stream9"
OPN_IMG_URL="https://mirror.ams1.nl.leaseweb.net/opnsense/releases/25.7/OPNsense-25.7-serial-amd64.img.bz2"
OPN_IMG_PATH="${IMG_DIR}/OPNsense-25.7-serial-amd64.img"
CENTOS_ISO_URL="https://mirror.stream.centos.org/9-stream/BaseOS/x86_64/os/images/boot.iso"
CENTOS_ISO_PATH="${IMG_DIR}/CentOS-Stream-9-latest-boot.iso"
CONNECT_URI="qemu:///system"
download_if_missing() {
local url="$1"
local dest="$2"
if [[ ! -f "$dest" ]]; then
echo "Downloading $url to $dest"
mkdir -p "$(dirname "$dest")"
local tmp
tmp="$(mktemp)"
curl -L --progress-bar "$url" -o "$tmp"
case "$url" in
*.bz2) bunzip2 -c "$tmp" > "$dest" && rm -f "$tmp" ;;
*) mv "$tmp" "$dest" ;;
esac
else
echo "Already present: $dest"
fi
}
# Ensures a libvirt network is defined and active
ensure_network() {
local net_name="$1"
local net_xml_path="$2"
if virsh --connect "${CONNECT_URI}" net-info "${net_name}" >/dev/null 2>&1; then
echo "Network ${net_name} already exists."
else
echo "Defining network ${net_name} from ${net_xml_path}"
virsh --connect "${CONNECT_URI}" net-define "${net_xml_path}"
fi
if ! virsh --connect "${CONNECT_URI}" net-info "${net_name}" | grep "Active: *yes"; then
echo "Starting network ${net_name}..."
virsh --connect "${CONNECT_URI}" net-start "${net_name}"
virsh --connect "${CONNECT_URI}" net-autostart "${net_name}"
fi
}
# Destroys a VM completely
destroy_vm() {
local vm_name="$1"
if virsh --connect "${CONNECT_URI}" dominfo "$vm_name" >/dev/null 2>&1; then
echo "Destroying and undefining VM: ${vm_name}"
virsh --connect "${CONNECT_URI}" destroy "$vm_name" || true
virsh --connect "${CONNECT_URI}" undefine "$vm_name" --nvram
fi
}
# Destroys a libvirt network
destroy_network() {
local net_name="$1"
if virsh --connect "${CONNECT_URI}" net-info "$net_name" >/dev/null 2>&1; then
echo "Destroying and undefining network: ${net_name}"
virsh --connect "${CONNECT_URI}" net-destroy "$net_name" || true
virsh --connect "${CONNECT_URI}" net-undefine "$net_name"
fi
}
# --- Main Logic ---
create_lab_environment() {
# Create network definition files
cat > "${STATE_DIR}/default.xml" <<EOF
<network>
<name>default</name>
<forward mode='nat'/>
<bridge name='virbr0' stp='on' delay='0'/>
<ip address='192.168.122.1' netmask='255.255.255.0'>
<dhcp>
<range start='192.168.122.100' end='192.168.122.200'/>
</dhcp>
</ip>
</network>
EOF
cat > "${STATE_DIR}/${NET_HARMONYLAN}.xml" <<EOF
<network>
<name>${NET_HARMONYLAN}</name>
<bridge name='virbr1' stp='on' delay='0'/>
</network>
EOF
# Ensure both networks exist and are active
ensure_network "default" "${STATE_DIR}/default.xml"
ensure_network "${NET_HARMONYLAN}" "${STATE_DIR}/${NET_HARMONYLAN}.xml"
# --- Create OPNsense VM (MODIFIED SECTION) ---
local disk_opn="${IMG_DIR}/${VM_OPN}.qcow2"
if [[ ! -f "$disk_opn" ]]; then
qemu-img create -f qcow2 "$disk_opn" "${DISK_OPN_GB}G"
fi
echo "Creating OPNsense VM using serial image..."
virt-install \
--connect "${CONNECT_URI}" \
--name "${VM_OPN}" \
--ram "${RAM_OPN}" \
--vcpus "${VCPUS_OPN}" \
--cpu host-passthrough \
--os-variant "${OS_VARIANT_OPN}" \
--graphics none \
--noautoconsole \
--disk path="${disk_opn}",device=disk,bus=virtio,boot.order=1 \
--disk path="${OPN_IMG_PATH}",device=disk,bus=usb,readonly=on,boot.order=2 \
--network network=default,model=virtio \
--network network="${NET_HARMONYLAN}",model=virtio \
--boot uefi,menu=on
echo "OPNsense VM created. Connect with: sudo virsh console ${VM_OPN}"
echo "The VM will boot from the serial installation image."
echo "Login with user 'installer' and password 'opnsense' to start the installation."
echo "Install onto the VirtIO disk (vtbd0)."
echo "After installation, shutdown the VM, then run 'sudo virsh edit ${VM_OPN}' and remove the USB disk block to boot from the installed system."
# --- Create PXE Client VM ---
local disk_pxe="${IMG_DIR}/${VM_PXE}.qcow2"
if [[ ! -f "$disk_pxe" ]]; then
qemu-img create -f qcow2 "$disk_pxe" "${DISK_PXE_GB}G"
fi
echo "Creating PXE client VM..."
virt-install \
--connect "${CONNECT_URI}" \
--name "${VM_PXE}" \
--ram "${RAM_PXE}" \
--vcpus "${VCPUS_PXE}" \
--cpu host-passthrough \
--os-variant "${OS_VARIANT_LINUX}" \
--graphics none \
--noautoconsole \
--disk path="${disk_pxe}",format=qcow2,bus=virtio \
--network network="${NET_HARMONYLAN}",model=virtio \
--pxe \
--boot uefi,menu=on
echo "PXE VM created. It will attempt to netboot on ${NET_HARMONYLAN}."
}
# --- Script Entrypoint ---
case "${1:-}" in
up)
mkdir -p "${IMG_DIR}" "${STATE_DIR}"
download_if_missing "$OPN_IMG_URL" "$OPN_IMG_PATH"
download_if_missing "$CENTOS_ISO_URL" "$CENTOS_ISO_PATH"
create_lab_environment
echo "Lab setup complete. Use 'sudo virsh list --all' to see VMs."
;;
clean)
destroy_vm "${VM_PXE}"
destroy_vm "${VM_OPN}"
destroy_network "${NET_HARMONYLAN}"
# Optionally destroy the default network if you want a full reset
# destroy_network "default"
echo "Cleanup complete."
;;
*)
echo "Usage: sudo $0 {up|clean}"
exit 1
;;
esac

View File

@@ -1,14 +0,0 @@
[package]
name = "example-application-monitoring-with-tenant"
edition = "2024"
version.workspace = true
readme.workspace = true
license.workspace = true
[dependencies]
env_logger.workspace = true
harmony = { version = "0.1.0", path = "../../harmony" }
harmony_cli = { version = "0.1.0", path = "../../harmony_cli" }
logging = "0.1.0"
tokio.workspace = true
url.workspace = true

View File

@@ -1,55 +0,0 @@
use std::{path::PathBuf, str::FromStr, sync::Arc};
use harmony::{
data::Id,
inventory::Inventory,
modules::{
application::{ApplicationScore, RustWebFramework, RustWebapp, features::Monitoring},
monitoring::alert_channel::webhook_receiver::WebhookReceiver,
tenant::TenantScore,
},
topology::{K8sAnywhereTopology, Url, tenant::TenantConfig},
};
#[tokio::main]
async fn main() {
//TODO there is a bug where the application is deployed into the namespace matching the
//application name and the tenant is created in the namespace matching the tenant name
//in order for the application to be deployed in the tenant namespace the application.name and
//the TenantConfig.name must match
let tenant = TenantScore {
config: TenantConfig {
id: Id::from_str("test-tenant-id").unwrap(),
name: "example-monitoring".to_string(),
..Default::default()
},
};
let application = Arc::new(RustWebapp {
name: "example-monitoring".to_string(),
domain: Url::Url(url::Url::parse("https://rustapp.harmony.example.com").unwrap()),
project_root: PathBuf::from("./examples/rust/webapp"),
framework: Some(RustWebFramework::Leptos),
});
let webhook_receiver = WebhookReceiver {
name: "sample-webhook-receiver".to_string(),
url: Url::Url(url::Url::parse("https://webhook-doesnt-exist.com").unwrap()),
};
let app = ApplicationScore {
features: vec![Box::new(Monitoring {
alert_receiver: vec![Box::new(webhook_receiver)],
application: application.clone(),
})],
application,
};
harmony_cli::run(
Inventory::autoload(),
K8sAnywhereTopology::from_env(),
vec![Box::new(tenant), Box::new(app)],
None,
)
.await
.unwrap();
}

View File

@@ -1,21 +1,20 @@
use harmony::{
inventory::Inventory,
maestro::Maestro,
modules::dummy::{ErrorScore, PanicScore, SuccessScore},
topology::LocalhostTopology,
};
#[tokio::main]
async fn main() {
harmony_cli::run(
Inventory::autoload(),
LocalhostTopology::new(),
vec![
Box::new(SuccessScore {}),
Box::new(ErrorScore {}),
Box::new(PanicScore {}),
],
None,
)
.await
.unwrap();
let inventory = Inventory::autoload();
let topology = LocalhostTopology::new();
let mut maestro = Maestro::initialize(inventory, topology).await.unwrap();
maestro.register_all(vec![
Box::new(SuccessScore {}),
Box::new(ErrorScore {}),
Box::new(PanicScore {}),
]);
harmony_cli::init(maestro, None).await.unwrap();
}

View File

@@ -125,47 +125,40 @@ spec:
name: nginx"#,
)
.unwrap();
deployment
return deployment;
}
fn nginx_deployment_2() -> Deployment {
let pod_template = PodTemplateSpec {
metadata: Some(ObjectMeta {
labels: Some(BTreeMap::from([(
"app".to_string(),
"nginx-test".to_string(),
)])),
let mut pod_template = PodTemplateSpec::default();
pod_template.metadata = Some(ObjectMeta {
labels: Some(BTreeMap::from([(
"app".to_string(),
"nginx-test".to_string(),
)])),
..Default::default()
});
pod_template.spec = Some(PodSpec {
containers: vec![Container {
name: "nginx".to_string(),
image: Some("nginx".to_string()),
..Default::default()
}),
spec: Some(PodSpec {
containers: vec![Container {
name: "nginx".to_string(),
image: Some("nginx".to_string()),
..Default::default()
}],
..Default::default()
}),
}],
..Default::default()
});
let mut spec = DeploymentSpec::default();
spec.template = pod_template;
spec.selector = LabelSelector {
match_expressions: None,
match_labels: Some(BTreeMap::from([(
"app".to_string(),
"nginx-test".to_string(),
)])),
};
let spec = DeploymentSpec {
template: pod_template,
selector: LabelSelector {
match_expressions: None,
match_labels: Some(BTreeMap::from([(
"app".to_string(),
"nginx-test".to_string(),
)])),
},
..Default::default()
};
let mut deployment = Deployment::default();
deployment.spec = Some(spec);
deployment.metadata.name = Some("nginx-test".to_string());
Deployment {
spec: Some(spec),
metadata: ObjectMeta {
name: Some("nginx-test".to_string()),
..Default::default()
},
..Default::default()
}
deployment
}
fn nginx_deployment() -> Deployment {

View File

@@ -1,6 +1,7 @@
use harmony::{
data::Version,
inventory::Inventory,
maestro::Maestro,
modules::lamp::{LAMPConfig, LAMPScore},
topology::{K8sAnywhereTopology, Url},
};
@@ -23,7 +24,7 @@ async fn main() {
// This config can be extended as needed for more complicated configurations
config: LAMPConfig {
project_root: "./php".into(),
database_size: "4Gi".to_string().into(),
database_size: format!("4Gi").into(),
..Default::default()
},
};
@@ -42,13 +43,15 @@ async fn main() {
// K8sAnywhereTopology as it is the most automatic one that enables you to easily deploy
// locally, to development environment from a CI, to staging, and to production with settings
// that automatically adapt to each environment grade.
harmony_cli::run(
let mut maestro = Maestro::<K8sAnywhereTopology>::initialize(
Inventory::autoload(),
K8sAnywhereTopology::from_env(),
vec![Box::new(lamp_stack)],
None,
)
.await
.unwrap();
maestro.register_all(vec![Box::new(lamp_stack)]);
// Here we bootstrap the CLI, this gives some nice features if you need them
harmony_cli::init(maestro, None).await.unwrap();
}
// That's it, end of the infra as code.

View File

@@ -2,6 +2,7 @@ use std::collections::HashMap;
use harmony::{
inventory::Inventory,
maestro::Maestro,
modules::{
monitoring::{
alert_channel::discord_alert_channel::DiscordWebhook,
@@ -50,8 +51,8 @@ async fn main() {
let service_monitor_endpoint = ServiceMonitorEndpoint {
port: Some("80".to_string()),
path: Some("/metrics".to_string()),
scheme: Some(HTTPScheme::HTTP),
path: "/metrics".to_string(),
scheme: HTTPScheme::HTTP,
..Default::default()
};
@@ -73,13 +74,13 @@ async fn main() {
rules: vec![Box::new(additional_rules), Box::new(additional_rules2)],
service_monitors: vec![service_monitor],
};
harmony_cli::run(
let mut maestro = Maestro::<K8sAnywhereTopology>::initialize(
Inventory::autoload(),
K8sAnywhereTopology::from_env(),
vec![Box::new(alerting_score)],
None,
)
.await
.unwrap();
maestro.register_all(vec![Box::new(alerting_score)]);
harmony_cli::init(maestro, None).await.unwrap();
}

View File

@@ -1,8 +1,9 @@
use std::{collections::HashMap, str::FromStr};
use std::collections::HashMap;
use harmony::{
data::Id,
inventory::Inventory,
maestro::Maestro,
modules::{
monitoring::{
alert_channel::discord_alert_channel::DiscordWebhook,
@@ -28,7 +29,7 @@ use harmony::{
async fn main() {
let tenant = TenantScore {
config: TenantConfig {
id: Id::from_str("1234").unwrap(),
id: Id::from_string("1234".to_string()),
name: "test-tenant".to_string(),
resource_limits: ResourceLimits {
cpu_request_cores: 6.0,
@@ -53,8 +54,8 @@ async fn main() {
let service_monitor_endpoint = ServiceMonitorEndpoint {
port: Some("80".to_string()),
path: Some("/metrics".to_string()),
scheme: Some(HTTPScheme::HTTP),
path: "/metrics".to_string(),
scheme: HTTPScheme::HTTP,
..Default::default()
};
@@ -77,13 +78,13 @@ async fn main() {
rules: vec![Box::new(additional_rules)],
service_monitors: vec![service_monitor],
};
harmony_cli::run(
let mut maestro = Maestro::<K8sAnywhereTopology>::initialize(
Inventory::autoload(),
K8sAnywhereTopology::from_env(),
vec![Box::new(tenant), Box::new(alerting_score)],
None,
)
.await
.unwrap();
maestro.register_all(vec![Box::new(tenant), Box::new(alerting_score)]);
harmony_cli::init(maestro, None).await.unwrap();
}

View File

@@ -8,6 +8,7 @@ use harmony::{
hardware::{FirewallGroup, HostCategory, Location, PhysicalHost, SwitchGroup},
infra::opnsense::OPNSenseManagementInterface,
inventory::Inventory,
maestro::Maestro,
modules::{
http::StaticFilesHttpScore,
ipxe::IpxeScore,
@@ -125,26 +126,20 @@ async fn main() {
harmony::modules::okd::load_balancer::OKDLoadBalancerScore::new(&topology);
let tftp_score = TftpScore::new(Url::LocalFolder("./data/watchguard/tftpboot".to_string()));
let http_score = StaticFilesHttpScore {
folder_to_serve: Some(Url::LocalFolder("./data/watchguard/pxe-http-files".to_string())),
files: vec![],
};
let http_score = StaticFilesHttpScore::new(Url::LocalFolder(
"./data/watchguard/pxe-http-files".to_string(),
));
let ipxe_score = IpxeScore::new();
harmony_tui::run(
inventory,
topology,
vec![
Box::new(dns_score),
Box::new(bootstrap_dhcp_score),
Box::new(bootstrap_load_balancer_score),
Box::new(load_balancer_score),
Box::new(tftp_score),
Box::new(http_score),
Box::new(ipxe_score),
Box::new(dhcp_score),
],
)
.await
.unwrap();
let mut maestro = Maestro::initialize(inventory, topology).await.unwrap();
maestro.register_all(vec![
Box::new(dns_score),
Box::new(bootstrap_dhcp_score),
Box::new(bootstrap_load_balancer_score),
Box::new(load_balancer_score),
Box::new(tftp_score),
Box::new(http_score),
Box::new(ipxe_score),
Box::new(dhcp_score),
]);
harmony_tui::init(maestro).await.unwrap();
}

View File

@@ -1,18 +1,20 @@
use harmony::{
inventory::Inventory, modules::monitoring::ntfy::ntfy::NtfyScore, topology::K8sAnywhereTopology,
inventory::Inventory, maestro::Maestro, modules::monitoring::ntfy::ntfy::NtfyScore,
topology::K8sAnywhereTopology,
};
#[tokio::main]
async fn main() {
harmony_cli::run(
let mut maestro = Maestro::<K8sAnywhereTopology>::initialize(
Inventory::autoload(),
K8sAnywhereTopology::from_env(),
vec![Box::new(NtfyScore {
namespace: "monitoring".to_string(),
host: "localhost".to_string(),
})],
None,
)
.await
.unwrap();
maestro.register_all(vec![Box::new(NtfyScore {
namespace: "monitoring".to_string(),
host: "localhost".to_string(),
})]);
harmony_cli::init(maestro, None).await.unwrap();
}

View File

@@ -1,19 +0,0 @@
[package]
name = "example-pxe"
edition = "2024"
version.workspace = true
readme.workspace = true
license.workspace = true
publish = false
[dependencies]
harmony = { path = "../../harmony" }
harmony_cli = { path = "../../harmony_cli" }
harmony_types = { path = "../../harmony_types" }
cidr = { workspace = true }
tokio = { workspace = true }
harmony_macros = { path = "../../harmony_macros" }
log = { workspace = true }
env_logger = { workspace = true }
url = { workspace = true }
askama = "0.14.0"

View File

@@ -1,92 +0,0 @@
mod topology;
use std::net::IpAddr;
use askama::Template;
use harmony::{
data::{FileContent, FilePath},
modules::{dhcp::DhcpScore, http::StaticFilesHttpScore, tftp::TftpScore},
score::Score,
topology::{HAClusterTopology, Url},
};
use crate::topology::{get_inventory, get_topology};
#[tokio::main]
async fn main() {
let inventory = get_inventory();
let topology = get_topology().await;
let gateway_ip = &topology.router.get_gateway();
let kickstart_filename = "inventory.kickstart";
let cluster_pubkey_filename = "cluster_ssh_key.pub";
let harmony_inventory_agent = "harmony_inventory_agent";
// TODO this should be a single IPXEScore instead of having the user do this step by step
let scores: Vec<Box<dyn Score<HAClusterTopology>>> = vec![
Box::new(DhcpScore {
host_binding: vec![],
next_server: Some(topology.router.get_gateway()),
boot_filename: None,
filename: Some("undionly.kpxe".to_string()),
filename64: Some("ipxe.efi".to_string()),
filenameipxe: Some(format!("http://{gateway_ip}:8080/boot.ipxe").to_string()),
}),
Box::new(TftpScore {
files_to_serve: Url::LocalFolder("./data/pxe/okd/tftpboot/".to_string()),
}),
Box::new(StaticFilesHttpScore {
// TODO The current russh based copy is way too slow, check for a lib update or use scp
// when available
//
// For now just run :
// scp -r data/pxe/okd/http_files/* root@192.168.1.1:/usr/local/http/
//
folder_to_serve: None,
// folder_to_serve: Some(Url::LocalFolder("./data/pxe/okd/http_files/".to_string())),
files: vec![
FileContent {
path: FilePath::Relative("boot.ipxe".to_string()),
content: BootIpxeTpl { gateway_ip }.to_string(),
},
FileContent {
path: FilePath::Relative(kickstart_filename.to_string()),
content: InventoryKickstartTpl {
gateway_ip,
harmony_inventory_agent,
cluster_pubkey_filename,
}.to_string(),
},
FileContent {
path: FilePath::Relative("fallback.ipxe".to_string()),
content: FallbackIpxeTpl { gateway_ip, kickstart_filename}.to_string(),
},
],
}),
];
harmony_cli::run(inventory, topology, scores, None)
.await
.unwrap();
}
#[derive(Template)]
#[template(path = "boot.ipxe.j2")]
struct BootIpxeTpl<'a> {
gateway_ip: &'a IpAddr,
}
#[derive(Template)]
#[template(path = "fallback.ipxe.j2")]
struct FallbackIpxeTpl<'a> {
gateway_ip: &'a IpAddr,
kickstart_filename: &'a str,
}
#[derive(Template)]
#[template(path = "inventory.kickstart.j2")]
struct InventoryKickstartTpl<'a> {
gateway_ip: &'a IpAddr,
cluster_pubkey_filename: &'a str,
harmony_inventory_agent: &'a str,
}

View File

@@ -1,65 +0,0 @@
use std::{
net::{IpAddr, Ipv4Addr},
sync::Arc,
};
use cidr::Ipv4Cidr;
use harmony::{
hardware::{FirewallGroup, HostCategory, Location, PhysicalHost, SwitchGroup},
infra::opnsense::OPNSenseManagementInterface,
inventory::Inventory,
topology::{HAClusterTopology, LogicalHost, UnmanagedRouter},
};
use harmony_macros::{ip, ipv4};
pub async fn get_topology() -> HAClusterTopology {
let firewall = harmony::topology::LogicalHost {
ip: ip!("192.168.1.1"),
name: String::from("opnsense-1"),
};
let opnsense = Arc::new(
harmony::infra::opnsense::OPNSenseFirewall::new(firewall, None, "root", "opnsense").await,
);
let lan_subnet = ipv4!("192.168.1.0");
let gateway_ipv4 = ipv4!("192.168.1.1");
let gateway_ip = IpAddr::V4(gateway_ipv4);
harmony::topology::HAClusterTopology {
domain_name: "demo.harmony.mcd".to_string(),
router: Arc::new(UnmanagedRouter::new(
gateway_ip,
Ipv4Cidr::new(lan_subnet, 24).unwrap(),
)),
load_balancer: opnsense.clone(),
firewall: opnsense.clone(),
tftp_server: opnsense.clone(),
http_server: opnsense.clone(),
dhcp_server: opnsense.clone(),
dns_server: opnsense.clone(),
control_plane: vec![LogicalHost {
ip: ip!("10.100.8.20"),
name: "cp0".to_string(),
}],
bootstrap_host: LogicalHost {
ip: ip!("10.100.8.20"),
name: "cp0".to_string(),
},
workers: vec![],
switch: vec![],
}
}
pub fn get_inventory() -> Inventory {
Inventory {
location: Location::new(
"Some virtual machine or maybe a physical machine if you're cool".to_string(),
"testopnsense".to_string(),
),
switch: SwitchGroup::from([]),
firewall: FirewallGroup::from([PhysicalHost::empty(HostCategory::Firewall)
.management(Arc::new(OPNSenseManagementInterface::new()))]),
storage_host: vec![],
worker_host: vec![],
control_plane_host: vec![],
}
}

View File

@@ -1,6 +0,0 @@
#!ipxe
set base-url http://{{ gateway_ip }}:8080
set hostfile ${base-url}/byMAC/01-${mac:hexhyp}.ipxe
chain ${hostfile} || chain ${base-url}/fallback.ipxe

View File

@@ -1,40 +0,0 @@
#!ipxe
# =================================================================
# Harmony Discovery Agent - Default Boot Script (default.ipxe)
# =================================================================
#
# This script boots the CentOS Stream live environment for the
# purpose of hardware inventory. It loads the kernel and initramfs
# directly and passes a Kickstart URL for full automation.
#
# --- Configuration
# Set the base URL for where the CentOS kernel/initrd are hosted.
set os_base_url http://{{gateway_ip}}:8080/os/centos-stream-9
# Set the URL for the Kickstart file.
set ks_url http://{{ gateway_ip }}:8080/{{ kickstart_filename }}
# --- Boot Process
echo "Harmony: Starting automated node discovery..."
echo "Fetching kernel from ${os_base_url}/vmlinuz..."
kernel ${os_base_url}/vmlinuz
echo "Fetching initramfs from ${os_base_url}/initrd.img..."
initrd ${os_base_url}/initrd.img
echo "Configuring kernel boot arguments..."
# Kernel Arguments Explained:
# - initrd=initrd.img: Specifies the initial ramdisk to use.
# - inst.stage2: Points to the OS source. For a live boot, the base URL is sufficient.
# - inst.ks: CRITICAL: Points to our Kickstart file for automation.
# - ip=dhcp: Ensures the live environment configures its network.
# - console=...: Provides boot output on both serial and graphical consoles for debugging.
imgargs vmlinuz initrd=initrd.img inst.stage2=${os_base_url} inst.ks=${ks_url} ip=dhcp console=ttyS0,115200 console=tty1
echo "Booting into CentOS Stream 9 live environment..."
boot || goto failed
:failed
echo "Boot failed. Dropping to iPXE shell."
shell

View File

@@ -1,92 +0,0 @@
# =================================================================
# Harmony Discovery Agent - Kickstart File (inventory.kickstart)
# =================================================================
#
# This Kickstart file configures the CentOS Stream 9 live environment.
# It does NOT install to disk. It sets up SSH for remote access
# and downloads and runs the harmony-inventory-agent.
#
# --- System Configuration
lang en_US.UTF-8
keyboard --xlayouts='us'
timezone America/New_York --isUtc
# --- Network Configuration
# Ensure the network is activated using DHCP.
network --bootproto=dhcp --device=link --activate
# --- Security Configuration
# Disable the firewall for this isolated provisioning network.
firewall --disabled
# Disable SELinux for simplicity in the live environment.
selinux --disabled
# Disable password-based root login for security.
rootpw --lock
# --- Service Configuration
# Ensure the SSH daemon is enabled.
services --enabled="sshd"
# We are running a live environment, so no disk partitioning.
# The 'liveimg' command would be used here if booting from a squashfs,
# but since we are booting from kernel/initrd, we just use the %post.
# Do not run the graphical initial setup wizard.
firstboot --disable
# --- Post-Boot Scripting
# This section runs after the live environment has booted into RAM.
%post --log=/root/ks-post.log
echo "Harmony Kickstart: Post-boot script started."
# 1. Configure SSH Access
# Create the .ssh directory and set correct permissions.
echo " - Setting up SSH authorized_keys..."
mkdir -p /root/.ssh
chmod 700 /root/.ssh
# Download the public key and place it in authorized_keys.
curl -sSL "http://{{ gateway_ip }}:8080/{{ cluster_pubkey_filename }}" -o /root/.ssh/authorized_keys
chmod 600 /root/.ssh/authorized_keys
# SELinux context is handled by 'selinux --disabled' above,
# but if SELinux were enabled, this would be essential:
# restorecon -R /root/.ssh
# 2. Download the Harmony Inventory Agent
echo " - Downloading harmony-inventory-agent..."
curl -sSL "http://{{ gateway_ip }}:8080/{{ harmony_inventory_agent }}" -o /usr/local/bin/harmony-inventory-agent
chmod +x /usr/local/bin/harmony-inventory-agent
# 3. Create a systemd service to run the agent persistently
echo " - Creating systemd service for the agent..."
cat > /etc/systemd/system/harmony-agent.service << EOF
[Unit]
Description=Harmony Inventory Agent
After=network-online.target
Wants=network-online.target
[Service]
ExecStart=/usr/local/bin/harmony-inventory-agent
Restart=always
RestartSec=5
[Install]
WantedBy=multi-user.target
EOF
# 4. Enable and start the service
echo " - Enabling and starting harmony-agent.service..."
systemctl daemon-reload
systemctl enable --now harmony-agent.service
echo "Harmony Kickstart: Post-boot script finished. The inventory agent is running."
curl localhost:8080/inventory | tee -a /tmp/harmony_inventory.json
%end
# Do not automatically reboot or poweroff.
# The machine should remain running for inventory scraping.

View File

@@ -8,6 +8,7 @@ use harmony::{
hardware::{FirewallGroup, HostCategory, Location, PhysicalHost, SwitchGroup},
infra::opnsense::OPNSenseManagementInterface,
inventory::Inventory,
maestro::Maestro,
modules::{
dummy::{ErrorScore, PanicScore, SuccessScore},
http::StaticFilesHttpScore,
@@ -80,31 +81,23 @@ async fn main() {
let load_balancer_score = OKDLoadBalancerScore::new(&topology);
let tftp_score = TftpScore::new(Url::LocalFolder("./data/watchguard/tftpboot".to_string()));
let http_score = StaticFilesHttpScore {
folder_to_serve: Some(Url::LocalFolder(
"./data/watchguard/pxe-http-files".to_string(),
)),
files: vec![],
};
harmony_tui::run(
inventory,
topology,
vec![
Box::new(dns_score),
Box::new(dhcp_score),
Box::new(load_balancer_score),
Box::new(tftp_score),
Box::new(http_score),
Box::new(OPNsenseShellCommandScore {
opnsense: opnsense.get_opnsense_config(),
command: "touch /tmp/helloharmonytouching".to_string(),
}),
Box::new(SuccessScore {}),
Box::new(ErrorScore {}),
Box::new(PanicScore {}),
],
)
.await
.unwrap();
let http_score = StaticFilesHttpScore::new(Url::LocalFolder(
"./data/watchguard/pxe-http-files".to_string(),
));
let mut maestro = Maestro::initialize(inventory, topology).await.unwrap();
maestro.register_all(vec![
Box::new(dns_score),
Box::new(dhcp_score),
Box::new(load_balancer_score),
Box::new(tftp_score),
Box::new(http_score),
Box::new(OPNsenseShellCommandScore {
opnsense: opnsense.get_opnsense_config(),
command: "touch /tmp/helloharmonytouching".to_string(),
}),
Box::new(SuccessScore {}),
Box::new(ErrorScore {}),
Box::new(PanicScore {}),
]);
harmony_tui::init(maestro).await.unwrap();
}

View File

@@ -2,20 +2,24 @@ use std::{path::PathBuf, sync::Arc};
use harmony::{
inventory::Inventory,
modules::{
application::{
ApplicationScore, RustWebFramework, RustWebapp,
features::{ContinuousDelivery, Monitoring},
},
monitoring::alert_channel::{
discord_alert_channel::DiscordWebhook, webhook_receiver::WebhookReceiver,
},
maestro::Maestro,
modules::application::{
ApplicationScore, RustWebFramework, RustWebapp,
features::{ContinuousDelivery, Monitoring},
},
topology::{K8sAnywhereTopology, Url},
};
use harmony_cli::cli_logger;
#[tokio::main]
async fn main() {
let cli_logger_handle = cli_logger::init();
let topology = K8sAnywhereTopology::from_env();
let mut maestro = Maestro::initialize(Inventory::autoload(), topology)
.await
.unwrap();
let application = Arc::new(RustWebapp {
name: "harmony-example-rust-webapp".to_string(),
domain: Url::Url(url::Url::parse("https://rustapp.harmony.example.com").unwrap()),
@@ -23,16 +27,6 @@ async fn main() {
framework: Some(RustWebFramework::Leptos),
});
let discord_receiver = DiscordWebhook {
name: "test-discord".to_string(),
url: Url::Url(url::Url::parse("https://discord.doesnt.exist.com").unwrap()),
};
let webhook_receiver = WebhookReceiver {
name: "sample-webhook-receiver".to_string(),
url: Url::Url(url::Url::parse("https://webhook-doesnt-exist.com").unwrap()),
};
let app = ApplicationScore {
features: vec![
Box::new(ContinuousDelivery {
@@ -40,19 +34,13 @@ async fn main() {
}),
Box::new(Monitoring {
application: application.clone(),
alert_receiver: vec![Box::new(discord_receiver), Box::new(webhook_receiver)],
}),
// TODO add backups, multisite ha, etc
}), // TODO: add backups, multisite ha, etc.
],
application,
};
harmony_cli::run(
Inventory::autoload(),
K8sAnywhereTopology::from_env(),
vec![Box::new(app)],
None,
)
.await
.unwrap();
maestro.register_all(vec![Box::new(app)]);
harmony_cli::init(maestro, None).await.unwrap();
let _ = tokio::try_join!(cli_logger_handle);
}

View File

@@ -1,8 +1,7 @@
use std::str::FromStr;
use harmony::{
data::Id,
inventory::Inventory,
maestro::Maestro,
modules::tenant::TenantScore,
topology::{K8sAnywhereTopology, tenant::TenantConfig},
};
@@ -11,20 +10,21 @@ use harmony::{
async fn main() {
let tenant = TenantScore {
config: TenantConfig {
id: Id::from_str("test-tenant-id").unwrap(),
id: Id::from_str("test-tenant-id"),
name: "testtenant".to_string(),
..Default::default()
},
};
harmony_cli::run(
let mut maestro = Maestro::<K8sAnywhereTopology>::initialize(
Inventory::autoload(),
K8sAnywhereTopology::from_env(),
vec![Box::new(tenant)],
None,
)
.await
.unwrap();
maestro.register_all(vec![Box::new(tenant)]);
harmony_cli::init(maestro, None).await.unwrap();
}
// TODO write tests

View File

@@ -2,6 +2,7 @@ use std::net::{SocketAddr, SocketAddrV4};
use harmony::{
inventory::Inventory,
maestro::Maestro,
modules::{
dns::DnsScore,
dummy::{ErrorScore, PanicScore, SuccessScore},
@@ -15,19 +16,18 @@ use harmony_macros::ipv4;
#[tokio::main]
async fn main() {
harmony_tui::run(
Inventory::autoload(),
DummyInfra {},
vec![
Box::new(SuccessScore {}),
Box::new(ErrorScore {}),
Box::new(PanicScore {}),
Box::new(DnsScore::new(vec![], None)),
Box::new(build_large_score()),
],
)
.await
.unwrap();
let inventory = Inventory::autoload();
let topology = DummyInfra {};
let mut maestro = Maestro::initialize(inventory, topology).await.unwrap();
maestro.register_all(vec![
Box::new(SuccessScore {}),
Box::new(ErrorScore {}),
Box::new(PanicScore {}),
Box::new(DnsScore::new(vec![], None)),
Box::new(build_large_score()),
]);
harmony_tui::init(maestro).await.unwrap();
}
fn build_large_score() -> LoadBalancerScore {

View File

@@ -5,9 +5,6 @@ version.workspace = true
readme.workspace = true
license.workspace = true
[features]
testing = []
[dependencies]
rand = "0.9"
hex = "0.4"
@@ -16,8 +13,8 @@ reqwest = { version = "0.11", features = ["blocking", "json"] }
russh = "0.45.0"
rust-ipmi = "0.1.1"
semver = "1.0.23"
serde.workspace = true
serde_json.workspace = true
serde = { version = "1.0.209", features = ["derive", "rc"] }
serde_json = "1.0.127"
tokio.workspace = true
derive-new.workspace = true
log.workspace = true
@@ -30,7 +27,7 @@ harmony_macros = { path = "../harmony_macros" }
harmony_types = { path = "../harmony_types" }
uuid.workspace = true
url.workspace = true
kube = { workspace = true, features = ["derive"] }
kube.workspace = true
k8s-openapi.workspace = true
serde_yaml.workspace = true
http.workspace = true
@@ -38,8 +35,8 @@ serde-value.workspace = true
helm-wrapper-rs = "0.4.0"
non-blank-string-rs = "1.0.4"
k3d-rs = { path = "../k3d" }
directories.workspace = true
lazy_static.workspace = true
directories = "6.0.0"
lazy_static = "1.5.0"
dockerfile_builder = "0.1.5"
temp-file = "0.1.9"
convert_case.workspace = true
@@ -59,15 +56,12 @@ similar.workspace = true
futures-util = "0.3.31"
tokio-util = "0.7.15"
strum = { version = "0.27.1", features = ["derive"] }
tempfile.workspace = true
tempfile = "3.20.0"
serde_with = "3.14.0"
schemars = "0.8.22"
kube-derive = "1.1.0"
bollard.workspace = true
tar.workspace = true
base64.workspace = true
once_cell = "1.21.3"
harmony-secret-derive = { version = "0.1.0", path = "../harmony_secret_derive" }
[dev-dependencies]
pretty_assertions.workspace = true

Binary file not shown.

View File

@@ -11,5 +11,5 @@ lazy_static! {
pub static ref REGISTRY_PROJECT: String =
std::env::var("HARMONY_REGISTRY_PROJECT").unwrap_or_else(|_| "harmony".to_string());
pub static ref DRY_RUN: bool =
std::env::var("HARMONY_DRY_RUN").is_ok_and(|value| value.parse().unwrap_or(false));
std::env::var("HARMONY_DRY_RUN").map_or(true, |value| value.parse().unwrap_or(true));
}
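The two sides of this hunk differ in their default: `is_ok_and` yields `false` when the variable is unset or unparsable, while `map_or(true, ...)` yields `true`. A standalone illustration of the distinction:

```rust
fn main() {
    // Same env var, opposite fallbacks when it is unset or unparsable:
    let opt_in: bool = std::env::var("HARMONY_DRY_RUN")
        .is_ok_and(|v| v.parse().unwrap_or(false)); // unset => false
    let opt_out: bool = std::env::var("HARMONY_DRY_RUN")
        .map_or(true, |v| v.parse().unwrap_or(true)); // unset => true
    println!("opt_in={opt_in} opt_out={opt_out}");
}
```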

View File

@@ -1,22 +0,0 @@
use serde::{Deserialize, Serialize};
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct FileContent {
pub path: FilePath,
pub content: String,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum FilePath {
Relative(String),
Absolute(String),
}
impl std::fmt::Display for FilePath {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
FilePath::Relative(path) => f.write_fmt(format_args!("./{path}")),
FilePath::Absolute(path) => f.write_fmt(format_args!("/{path}")),
}
}
}

View File

@@ -1,6 +1,5 @@
use rand::distr::Alphanumeric;
use rand::distr::SampleString;
use std::str::FromStr;
use std::time::SystemTime;
use std::time::UNIX_EPOCH;
@@ -24,13 +23,13 @@ pub struct Id {
value: String,
}
impl FromStr for Id {
type Err = ();
impl Id {
pub fn from_string(value: String) -> Self {
Self { value }
}
fn from_str(s: &str) -> Result<Self, Self::Err> {
Ok(Id {
value: s.to_string(),
})
pub fn from_str(value: &str) -> Self {
Self::from_string(value.to_string())
}
}

View File

@@ -1,6 +1,4 @@
mod id;
mod version;
mod file;
pub use id::*;
pub use version::*;
pub use file::*;

View File

@@ -47,7 +47,7 @@ impl serde::Serialize for Version {
impl std::fmt::Display for Version {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
self.value.fmt(f)
return self.value.fmt(f);
}
}

View File

@@ -35,9 +35,10 @@ impl PhysicalHost {
pub fn cluster_mac(&self) -> MacAddress {
self.network
.first()
.get(0)
.expect("Cluster physical host should have a network interface")
.mac_address
.clone()
}
pub fn cpu(mut self, cpu_count: Option<u64>) -> Self {

View File

@@ -2,42 +2,28 @@ use log::debug;
use once_cell::sync::Lazy;
use tokio::sync::broadcast;
use crate::modules::application::ApplicationFeatureStatus;
use super::{
interpret::{InterpretError, Outcome},
topology::TopologyStatus,
};
use super::interpret::{InterpretError, Outcome};
#[derive(Debug, Clone)]
pub enum HarmonyEvent {
HarmonyStarted,
HarmonyFinished,
PrepareTopologyStarted {
topology: String,
},
TopologyPrepared {
topology: String,
outcome: Outcome,
},
InterpretExecutionStarted {
execution_id: String,
topology: String,
interpret: String,
score: String,
message: String,
},
InterpretExecutionFinished {
execution_id: String,
topology: String,
interpret: String,
score: String,
outcome: Result<Outcome, InterpretError>,
},
TopologyStateChanged {
topology: String,
status: TopologyStatus,
message: Option<String>,
},
ApplicationFeatureStateChanged {
topology: String,
application: String,
feature: String,
status: ApplicationFeatureStatus,
},
}
static HARMONY_EVENT_BUS: Lazy<broadcast::Sender<HarmonyEvent>> = Lazy::new(|| {
@@ -47,14 +33,9 @@ static HARMONY_EVENT_BUS: Lazy<broadcast::Sender<HarmonyEvent>> = Lazy::new(|| {
});
pub fn instrument(event: HarmonyEvent) -> Result<(), &'static str> {
if cfg!(any(test, feature = "testing")) {
let _ = event; // Suppress the "unused variable" warning for `event`
Ok(())
} else {
match HARMONY_EVENT_BUS.send(event) {
Ok(_) => Ok(()),
Err(_) => Err("send error: no subscribers"),
}
match HARMONY_EVENT_BUS.send(event) {
Ok(_) => Ok(()),
Err(_) => Err("send error: no subscribers"),
}
}
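The bus behind `instrument` is a `tokio::sync::broadcast` channel held in a `Lazy` static; a minimal standalone sketch of that shape (simplified event type, not the crate's public API):

```rust
use once_cell::sync::Lazy;
use tokio::sync::broadcast;

#[derive(Debug, Clone)]
enum Event {
    Started,
    Finished,
}

// One global sender; every consumer gets its own receiver via subscribe().
static BUS: Lazy<broadcast::Sender<Event>> = Lazy::new(|| broadcast::channel(128).0);

fn instrument(event: Event) -> Result<(), &'static str> {
    // send() only errors when nobody is subscribed at that moment.
    BUS.send(event)
        .map(|_| ())
        .map_err(|_| "send error: no subscribers")
}

async fn watch() {
    let mut rx = BUS.subscribe();
    while let Ok(event) = rx.recv().await {
        println!("observed {event:?}");
    }
}
```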

View File

@@ -7,7 +7,6 @@ use super::{
data::{Id, Version},
executors::ExecutorError,
inventory::Inventory,
topology::PreparationError,
};
pub enum InterpretName {
@@ -24,14 +23,6 @@ pub enum InterpretName {
TenantInterpret,
Application,
ArgoCD,
Alerting,
Ntfy,
HelmChart,
HelmCommand,
K8sResource,
Lamp,
ApplicationMonitoring,
K8sPrometheusCrdAlerting,
}
impl std::fmt::Display for InterpretName {
@@ -50,14 +41,6 @@ impl std::fmt::Display for InterpretName {
InterpretName::TenantInterpret => f.write_str("Tenant"),
InterpretName::Application => f.write_str("Application"),
InterpretName::ArgoCD => f.write_str("ArgoCD"),
InterpretName::Alerting => f.write_str("Alerting"),
InterpretName::Ntfy => f.write_str("Ntfy"),
InterpretName::HelmChart => f.write_str("HelmChart"),
InterpretName::HelmCommand => f.write_str("HelmCommand"),
InterpretName::K8sResource => f.write_str("K8sResource"),
InterpretName::Lamp => f.write_str("LAMP"),
InterpretName::ApplicationMonitoring => f.write_str("ApplicationMonitoring"),
InterpretName::K8sPrometheusCrdAlerting => f.write_str("K8sPrometheusCrdAlerting"),
}
}
}
@@ -130,14 +113,6 @@ impl std::fmt::Display for InterpretError {
}
impl Error for InterpretError {}
impl From<PreparationError> for InterpretError {
fn from(value: PreparationError) -> Self {
Self {
msg: format!("InterpretError : {value}"),
}
}
}
impl From<ExecutorError> for InterpretError {
fn from(value: ExecutorError) -> Self {
Self {

View File

@@ -1,14 +1,14 @@
use std::sync::{Arc, RwLock};
use std::sync::{Arc, Mutex, RwLock};
use log::{debug, warn};
use log::{debug, info, warn};
use crate::topology::TopologyStatus;
use crate::instrumentation::{self, HarmonyEvent};
use super::{
interpret::{InterpretError, Outcome},
interpret::{InterpretError, InterpretStatus, Outcome},
inventory::Inventory,
score::Score,
topology::{PreparationError, PreparationOutcome, Topology, TopologyState},
topology::Topology,
};
type ScoreVec<T> = Vec<Box<dyn Score<T>>>;
@@ -17,7 +17,7 @@ pub struct Maestro<T: Topology> {
inventory: Inventory,
topology: T,
scores: Arc<RwLock<ScoreVec<T>>>,
topology_state: TopologyState,
topology_preparation_result: Mutex<Option<Outcome>>,
}
impl<T: Topology> Maestro<T> {
@@ -25,46 +25,41 @@ impl<T: Topology> Maestro<T> {
///
/// This should rarely be used. Most of the time Maestro::initialize should be used instead.
pub fn new_without_initialization(inventory: Inventory, topology: T) -> Self {
let topology_name = topology.name().to_string();
Self {
inventory,
topology,
scores: Arc::new(RwLock::new(Vec::new())),
topology_state: TopologyState::new(topology_name),
topology_preparation_result: None.into(),
}
}
pub async fn initialize(inventory: Inventory, topology: T) -> Result<Self, PreparationError> {
let mut instance = Self::new_without_initialization(inventory, topology);
pub async fn initialize(inventory: Inventory, topology: T) -> Result<Self, InterpretError> {
let instance = Self::new_without_initialization(inventory, topology);
instance.prepare_topology().await?;
Ok(instance)
}
/// Ensures the associated Topology is ready for operations.
/// Delegates the readiness check and potential setup actions to the Topology.
async fn prepare_topology(&mut self) -> Result<PreparationOutcome, PreparationError> {
self.topology_state.prepare();
pub async fn prepare_topology(&self) -> Result<Outcome, InterpretError> {
instrumentation::instrument(HarmonyEvent::PrepareTopologyStarted {
topology: self.topology.name().to_string(),
})
.unwrap();
let result = self.topology.ensure_ready().await;
let outcome = self.topology.ensure_ready().await?;
match result {
Ok(outcome) => {
match outcome.clone() {
PreparationOutcome::Success { details } => {
self.topology_state.success(details);
}
PreparationOutcome::Noop => {
self.topology_state.noop();
}
};
Ok(outcome)
}
Err(err) => {
self.topology_state.error(err.to_string());
Err(err)
}
}
instrumentation::instrument(HarmonyEvent::TopologyPrepared {
topology: self.topology.name().to_string(),
outcome: outcome.clone(),
})
.unwrap();
self.topology_preparation_result
.lock()
.unwrap()
.replace(outcome.clone());
Ok(outcome)
}
pub fn register_all(&mut self, mut scores: ScoreVec<T>) {
@@ -73,7 +68,15 @@ impl<T: Topology> Maestro<T> {
}
fn is_topology_initialized(&self) -> bool {
self.topology_state.status == TopologyStatus::Success
let result = self.topology_preparation_result.lock().unwrap();
if let Some(outcome) = result.as_ref() {
match outcome.status {
InterpretStatus::SUCCESS => return true,
_ => return false,
}
} else {
false
}
}
pub async fn interpret(&self, score: Box<dyn Score<T>>) -> Result<Outcome, InterpretError> {
@@ -84,8 +87,10 @@ impl<T: Topology> Maestro<T> {
self.topology.name(),
);
}
debug!("Interpreting score {score:?}");
let result = score.interpret(&self.inventory, &self.topology).await;
debug!("Running score {score:?}");
let interpret = score.create_interpret();
debug!("Launching interpret {interpret:?}");
let result = interpret.execute(&self.inventory, &self.topology).await;
debug!("Got result {result:?}");
result
}
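Both call styles in this compare funnel into this method; a usage sketch assembled from the examples above, matching their `.unwrap()` style:

```rust
// Illustrative flow, mirroring the examples in this compare:
// initialize() prepares the topology, register_all() queues scores,
// and the CLI entry point drives interpretation.
let mut maestro = Maestro::<K8sAnywhereTopology>::initialize(
    Inventory::autoload(),
    K8sAnywhereTopology::from_env(),
)
.await
.unwrap();
maestro.register_all(vec![Box::new(lamp_stack)]);
harmony_cli::init(maestro, None).await.unwrap();
```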

View File

@@ -1,62 +1,22 @@
use std::collections::BTreeMap;
use async_trait::async_trait;
use serde::Serialize;
use serde_value::Value;
use super::{
data::Id,
instrumentation::{self, HarmonyEvent},
interpret::{Interpret, InterpretError, Outcome},
inventory::Inventory,
topology::Topology,
};
use super::{interpret::Interpret, topology::Topology};
#[async_trait]
pub trait Score<T: Topology>:
std::fmt::Debug + ScoreToString<T> + Send + Sync + CloneBoxScore<T> + SerializeScore<T>
{
async fn interpret(
&self,
inventory: &Inventory,
topology: &T,
) -> Result<Outcome, InterpretError> {
let id = Id::default();
let interpret = self.create_interpret();
instrumentation::instrument(HarmonyEvent::InterpretExecutionStarted {
execution_id: id.clone().to_string(),
topology: topology.name().into(),
interpret: interpret.get_name().to_string(),
score: self.name(),
message: format!("{} running...", interpret.get_name()),
})
.unwrap();
let result = interpret.execute(inventory, topology).await;
instrumentation::instrument(HarmonyEvent::InterpretExecutionFinished {
execution_id: id.clone().to_string(),
topology: topology.name().into(),
interpret: interpret.get_name().to_string(),
score: self.name(),
outcome: result.clone(),
})
.unwrap();
result
}
fn name(&self) -> String;
#[doc(hidden)]
fn create_interpret(&self) -> Box<dyn Interpret<T>>;
fn name(&self) -> String;
}
pub trait SerializeScore<T: Topology> {
fn serialize(&self) -> Value;
}
impl<S, T> SerializeScore<T> for S
impl<'de, S, T> SerializeScore<T> for S
where
T: Topology,
S: Score<T> + Serialize,
@@ -64,7 +24,7 @@ where
fn serialize(&self) -> Value {
// TODO not sure if this is the right place to handle the error or it should bubble
// up?
serde_value::to_value(self).expect("Score should serialize successfully")
serde_value::to_value(&self).expect("Score should serialize successfully")
}
}

View File

@@ -1,12 +1,11 @@
use async_trait::async_trait;
use harmony_macros::ip;
use harmony_types::net::MacAddress;
use log::debug;
use log::info;
use crate::data::FileContent;
use crate::executors::ExecutorError;
use crate::topology::PxeOptions;
use crate::interpret::InterpretError;
use crate::interpret::Outcome;
use super::DHCPStaticEntry;
use super::DhcpServer;
@@ -20,8 +19,6 @@ use super::K8sclient;
use super::LoadBalancer;
use super::LoadBalancerService;
use super::LogicalHost;
use super::PreparationError;
use super::PreparationOutcome;
use super::Router;
use super::TftpServer;
@@ -51,11 +48,10 @@ impl Topology for HAClusterTopology {
fn name(&self) -> &str {
"HAClusterTopology"
}
async fn ensure_ready(&self) -> Result<PreparationOutcome, PreparationError> {
debug!(
async fn ensure_ready(&self) -> Result<Outcome, InterpretError> {
todo!(
"ensure_ready, not entirely sure what it should do here, probably something like verify that the hosts are reachable and all services are up and ready."
);
Ok(PreparationOutcome::Noop)
)
}
}
@@ -157,10 +153,12 @@ impl DhcpServer for HAClusterTopology {
async fn list_static_mappings(&self) -> Vec<(MacAddress, IpAddress)> {
self.dhcp_server.list_static_mappings().await
}
async fn set_pxe_options(&self, options: PxeOptions) -> Result<(), ExecutorError> {
self.dhcp_server.set_pxe_options(options).await
async fn set_next_server(&self, ip: IpAddress) -> Result<(), ExecutorError> {
self.dhcp_server.set_next_server(ip).await
}
async fn set_boot_filename(&self, boot_filename: &str) -> Result<(), ExecutorError> {
self.dhcp_server.set_boot_filename(boot_filename).await
}
fn get_ip(&self) -> IpAddress {
self.dhcp_server.get_ip()
}
@@ -170,6 +168,16 @@ impl DhcpServer for HAClusterTopology {
async fn commit_config(&self) -> Result<(), ExecutorError> {
self.dhcp_server.commit_config().await
}
async fn set_filename(&self, filename: &str) -> Result<(), ExecutorError> {
self.dhcp_server.set_filename(filename).await
}
async fn set_filename64(&self, filename64: &str) -> Result<(), ExecutorError> {
self.dhcp_server.set_filename64(filename64).await
}
async fn set_filenameipxe(&self, filenameipxe: &str) -> Result<(), ExecutorError> {
self.dhcp_server.set_filenameipxe(filenameipxe).await
}
}
#[async_trait]
@@ -213,21 +221,17 @@ impl HttpServer for HAClusterTopology {
self.http_server.serve_files(url).await
}
async fn serve_file_content(&self, file: &FileContent) -> Result<(), ExecutorError> {
self.http_server.serve_file_content(file).await
}
fn get_ip(&self) -> IpAddress {
self.http_server.get_ip()
unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA)
}
async fn ensure_initialized(&self) -> Result<(), ExecutorError> {
self.http_server.ensure_initialized().await
unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA)
}
async fn commit_config(&self) -> Result<(), ExecutorError> {
self.http_server.commit_config().await
unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA)
}
async fn reload_restart(&self) -> Result<(), ExecutorError> {
self.http_server.reload_restart().await
unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA)
}
}
@@ -237,15 +241,13 @@ pub struct DummyInfra;
#[async_trait]
impl Topology for DummyInfra {
fn name(&self) -> &str {
"DummyInfra"
todo!()
}
async fn ensure_ready(&self) -> Result<PreparationOutcome, PreparationError> {
async fn ensure_ready(&self) -> Result<Outcome, InterpretError> {
let dummy_msg = "This is a dummy infrastructure that does nothing";
info!("{dummy_msg}");
Ok(PreparationOutcome::Success {
details: dummy_msg.into(),
})
Ok(Outcome::success(dummy_msg.to_string()))
}
}
@@ -295,7 +297,19 @@ impl DhcpServer for DummyInfra {
async fn list_static_mappings(&self) -> Vec<(MacAddress, IpAddress)> {
unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA)
}
async fn set_pxe_options(&self, _options: PxeOptions) -> Result<(), ExecutorError> {
async fn set_next_server(&self, _ip: IpAddress) -> Result<(), ExecutorError> {
unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA)
}
async fn set_boot_filename(&self, _boot_filename: &str) -> Result<(), ExecutorError> {
unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA)
}
async fn set_filename(&self, _filename: &str) -> Result<(), ExecutorError> {
unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA)
}
async fn set_filename64(&self, _filename: &str) -> Result<(), ExecutorError> {
unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA)
}
async fn set_filenameipxe(&self, _filenameipxe: &str) -> Result<(), ExecutorError> {
unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA)
}
fn get_ip(&self) -> IpAddress {
@@ -365,9 +379,6 @@ impl HttpServer for DummyInfra {
async fn serve_files(&self, _url: &Url) -> Result<(), ExecutorError> {
unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA)
}
async fn serve_file_content(&self, _file: &FileContent) -> Result<(), ExecutorError> {
unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA)
}
fn get_ip(&self) -> IpAddress {
unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA)
}

View File

@@ -1,4 +1,4 @@
use crate::{data::FileContent, executors::ExecutorError};
use crate::executors::ExecutorError;
use async_trait::async_trait;
use super::{IpAddress, Url};
@@ -6,7 +6,6 @@ use super::{IpAddress, Url};
#[async_trait]
pub trait HttpServer: Send + Sync {
async fn serve_files(&self, url: &Url) -> Result<(), ExecutorError>;
async fn serve_file_content(&self, file: &FileContent) -> Result<(), ExecutorError>;
fn get_ip(&self) -> IpAddress;
// async fn set_ip(&self, ip: IpAddress) -> Result<(), ExecutorError>;

View File

@@ -1,11 +1,12 @@
use derive_new::new;
use futures_util::StreamExt;
use k8s_openapi::{
ClusterResourceScope, NamespaceResourceScope,
api::{apps::v1::Deployment, core::v1::Pod},
};
use kube::{
Client, Config, Error, Resource,
api::{Api, AttachParams, DeleteParams, ListParams, Patch, PatchParams, ResourceExt},
api::{Api, AttachParams, ListParams, Patch, PatchParams, ResourceExt},
config::{KubeConfigOptions, Kubeconfig},
core::ErrorResponse,
runtime::reflector::Lookup,
@@ -16,25 +17,14 @@ use kube::{
runtime::wait::await_condition,
};
use log::{debug, error, trace};
use serde::{Serialize, de::DeserializeOwned};
use serde_json::json;
use similar::TextDiff;
use tokio::io::AsyncReadExt;
use serde::de::DeserializeOwned;
use similar::{DiffableStr, TextDiff};
#[derive(new, Clone)]
pub struct K8sClient {
client: Client,
}
impl Serialize for K8sClient {
fn serialize<S>(&self, _serializer: S) -> Result<S::Ok, S::Error>
where
S: serde::Serializer,
{
todo!()
}
}
impl std::fmt::Debug for K8sClient {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
// This is a poor man's debug implementation for now as kube::Client does not provide much
@@ -53,66 +43,6 @@ impl K8sClient {
})
}
pub async fn get_deployment(
&self,
name: &str,
namespace: Option<&str>,
) -> Result<Option<Deployment>, Error> {
let deps: Api<Deployment> = if let Some(ns) = namespace {
Api::namespaced(self.client.clone(), ns)
} else {
Api::default_namespaced(self.client.clone())
};
Ok(deps.get_opt(name).await?)
}
pub async fn get_pod(&self, name: &str, namespace: Option<&str>) -> Result<Option<Pod>, Error> {
let pods: Api<Pod> = if let Some(ns) = namespace {
Api::namespaced(self.client.clone(), ns)
} else {
Api::default_namespaced(self.client.clone())
};
Ok(pods.get_opt(name).await?)
}
pub async fn scale_deployment(
&self,
name: &str,
namespace: Option<&str>,
replicas: u32,
) -> Result<(), Error> {
let deployments: Api<Deployment> = if let Some(ns) = namespace {
Api::namespaced(self.client.clone(), ns)
} else {
Api::default_namespaced(self.client.clone())
};
let patch = json!({
"spec": {
"replicas": replicas
}
});
let pp = PatchParams::default();
let scale = Patch::Apply(&patch);
deployments.patch_scale(name, &pp, &scale).await?;
Ok(())
}
pub async fn delete_deployment(
&self,
name: &str,
namespace: Option<&str>,
) -> Result<(), Error> {
let deployments: Api<Deployment> = if let Some(ns) = namespace {
Api::namespaced(self.client.clone(), ns)
} else {
Api::default_namespaced(self.client.clone())
};
let delete_params = DeleteParams::default();
deployments.delete(name, &delete_params).await?;
Ok(())
}
pub async fn wait_until_deployment_ready(
&self,
name: String,
@@ -128,75 +58,13 @@ impl K8sClient {
}
let establish = await_condition(api, name.as_str(), conditions::is_deployment_completed());
let t = timeout.unwrap_or(300);
let t = if let Some(t) = timeout { t } else { 300 };
let res = tokio::time::timeout(std::time::Duration::from_secs(t), establish).await;
if res.is_ok() {
Ok(())
if let Ok(r) = res {
return Ok(());
} else {
Err("timed out while waiting for deployment".to_string())
}
}
/// Will execute a command in the first pod found that matches the specified label
/// '{label}={name}'
pub async fn exec_app_capture_output(
&self,
name: String,
label: String,
namespace: Option<&str>,
command: Vec<&str>,
) -> Result<String, String> {
let api: Api<Pod>;
if let Some(ns) = namespace {
api = Api::namespaced(self.client.clone(), ns);
} else {
api = Api::default_namespaced(self.client.clone());
}
let pod_list = api
.list(&ListParams::default().labels(format!("{label}={name}").as_str()))
.await
.expect("couldn't get list of pods");
let res = api
.exec(
pod_list
.items
.first()
.expect("couldn't get pod")
.name()
.expect("couldn't get pod name")
.into_owned()
.as_str(),
command,
&AttachParams::default().stdout(true).stderr(true),
)
.await;
match res {
Err(e) => Err(e.to_string()),
Ok(mut process) => {
let status = process
.take_status()
.expect("Couldn't get status")
.await
.expect("Couldn't unwrap status");
if let Some(s) = status.status {
let mut stdout_buf = String::new();
if let Some(mut stdout) = process.stdout().take() {
stdout.read_to_string(&mut stdout_buf).await;
}
debug!("Status: {} - {:?}", s, status.details);
if s == "Success" {
Ok(stdout_buf)
} else {
Err(s)
}
} else {
Err("Couldn't get inner status of pod exec".to_string())
}
}
return Err("timed out while waiting for deployment".to_string());
}
}
@@ -235,7 +103,7 @@ impl K8sClient {
.await;
match res {
Err(e) => Err(e.to_string()),
Err(e) => return Err(e.to_string()),
Ok(mut process) => {
let status = process
.take_status()
@@ -244,10 +112,14 @@ impl K8sClient {
.expect("Couldn't unwrap status");
if let Some(s) = status.status {
debug!("Status: {} - {:?}", s, status.details);
if s == "Success" { Ok(()) } else { Err(s) }
debug!("Status: {}", s);
if s == "Success" {
return Ok(());
} else {
return Err(s);
}
} else {
Err("Couldn't get inner status of pod exec".to_string())
return Err("Couldn't get inner status of pod exec".to_string());
}
}
}
@@ -288,9 +160,8 @@ impl K8sClient {
trace!("Received current value {current:#?}");
// The resource exists, so we calculate and display a diff.
println!("\nPerforming dry-run for resource: '{}'", name);
let mut current_yaml = serde_yaml::to_value(&current).unwrap_or_else(|_| {
panic!("Could not serialize current value : {current:#?}")
});
let mut current_yaml = serde_yaml::to_value(&current)
.expect(&format!("Could not serialize current value : {current:#?}"));
if current_yaml.is_mapping() && current_yaml.get("status").is_some() {
let map = current_yaml.as_mapping_mut().unwrap();
let removed = map.remove_entry("status");
@@ -357,7 +228,7 @@ impl K8sClient {
}
}
pub async fn apply_many<K>(&self, resource: &[K], ns: Option<&str>) -> Result<Vec<K>, Error>
pub async fn apply_many<K>(&self, resource: &Vec<K>, ns: Option<&str>) -> Result<Vec<K>, Error>
where
K: Resource + Clone + std::fmt::Debug + DeserializeOwned + serde::Serialize,
<K as Resource>::Scope: ApplyStrategy<K>,
@@ -373,7 +244,7 @@ impl K8sClient {
pub async fn apply_yaml_many(
&self,
#[allow(clippy::ptr_arg)] yaml: &Vec<serde_yaml::Value>,
yaml: &Vec<serde_yaml::Value>,
ns: Option<&str>,
) -> Result<(), Error> {
for y in yaml.iter() {

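Since apply_many is generic over any kube Resource whose scope implements ApplyStrategy, batch-applying typed resources stays a one-liner. A sketch under that assumption:

// Sketch: batch-apply namespaced resources; assumes ApplyStrategy is
// implemented for NamespaceResourceScope, as the bounds above imply.
use k8s_openapi::api::core::v1::ConfigMap;

let cms: Vec<ConfigMap> = vec![ConfigMap::default(), ConfigMap::default()];
let applied = client.apply_many(&cms, Some("demo")).await?;
assert_eq!(applied.len(), cms.len());
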
View File

@@ -7,40 +7,22 @@ use tokio::sync::OnceCell;
use crate::{
executors::ExecutorError,
interpret::InterpretStatus,
interpret::{InterpretError, Outcome},
inventory::Inventory,
modules::{
k3d::K3DInstallationScore,
monitoring::kube_prometheus::crd::{
crd_alertmanager_config::CRDPrometheus,
prometheus_operator::prometheus_operator_helm_chart_score,
},
prometheus::{
k8s_prometheus_alerting_score::K8sPrometheusCRDAlertingScore,
prometheus::PrometheusApplicationMonitoring,
},
},
modules::k3d::K3DInstallationScore,
score::Score,
};
use super::{
DeploymentTarget, HelmCommand, K8sclient, MultiTargetTopology, PreparationError,
PreparationOutcome, Topology,
DeploymentTarget, HelmCommand, K8sclient, MultiTargetTopology, Topology,
k8s::K8sClient,
oberservability::monitoring::AlertReceiver,
tenant::{
TenantConfig, TenantManager,
k8s::K8sTenantManager,
network_policy::{
K3dNetworkPolicyStrategy, NetworkPolicyStrategy, NoopNetworkPolicyStrategy,
},
},
tenant::{TenantConfig, TenantManager, k8s::K8sTenantManager},
};
#[derive(Clone, Debug)]
struct K8sState {
client: Arc<K8sClient>,
source: K8sSource,
_source: K8sSource,
message: String,
}
@@ -74,42 +56,8 @@ impl K8sclient for K8sAnywhereTopology {
}
}
#[async_trait]
impl PrometheusApplicationMonitoring<CRDPrometheus> for K8sAnywhereTopology {
async fn install_prometheus(
&self,
sender: &CRDPrometheus,
inventory: &Inventory,
receivers: Option<Vec<Box<dyn AlertReceiver<CRDPrometheus>>>>,
) -> Result<PreparationOutcome, PreparationError> {
let po_result = self.ensure_prometheus_operator(sender).await?;
if po_result == PreparationOutcome::Noop {
debug!("Skipping Prometheus CR installation due to missing operator.");
return Ok(po_result);
}
let result = self
.get_k8s_prometheus_application_score(sender.clone(), receivers)
.await
.interpret(inventory, self)
.await;
match result {
Ok(outcome) => match outcome.status {
InterpretStatus::SUCCESS => Ok(PreparationOutcome::Success {
details: outcome.message,
}),
InterpretStatus::NOOP => Ok(PreparationOutcome::Noop),
_ => Err(PreparationError::new(outcome.message)),
},
Err(err) => Err(PreparationError::new(err.to_string())),
}
}
}
impl Serialize for K8sAnywhereTopology {
fn serialize<S>(&self, _serializer: S) -> Result<S::Ok, S::Error>
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: serde::Serializer,
{
@@ -134,19 +82,6 @@ impl K8sAnywhereTopology {
}
}
async fn get_k8s_prometheus_application_score(
&self,
sender: CRDPrometheus,
receivers: Option<Vec<Box<dyn AlertReceiver<CRDPrometheus>>>>,
) -> K8sPrometheusCRDAlertingScore {
K8sPrometheusCRDAlertingScore {
sender,
receivers: receivers.unwrap_or_default(),
service_monitors: vec![],
prometheus_rules: vec![],
}
}
fn is_helm_available(&self) -> Result<(), String> {
let version_result = Command::new("helm")
.arg("version")
@@ -175,23 +110,15 @@ impl K8sAnywhereTopology {
K3DInstallationScore::default()
}
async fn try_install_k3d(&self) -> Result<(), PreparationError> {
let result = self
.get_k3d_installation_score()
.interpret(&Inventory::empty(), self)
.await;
match result {
Ok(outcome) => match outcome.status {
InterpretStatus::SUCCESS => Ok(()),
InterpretStatus::NOOP => Ok(()),
_ => Err(PreparationError::new(outcome.message)),
},
Err(err) => Err(PreparationError::new(err.to_string())),
}
async fn try_install_k3d(&self) -> Result<(), InterpretError> {
self.get_k3d_installation_score()
.create_interpret()
.execute(&Inventory::empty(), self)
.await?;
Ok(())
}
async fn try_get_or_install_k8s_client(&self) -> Result<Option<K8sState>, PreparationError> {
async fn try_get_or_install_k8s_client(&self) -> Result<Option<K8sState>, InterpretError> {
let k8s_anywhere_config = &self.config;
// TODO this deserves some refactoring, it is becoming a bit hard to figure out
@@ -201,16 +128,16 @@ impl K8sAnywhereTopology {
} else {
if let Some(kubeconfig) = &k8s_anywhere_config.kubeconfig {
debug!("Loading kubeconfig {kubeconfig}");
match self.try_load_kubeconfig(kubeconfig).await {
match self.try_load_kubeconfig(&kubeconfig).await {
Some(client) => {
return Ok(Some(K8sState {
client: Arc::new(client),
source: K8sSource::Kubeconfig,
_source: K8sSource::Kubeconfig,
message: format!("Loaded k8s client from kubeconfig {kubeconfig}"),
}));
}
None => {
return Err(PreparationError::new(format!(
return Err(InterpretError::new(format!(
"Failed to load kubeconfig from {kubeconfig}"
)));
}
@@ -247,7 +174,7 @@ impl K8sAnywhereTopology {
let state = match k3d.get_client().await {
Ok(client) => K8sState {
client: Arc::new(K8sClient::new(client)),
source: K8sSource::LocalK3d,
_source: K8sSource::LocalK3d,
message: "K8s client ready".to_string(),
},
Err(_) => todo!(),
@@ -256,21 +183,15 @@ impl K8sAnywhereTopology {
Ok(Some(state))
}
async fn ensure_k8s_tenant_manager(&self, k8s_state: &K8sState) -> Result<(), String> {
if self.tenant_manager.get().is_some() {
async fn ensure_k8s_tenant_manager(&self) -> Result<(), String> {
if let Some(_) = self.tenant_manager.get() {
return Ok(());
}
self.tenant_manager
.get_or_try_init(async || -> Result<K8sTenantManager, String> {
let k8s_client = self.k8s_client().await?;
let network_policy_strategy: Box<dyn NetworkPolicyStrategy> = match k8s_state.source
{
K8sSource::LocalK3d => Box::new(K3dNetworkPolicyStrategy::new()),
K8sSource::Kubeconfig => Box::new(NoopNetworkPolicyStrategy::new()),
};
Ok(K8sTenantManager::new(k8s_client, network_policy_strategy))
Ok(K8sTenantManager::new(k8s_client))
})
.await?;
@@ -285,55 +206,6 @@ impl K8sAnywhereTopology {
)),
}
}
async fn ensure_prometheus_operator(
&self,
sender: &CRDPrometheus,
) -> Result<PreparationOutcome, PreparationError> {
let status = Command::new("sh")
.args(["-c", "kubectl get crd -A | grep -i prometheuses"])
.status()
.map_err(|e| PreparationError::new(format!("could not connect to cluster: {}", e)))?;
if !status.success() {
if let Some(Some(k8s_state)) = self.k8s_state.get() {
match k8s_state.source {
K8sSource::LocalK3d => {
debug!("installing prometheus operator");
let op_score =
prometheus_operator_helm_chart_score(sender.namespace.clone());
let result = op_score.interpret(&Inventory::empty(), self).await;
return match result {
Ok(outcome) => match outcome.status {
InterpretStatus::SUCCESS => Ok(PreparationOutcome::Success {
details: "installed prometheus operator".into(),
}),
InterpretStatus::NOOP => Ok(PreparationOutcome::Noop),
_ => Err(PreparationError::new(
"failed to install prometheus operator (unknown error)".into(),
)),
},
Err(err) => Err(PreparationError::new(err.to_string())),
};
}
K8sSource::Kubeconfig => {
debug!("unable to install prometheus operator, contact cluster admin");
return Ok(PreparationOutcome::Noop);
}
}
} else {
warn!("Unable to detect k8s_state. Skipping Prometheus Operator install.");
return Ok(PreparationOutcome::Noop);
}
}
debug!("Prometheus operator is already present, skipping install");
Ok(PreparationOutcome::Success {
details: "prometheus operator present in cluster".into(),
})
}
}
#[derive(Clone, Debug)]
@@ -391,25 +263,26 @@ impl Topology for K8sAnywhereTopology {
"K8sAnywhereTopology"
}
async fn ensure_ready(&self) -> Result<PreparationOutcome, PreparationError> {
async fn ensure_ready(&self) -> Result<Outcome, InterpretError> {
let k8s_state = self
.k8s_state
.get_or_try_init(|| self.try_get_or_install_k8s_client())
.await?;
let k8s_state: &K8sState = k8s_state.as_ref().ok_or(PreparationError::new(
"no K8s client could be found or installed".to_string(),
let k8s_state: &K8sState = k8s_state.as_ref().ok_or(InterpretError::new(
"No K8s client could be found or installed".to_string(),
))?;
self.ensure_k8s_tenant_manager(k8s_state)
self.ensure_k8s_tenant_manager()
.await
.map_err(PreparationError::new)?;
.map_err(|e| InterpretError::new(e))?;
match self.is_helm_available() {
Ok(()) => Ok(PreparationOutcome::Success {
details: format!("{} + helm available", k8s_state.message.clone()),
}),
Err(e) => Err(PreparationError::new(format!("helm unavailable: {}", e))),
Ok(()) => Ok(Outcome::success(format!(
"{} + helm available",
k8s_state.message.clone()
))),
Err(e) => Err(InterpretError::new(format!("helm unavailable: {}", e))),
}
}
}

View File

@@ -1,7 +1,9 @@
use async_trait::async_trait;
use derive_new::new;
use super::{HelmCommand, PreparationError, PreparationOutcome, Topology};
use crate::interpret::{InterpretError, Outcome};
use super::{HelmCommand, Topology};
#[derive(new)]
pub struct LocalhostTopology;
@@ -12,10 +14,10 @@ impl Topology for LocalhostTopology {
"LocalHostTopology"
}
async fn ensure_ready(&self) -> Result<PreparationOutcome, PreparationError> {
Ok(PreparationOutcome::Success {
details: "Localhost is Chuck Norris, always ready.".into(),
})
async fn ensure_ready(&self) -> Result<Outcome, InterpretError> {
Ok(Outcome::success(
"Localhost is Chuck Norris, always ready.".to_string(),
))
}
}

View File

@@ -6,7 +6,6 @@ mod k8s_anywhere;
mod localhost;
pub mod oberservability;
pub mod tenant;
use derive_new::new;
pub use k8s_anywhere::*;
pub use localhost::*;
pub mod k8s;
@@ -27,13 +26,10 @@ pub use tftp::*;
mod helm_command;
pub use helm_command::*;
use super::{
executors::ExecutorError,
instrumentation::{self, HarmonyEvent},
};
use std::error::Error;
use std::net::IpAddr;
use super::interpret::{InterpretError, Outcome};
/// Represents a logical view of an infrastructure environment providing specific capabilities.
///
/// A Topology acts as a self-contained "package" responsible for managing access
@@ -61,128 +57,9 @@ pub trait Topology: Send + Sync {
/// * **Internal Orchestration:** For complex topologies, this method might manage dependencies on other sub-topologies, ensuring *their* `ensure_ready` is called first. Using nested `Maestros` to run setup `Scores` against these sub-topologies is the recommended pattern for non-trivial bootstrapping, allowing reuse of Harmony's core orchestration logic.
///
/// # Returns
/// - `Ok(PreparationOutcome)`: Indicates the topology is now ready. The `Outcome` status might be `SUCCESS` if actions were taken, or `NOOP` if it was already ready. The message should provide context.
/// - `Err(PreparationError)`: Indicates the topology could not reach a ready state due to configuration issues, discovery failures, bootstrap errors, or unsupported environments.
async fn ensure_ready(&self) -> Result<PreparationOutcome, PreparationError>;
}
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum PreparationOutcome {
Success { details: String },
Noop,
}
#[derive(Debug, Clone, new)]
pub struct PreparationError {
msg: String,
}
impl std::fmt::Display for PreparationError {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.write_str(&self.msg)
}
}
impl Error for PreparationError {}
impl From<ExecutorError> for PreparationError {
fn from(value: ExecutorError) -> Self {
Self {
msg: format!("InterpretError : {value}"),
}
}
}
impl From<kube::Error> for PreparationError {
fn from(value: kube::Error) -> Self {
Self {
msg: format!("PreparationError : {value}"),
}
}
}
impl From<String> for PreparationError {
fn from(value: String) -> Self {
Self {
msg: format!("PreparationError : {value}"),
}
}
}
#[derive(Clone, Debug, PartialEq)]
pub enum TopologyStatus {
Queued,
Preparing,
Success,
Noop,
Error,
}
pub struct TopologyState {
pub topology: String,
pub status: TopologyStatus,
}
impl TopologyState {
pub fn new(topology: String) -> Self {
let instance = Self {
topology,
status: TopologyStatus::Queued,
};
instrumentation::instrument(HarmonyEvent::TopologyStateChanged {
topology: instance.topology.clone(),
status: instance.status.clone(),
message: None,
})
.unwrap();
instance
}
pub fn prepare(&mut self) {
self.status = TopologyStatus::Preparing;
instrumentation::instrument(HarmonyEvent::TopologyStateChanged {
topology: self.topology.clone(),
status: self.status.clone(),
message: None,
})
.unwrap();
}
pub fn success(&mut self, message: String) {
self.status = TopologyStatus::Success;
instrumentation::instrument(HarmonyEvent::TopologyStateChanged {
topology: self.topology.clone(),
status: self.status.clone(),
message: Some(message),
})
.unwrap();
}
pub fn noop(&mut self) {
self.status = TopologyStatus::Noop;
instrumentation::instrument(HarmonyEvent::TopologyStateChanged {
topology: self.topology.clone(),
status: self.status.clone(),
message: None,
})
.unwrap();
}
pub fn error(&mut self, message: String) {
self.status = TopologyStatus::Error;
instrumentation::instrument(HarmonyEvent::TopologyStateChanged {
topology: self.topology.clone(),
status: self.status.clone(),
message: Some(message),
})
.unwrap();
}
/// - `Ok(Outcome)`: Indicates the topology is now ready. The `Outcome` status might be `SUCCESS` if actions were taken, or `NOOP` if it was already ready. The message should provide context.
/// - `Err(TopologyError)`: Indicates the topology could not reach a ready state due to configuration issues, discovery failures, bootstrap errors, or unsupported environments.
async fn ensure_ready(&self) -> Result<Outcome, InterpretError>;
}
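
From the caller's side the contract is simple: await ensure_ready, propagate the error, and log the outcome. A minimal sketch:

// Sketch: consuming ensure_ready as documented above.
async fn ensure<T: Topology>(topology: &T) -> Result<(), InterpretError> {
    let outcome = topology.ensure_ready().await?; // SUCCESS or NOOP on Ok
    log::info!("{} is ready: {outcome:?}", topology.name());
    Ok(())
}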
#[derive(Debug)]
@@ -211,7 +88,7 @@ impl Serialize for Url {
{
match self {
Url::LocalFolder(path) => serializer.serialize_str(path),
Url::Url(url) => serializer.serialize_str(url.as_str()),
Url::Url(url) => serializer.serialize_str(&url.as_str()),
}
}
}

View File

@@ -46,19 +46,16 @@ pub trait K8sclient: Send + Sync {
async fn k8s_client(&self) -> Result<Arc<K8sClient>, String>;
}
pub struct PxeOptions {
pub ipxe_filename: String,
pub bios_filename: String,
pub efi_filename: String,
pub tftp_ip: Option<IpAddress>,
}
#[async_trait]
pub trait DhcpServer: Send + Sync + std::fmt::Debug {
async fn add_static_mapping(&self, entry: &DHCPStaticEntry) -> Result<(), ExecutorError>;
async fn remove_static_mapping(&self, mac: &MacAddress) -> Result<(), ExecutorError>;
async fn list_static_mappings(&self) -> Vec<(MacAddress, IpAddress)>;
async fn set_pxe_options(&self, pxe_options: PxeOptions) -> Result<(), ExecutorError>;
async fn set_next_server(&self, ip: IpAddress) -> Result<(), ExecutorError>;
async fn set_boot_filename(&self, boot_filename: &str) -> Result<(), ExecutorError>;
async fn set_filename(&self, filename: &str) -> Result<(), ExecutorError>;
async fn set_filename64(&self, filename64: &str) -> Result<(), ExecutorError>;
async fn set_filenameipxe(&self, filenameipxe: &str) -> Result<(), ExecutorError>;
fn get_ip(&self) -> IpAddress;
fn get_host(&self) -> LogicalHost;
async fn commit_config(&self) -> Result<(), ExecutorError>;

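The PxeOptions struct collapses what used to be four filename setters plus next-server into a single call. A hedged usage sketch; the filenames are illustrative values only:

// Sketch: configure PXE in one call, then persist the change.
let opts = PxeOptions {
    ipxe_filename: "boot.ipxe".to_string(),     // illustrative
    bios_filename: "undionly.kpxe".to_string(), // illustrative
    efi_filename: "ipxe.efi".to_string(),       // illustrative
    tftp_ip: Some(tftp_server.get_ip()),
};
dhcp_server.set_pxe_options(opts).await?;
dhcp_server.commit_config().await?;
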
View File

@@ -1,5 +1,3 @@
use std::any::Any;
use async_trait::async_trait;
use log::debug;
@@ -45,7 +43,7 @@ impl<S: AlertSender + Installable<T>, T: Topology> Interpret<T> for AlertingInte
}
fn get_name(&self) -> InterpretName {
InterpretName::Alerting
todo!()
}
fn get_version(&self) -> Version {
@@ -64,9 +62,7 @@ impl<S: AlertSender + Installable<T>, T: Topology> Interpret<T> for AlertingInte
#[async_trait]
pub trait AlertReceiver<S: AlertSender>: std::fmt::Debug + Send + Sync {
async fn install(&self, sender: &S) -> Result<Outcome, InterpretError>;
fn name(&self) -> String;
fn clone_box(&self) -> Box<dyn AlertReceiver<S>>;
fn as_any(&self) -> &dyn Any;
}
#[async_trait]
@@ -76,6 +72,6 @@ pub trait AlertRule<S: AlertSender>: std::fmt::Debug + Send + Sync {
}
#[async_trait]
pub trait ScrapeTarget<S: AlertSender> {
pub trait ScrapeTarger<S: AlertSender> {
async fn install(&self, sender: &S) -> Result<(), InterpretError>;
}

View File

@@ -27,11 +27,11 @@ pub struct UnmanagedRouter {
impl Router for UnmanagedRouter {
fn get_gateway(&self) -> IpAddress {
self.gateway
self.gateway.clone()
}
fn get_cidr(&self) -> Ipv4Cidr {
self.cidr
self.cidr.clone()
}
fn get_host(&self) -> LogicalHost {

View File

@@ -15,38 +15,36 @@ use k8s_openapi::{
apimachinery::pkg::util::intstr::IntOrString,
};
use kube::Resource;
use log::debug;
use log::{debug, info, warn};
use serde::de::DeserializeOwned;
use serde_json::json;
use tokio::sync::OnceCell;
use super::{TenantConfig, TenantManager, network_policy::NetworkPolicyStrategy};
use super::{TenantConfig, TenantManager};
#[derive(Debug)]
#[derive(Clone, Debug)]
pub struct K8sTenantManager {
k8s_client: Arc<K8sClient>,
k8s_tenant_config: Arc<OnceCell<TenantConfig>>,
network_policy_strategy: Box<dyn NetworkPolicyStrategy>,
}
impl K8sTenantManager {
pub fn new(
client: Arc<K8sClient>,
network_policy_strategy: Box<dyn NetworkPolicyStrategy>,
) -> Self {
pub fn new(client: Arc<K8sClient>) -> Self {
Self {
k8s_client: client,
k8s_tenant_config: Arc::new(OnceCell::new()),
network_policy_strategy,
}
}
}
impl K8sTenantManager {
fn get_namespace_name(&self, config: &TenantConfig) -> String {
config.name.clone()
}
fn ensure_constraints(&self, _namespace: &Namespace) -> Result<(), ExecutorError> {
// TODO: Ensure constraints are applied to namespace (https://git.nationtech.io/NationTech/harmony/issues/98)
warn!("Validate that when tenant already exists (by id) that name has not changed");
warn!("Make sure other Tenant constraints are respected by this k8s implementation");
Ok(())
}
@@ -221,6 +219,24 @@ impl K8sTenantManager {
}
]
},
{
"to": [
{
"ipBlock": {
"cidr": "10.43.0.1/32",
}
}
]
},
{
"to": [
{
"ipBlock": {
"cidr": "172.23.0.0/16",
}
}
]
},
{
"to": [
{
@@ -288,19 +304,19 @@ impl K8sTenantManager {
let ports: Option<Vec<NetworkPolicyPort>> =
c.1.as_ref().map(|spec| match &spec.data {
super::PortSpecData::SinglePort(port) => vec![NetworkPolicyPort {
port: Some(IntOrString::Int((*port).into())),
port: Some(IntOrString::Int(port.clone().into())),
..Default::default()
}],
super::PortSpecData::PortRange(start, end) => vec![NetworkPolicyPort {
port: Some(IntOrString::Int((*start).into())),
end_port: Some((*end).into()),
port: Some(IntOrString::Int(start.clone().into())),
end_port: Some(end.clone().into()),
protocol: None, // Not currently supported by Harmony
}],
super::PortSpecData::ListOfPorts(items) => items
.iter()
.map(|i| NetworkPolicyPort {
port: Some(IntOrString::Int((*i).into())),
port: Some(IntOrString::Int(i.clone().into())),
..Default::default()
})
.collect(),
@@ -345,19 +361,19 @@ impl K8sTenantManager {
let ports: Option<Vec<NetworkPolicyPort>> =
c.1.as_ref().map(|spec| match &spec.data {
super::PortSpecData::SinglePort(port) => vec![NetworkPolicyPort {
port: Some(IntOrString::Int((*port).into())),
port: Some(IntOrString::Int(port.clone().into())),
..Default::default()
}],
super::PortSpecData::PortRange(start, end) => vec![NetworkPolicyPort {
port: Some(IntOrString::Int((*start).into())),
end_port: Some((*end).into()),
port: Some(IntOrString::Int(start.clone().into())),
end_port: Some(end.clone().into()),
protocol: None, // Not currently supported by Harmony
}],
super::PortSpecData::ListOfPorts(items) => items
.iter()
.map(|i| NetworkPolicyPort {
port: Some(IntOrString::Int((*i).into())),
port: Some(IntOrString::Int(i.clone().into())),
..Default::default()
})
.collect(),
@@ -390,27 +406,12 @@ impl K8sTenantManager {
}
}
impl Clone for K8sTenantManager {
fn clone(&self) -> Self {
Self {
k8s_client: self.k8s_client.clone(),
k8s_tenant_config: self.k8s_tenant_config.clone(),
network_policy_strategy: self.network_policy_strategy.clone_box(),
}
}
}
#[async_trait]
impl TenantManager for K8sTenantManager {
async fn provision_tenant(&self, config: &TenantConfig) -> Result<(), ExecutorError> {
let namespace = self.build_namespace(config)?;
let resource_quota = self.build_resource_quota(config)?;
let network_policy = self.build_network_policy(config)?;
let network_policy = self
.network_policy_strategy
.adjust_policy(network_policy, config);
let resource_limit_range = self.build_limit_range(config)?;
self.ensure_constraints(&namespace)?;
@@ -427,14 +428,13 @@ impl TenantManager for K8sTenantManager {
debug!("Creating network_policy for tenant {}", config.name);
self.apply_resource(network_policy, config).await?;
debug!(
info!(
"Success provisionning K8s tenant id {} name {}",
config.id, config.name
);
self.store_config(config);
Ok(())
}
async fn get_tenant_config(&self) -> Option<TenantConfig> {
self.k8s_tenant_config.get().cloned()
}

View File

@@ -1,11 +1,11 @@
pub mod k8s;
mod manager;
pub mod network_policy;
use std::str::FromStr;
use crate::data::Id;
pub use manager::*;
use serde::{Deserialize, Serialize};
use std::str::FromStr;
use crate::data::Id;
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] // Assuming serde for Scores
pub struct TenantConfig {

View File

@@ -1,120 +0,0 @@
use k8s_openapi::api::networking::v1::{
IPBlock, NetworkPolicy, NetworkPolicyEgressRule, NetworkPolicyPeer, NetworkPolicySpec,
};
use super::TenantConfig;
pub trait NetworkPolicyStrategy: Send + Sync + std::fmt::Debug {
fn clone_box(&self) -> Box<dyn NetworkPolicyStrategy>;
fn adjust_policy(&self, policy: NetworkPolicy, config: &TenantConfig) -> NetworkPolicy;
}
#[derive(Clone, Debug)]
pub struct NoopNetworkPolicyStrategy {}
impl NoopNetworkPolicyStrategy {
pub fn new() -> Self {
Self {}
}
}
impl Default for NoopNetworkPolicyStrategy {
fn default() -> Self {
Self::new()
}
}
impl NetworkPolicyStrategy for NoopNetworkPolicyStrategy {
fn clone_box(&self) -> Box<dyn NetworkPolicyStrategy> {
Box::new(self.clone())
}
fn adjust_policy(&self, policy: NetworkPolicy, _config: &TenantConfig) -> NetworkPolicy {
policy
}
}
#[derive(Clone, Debug)]
pub struct K3dNetworkPolicyStrategy {}
impl K3dNetworkPolicyStrategy {
pub fn new() -> Self {
Self {}
}
}
impl Default for K3dNetworkPolicyStrategy {
fn default() -> Self {
Self::new()
}
}
impl NetworkPolicyStrategy for K3dNetworkPolicyStrategy {
fn clone_box(&self) -> Box<dyn NetworkPolicyStrategy> {
Box::new(self.clone())
}
fn adjust_policy(&self, policy: NetworkPolicy, _config: &TenantConfig) -> NetworkPolicy {
let mut egress = policy
.spec
.clone()
.unwrap_or_default()
.egress
.clone()
.unwrap_or_default();
egress.push(NetworkPolicyEgressRule {
to: Some(vec![NetworkPolicyPeer {
ip_block: Some(IPBlock {
cidr: "172.18.0.0/16".into(), // TODO: query the IP range https://git.nationtech.io/NationTech/harmony/issues/108
..Default::default()
}),
..Default::default()
}]),
..Default::default()
});
NetworkPolicy {
spec: Some(NetworkPolicySpec {
egress: Some(egress),
..policy.spec.unwrap_or_default()
}),
..policy
}
}
}
#[cfg(test)]
mod tests {
use k8s_openapi::api::networking::v1::{
IPBlock, NetworkPolicy, NetworkPolicyEgressRule, NetworkPolicyPeer, NetworkPolicySpec,
};
use super::{K3dNetworkPolicyStrategy, NetworkPolicyStrategy};
#[test]
pub fn should_add_ip_block_for_k3d_harmony_server() {
let strategy = K3dNetworkPolicyStrategy::new();
let policy =
strategy.adjust_policy(NetworkPolicy::default(), &super::TenantConfig::default());
let expected_policy = NetworkPolicy {
spec: Some(NetworkPolicySpec {
egress: Some(vec![NetworkPolicyEgressRule {
to: Some(vec![NetworkPolicyPeer {
ip_block: Some(IPBlock {
cidr: "172.18.0.0/16".into(),
..Default::default()
}),
..Default::default()
}]),
..Default::default()
}]),
..Default::default()
}),
..Default::default()
};
assert_eq!(expected_policy, policy);
}
}
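
Selecting a strategy is a one-line dispatch on the cluster source, mirroring the match shown in K8sAnywhereTopology above:

// Sketch: k3d clusters need the extra egress block, plain kubeconfig clusters do not.
let strategy: Box<dyn NetworkPolicyStrategy> = match source {
    K8sSource::LocalK3d => Box::new(K3dNetworkPolicyStrategy::new()),
    K8sSource::Kubeconfig => Box::new(NoopNetworkPolicyStrategy::new()),
};
let policy = strategy.adjust_policy(policy, &config);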

View File

@@ -1,10 +1,10 @@
use async_trait::async_trait;
use harmony_types::net::MacAddress;
use log::{debug, info};
use log::debug;
use crate::{
executors::ExecutorError,
topology::{DHCPStaticEntry, DhcpServer, IpAddress, LogicalHost, PxeOptions},
topology::{DHCPStaticEntry, DhcpServer, IpAddress, LogicalHost},
};
use super::OPNSenseFirewall;
@@ -26,7 +26,7 @@ impl DhcpServer for OPNSenseFirewall {
.unwrap();
}
info!("Registered {:?}", entry);
debug!("Registered {:?}", entry);
Ok(())
}
@@ -46,25 +46,57 @@ impl DhcpServer for OPNSenseFirewall {
self.host.clone()
}
async fn set_pxe_options(&self, options: PxeOptions) -> Result<(), ExecutorError> {
let mut writable_opnsense = self.opnsense_config.write().await;
let PxeOptions {
ipxe_filename,
bios_filename,
efi_filename,
tftp_ip,
} = options;
writable_opnsense
.dhcp()
.set_pxe_options(
tftp_ip.map(|i| i.to_string()),
bios_filename,
efi_filename,
ipxe_filename,
)
.await
.map_err(|dhcp_error| {
ExecutorError::UnexpectedError(format!("Failed to set_pxe_options : {dhcp_error}"))
})
async fn set_next_server(&self, ip: IpAddress) -> Result<(), ExecutorError> {
let ipv4 = match ip {
std::net::IpAddr::V4(ipv4_addr) => ipv4_addr,
std::net::IpAddr::V6(_) => todo!("ipv6 not supported yet"),
};
{
let mut writable_opnsense = self.opnsense_config.write().await;
writable_opnsense.dhcp().set_next_server(ipv4);
debug!("OPNsense dhcp server set next server {ipv4}");
}
Ok(())
}
async fn set_boot_filename(&self, boot_filename: &str) -> Result<(), ExecutorError> {
{
let mut writable_opnsense = self.opnsense_config.write().await;
writable_opnsense.dhcp().set_boot_filename(boot_filename);
debug!("OPNsense dhcp server set boot filename {boot_filename}");
}
Ok(())
}
async fn set_filename(&self, filename: &str) -> Result<(), ExecutorError> {
{
let mut writable_opnsense = self.opnsense_config.write().await;
writable_opnsense.dhcp().set_filename(filename);
debug!("OPNsense dhcp server set filename {filename}");
}
Ok(())
}
async fn set_filename64(&self, filename: &str) -> Result<(), ExecutorError> {
{
let mut writable_opnsense = self.opnsense_config.write().await;
writable_opnsense.dhcp().set_filename64(filename);
debug!("OPNsense dhcp server set filename {filename}");
}
Ok(())
}
async fn set_filenameipxe(&self, filenameipxe: &str) -> Result<(), ExecutorError> {
{
let mut writable_opnsense = self.opnsense_config.write().await;
writable_opnsense.dhcp().set_filenameipxe(filenameipxe);
debug!("OPNsense dhcp server set filenameipxe {filenameipxe}");
}
Ok(())
}
}

View File

@@ -60,7 +60,7 @@ impl DnsServer for OPNSenseFirewall {
}
fn get_ip(&self) -> IpAddress {
OPNSenseFirewall::get_ip(self)
OPNSenseFirewall::get_ip(&self)
}
fn get_host(&self) -> LogicalHost {

View File

@@ -2,23 +2,23 @@ use async_trait::async_trait;
use log::info;
use crate::{
data::FileContent,
executors::ExecutorError,
topology::{HttpServer, IpAddress, Url},
};
use super::OPNSenseFirewall;
const OPNSENSE_HTTP_ROOT_PATH: &str = "/usr/local/http";
#[async_trait]
impl HttpServer for OPNSenseFirewall {
async fn serve_files(&self, url: &Url) -> Result<(), ExecutorError> {
let http_root_path = "/usr/local/http";
let config = self.opnsense_config.read().await;
info!("Uploading files from url {url} to {OPNSENSE_HTTP_ROOT_PATH}");
info!("Uploading files from url {url} to {http_root_path}");
match url {
Url::LocalFolder(path) => {
config
.upload_files(path, OPNSENSE_HTTP_ROOT_PATH)
.upload_files(path, http_root_path)
.await
.map_err(|e| ExecutorError::UnexpectedError(e.to_string()))?;
}
@@ -27,29 +27,8 @@ impl HttpServer for OPNSenseFirewall {
Ok(())
}
async fn serve_file_content(&self, file: &FileContent) -> Result<(), ExecutorError> {
let path = match &file.path {
crate::data::FilePath::Relative(path) => {
format!("{OPNSENSE_HTTP_ROOT_PATH}/{}", path.to_string())
}
crate::data::FilePath::Absolute(path) => {
return Err(ExecutorError::ConfigurationError(format!(
"Cannot serve file from http server with absolute path : {path}"
)));
}
};
let config = self.opnsense_config.read().await;
info!("Uploading file content to {}", path);
config
.upload_file_content(&path, &file.content)
.await
.map_err(|e| ExecutorError::UnexpectedError(e.to_string()))?;
Ok(())
}
fn get_ip(&self) -> IpAddress {
OPNSenseFirewall::get_ip(self)
todo!();
}
async fn commit_config(&self) -> Result<(), ExecutorError> {
@@ -69,7 +48,7 @@ impl HttpServer for OPNSenseFirewall {
async fn ensure_initialized(&self) -> Result<(), ExecutorError> {
let mut config = self.opnsense_config.write().await;
let caddy = config.caddy();
if caddy.get_full_config().is_none() {
if let None = caddy.get_full_config() {
info!("Http config not available in opnsense config, installing package");
config.install_package("os-caddy").await.map_err(|e| {
ExecutorError::UnexpectedError(format!(

View File

@@ -121,12 +121,10 @@ pub(crate) fn haproxy_xml_config_to_harmony_loadbalancer(
LoadBalancerService {
backend_servers,
listening_port: frontend.bind.parse().unwrap_or_else(|_| {
panic!(
"HAProxy frontend address should be a valid SocketAddr, got {}",
frontend.bind
)
}),
listening_port: frontend.bind.parse().expect(&format!(
"HAProxy frontend address should be a valid SocketAddr, got {}",
frontend.bind
)),
health_check,
}
})
@@ -169,28 +167,28 @@ pub(crate) fn get_health_check_for_backend(
None => return None,
};
let haproxy_health_check = haproxy
let haproxy_health_check = match haproxy
.healthchecks
.healthchecks
.iter()
.find(|h| &h.uuid == health_check_uuid)?;
.find(|h| &h.uuid == health_check_uuid)
{
Some(health_check) => health_check,
None => return None,
};
let binding = haproxy_health_check.health_check_type.to_uppercase();
let uppercase = binding.as_str();
match uppercase {
"TCP" => {
if let Some(checkport) = haproxy_health_check.checkport.content.as_ref() {
if !checkport.is_empty() {
return Some(HealthCheck::TCP(Some(checkport.parse().unwrap_or_else(
|_| {
panic!(
"HAProxy check port should be a valid port number, got {checkport}"
)
},
))));
if checkport.len() > 0 {
return Some(HealthCheck::TCP(Some(checkport.parse().expect(&format!(
"HAProxy check port should be a valid port number, got {checkport}"
)))));
}
}
Some(HealthCheck::TCP(None))
return Some(HealthCheck::TCP(None));
}
"HTTP" => {
let path: String = haproxy_health_check
@@ -357,13 +355,16 @@ mod tests {
// Create an HAProxy instance with servers
let mut haproxy = HAProxy::default();
let server = HAProxyServer {
uuid: "server1".to_string(),
address: "192.168.1.1".to_string(),
port: 80,
..Default::default()
};
let mut server = HAProxyServer::default();
server.uuid = "server1".to_string();
server.address = "192.168.1.1".to_string();
server.port = 80;
haproxy.servers.servers.push(server);
let mut server = HAProxyServer::default();
server.uuid = "server3".to_string();
server.address = "192.168.1.3".to_string();
server.port = 8080;
// Call the function
let result = get_servers_for_backend(&backend, &haproxy);
@@ -383,12 +384,10 @@ mod tests {
let backend = HAProxyBackend::default();
// Create an HAProxy instance with servers
let mut haproxy = HAProxy::default();
let server = HAProxyServer {
uuid: "server1".to_string(),
address: "192.168.1.1".to_string(),
port: 80,
..Default::default()
};
let mut server = HAProxyServer::default();
server.uuid = "server1".to_string();
server.address = "192.168.1.1".to_string();
server.port = 80;
haproxy.servers.servers.push(server);
// Call the function
let result = get_servers_for_backend(&backend, &haproxy);
@@ -403,12 +402,10 @@ mod tests {
backend.linked_servers.content = Some("server4,server5".to_string());
// Create an HAProxy instance with servers
let mut haproxy = HAProxy::default();
let server = HAProxyServer {
uuid: "server1".to_string(),
address: "192.168.1.1".to_string(),
port: 80,
..Default::default()
};
let mut server = HAProxyServer::default();
server.uuid = "server1".to_string();
server.address = "192.168.1.1".to_string();
server.port = 80;
haproxy.servers.servers.push(server);
// Call the function
let result = get_servers_for_backend(&backend, &haproxy);
@@ -419,28 +416,20 @@ mod tests {
#[test]
fn test_get_servers_for_backend_multiple_linked_servers() {
// Create a backend with multiple linked servers
#[allow(clippy::field_reassign_with_default)]
let mut backend = HAProxyBackend::default();
backend.linked_servers.content = Some("server1,server2".to_string());
//
// Create an HAProxy instance with matching servers
let mut haproxy = HAProxy::default();
let server = HAProxyServer {
uuid: "server1".to_string(),
address: "some-hostname.test.mcd".to_string(),
port: 80,
..Default::default()
};
let mut server = HAProxyServer::default();
server.uuid = "server1".to_string();
server.address = "some-hostname.test.mcd".to_string();
server.port = 80;
haproxy.servers.servers.push(server);
let server = HAProxyServer {
uuid: "server2".to_string(),
address: "192.168.1.2".to_string(),
port: 8080,
..Default::default()
};
let mut server = HAProxyServer::default();
server.uuid = "server2".to_string();
server.address = "192.168.1.2".to_string();
server.port = 8080;
haproxy.servers.servers.push(server);
// Call the function
let result = get_servers_for_backend(&backend, &haproxy);
// Check the result

View File

@@ -28,7 +28,7 @@ impl TftpServer for OPNSenseFirewall {
}
fn get_ip(&self) -> IpAddress {
OPNSenseFirewall::get_ip(self)
todo!()
}
async fn set_ip(&self, ip: IpAddress) -> Result<(), ExecutorError> {
@@ -58,7 +58,7 @@ impl TftpServer for OPNSenseFirewall {
async fn ensure_initialized(&self) -> Result<(), ExecutorError> {
let mut config = self.opnsense_config.write().await;
let tftp = config.tftp();
if tftp.get_full_config().is_none() {
if let None = tftp.get_full_config() {
info!("Tftp config not available in opnsense config, installing package");
config.install_package("os-tftp").await.map_err(|e| {
ExecutorError::UnexpectedError(format!(

View File

@@ -13,7 +13,7 @@ pub trait ApplicationFeature<T: Topology>:
fn name(&self) -> String;
}
pub trait ApplicationFeatureClone<T: Topology> {
trait ApplicationFeatureClone<T: Topology> {
fn clone_box(&self) -> Box<dyn ApplicationFeature<T>>;
}
@@ -27,7 +27,7 @@ where
}
impl<T: Topology> Serialize for Box<dyn ApplicationFeature<T>> {
fn serialize<S>(&self, _serializer: S) -> Result<S::Ok, S::Error>
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: serde::Serializer,
{

View File

@@ -184,11 +184,12 @@ impl ArgoApplication {
pub fn to_yaml(&self) -> serde_yaml::Value {
let name = &self.name;
let namespace = if let Some(ns) = self.namespace.as_ref() {
ns
&ns
} else {
"argocd"
};
let project = &self.project;
let source = &self.source;
let yaml_str = format!(
r#"
@@ -227,7 +228,7 @@ spec:
serde_yaml::to_value(&self.source).expect("couldn't serialize source to value");
let sync_policy = serde_yaml::to_value(&self.sync_policy)
.expect("couldn't serialize sync_policy to value");
let revision_history_limit = serde_yaml::to_value(self.revision_history_limit)
let revision_history_limit = serde_yaml::to_value(&self.revision_history_limit)
.expect("couldn't serialize revision_history_limit to value");
spec.insert(

View File

@@ -1,7 +1,7 @@
use std::{io::Write, process::Command, sync::Arc};
use async_trait::async_trait;
use log::info;
use log::{debug, error};
use serde_yaml::Value;
use tempfile::NamedTempFile;
@@ -10,7 +10,7 @@ use crate::{
data::Version,
inventory::Inventory,
modules::application::{
ApplicationFeature, HelmPackage, OCICompliant,
Application, ApplicationFeature, HelmPackage, OCICompliant,
features::{ArgoApplication, ArgoHelmScore},
},
score::Score,
@@ -56,11 +56,14 @@ impl<A: OCICompliant + HelmPackage> ContinuousDelivery<A> {
chart_url: String,
image_name: String,
) -> Result<(), String> {
// TODO: This works only with local k3d installations, which is fine only for current demo purposes. We assume usage of K8sAnywhereTopology
// https://git.nationtech.io/NationTech/harmony/issues/106
error!(
"FIXME This works only with local k3d installations, which is fine only for current demo purposes. We assume usage of K8sAnywhereTopology"
);
error!("TODO hardcoded k3d bin path is wrong");
let k3d_bin_path = (*HARMONY_DATA_DIR).join("k3d").join("k3d");
// --- 1. Import the container image into the k3d cluster ---
info!(
debug!(
"Importing image '{}' into k3d cluster 'harmony'",
image_name
);
@@ -77,7 +80,7 @@ impl<A: OCICompliant + HelmPackage> ContinuousDelivery<A> {
}
// --- 2. Get the kubeconfig for the k3d cluster and write it to a temp file ---
info!("Retrieving kubeconfig for k3d cluster 'harmony'");
debug!("Retrieving kubeconfig for k3d cluster 'harmony'");
let kubeconfig_output = Command::new(&k3d_bin_path)
.args(["kubeconfig", "get", "harmony"])
.output()
@@ -98,7 +101,7 @@ impl<A: OCICompliant + HelmPackage> ContinuousDelivery<A> {
let kubeconfig_path = temp_kubeconfig.path().to_str().unwrap();
// --- 3. Install or upgrade the Helm chart in the cluster ---
info!(
debug!(
"Deploying Helm chart '{}' to namespace '{}'",
chart_url, app_name
);
@@ -128,7 +131,7 @@ impl<A: OCICompliant + HelmPackage> ContinuousDelivery<A> {
));
}
info!("Successfully deployed '{}' to local k3d cluster.", app_name);
debug!("Successfully deployed '{}' to local k3d cluster.", app_name);
Ok(())
}
}
@@ -148,12 +151,14 @@ impl<
// Or ask for it when unknown
let helm_chart = self.application.build_push_helm_package(&image).await?;
debug!("Pushed new helm chart {helm_chart}");
// TODO: Make building image configurable/skippable if image already exists (prompt)")
// https://git.nationtech.io/NationTech/harmony/issues/104
error!("TODO Make building image configurable/skippable if image already exists (prompt)");
let image = self.application.build_push_oci_image().await?;
debug!("Pushed new docker image {image}");
// TODO: this is a temporary hack for demo purposes, the deployment target should be driven
debug!("Installing ContinuousDelivery feature");
// TODO this is a temporary hack for demo purposes, the deployment target should be driven
// by the topology only and we should not have to know how to perform tasks like this for
// which the topology should be responsible.
//
@@ -166,20 +171,17 @@ impl<
// access it. This forces every Topology to understand the concept of targets though... So
// instead I'll create a new Capability which is MultiTargetTopology and we'll see how it
// goes. It still does not feel right though.
//
// https://git.nationtech.io/NationTech/harmony/issues/106
match topology.current_target() {
DeploymentTarget::LocalDev => {
info!("Deploying {} locally...", self.application.name());
self.deploy_to_local_k3d(self.application.name(), helm_chart, image)
.await?;
}
target => {
info!("Deploying {} to target {target:?}", self.application.name());
debug!("Deploying to target {target:?}");
let score = ArgoHelmScore {
namespace: "harmony-example-rust-webapp".to_string(),
openshift: true,
domain: "argo.harmonydemo.apps.ncd0.harmony.mcd".to_string(),
namespace: "harmonydemo-staging".to_string(),
openshift: false,
domain: "argo.harmonydemo.apps.st.mcd".to_string(),
argo_apps: vec![ArgoApplication::from(CDApplicationConfig {
// helm pull oci://hub.nationtech.io/harmony/harmony-example-rust-webapp-chart --version 0.1.0
version: Version::from("0.1.0").unwrap(),
@@ -187,11 +189,12 @@ impl<
helm_chart_name: "harmony-example-rust-webapp-chart".to_string(),
values_overrides: None,
name: "harmony-demo-rust-webapp".to_string(),
namespace: "harmony-example-rust-webapp".to_string(),
namespace: "harmonydemo-staging".to_string(),
})],
};
score
.interpret(&Inventory::empty(), topology)
.create_interpret()
.execute(&Inventory::empty(), topology)
.await
.unwrap();
}

View File

@@ -1,4 +1,5 @@
use async_trait::async_trait;
use log::error;
use non_blank_string_rs::NonBlankString;
use serde::Serialize;
use std::str::FromStr;
@@ -49,21 +50,20 @@ impl<T: Topology + K8sclient + HelmCommand> Interpret<T> for ArgoInterpret {
inventory: &Inventory,
topology: &T,
) -> Result<Outcome, InterpretError> {
self.score.interpret(inventory, topology).await?;
error!("Uncomment below, only disabled for debugging");
self.score
.create_interpret()
.execute(inventory, topology)
.await?;
let k8s_client = topology.k8s_client().await?;
k8s_client
.apply_yaml_many(&self.argo_apps.iter().map(|a| a.to_yaml()).collect(), None)
.await
.unwrap();
Ok(Outcome::success(format!(
"ArgoCD installed with {} {}",
self.argo_apps.len(),
match self.argo_apps.len() {
1 => "application",
_ => "applications",
}
"Successfully installed ArgoCD and {} Applications",
self.argo_apps.len()
)))
}
@@ -986,7 +986,7 @@ commitServer:
);
HelmChartScore {
namespace: Some(NonBlankString::from_str(namespace).unwrap()),
namespace: Some(NonBlankString::from_str(&namespace).unwrap()),
release_name: NonBlankString::from_str("argo-cd").unwrap(),
chart_name: NonBlankString::from_str("argo/argo-cd").unwrap(),
chart_version: Some(NonBlankString::from_str("8.1.2").unwrap()),

View File

@@ -1,67 +1,51 @@
use std::sync::Arc;
use crate::modules::application::{Application, ApplicationFeature};
use crate::modules::monitoring::application_monitoring::application_monitoring_score::ApplicationMonitoringScore;
use crate::modules::monitoring::kube_prometheus::crd::crd_alertmanager_config::CRDPrometheus;
use crate::topology::MultiTargetTopology;
use crate::{
inventory::Inventory,
modules::monitoring::{
alert_channel::webhook_receiver::WebhookReceiver, ntfy::ntfy::NtfyScore,
},
score::Score,
topology::{HelmCommand, K8sclient, Topology, Url, tenant::TenantManager},
};
use crate::{
modules::prometheus::prometheus::PrometheusApplicationMonitoring,
topology::oberservability::monitoring::AlertReceiver,
};
use async_trait::async_trait;
use base64::{Engine as _, engine::general_purpose};
use log::{debug, info};
use crate::{
inventory::Inventory,
modules::{
application::{ApplicationFeature, OCICompliant},
monitoring::{
alert_channel::webhook_receiver::WebhookReceiver,
kube_prometheus::{
helm_prometheus_alert_score::HelmPrometheusAlertingScore,
types::{NamespaceSelector, ServiceMonitor},
},
ntfy::ntfy::NtfyScore,
},
},
score::Score,
topology::{HelmCommand, K8sclient, Topology, Url, tenant::TenantManager},
};
#[derive(Debug, Clone)]
pub struct Monitoring {
pub application: Arc<dyn Application>,
pub alert_receiver: Vec<Box<dyn AlertReceiver<CRDPrometheus>>>,
pub application: Arc<dyn OCICompliant>,
}
#[async_trait]
impl<
T: Topology
+ HelmCommand
+ 'static
+ TenantManager
+ K8sclient
+ MultiTargetTopology
+ std::fmt::Debug
+ PrometheusApplicationMonitoring<CRDPrometheus>,
> ApplicationFeature<T> for Monitoring
impl<T: Topology + HelmCommand + K8sclient + 'static + TenantManager> ApplicationFeature<T>
for Monitoring
{
async fn ensure_installed(&self, topology: &T) -> Result<(), String> {
info!("Ensuring monitoring is available for application");
let namespace = topology
.get_tenant_config()
.await
.map(|ns| ns.name.clone())
.unwrap_or_else(|| self.application.name());
let mut alerting_score = ApplicationMonitoringScore {
sender: CRDPrometheus {
namespace: namespace.clone(),
client: topology.k8s_client().await.unwrap(),
},
application: self.application.clone(),
receivers: self.alert_receiver.clone(),
};
let ntfy = NtfyScore {
namespace: namespace.clone(),
host: "ntfy.harmonydemo.apps.ncd0.harmony.mcd".to_string(),
// namespace: topology
// .get_tenant_config()
// .await
// .expect("couldn't get tenant config")
// .name,
namespace: self.application.name(),
host: "localhost".to_string(),
};
ntfy.interpret(&Inventory::empty(), topology)
ntfy.create_interpret()
.execute(&Inventory::empty(), topology)
.await
.map_err(|e| e.to_string())?;
.expect("couldn't create interpret for ntfy");
let ntfy_default_auth_username = "harmony";
let ntfy_default_auth_password = "harmony";
@@ -86,7 +70,7 @@ impl<
url::Url::parse(
format!(
"http://ntfy.{}.svc.cluster.local/rust-web-app?auth={ntfy_default_auth_param}",
namespace.clone()
self.application.name()
)
.as_str(),
)
@@ -94,11 +78,31 @@ impl<
),
};
alerting_score.receivers.push(Box::new(ntfy_receiver));
let mut service_monitor = ServiceMonitor::default();
service_monitor.namespace_selector = Some(NamespaceSelector {
any: true,
match_names: vec![],
});
service_monitor.name = "rust-webapp".to_string();
// let alerting_score = ApplicationPrometheusMonitoringScore {
// receivers: vec![Box::new(ntfy_receiver)],
// rules: vec![],
// service_monitors: vec![service_monitor],
// };
let alerting_score = HelmPrometheusAlertingScore {
receivers: vec![Box::new(ntfy_receiver)],
rules: vec![],
service_monitors: vec![service_monitor],
};
alerting_score
.interpret(&Inventory::empty(), topology)
.create_interpret()
.execute(&Inventory::empty(), topology)
.await
.map_err(|e| e.to_string())?;
.unwrap();
Ok(())
}
fn name(&self) -> String {

View File

@@ -10,23 +10,14 @@ pub use oci::*;
pub use rust::*;
use async_trait::async_trait;
use serde::Serialize;
use crate::{
data::{Id, Version},
instrumentation::{self, HarmonyEvent},
interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
inventory::Inventory,
topology::Topology,
};
#[derive(Clone, Debug)]
pub enum ApplicationFeatureStatus {
Installing,
Installed,
Failed { details: String },
}
pub trait Application: std::fmt::Debug + Send + Sync {
fn name(&self) -> String;
}
@@ -55,41 +46,20 @@ impl<A: Application, T: Topology + std::fmt::Debug> Interpret<T> for Application
.join(", ")
);
for feature in self.features.iter() {
instrumentation::instrument(HarmonyEvent::ApplicationFeatureStateChanged {
topology: topology.name().into(),
application: self.application.name(),
feature: feature.name(),
status: ApplicationFeatureStatus::Installing,
})
.unwrap();
debug!(
"Installing feature {} for application {app_name}",
feature.name()
);
let _ = match feature.ensure_installed(topology).await {
Ok(()) => {
instrumentation::instrument(HarmonyEvent::ApplicationFeatureStateChanged {
topology: topology.name().into(),
application: self.application.name(),
feature: feature.name(),
status: ApplicationFeatureStatus::Installed,
})
.unwrap();
}
Ok(()) => (),
Err(msg) => {
instrumentation::instrument(HarmonyEvent::ApplicationFeatureStateChanged {
topology: topology.name().into(),
application: self.application.name(),
feature: feature.name(),
status: ApplicationFeatureStatus::Failed {
details: msg.clone(),
},
})
.unwrap();
return Err(InterpretError::new(format!(
"Application Interpret failed to install feature : {msg}"
)));
}
};
}
Ok(Outcome::success("Application created".to_string()))
Ok(Outcome::success("successfully created app".to_string()))
}
fn get_name(&self) -> InterpretName {
@@ -108,12 +78,3 @@ impl<A: Application, T: Topology + std::fmt::Debug> Interpret<T> for Application
todo!()
}
}
-impl Serialize for dyn Application {
-    fn serialize<S>(&self, _serializer: S) -> Result<S::Ok, S::Error>
-    where
-        S: serde::Serializer,
-    {
-        todo!()
-    }
-}

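The ApplicationFeatureStatus enum and the instrument() calls removed above bracket each feature install with lifecycle events. A self-contained sketch of the pattern, with a println! stand-in for the real event bus:

#[derive(Clone, Debug)]
enum ApplicationFeatureStatus {
    Installing,
    Installed,
    Failed { details: String },
}

// Stand-in: the real instrument() publishes a HarmonyEvent to subscribed handlers.
fn instrument(feature: &str, status: ApplicationFeatureStatus) {
    println!("feature={feature} status={status:?}");
}

fn install_feature(feature: &str) -> Result<(), String> {
    instrument(feature, ApplicationFeatureStatus::Installing);
    let result: Result<(), String> = Ok(()); // the actual install would run here
    match &result {
        Ok(()) => instrument(feature, ApplicationFeatureStatus::Installed),
        Err(msg) => instrument(
            feature,
            ApplicationFeatureStatus::Failed { details: msg.clone() },
        ),
    }
    result
}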
@@ -1,5 +1,5 @@
use std::fs;
-use std::path::{Path, PathBuf};
+use std::path::PathBuf;
use std::process;
use std::sync::Arc;
@@ -10,7 +10,7 @@ use dockerfile_builder::Dockerfile;
use dockerfile_builder::instruction::{CMD, COPY, ENV, EXPOSE, FROM, RUN, USER, WORKDIR};
use dockerfile_builder::instruction_builder::CopyBuilder;
use futures_util::StreamExt;
-use log::{debug, info, log_enabled};
+use log::{debug, error, log_enabled};
use serde::Serialize;
use tar::Archive;
@@ -46,7 +46,7 @@ where
}
fn name(&self) -> String {
format!("{} [ApplicationScore]", self.application.name())
format!("Application: {}", self.application.name())
}
}
@@ -73,19 +73,19 @@ impl Application for RustWebapp {
#[async_trait]
impl HelmPackage for RustWebapp {
async fn build_push_helm_package(&self, image_url: &str) -> Result<String, String> {
info!("Starting Helm chart build and push for '{}'", self.name);
debug!("Starting Helm chart build and push for '{}'", self.name);
// 1. Create the Helm chart files on disk.
let chart_dir = self
.create_helm_chart_files(image_url)
.map_err(|e| format!("Failed to create Helm chart files: {}", e))?;
info!("Successfully created Helm chart files in {:?}", chart_dir);
debug!("Successfully created Helm chart files in {:?}", chart_dir);
// 2. Package the chart into a .tgz archive.
let packaged_chart_path = self
.package_helm_chart(&chart_dir)
.map_err(|e| format!("Failed to package Helm chart: {}", e))?;
-        info!(
+        debug!(
"Successfully packaged Helm chart: {}",
packaged_chart_path.to_string_lossy()
);
@@ -94,7 +94,7 @@ impl HelmPackage for RustWebapp {
let oci_chart_url = self
.push_helm_chart(&packaged_chart_path)
.map_err(|e| format!("Failed to push Helm chart: {}", e))?;
info!("Successfully pushed Helm chart to: {}", oci_chart_url);
debug!("Successfully pushed Helm chart to: {}", oci_chart_url);
Ok(oci_chart_url)
}
@@ -107,20 +107,20 @@ impl OCICompliant for RustWebapp {
async fn build_push_oci_image(&self) -> Result<String, String> {
// This function orchestrates the build and push process.
// It's async to match the trait definition, though the underlying docker commands are blocking.
info!("Starting OCI image build and push for '{}'", self.name);
debug!("Starting OCI image build and push for '{}'", self.name);
// 1. Build the image by calling the synchronous helper function.
let image_tag = self.image_name();
self.build_docker_image(&image_tag)
.await
.map_err(|e| format!("Failed to build Docker image: {}", e))?;
info!("Successfully built Docker image: {}", image_tag);
debug!("Successfully built Docker image: {}", image_tag);
// 2. Push the image to the registry.
self.push_docker_image(&image_tag)
.await
.map_err(|e| format!("Failed to push Docker image: {}", e))?;
info!("Successfully pushed Docker image to: {}", image_tag);
debug!("Successfully pushed Docker image to: {}", image_tag);
Ok(image_tag)
}
@@ -174,7 +174,7 @@ impl RustWebapp {
.platform("linux/x86_64");
let mut temp_tar_builder = tar::Builder::new(Vec::new());
-        temp_tar_builder
+        let _ = temp_tar_builder
.append_dir_all("", self.project_root.clone())
.unwrap();
let archive = temp_tar_builder
@@ -195,7 +195,7 @@ impl RustWebapp {
);
while let Some(msg) = image_build_stream.next().await {
debug!("Message: {msg:?}");
println!("Message: {msg:?}");
}
Ok(image_name.to_string())
@@ -219,7 +219,7 @@ impl RustWebapp {
);
while let Some(msg) = push_image_stream.next().await {
debug!("Message: {msg:?}");
println!("Message: {msg:?}");
}
Ok(image_tag.to_string())
@@ -288,8 +288,9 @@ impl RustWebapp {
.unwrap(),
);
// Copy the compiled binary from the builder stage.
-        // TODO: Should not be using score name here, instead should use name from Cargo.toml
-        // https://git.nationtech.io/NationTech/harmony/issues/105
+        error!(
+            "FIXME Should not be using score name here, instead should use name from Cargo.toml"
+        );
let binary_path_in_builder = format!("/app/target/release/{}", self.name);
let binary_path_in_final = format!("/home/appuser/{}", self.name);
dockerfile.push(
@@ -327,8 +328,9 @@ impl RustWebapp {
));
// Copy only the compiled binary from the builder stage.
-        // TODO: Should not be using score name here, instead should use name from Cargo.toml
-        // https://git.nationtech.io/NationTech/harmony/issues/105
+        error!(
+            "FIXME Should not be using score name here, instead should use name from Cargo.toml"
+        );
let binary_path_in_builder = format!("/app/target/release/{}", self.name);
let binary_path_in_final = format!("/usr/local/bin/{}", self.name);
dockerfile.push(
@@ -528,7 +530,10 @@ spec:
}
/// Packages a Helm chart directory into a .tgz file.
-    fn package_helm_chart(&self, chart_dir: &Path) -> Result<PathBuf, Box<dyn std::error::Error>> {
+    fn package_helm_chart(
+        &self,
+        chart_dir: &PathBuf,
+    ) -> Result<PathBuf, Box<dyn std::error::Error>> {
let chart_dirname = chart_dir.file_name().expect("Should find a chart dirname");
debug!(
"Launching `helm package {}` cli with CWD {}",
@@ -541,13 +546,14 @@ spec:
);
let output = process::Command::new("helm")
.args(["package", chart_dirname.to_str().unwrap()])
-            .current_dir(self.project_root.join(".harmony_generated").join("helm")) // Run package from the parent dir
+            .current_dir(&self.project_root.join(".harmony_generated").join("helm")) // Run package from the parent dir
.output()?;
self.check_output(&output, "Failed to package Helm chart")?;
// Helm prints the path of the created chart to stdout.
let tgz_name = String::from_utf8(output.stdout)?
+            .trim()
.split_whitespace()
.last()
.unwrap_or_default()
@@ -567,7 +573,7 @@ spec:
/// Pushes a packaged Helm chart to an OCI registry.
fn push_helm_chart(
&self,
-        packaged_chart_path: &Path,
+        packaged_chart_path: &PathBuf,
) -> Result<String, Box<dyn std::error::Error>> {
// The chart name is the file stem of the .tgz file
let chart_file_name = packaged_chart_path.file_stem().unwrap().to_str().unwrap();

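The signature changes above trade &Path for &PathBuf. Taking the borrowed &Path is usually preferable because both owned and borrowed paths coerce to it; a standalone illustration:

use std::path::{Path, PathBuf};

fn package_dir(chart_dir: &Path) -> String {
    chart_dir.display().to_string()
}

fn main() {
    let owned = PathBuf::from(".harmony_generated/helm/mychart");
    package_dir(&owned); // &PathBuf coerces to &Path
    package_dir(Path::new("mychart")); // a plain &Path works too
}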
@@ -41,6 +41,6 @@ impl<T: Topology + HelmCommand> Score<T> for CertManagerHelmScore {
}
fn name(&self) -> String {
"CertManagerHelmScore".to_string()
format!("CertManagerHelmScore")
}
}

@@ -7,7 +7,7 @@ use crate::{
domain::{data::Version, interpret::InterpretStatus},
interpret::{Interpret, InterpretError, InterpretName, Outcome},
inventory::Inventory,
-    topology::{DHCPStaticEntry, DhcpServer, HostBinding, IpAddress, PxeOptions, Topology},
+    topology::{DHCPStaticEntry, DhcpServer, HostBinding, IpAddress, Topology},
};
use crate::domain::score::Score;
@@ -98,14 +98,69 @@ impl DhcpInterpret {
_inventory: &Inventory,
dhcp_server: &D,
) -> Result<Outcome, InterpretError> {
-        let pxe_options = PxeOptions {
-            ipxe_filename: self.score.filenameipxe.clone().unwrap_or_default(),
-            bios_filename: self.score.filename.clone().unwrap_or_default(),
-            efi_filename: self.score.filename64.clone().unwrap_or_default(),
-            tftp_ip: self.score.next_server,
-        };
-        dhcp_server.set_pxe_options(pxe_options).await?;
+        let next_server_outcome = match self.score.next_server {
+            Some(next_server) => {
+                dhcp_server.set_next_server(next_server).await?;
+                Outcome::new(
+                    InterpretStatus::SUCCESS,
+                    format!("Dhcp Interpret Set next boot to {next_server}"),
+                )
+            }
+            None => Outcome::noop(),
+        };
+        let boot_filename_outcome = match &self.score.boot_filename {
+            Some(boot_filename) => {
+                dhcp_server.set_boot_filename(&boot_filename).await?;
+                Outcome::new(
+                    InterpretStatus::SUCCESS,
+                    format!("Dhcp Interpret Set boot filename to {boot_filename}"),
+                )
+            }
+            None => Outcome::noop(),
+        };
+        let filename_outcome = match &self.score.filename {
+            Some(filename) => {
+                dhcp_server.set_filename(&filename).await?;
+                Outcome::new(
+                    InterpretStatus::SUCCESS,
+                    format!("Dhcp Interpret Set filename to {filename}"),
+                )
+            }
+            None => Outcome::noop(),
+        };
+        let filename64_outcome = match &self.score.filename64 {
+            Some(filename64) => {
+                dhcp_server.set_filename64(&filename64).await?;
+                Outcome::new(
+                    InterpretStatus::SUCCESS,
+                    format!("Dhcp Interpret Set filename64 to {filename64}"),
+                )
+            }
+            None => Outcome::noop(),
+        };
+        let filenameipxe_outcome = match &self.score.filenameipxe {
+            Some(filenameipxe) => {
+                dhcp_server.set_filenameipxe(&filenameipxe).await?;
+                Outcome::new(
+                    InterpretStatus::SUCCESS,
+                    format!("Dhcp Interpret Set filenameipxe to {filenameipxe}"),
+                )
+            }
+            None => Outcome::noop(),
+        };
+        if next_server_outcome.status == InterpretStatus::NOOP
+            && boot_filename_outcome.status == InterpretStatus::NOOP
+            && filename_outcome.status == InterpretStatus::NOOP
+            && filename64_outcome.status == InterpretStatus::NOOP
+            && filenameipxe_outcome.status == InterpretStatus::NOOP
+        {
+            return Ok(Outcome::noop());
+        }
Ok(Outcome::new(
InterpretStatus::SUCCESS,
@@ -154,7 +209,7 @@ impl<T: DhcpServer> Interpret<T> for DhcpInterpret {
Ok(Outcome::new(
InterpretStatus::SUCCESS,
"Dhcp Interpret execution successful".to_string(),
format!("Dhcp Interpret execution successful"),
))
}
}

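One side of the hunk above hands the DHCP server a single PxeOptions struct; the other calls five individual setters and folds their outcomes. A sketch of the consolidated shape, with field names taken from the hunk (the real DhcpServer trait is async and returns Outcome values):

use std::net::IpAddr;

struct PxeOptions {
    ipxe_filename: String,
    bios_filename: String,
    efi_filename: String,
    tftp_ip: Option<IpAddr>,
}

fn build_pxe_options(
    filenameipxe: Option<String>,
    filename: Option<String>,
    filename64: Option<String>,
    next_server: Option<IpAddr>,
) -> PxeOptions {
    // Mirrors the unwrap_or_default() calls in the struct-based variant.
    PxeOptions {
        ipxe_filename: filenameipxe.unwrap_or_default(),
        bios_filename: filename.unwrap_or_default(),
        efi_filename: filename64.unwrap_or_default(),
        tftp_ip: next_server,
    }
}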
@@ -112,7 +112,7 @@ impl<T: Topology + DnsServer> Interpret<T> for DnsInterpret {
Ok(Outcome::new(
InterpretStatus::SUCCESS,
"Dns Interpret execution successful".to_string(),
format!("Dns Interpret execution successful"),
))
}
}

@@ -55,7 +55,7 @@ impl<T: Topology + HelmCommand> Score<T> for HelmChartScore {
}
fn name(&self) -> String {
format!("{} [HelmChartScore]", self.release_name)
format!("{} {} HelmChartScore", self.release_name, self.chart_name)
}
}
@@ -90,10 +90,14 @@ impl HelmChartInterpret {
);
match add_output.status.success() {
-            true => Ok(()),
-            false => Err(InterpretError::new(format!(
-                "Failed to add helm repository!\n{full_output}"
-            ))),
+            true => {
+                return Ok(());
+            }
+            false => {
+                return Err(InterpretError::new(format!(
+                    "Failed to add helm repository!\n{full_output}"
+                )));
+            }
}
}
}
@@ -208,7 +212,7 @@ impl<T: Topology + HelmCommand> Interpret<T> for HelmChartInterpret {
}
let res = helm_executor.install_or_upgrade(
-            ns,
+            &ns,
&self.score.release_name,
&self.score.chart_name,
self.score.chart_version.as_ref(),
@@ -225,27 +229,24 @@ impl<T: Topology + HelmCommand> Interpret<T> for HelmChartInterpret {
match status {
helm_wrapper_rs::HelmDeployStatus::Deployed => Ok(Outcome::new(
InterpretStatus::SUCCESS,
format!("Helm Chart {} deployed", self.score.release_name),
"Helm Chart deployed".to_string(),
)),
helm_wrapper_rs::HelmDeployStatus::PendingInstall => Ok(Outcome::new(
InterpretStatus::RUNNING,
format!("Helm Chart {} pending install...", self.score.release_name),
"Helm Chart Pending install".to_string(),
)),
helm_wrapper_rs::HelmDeployStatus::PendingUpgrade => Ok(Outcome::new(
InterpretStatus::RUNNING,
format!("Helm Chart {} pending upgrade...", self.score.release_name),
"Helm Chart pending upgrade".to_string(),
)),
-            helm_wrapper_rs::HelmDeployStatus::Failed => Err(InterpretError::new(
-                "Failed to install helm chart".to_string(),
-            )),
+            helm_wrapper_rs::HelmDeployStatus::Failed => Err(InterpretError::new(format!(
+                "Helm Chart {} installation failed",
+                self.score.release_name
+            ))),
}
}
fn get_name(&self) -> InterpretName {
-        InterpretName::HelmChart
+        todo!()
}
fn get_version(&self) -> Version {
todo!()
}

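Both variants in the repository-add hunk above behave the same: in tail position a match can yield the Result directly, so the explicit returns only add noise. Side by side:

fn tail_expression(ok: bool) -> Result<(), String> {
    match ok {
        true => Ok(()),
        false => Err("failed to add helm repository".to_string()),
    }
}

fn explicit_returns(ok: bool) -> Result<(), String> {
    match ok {
        true => return Ok(()),
        false => return Err("failed to add helm repository".to_string()),
    }
}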
@@ -77,11 +77,14 @@ impl HelmCommandExecutor {
)?;
}
-        let out = self.clone().run_command(
+        let out = match self.clone().run_command(
            self.chart
                .clone()
                .helm_args(self.globals.chart_home.clone().unwrap()),
-        )?;
+        ) {
+            Ok(out) => out,
+            Err(e) => return Err(e),
+        };
// TODO: don't use unwrap here
let s = String::from_utf8(out.stdout).unwrap();
@@ -95,11 +98,14 @@ impl HelmCommandExecutor {
}
pub fn version(self) -> Result<String, std::io::Error> {
-        let out = self.run_command(vec![
+        let out = match self.run_command(vec![
            "version".to_string(),
            "-c".to_string(),
            "--short".to_string(),
-        ])?;
+        ]) {
+            Ok(out) => out,
+            Err(e) => return Err(e),
+        };
// TODO: don't use unwrap
Ok(String::from_utf8(out.stdout).unwrap())
@@ -123,11 +129,15 @@ impl HelmCommandExecutor {
None => PathBuf::from(TempDir::new()?.path()),
};
-        if let Some(yaml_str) = self.chart.values_inline {
-            let tf: TempFile = temp_file::with_contents(yaml_str.as_bytes());
-            self.chart
-                .additional_values_files
-                .push(PathBuf::from(tf.path()));
-        }
+        match self.chart.values_inline {
+            Some(yaml_str) => {
+                let tf: TempFile;
+                tf = temp_file::with_contents(yaml_str.as_bytes());
+                self.chart
+                    .additional_values_files
+                    .push(PathBuf::from(tf.path()));
+            }
+            None => (),
+        };
self.env.insert(
@@ -170,9 +180,9 @@ impl HelmChart {
match self.repo {
Some(r) => {
if r.starts_with("oci://") {
-                    args.push(
+                    args.push(String::from(
                        r.trim_end_matches("/").to_string() + "/" + self.name.clone().as_str(),
-                    );
+                    ));
} else {
args.push("--repo".to_string());
args.push(r.to_string());
@@ -183,9 +193,12 @@ impl HelmChart {
None => args.push(self.name),
};
-        if let Some(v) = self.version {
-            args.push("--version".to_string());
-            args.push(v.to_string());
-        }
+        match self.version {
+            Some(v) => {
+                args.push("--version".to_string());
+                args.push(v.to_string());
+            }
+            None => (),
+        }
args
@@ -349,7 +362,7 @@ impl<T: Topology + K8sclient + HelmCommand> Interpret<T> for HelmChartInterpretV
}
fn get_name(&self) -> InterpretName {
-        InterpretName::HelmCommand
+        todo!()
}
fn get_version(&self) -> Version {
todo!()

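The executor hunks above toggle between `?` and a hand-written match; the two desugar to the same early return (modulo the From conversion `?` also applies). A standalone comparison:

use std::io;

fn run_command() -> Result<Vec<u8>, io::Error> {
    Ok(b"v3.15.0".to_vec()) // stand-in for spawning the helm binary
}

fn version_question_mark() -> Result<String, io::Error> {
    let out = run_command()?; // propagates Err(e) automatically
    Ok(String::from_utf8(out).expect("helm output should be UTF-8"))
}

fn version_match() -> Result<String, io::Error> {
    let out = match run_command() {
        Ok(out) => out,
        Err(e) => return Err(e), // what `?` expands to (plus From::from)
    };
    Ok(String::from_utf8(out).expect("helm output should be UTF-8"))
}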
@@ -3,7 +3,7 @@ use derive_new::new;
use serde::Serialize;
use crate::{
-    data::{FileContent, Id, Version},
+    data::{Id, Version},
interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
inventory::Inventory,
score::Score,
@@ -23,8 +23,7 @@ use crate::{
/// ```
#[derive(Debug, new, Clone, Serialize)]
pub struct StaticFilesHttpScore {
-    pub folder_to_serve: Option<Url>,
-    pub files: Vec<FileContent>,
+    files_to_serve: Url,
}
impl<T: Topology + HttpServer> Score<T> for StaticFilesHttpScore {
@@ -51,20 +50,12 @@ impl<T: Topology + HttpServer> Interpret<T> for StaticFilesHttpInterpret {
) -> Result<Outcome, InterpretError> {
http_server.ensure_initialized().await?;
// http_server.set_ip(topology.router.get_gateway()).await?;
-        if let Some(folder) = self.score.folder_to_serve.as_ref() {
-            http_server.serve_files(folder).await?;
-        }
-        for f in self.score.files.iter() {
-            http_server.serve_file_content(&f).await?
-        }
+        http_server.serve_files(&self.score.files_to_serve).await?;
http_server.commit_config().await?;
http_server.reload_restart().await?;
Ok(Outcome::success(format!(
"Http Server running and serving files from folder {:?} and content for {}",
self.score.folder_to_serve,
self.score.files.iter().map(|f| f.path.to_string()).collect::<Vec<String>>().join(",")
"Http Server running and serving files from {}",
self.score.files_to_serve
)))
}

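The richer variant above serves an optional folder plus a list of inline file contents rather than a single URL. A sketch of that control flow with simplified stand-ins for the async HttpServer trait and FileContent type used in the repo:

struct FileContent {
    path: String,
    content: String,
}

trait HttpServer {
    fn serve_files(&self, folder: &str);
    fn serve_file_content(&self, file: &FileContent);
}

fn configure(server: &dyn HttpServer, folder_to_serve: Option<&str>, files: &[FileContent]) {
    if let Some(folder) = folder_to_serve {
        server.serve_files(folder);
    }
    for f in files {
        server.serve_file_content(f);
    }
}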
@@ -1,12 +1,13 @@
use std::path::PathBuf;
use async_trait::async_trait;
-use log::debug;
+use log::{debug, info};
use serde::Serialize;
use crate::{
config::HARMONY_DATA_DIR,
data::{Id, Version},
+    instrumentation::{self, HarmonyEvent},
interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
inventory::Inventory,
score::Score,
@@ -29,14 +30,14 @@ impl Default for K3DInstallationScore {
}
impl<T: Topology> Score<T> for K3DInstallationScore {
-    fn create_interpret(&self) -> Box<dyn Interpret<T>> {
+    fn create_interpret(&self) -> Box<dyn crate::interpret::Interpret<T>> {
Box::new(K3dInstallationInterpret {
score: self.clone(),
})
}
fn name(&self) -> String {
"K3dInstallationScore".into()
todo!()
}
}
@@ -50,14 +51,20 @@ impl<T: Topology> Interpret<T> for K3dInstallationInterpret {
async fn execute(
&self,
_inventory: &Inventory,
-        _topology: &T,
+        topology: &T,
    ) -> Result<Outcome, InterpretError> {
+        instrumentation::instrument(HarmonyEvent::InterpretExecutionStarted {
+            topology: topology.name().into(),
+            interpret: "k3d-installation".into(),
+            message: "installing k3d...".into(),
+        })
+        .unwrap();
let k3d = k3d_rs::K3d::new(
self.score.installation_path.clone(),
Some(self.score.cluster_name.clone()),
);
-        match k3d.ensure_installed().await {
+        let outcome = match k3d.ensure_installed().await {
Ok(_client) => {
let msg = format!("k3d cluster '{}' installed ", self.score.cluster_name);
debug!("{msg}");
@@ -66,7 +73,16 @@ impl<T: Topology> Interpret<T> for K3dInstallationInterpret {
Err(msg) => Err(InterpretError::new(format!(
"failed to ensure k3d is installed : {msg}"
))),
-        }
+        };
+        instrumentation::instrument(HarmonyEvent::InterpretExecutionFinished {
+            topology: topology.name().into(),
+            interpret: "k3d-installation".into(),
+            outcome: outcome.clone(),
+        })
+        .unwrap();
+        outcome
}
fn get_name(&self) -> InterpretName {
InterpretName::K3dInstallation

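The k3d interpret above brackets its work with InterpretExecutionStarted/Finished events; the same bracketing recurs in other interprets, which is what the "extract duplicated progress functions" commit targets. A minimal sketch of a shared helper, with stand-in event types:

#[derive(Debug)]
enum HarmonyEvent {
    InterpretExecutionStarted { interpret: String },
    InterpretExecutionFinished { interpret: String, success: bool },
}

// Stand-in: the real instrument() dispatches to subscribed handlers.
fn instrument(event: HarmonyEvent) {
    println!("{event:?}");
}

fn with_progress<T, E>(interpret: &str, work: impl FnOnce() -> Result<T, E>) -> Result<T, E> {
    instrument(HarmonyEvent::InterpretExecutionStarted {
        interpret: interpret.to_string(),
    });
    let outcome = work();
    instrument(HarmonyEvent::InterpretExecutionFinished {
        interpret: interpret.to_string(),
        success: outcome.is_ok(),
    });
    outcome
}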
@@ -89,7 +89,7 @@ where
))
}
fn get_name(&self) -> InterpretName {
-        InterpretName::K8sResource
+        todo!()
}
fn get_version(&self) -> Version {
todo!()

@@ -128,12 +128,13 @@ impl<T: Topology + K8sclient + HelmCommand> Interpret<T> for LAMPInterpret {
info!("Deploying score {deployment_score:#?}");
-        deployment_score.interpret(inventory, topology).await?;
+        deployment_score
+            .create_interpret()
+            .execute(inventory, topology)
+            .await?;
info!("LAMP deployment_score {deployment_score:?}");
-        let ingress_path = ingress_path!("/");
let lamp_ingress = K8sIngressScore {
name: fqdn!("lamp-ingress"),
host: fqdn!("test"),
@@ -143,14 +144,17 @@ impl<T: Topology + K8sclient + HelmCommand> Interpret<T> for LAMPInterpret {
.as_str()
),
port: 8080,
-            path: Some(ingress_path),
+            path: Some(ingress_path!("/")),
path_type: None,
namespace: self
.get_namespace()
.map(|nbs| fqdn!(nbs.to_string().as_str())),
};
-        lamp_ingress.interpret(inventory, topology).await?;
+        lamp_ingress
+            .create_interpret()
+            .execute(inventory, topology)
+            .await?;
info!("LAMP lamp_ingress {lamp_ingress:?}");
@@ -160,7 +164,7 @@ impl<T: Topology + K8sclient + HelmCommand> Interpret<T> for LAMPInterpret {
}
fn get_name(&self) -> InterpretName {
-        InterpretName::Lamp
+        todo!()
}
fn get_version(&self) -> Version {
@@ -209,7 +213,7 @@ impl LAMPInterpret {
repository: None,
};
-        score.interpret(inventory, topology).await
+        score.create_interpret().execute(inventory, topology).await
}
fn build_dockerfile(&self, score: &LAMPScore) -> Result<PathBuf, Box<dyn std::error::Error>> {
let mut dockerfile = Dockerfile::new();

@@ -14,6 +14,5 @@ pub mod monitoring;
pub mod okd;
pub mod opnsense;
pub mod prometheus;
-pub mod storage;
pub mod tenant;
pub mod tftp;

@@ -1,16 +1,7 @@
use std::any::Any;
-use std::collections::BTreeMap;
use async_trait::async_trait;
-use k8s_openapi::api::core::v1::Secret;
-use kube::api::ObjectMeta;
use serde::Serialize;
-use serde_json::json;
use serde_yaml::{Mapping, Value};
-use crate::modules::monitoring::kube_prometheus::crd::crd_alertmanager_config::{
-    AlertmanagerConfig, AlertmanagerConfigSpec, CRDPrometheus,
-};
use crate::{
interpret::{InterpretError, Outcome},
modules::monitoring::{
@@ -29,98 +20,14 @@ pub struct DiscordWebhook {
pub url: Url,
}
-#[async_trait]
-impl AlertReceiver<CRDPrometheus> for DiscordWebhook {
-    async fn install(&self, sender: &CRDPrometheus) -> Result<Outcome, InterpretError> {
-        let ns = sender.namespace.clone();
-        let secret_name = format!("{}-secret", self.name.clone());
-        let webhook_key = format!("{}", self.url.clone());
-        let mut string_data = BTreeMap::new();
-        string_data.insert("webhook-url".to_string(), webhook_key.clone());
-        let secret = Secret {
-            metadata: kube::core::ObjectMeta {
-                name: Some(secret_name.clone()),
-                ..Default::default()
-            },
-            string_data: Some(string_data),
-            type_: Some("Opaque".to_string()),
-            ..Default::default()
-        };
-        let _ = sender.client.apply(&secret, Some(&ns)).await;
-        let spec = AlertmanagerConfigSpec {
-            data: json!({
-                "route": {
-                    "receiver": self.name,
-                },
-                "receivers": [
-                    {
-                        "name": self.name,
-                        "discordConfigs": [
-                            {
-                                "apiURL": {
-                                    "name": secret_name,
-                                    "key": "webhook-url",
-                                },
-                                "title": "{{ template \"discord.default.title\" . }}",
-                                "message": "{{ template \"discord.default.message\" . }}"
-                            }
-                        ]
-                    }
-                ]
-            }),
-        };
-        let alertmanager_configs = AlertmanagerConfig {
-            metadata: ObjectMeta {
-                name: Some(self.name.clone()),
-                labels: Some(std::collections::BTreeMap::from([(
-                    "alertmanagerConfig".to_string(),
-                    "enabled".to_string(),
-                )])),
-                namespace: Some(ns),
-                ..Default::default()
-            },
-            spec,
-        };
-        sender
-            .client
-            .apply(&alertmanager_configs, Some(&sender.namespace))
-            .await?;
-        Ok(Outcome::success(format!(
-            "installed crd-alertmanagerconfigs for {}",
-            self.name
-        )))
-    }
-    fn name(&self) -> String {
-        "discord-webhook".to_string()
-    }
-    fn clone_box(&self) -> Box<dyn AlertReceiver<CRDPrometheus>> {
-        Box::new(self.clone())
-    }
-    fn as_any(&self) -> &dyn Any {
-        self
-    }
-}
-
-#[async_trait]
-impl AlertReceiver<Prometheus> for DiscordWebhook {
-    async fn install(&self, sender: &Prometheus) -> Result<Outcome, InterpretError> {
-        sender.install_receiver(self).await
-    }
-    fn name(&self) -> String {
-        "discord-webhook".to_string()
-    }
-    fn clone_box(&self) -> Box<dyn AlertReceiver<Prometheus>> {
-        Box::new(self.clone())
-    }
-    fn as_any(&self) -> &dyn Any {
-        self
-    }
-}
#[async_trait]
@@ -141,12 +48,6 @@ impl AlertReceiver<KubePrometheus> for DiscordWebhook {
fn clone_box(&self) -> Box<dyn AlertReceiver<KubePrometheus>> {
Box::new(self.clone())
}
-    fn name(&self) -> String {
-        "discord-webhook".to_string()
-    }
-    fn as_any(&self) -> &dyn Any {
-        self
-    }
}
#[async_trait]

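The receiver impls above use the clone_box pattern so that boxed dyn AlertReceiver trait objects stay cloneable (Clone itself is not object safe). A self-contained sketch:

trait AlertReceiver {
    fn name(&self) -> String;
    fn clone_box(&self) -> Box<dyn AlertReceiver>;
}

impl Clone for Box<dyn AlertReceiver> {
    fn clone(&self) -> Self {
        self.clone_box()
    }
}

#[derive(Clone)]
struct DiscordWebhook {
    url: String,
}

impl AlertReceiver for DiscordWebhook {
    fn name(&self) -> String {
        "discord-webhook".to_string()
    }
    fn clone_box(&self) -> Box<dyn AlertReceiver> {
        Box::new(self.clone())
    }
}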
@@ -1,19 +1,11 @@
use std::any::Any;
use async_trait::async_trait;
-use kube::api::ObjectMeta;
-use log::debug;
use serde::Serialize;
-use serde_json::json;
use serde_yaml::{Mapping, Value};
use crate::{
interpret::{InterpretError, Outcome},
modules::monitoring::{
kube_prometheus::{
-            crd::crd_alertmanager_config::{
-                AlertmanagerConfig, AlertmanagerConfigSpec, CRDPrometheus,
-            },
prometheus::{KubePrometheus, KubePrometheusReceiver},
types::{AlertChannelConfig, AlertManagerChannelConfig},
},
@@ -28,81 +20,14 @@ pub struct WebhookReceiver {
pub url: Url,
}
-#[async_trait]
-impl AlertReceiver<CRDPrometheus> for WebhookReceiver {
-    async fn install(&self, sender: &CRDPrometheus) -> Result<Outcome, InterpretError> {
-        let spec = AlertmanagerConfigSpec {
-            data: json!({
-                "route": {
-                    "receiver": self.name,
-                },
-                "receivers": [
-                    {
-                        "name": self.name,
-                        "webhookConfigs": [
-                            {
-                                "url": self.url,
-                            }
-                        ]
-                    }
-                ]
-            }),
-        };
-        let alertmanager_configs = AlertmanagerConfig {
-            metadata: ObjectMeta {
-                name: Some(self.name.clone()),
-                labels: Some(std::collections::BTreeMap::from([(
-                    "alertmanagerConfig".to_string(),
-                    "enabled".to_string(),
-                )])),
-                namespace: Some(sender.namespace.clone()),
-                ..Default::default()
-            },
-            spec,
-        };
-        debug!(
-            "alert manager configs: \n{:#?}",
-            alertmanager_configs.clone()
-        );
-        sender
-            .client
-            .apply(&alertmanager_configs, Some(&sender.namespace))
-            .await?;
-        Ok(Outcome::success(format!(
-            "installed crd-alertmanagerconfigs for {}",
-            self.name
-        )))
-    }
-    fn name(&self) -> String {
-        "webhook-receiver".to_string()
-    }
-    fn clone_box(&self) -> Box<dyn AlertReceiver<CRDPrometheus>> {
-        Box::new(self.clone())
-    }
-    fn as_any(&self) -> &dyn Any {
-        self
-    }
-}
-
-#[async_trait]
-impl AlertReceiver<Prometheus> for WebhookReceiver {
-    async fn install(&self, sender: &Prometheus) -> Result<Outcome, InterpretError> {
-        sender.install_receiver(self).await
-    }
-    fn name(&self) -> String {
-        "webhook-receiver".to_string()
-    }
-    fn clone_box(&self) -> Box<dyn AlertReceiver<Prometheus>> {
-        Box::new(self.clone())
-    }
-    fn as_any(&self) -> &dyn Any {
-        self
-    }
-}
#[async_trait]
@@ -119,15 +44,9 @@ impl AlertReceiver<KubePrometheus> for WebhookReceiver {
async fn install(&self, sender: &KubePrometheus) -> Result<Outcome, InterpretError> {
sender.install_receiver(self).await
}
-    fn name(&self) -> String {
-        "webhook-receiver".to_string()
-    }
fn clone_box(&self) -> Box<dyn AlertReceiver<KubePrometheus>> {
Box::new(self.clone())
}
-    fn as_any(&self) -> &dyn Any {
-        self
-    }
}
#[async_trait]

@@ -18,7 +18,7 @@ use crate::{
#[async_trait]
impl AlertRule<KubePrometheus> for AlertManagerRuleGroup {
async fn install(&self, sender: &KubePrometheus) -> Result<Outcome, InterpretError> {
-        sender.install_rule(self).await
+        sender.install_rule(&self).await
}
fn clone_box(&self) -> Box<dyn AlertRule<KubePrometheus>> {
Box::new(self.clone())
@@ -28,7 +28,7 @@ impl AlertRule<KubePrometheus> for AlertManagerRuleGroup {
#[async_trait]
impl AlertRule<Prometheus> for AlertManagerRuleGroup {
async fn install(&self, sender: &Prometheus) -> Result<Outcome, InterpretError> {
-        sender.install_rule(self).await
+        sender.install_rule(&self).await
}
fn clone_box(&self) -> Box<dyn AlertRule<Prometheus>> {
Box::new(self.clone())

@@ -1,91 +0,0 @@
use std::sync::Arc;
use async_trait::async_trait;
use serde::Serialize;
use crate::{
data::{Id, Version},
interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
inventory::Inventory,
modules::{
application::Application,
monitoring::kube_prometheus::crd::crd_alertmanager_config::CRDPrometheus,
prometheus::prometheus::PrometheusApplicationMonitoring,
},
score::Score,
topology::{PreparationOutcome, Topology, oberservability::monitoring::AlertReceiver},
};
#[derive(Debug, Clone, Serialize)]
pub struct ApplicationMonitoringScore {
pub sender: CRDPrometheus,
pub application: Arc<dyn Application>,
pub receivers: Vec<Box<dyn AlertReceiver<CRDPrometheus>>>,
}
impl<T: Topology + PrometheusApplicationMonitoring<CRDPrometheus>> Score<T>
for ApplicationMonitoringScore
{
fn create_interpret(&self) -> Box<dyn Interpret<T>> {
Box::new(ApplicationMonitoringInterpret {
score: self.clone(),
})
}
fn name(&self) -> String {
format!(
"{} monitoring [ApplicationMonitoringScore]",
self.application.name()
)
}
}
#[derive(Debug)]
pub struct ApplicationMonitoringInterpret {
score: ApplicationMonitoringScore,
}
#[async_trait]
impl<T: Topology + PrometheusApplicationMonitoring<CRDPrometheus>> Interpret<T>
for ApplicationMonitoringInterpret
{
async fn execute(
&self,
inventory: &Inventory,
topology: &T,
) -> Result<Outcome, InterpretError> {
let result = topology
.install_prometheus(
&self.score.sender,
inventory,
Some(self.score.receivers.clone()),
)
.await;
match result {
Ok(outcome) => match outcome {
PreparationOutcome::Success { details: _ } => {
Ok(Outcome::success("Prometheus installed".into()))
}
PreparationOutcome::Noop => Ok(Outcome::noop()),
},
Err(err) => Err(InterpretError::from(err)),
}
}
fn get_name(&self) -> InterpretName {
InterpretName::ApplicationMonitoring
}
fn get_version(&self) -> Version {
todo!()
}
fn get_status(&self) -> InterpretStatus {
todo!()
}
fn get_children(&self) -> Vec<Id> {
todo!()
}
}

@@ -0,0 +1,44 @@
use std::sync::{Arc, Mutex};
use serde::Serialize;
use crate::{
modules::monitoring::{
kube_prometheus::types::ServiceMonitor,
prometheus::{prometheus::Prometheus, prometheus_config::PrometheusConfig},
},
score::Score,
topology::{
HelmCommand, Topology,
oberservability::monitoring::{AlertReceiver, AlertRule, AlertingInterpret},
tenant::TenantManager,
},
};
#[derive(Clone, Debug, Serialize)]
pub struct ApplicationPrometheusMonitoringScore {
pub receivers: Vec<Box<dyn AlertReceiver<Prometheus>>>,
pub rules: Vec<Box<dyn AlertRule<Prometheus>>>,
pub service_monitors: Vec<ServiceMonitor>,
}
impl<T: Topology + HelmCommand + TenantManager> Score<T> for ApplicationPrometheusMonitoringScore {
fn create_interpret(&self) -> Box<dyn crate::interpret::Interpret<T>> {
let mut prom_config = PrometheusConfig::new();
prom_config.alert_manager = true;
let config = Arc::new(Mutex::new(prom_config));
config
.try_lock()
.expect("couldn't lock config")
.additional_service_monitors = self.service_monitors.clone();
Box::new(AlertingInterpret {
sender: Prometheus::new(),
receivers: self.receivers.clone(),
rules: self.rules.clone(),
})
}
fn name(&self) -> String {
"ApplicationPrometheusMonitoringScore".to_string()
}
}

@@ -1 +1 @@
-pub mod application_monitoring_score;
+pub mod k8s_application_monitoring_score;

@@ -4,14 +4,15 @@ use std::str::FromStr;
use crate::modules::helm::chart::HelmChartScore;
pub fn grafana_helm_chart_score(ns: &str) -> HelmChartScore {
let values = r#"
let values = format!(
r#"
rbac:
namespaced: true
sidecar:
dashboards:
enabled: true
"#
-    .to_string();
+    );
HelmChartScore {
namespace: Some(NonBlankString::from_str(ns).unwrap()),

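Wrapping a literal with no placeholders in format!() buys nothing over .to_string(); format! only pays off once values are interpolated. A sketch that actually uses the ns argument (the namespaceOverride key is illustrative, not taken from the chart values above):

fn grafana_values(ns: &str) -> String {
    format!(
        r#"
rbac:
  namespaced: true
namespaceOverride: {ns}
sidecar:
  dashboards:
    enabled: true
"#
    )
}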
@@ -1,50 +0,0 @@
use std::sync::Arc;
use kube::CustomResource;
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
use crate::topology::{
k8s::K8sClient,
oberservability::monitoring::{AlertReceiver, AlertSender},
};
#[derive(CustomResource, Serialize, Deserialize, Debug, Clone, JsonSchema)]
#[kube(
group = "monitoring.coreos.com",
version = "v1alpha1",
kind = "AlertmanagerConfig",
plural = "alertmanagerconfigs",
namespaced
)]
pub struct AlertmanagerConfigSpec {
#[serde(flatten)]
pub data: serde_json::Value,
}
#[derive(Debug, Clone, Serialize)]
pub struct CRDPrometheus {
pub namespace: String,
pub client: Arc<K8sClient>,
}
impl AlertSender for CRDPrometheus {
fn name(&self) -> String {
"CRDAlertManager".to_string()
}
}
impl Clone for Box<dyn AlertReceiver<CRDPrometheus>> {
fn clone(&self) -> Self {
self.clone_box()
}
}
impl Serialize for Box<dyn AlertReceiver<CRDPrometheus>> {
fn serialize<S>(&self, _serializer: S) -> Result<S::Ok, S::Error>
where
S: serde::Serializer,
{
todo!()
}
}

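The deleted module above defines its AlertmanagerConfig type through kube's CustomResource derive. A compact reminder of what the derive produces, using a hypothetical Demo kind (requires the kube derive feature plus serde and schemars):

use kube::CustomResource;
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};

// The derive generates a `Demo` resource type wrapping this spec.
#[derive(CustomResource, Serialize, Deserialize, Debug, Clone, JsonSchema)]
#[kube(group = "example.io", version = "v1alpha1", kind = "Demo", namespaced)]
pub struct DemoSpec {
    pub replicas: i32,
}

fn main() {
    // Objects are then built as Kind::new(name, spec).
    let demo = Demo::new("demo-1", DemoSpec { replicas: 1 });
    println!("{}", serde_json::to_string(&demo).unwrap());
}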
@@ -1,52 +0,0 @@
use kube::CustomResource;
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
use super::crd_prometheuses::LabelSelector;
/// Rust CRD for `Alertmanager` from Prometheus Operator
#[derive(CustomResource, Serialize, Deserialize, Debug, Clone, JsonSchema)]
#[kube(
group = "monitoring.coreos.com",
version = "v1",
kind = "Alertmanager",
plural = "alertmanagers",
namespaced
)]
#[serde(rename_all = "camelCase")]
pub struct AlertmanagerSpec {
/// Number of replicas for HA
pub replicas: i32,
/// Selectors for AlertmanagerConfig CRDs
#[serde(default, skip_serializing_if = "Option::is_none")]
pub alertmanager_config_selector: Option<LabelSelector>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub alertmanager_config_namespace_selector: Option<LabelSelector>,
/// Optional pod template metadata (annotations, labels)
#[serde(default, skip_serializing_if = "Option::is_none")]
pub pod_metadata: Option<LabelSelector>,
/// Optional topology spread settings
#[serde(default, skip_serializing_if = "Option::is_none")]
pub version: Option<String>,
}
impl Default for AlertmanagerSpec {
fn default() -> Self {
AlertmanagerSpec {
replicas: 1,
// Match all AlertmanagerConfigs in the same namespace
alertmanager_config_namespace_selector: None,
// Empty selector matches all AlertmanagerConfigs in that namespace
alertmanager_config_selector: Some(LabelSelector::default()),
pod_metadata: None,
version: None,
}
}
}

@@ -1,25 +0,0 @@
use crate::modules::prometheus::alerts::k8s::{
deployment::alert_deployment_unavailable,
pod::{alert_container_restarting, alert_pod_not_ready, pod_failed},
pvc::high_pvc_fill_rate_over_two_days,
service::alert_service_down,
};
use super::crd_prometheus_rules::Rule;
pub fn build_default_application_rules() -> Vec<Rule> {
let pod_failed: Rule = pod_failed().into();
let container_restarting: Rule = alert_container_restarting().into();
let pod_not_ready: Rule = alert_pod_not_ready().into();
let service_down: Rule = alert_service_down().into();
let deployment_unavailable: Rule = alert_deployment_unavailable().into();
let high_pvc_fill_rate: Rule = high_pvc_fill_rate_over_two_days().into();
vec![
pod_failed,
container_restarting,
pod_not_ready,
service_down,
deployment_unavailable,
high_pvc_fill_rate,
]
}

@@ -1,153 +0,0 @@
use std::collections::BTreeMap;
use kube::CustomResource;
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
use super::crd_prometheuses::LabelSelector;
#[derive(CustomResource, Serialize, Deserialize, Debug, Clone, JsonSchema)]
#[kube(
group = "grafana.integreatly.org",
version = "v1beta1",
kind = "Grafana",
plural = "grafanas",
namespaced
)]
#[serde(rename_all = "camelCase")]
pub struct GrafanaSpec {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub config: Option<GrafanaConfig>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub admin_user: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub admin_password: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub ingress: Option<GrafanaIngress>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub persistence: Option<GrafanaPersistence>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub resources: Option<ResourceRequirements>,
}
#[derive(Serialize, Deserialize, Debug, Clone, JsonSchema)]
#[serde(rename_all = "camelCase")]
pub struct GrafanaConfig {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub log: Option<GrafanaLogConfig>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub security: Option<GrafanaSecurityConfig>,
}
#[derive(Serialize, Deserialize, Debug, Clone, JsonSchema)]
#[serde(rename_all = "camelCase")]
pub struct GrafanaLogConfig {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub mode: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub level: Option<String>,
}
#[derive(Serialize, Deserialize, Debug, Clone, JsonSchema)]
#[serde(rename_all = "camelCase")]
pub struct GrafanaSecurityConfig {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub admin_user: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub admin_password: Option<String>,
}
#[derive(Serialize, Deserialize, Debug, Clone, JsonSchema)]
#[serde(rename_all = "camelCase")]
pub struct GrafanaIngress {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub enabled: Option<bool>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub hosts: Option<Vec<String>>,
}
#[derive(Serialize, Deserialize, Debug, Clone, JsonSchema)]
#[serde(rename_all = "camelCase")]
pub struct GrafanaPersistence {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub enabled: Option<bool>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub storage_class_name: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub size: Option<String>,
}
// ------------------------------------------------------------------------------------------------
#[derive(CustomResource, Serialize, Deserialize, Debug, Clone, JsonSchema)]
#[kube(
group = "grafana.integreatly.org",
version = "v1beta1",
kind = "GrafanaDashboard",
plural = "grafanadashboards",
namespaced
)]
#[serde(rename_all = "camelCase")]
pub struct GrafanaDashboardSpec {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub resync_period: Option<String>,
pub instance_selector: LabelSelector,
pub json: String,
}
// ------------------------------------------------------------------------------------------------
#[derive(CustomResource, Serialize, Deserialize, Debug, Clone, JsonSchema)]
#[kube(
group = "grafana.integreatly.org",
version = "v1beta1",
kind = "GrafanaDatasource",
plural = "grafanadatasources",
namespaced
)]
#[serde(rename_all = "camelCase")]
pub struct GrafanaDatasourceSpec {
pub instance_selector: LabelSelector,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub allow_cross_namespace_import: Option<bool>,
pub datasource: GrafanaDatasourceConfig,
}
#[derive(Serialize, Deserialize, Debug, Clone, JsonSchema)]
#[serde(rename_all = "camelCase")]
pub struct GrafanaDatasourceConfig {
pub access: String,
pub database: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub json_data: Option<BTreeMap<String, String>>,
pub name: String,
pub r#type: String,
pub url: String,
}
// ------------------------------------------------------------------------------------------------
#[derive(Serialize, Deserialize, Debug, Clone, JsonSchema, Default)]
#[serde(rename_all = "camelCase")]
pub struct ResourceRequirements {
#[serde(default, skip_serializing_if = "BTreeMap::is_empty")]
pub limits: BTreeMap<String, String>,
#[serde(default, skip_serializing_if = "BTreeMap::is_empty")]
pub requests: BTreeMap<String, String>,
}

@@ -1,57 +0,0 @@
use std::collections::BTreeMap;
use kube::CustomResource;
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
use crate::modules::monitoring::alert_rule::prometheus_alert_rule::PrometheusAlertRule;
#[derive(CustomResource, Debug, Serialize, Deserialize, Clone, JsonSchema)]
#[kube(
group = "monitoring.coreos.com",
version = "v1",
kind = "PrometheusRule",
plural = "prometheusrules",
namespaced
)]
#[serde(rename_all = "camelCase")]
pub struct PrometheusRuleSpec {
pub groups: Vec<RuleGroup>,
}
#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)]
pub struct RuleGroup {
pub name: String,
pub rules: Vec<Rule>,
}
#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)]
#[serde(rename_all = "camelCase")]
pub struct Rule {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub alert: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub expr: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub for_: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub labels: Option<std::collections::BTreeMap<String, String>>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub annotations: Option<std::collections::BTreeMap<String, String>>,
}
impl From<PrometheusAlertRule> for Rule {
fn from(value: PrometheusAlertRule) -> Self {
Rule {
alert: Some(value.alert),
expr: Some(value.expr),
for_: value.r#for,
labels: Some(value.labels.into_iter().collect::<BTreeMap<_, _>>()),
annotations: Some(value.annotations.into_iter().collect::<BTreeMap<_, _>>()),
}
}
}

@@ -1,118 +0,0 @@
use std::collections::BTreeMap;
use kube::CustomResource;
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
use crate::modules::monitoring::kube_prometheus::types::Operator;
#[derive(CustomResource, Serialize, Deserialize, Debug, Clone, JsonSchema)]
#[kube(
group = "monitoring.coreos.com",
version = "v1",
kind = "Prometheus",
plural = "prometheuses",
namespaced
)]
#[serde(rename_all = "camelCase")]
pub struct PrometheusSpec {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub alerting: Option<PrometheusSpecAlerting>,
pub service_account_name: String,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub service_monitor_namespace_selector: Option<LabelSelector>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub service_monitor_selector: Option<LabelSelector>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub service_discovery_role: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub pod_monitor_selector: Option<LabelSelector>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub rule_selector: Option<LabelSelector>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub rule_namespace_selector: Option<LabelSelector>,
}
#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema, Default)]
#[serde(rename_all = "camelCase")]
pub struct NamespaceSelector {
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub match_names: Vec<String>,
}
/// Contains alerting configuration, specifically Alertmanager endpoints.
#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema, Default)]
pub struct PrometheusSpecAlerting {
#[serde(skip_serializing_if = "Option::is_none")]
pub alertmanagers: Option<Vec<AlertmanagerEndpoints>>,
}
/// Represents an Alertmanager endpoint configuration used by Prometheus.
#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema, Default)]
pub struct AlertmanagerEndpoints {
/// Name of the Alertmanager Service.
#[serde(skip_serializing_if = "Option::is_none")]
pub name: Option<String>,
/// Namespace of the Alertmanager Service.
#[serde(skip_serializing_if = "Option::is_none")]
pub namespace: Option<String>,
/// Port to access on the Alertmanager Service (e.g. "web").
#[serde(skip_serializing_if = "Option::is_none")]
pub port: Option<String>,
/// Scheme to use for connecting (e.g. "http").
#[serde(skip_serializing_if = "Option::is_none")]
pub scheme: Option<String>,
// Other fields like `tls_config`, `path_prefix`, etc., can be added if needed.
}
#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema, Default)]
#[serde(rename_all = "camelCase")]
pub struct LabelSelector {
#[serde(default, skip_serializing_if = "BTreeMap::is_empty")]
pub match_labels: BTreeMap<String, String>,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub match_expressions: Vec<LabelSelectorRequirement>,
}
#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)]
#[serde(rename_all = "camelCase")]
pub struct LabelSelectorRequirement {
pub key: String,
pub operator: Operator,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub values: Vec<String>,
}
impl Default for PrometheusSpec {
fn default() -> Self {
PrometheusSpec {
alerting: None,
service_account_name: "prometheus".into(),
// null means "only my namespace"
service_monitor_namespace_selector: None,
// empty selector means match all ServiceMonitors in that namespace
service_monitor_selector: Some(LabelSelector::default()),
service_discovery_role: Some("Endpoints".into()),
pod_monitor_selector: None,
rule_selector: None,
rule_namespace_selector: Some(LabelSelector::default()),
}
}
}

@@ -1,203 +0,0 @@
pub fn build_default_dashboard(namespace: &str) -> String {
let dashboard = format!(
r#"{{
"annotations": {{
"list": []
}},
"editable": true,
"gnetId": null,
"graphTooltip": 0,
"id": null,
"iteration": 171105,
"panels": [
{{
"datasource": "$datasource",
"fieldConfig": {{
"defaults": {{
"unit": "short"
}},
"overrides": []
}},
"gridPos": {{
"h": 6,
"w": 6,
"x": 0,
"y": 0
}},
"id": 1,
"options": {{
"reduceOptions": {{
"calcs": ["lastNotNull"],
"fields": "",
"values": false
}}
}},
"pluginVersion": "9.0.0",
"targets": [
{{
"expr": "sum(kube_pod_status_phase{{namespace=\"{namespace}\", phase=\"Running\"}})",
"legendFormat": "",
"refId": "A"
}}
],
"title": "Pods in Namespace",
"type": "stat"
}},
{{
"datasource": "$datasource",
"fieldConfig": {{
"defaults": {{
"unit": "short"
}},
"overrides": []
}},
"gridPos": {{
"h": 6,
"w": 6,
"x": 6,
"y": 0
}},
"id": 2,
"options": {{
"reduceOptions": {{
"calcs": ["lastNotNull"],
"fields": "",
"values": false
}}
}},
"pluginVersion": "9.0.0",
"targets": [
{{
"expr": "sum(kube_pod_status_phase{{phase=\"Failed\", namespace=\"{namespace}\"}})",
"legendFormat": "",
"refId": "A"
}}
],
"title": "Pods in Failed State",
"type": "stat"
}},
{{
"datasource": "$datasource",
"fieldConfig": {{
"defaults": {{
"unit": "percentunit"
}},
"overrides": []
}},
"gridPos": {{
"h": 6,
"w": 12,
"x": 0,
"y": 6
}},
"id": 3,
"options": {{
"reduceOptions": {{
"calcs": ["lastNotNull"],
"fields": "",
"values": false
}}
}},
"pluginVersion": "9.0.0",
"targets": [
{{
"expr": "sum(kube_deployment_status_replicas_available{{namespace=\"{namespace}\"}}) / sum(kube_deployment_spec_replicas{{namespace=\"{namespace}\"}})",
"legendFormat": "",
"refId": "A"
}}
],
"title": "Deployment Health (Available / Desired)",
"type": "stat"
}},
{{
"datasource": "$datasource",
"fieldConfig": {{
"defaults": {{
"unit": "short"
}},
"overrides": []
}},
"gridPos": {{
"h": 6,
"w": 12,
"x": 0,
"y": 12
}},
"id": 4,
"options": {{
"reduceOptions": {{
"calcs": ["lastNotNull"],
"fields": "",
"values": false
}}
}},
"pluginVersion": "9.0.0",
"targets": [
{{
"expr": "sum by(pod) (rate(kube_pod_container_status_restarts_total{{namespace=\"{namespace}\"}}[5m]))",
"legendFormat": "{{{{pod}}}}",
"refId": "A"
}}
],
"title": "Container Restarts (per pod)",
"type": "timeseries"
}},
{{
"datasource": "$datasource",
"fieldConfig": {{
"defaults": {{
"unit": "short"
}},
"overrides": []
}},
"gridPos": {{
"h": 6,
"w": 12,
"x": 0,
"y": 18
}},
"id": 5,
"options": {{
"reduceOptions": {{
"calcs": ["lastNotNull"],
"fields": "",
"values": false
}}
}},
"pluginVersion": "9.0.0",
"targets": [
{{
"expr": "sum(ALERTS{{alertstate=\"firing\", namespace=\"{namespace}\"}}) or vector(0)",
"legendFormat": "",
"refId": "A"
}}
],
"title": "Firing Alerts in Namespace",
"type": "stat"
}}
],
"schemaVersion": 36,
"templating": {{
"list": [
{{
"name": "datasource",
"type": "datasource",
"pluginId": "prometheus",
"label": "Prometheus",
"query": "prometheus",
"refresh": 1,
"hide": 0,
"current": {{
"selected": true,
"text": "Prometheus",
"value": "Prometheus"
}}
}}
]
}},
"title": "Tenant Namespace Overview",
"version": 1
}}"#
);
dashboard
}

@@ -1,20 +0,0 @@
use std::str::FromStr;
use non_blank_string_rs::NonBlankString;
use crate::modules::helm::chart::HelmChartScore;
pub fn grafana_operator_helm_chart_score(ns: String) -> HelmChartScore {
HelmChartScore {
namespace: Some(NonBlankString::from_str(&ns).unwrap()),
release_name: NonBlankString::from_str("grafana_operator").unwrap(),
chart_name: NonBlankString::from_str("oci://ghcr.io/grafana/helm-charts/grafana-operator")
.unwrap(),
chart_version: None,
values_overrides: None,
values_yaml: None,
create_namespace: true,
install_only: true,
repository: None,
}
}

@@ -1,11 +0,0 @@
pub mod crd_alertmanager_config;
pub mod crd_alertmanagers;
pub mod crd_default_rules;
pub mod crd_grafana;
pub mod crd_prometheus_rules;
pub mod crd_prometheuses;
pub mod grafana_default_dashboard;
pub mod grafana_operator;
pub mod prometheus_operator;
pub mod role;
pub mod service_monitor;

@@ -1,22 +0,0 @@
use std::str::FromStr;
use non_blank_string_rs::NonBlankString;
use crate::modules::helm::chart::HelmChartScore;
pub fn prometheus_operator_helm_chart_score(ns: String) -> HelmChartScore {
HelmChartScore {
namespace: Some(NonBlankString::from_str(&ns).unwrap()),
release_name: NonBlankString::from_str("prometheus-operator").unwrap(),
chart_name: NonBlankString::from_str(
"oci://hub.nationtech.io/harmony/nt-prometheus-operator",
)
.unwrap(),
chart_version: None,
values_overrides: None,
values_yaml: None,
create_namespace: true,
install_only: true,
repository: None,
}
}

@@ -1,62 +0,0 @@
use k8s_openapi::api::{
core::v1::ServiceAccount,
rbac::v1::{PolicyRule, Role, RoleBinding, RoleRef, Subject},
};
use kube::api::ObjectMeta;
pub fn build_prom_role(role_name: String, namespace: String) -> Role {
Role {
metadata: ObjectMeta {
name: Some(role_name),
namespace: Some(namespace),
..Default::default()
},
rules: Some(vec![PolicyRule {
api_groups: Some(vec!["".into()]), // core API group
resources: Some(vec!["services".into(), "endpoints".into(), "pods".into()]),
verbs: vec!["get".into(), "list".into(), "watch".into()],
..Default::default()
}]),
}
}
pub fn build_prom_rolebinding(
role_name: String,
namespace: String,
service_account_name: String,
) -> RoleBinding {
RoleBinding {
metadata: ObjectMeta {
name: Some(format!("{}-rolebinding", role_name)),
namespace: Some(namespace.clone()),
..Default::default()
},
role_ref: RoleRef {
api_group: "rbac.authorization.k8s.io".into(),
kind: "Role".into(),
name: role_name,
},
subjects: Some(vec![Subject {
kind: "ServiceAccount".into(),
name: service_account_name,
namespace: Some(namespace.clone()),
..Default::default()
}]),
}
}
pub fn build_prom_service_account(
service_account_name: String,
namespace: String,
) -> ServiceAccount {
ServiceAccount {
automount_service_account_token: None,
image_pull_secrets: None,
metadata: ObjectMeta {
name: Some(service_account_name),
namespace: Some(namespace),
..Default::default()
},
secrets: None,
}
}

@@ -1,87 +0,0 @@
use std::collections::HashMap;
use kube::CustomResource;
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
use crate::modules::monitoring::kube_prometheus::types::{
HTTPScheme, MatchExpression, NamespaceSelector, Operator, Selector,
ServiceMonitor as KubeServiceMonitor, ServiceMonitorEndpoint,
};
/// This is the top-level struct for the ServiceMonitor Custom Resource.
/// The `#[derive(CustomResource)]` macro handles all the boilerplate for you,
/// including the `impl Resource`.
#[derive(CustomResource, Serialize, Deserialize, Debug, Clone, JsonSchema)]
#[kube(
group = "monitoring.coreos.com",
version = "v1",
kind = "ServiceMonitor",
plural = "servicemonitors",
namespaced
)]
#[serde(rename_all = "camelCase")]
pub struct ServiceMonitorSpec {
/// A label selector to select services to monitor.
pub selector: Selector,
/// A list of endpoints on the selected services to be monitored.
pub endpoints: Vec<ServiceMonitorEndpoint>,
/// Selector to select which namespaces the Kubernetes Endpoints objects
/// are discovered from.
#[serde(default, skip_serializing_if = "Option::is_none")]
pub namespace_selector: Option<NamespaceSelector>,
/// The label to use to retrieve the job name from.
#[serde(default, skip_serializing_if = "Option::is_none")]
pub job_label: Option<String>,
/// Pod-based target labels to transfer from the Kubernetes Pod onto the target.
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub pod_target_labels: Vec<String>,
/// TargetLabels transfers labels on the Kubernetes Service object to the target.
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub target_labels: Vec<String>,
}
impl Default for ServiceMonitorSpec {
fn default() -> Self {
let labels = HashMap::new();
Self {
selector: Selector {
match_labels: { labels },
match_expressions: vec![MatchExpression {
key: "app.kubernetes.io/name".into(),
operator: Operator::Exists,
values: vec![],
}],
},
endpoints: vec![ServiceMonitorEndpoint {
port: Some("http".to_string()),
path: Some("/metrics".into()),
interval: Some("30s".into()),
scheme: Some(HTTPScheme::HTTP),
..Default::default()
}],
namespace_selector: None, // only the same namespace
job_label: Some("app".into()),
pod_target_labels: vec![],
target_labels: vec![],
}
}
}
impl From<KubeServiceMonitor> for ServiceMonitorSpec {
fn from(value: KubeServiceMonitor) -> Self {
Self {
selector: value.selector,
endpoints: value.endpoints,
namespace_selector: value.namespace_selector,
job_label: value.job_label,
pod_target_labels: value.pod_target_labels,
target_labels: value.target_labels,
}
}
}

@@ -27,12 +27,6 @@ pub struct KubePrometheusConfig {
pub alert_rules: Vec<AlertManagerAdditionalPromRules>,
pub additional_service_monitors: Vec<ServiceMonitor>,
}
-impl Default for KubePrometheusConfig {
-    fn default() -> Self {
-        Self::new()
-    }
-}
impl KubePrometheusConfig {
pub fn new() -> Self {
Self {
@@ -41,7 +35,7 @@ impl KubePrometheusConfig {
windows_monitoring: false,
alert_manager: true,
grafana: true,
-            node_exporter: true,
+            node_exporter: false,
prometheus: true,
kubernetes_service_monitors: true,
kubernetes_api_server: true,

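The hunk above drops a Default impl that merely delegated to new(). Keeping the pair is the conventional answer to clippy's new_without_default lint: callers get both the descriptive constructor and Default-based APIs. Minimal form:

pub struct Config {
    pub alert_manager: bool,
}

impl Config {
    pub fn new() -> Self {
        Self { alert_manager: true }
    }
}

impl Default for Config {
    fn default() -> Self {
        Self::new()
    }
}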
@@ -12,8 +12,8 @@ use crate::modules::{
helm::chart::HelmChartScore,
monitoring::kube_prometheus::types::{
AlertGroup, AlertManager, AlertManagerAdditionalPromRules, AlertManagerConfig,
-        AlertManagerConfigSelector, AlertManagerRoute, AlertManagerSpec, AlertManagerValues,
-        ConfigReloader, Limits, PrometheusConfig, Requests, Resources,
+        AlertManagerRoute, AlertManagerSpec, AlertManagerValues, ConfigReloader, Limits,
+        PrometheusConfig, Requests, Resources,
},
};
@@ -35,7 +35,7 @@ pub fn kube_prometheus_helm_chart_score(
let kube_proxy = config.kube_proxy.to_string();
let kube_state_metrics = config.kube_state_metrics.to_string();
let node_exporter = config.node_exporter.to_string();
-    let _prometheus_operator = config.prometheus_operator.to_string();
+    let prometheus_operator = config.prometheus_operator.to_string();
let prometheus = config.prometheus.to_string();
let resource_limit = Resources {
limits: Limits {
@@ -64,7 +64,7 @@ pub fn kube_prometheus_helm_chart_score(
indent_lines(&yaml, indent_level + 2)
)
}
-    let _resource_section = resource_block(&resource_limit, 2);
+    let resource_section = resource_block(&resource_limit, 2);
let mut values = format!(
r#"
@@ -332,11 +332,6 @@ prometheusOperator:
.push(receiver.channel_receiver.clone());
}
-    let mut labels = BTreeMap::new();
-    labels.insert("alertmanagerConfig".to_string(), "enabled".to_string());
-    let alert_manager_config_selector = AlertManagerConfigSelector {
-        match_labels: labels,
-    };
let alert_manager_values = AlertManagerValues {
alertmanager: AlertManager {
enabled: config.alert_manager,
@@ -352,8 +347,6 @@ prometheusOperator:
cpu: "100m".to_string(),
},
},
-            alert_manager_config_selector,
-            replicas: 2,
},
init_config_reloader: ConfigReloader {
resources: Resources {

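The removed selector above is what lets Alertmanager discover AlertmanagerConfig CRDs: the chart values gain a matchLabels block, and the receivers created elsewhere in this diff carry the matching `alertmanagerConfig: enabled` label. A sketch of building that label set:

use std::collections::BTreeMap;

fn alertmanager_config_selector() -> BTreeMap<String, String> {
    let mut labels = BTreeMap::new();
    labels.insert("alertmanagerConfig".to_string(), "enabled".to_string());
    labels // rendered into the values as alertmanagerConfigSelector.matchLabels
}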
@@ -1,4 +1,3 @@
-pub mod crd;
pub mod helm;
pub mod helm_prometheus_alert_score;
pub mod prometheus;
