Compare commits

..

6 Commits

Author SHA1 Message Date
a559920ecd testing alert receiver score in sttest 2026-04-13 13:55:47 -04:00
5891d07bb4 Merge pull request 'feat/arm-cross-compilation' (#259) from feat/arm-cross-compilation into master
Some checks failed
Compile and package harmony_composer / package_harmony_composer (push) Failing after 1m54s
Run Check Script / check (push) Failing after 14m34s
Reviewed-on: #259
2026-04-11 11:13:49 +00:00
c9fb0a4236 Merge pull request 'feat: Refactor load balancer to remove side effect and improve types' (#258) from feat/removesideeffect into master
Some checks failed
Compile and package harmony_composer / package_harmony_composer (push) Failing after 10m55s
Run Check Script / check (push) Failing after 11m5s
Reviewed-on: #258
2026-04-10 15:17:23 +00:00
abb57b4059 fix(build): mark cross-arm.sh executable
All checks were successful
Run Check Script / check (pull_request) Successful in 1m54s
2026-04-10 08:22:53 -04:00
0b451c6f35 feat(build): aarch64 cross-compilation for agent crates
Adds ARM (Raspberry Pi) cross-compilation support for harmony_agent
and harmony_inventory_agent.

- .cargo/config.toml: aarch64-linux-gnu-gcc linker
- build/cross-arm.sh: local cross-build script with prereq checks
- .gitea/workflows/arm-agents.yaml: CI workflow that builds both
  agents for aarch64 on tagged releases and uploads them as
  release assets (same pattern as harmony_inventory_agent already
  uses)

Removes the unused 'harmony' dependency from harmony_agent — it
was vestigial (no actual imports) and was the only thing pulling
in heavy C deps (libvirt via virt) that blocked ARM builds.
2026-04-10 07:09:03 -04:00
f02f6ac0c3 feat: Refactor load balancer to remove side effect and improve types
All checks were successful
Run Check Script / check (pull_request) Successful in 2m20s
2026-04-09 22:50:10 -04:00
24 changed files with 286 additions and 314 deletions

View File

@@ -3,3 +3,6 @@ rustflags = ["-C", "link-arg=/STACK:8000000"]
[target.x86_64-pc-windows-gnu]
rustflags = ["-C", "link-arg=-Wl,--stack,8000000"]
[target.aarch64-unknown-linux-gnu]
linker = "aarch64-linux-gnu-gcc"

View File

@@ -0,0 +1,78 @@
# Builds harmony_agent and harmony_inventory_agent for aarch64 (Raspberry Pi)
# on tagged releases (v*, snapshot-*) and uploads the resulting binaries as
# Gitea release assets. Mirrors the existing harmony_inventory_agent pattern.
name: Build ARM agent binaries

on:
  push:
    tags:
      - 'v*'
      - 'snapshot-*'
  workflow_dispatch:

jobs:
  build_arm_agents:
    container:
      image: hub.nationtech.io/harmony/harmony_composer:latest
    runs-on: docker
    steps:
      - name: Checkout code
        uses: actions/checkout@v4
        with:
          submodules: 'recursive'
          fetch-depth: 1

      - name: Install ARM cross-compilation toolchain
        run: |
          apt-get update -qq
          apt-get install -y -qq gcc-aarch64-linux-gnu
          rustup target add aarch64-unknown-linux-gnu

      - name: Build agent crates for aarch64
        run: |
          cargo build --release --target aarch64-unknown-linux-gnu \
            -p harmony_agent \
            -p harmony_inventory_agent

      - name: Install jq
        run: apt-get install -y -qq jq

      # Look up the release for the current tag; create a prerelease if the
      # tag has no release yet. The release id is exported for upload steps.
      - name: Get or create release
        run: |
          TAG_NAME="${GITHUB_REF_NAME}"
          # Try to get existing release
          RELEASE_ID=$(curl -s -X GET \
            -H "Authorization: token ${{ secrets.GITEATOKEN }}" \
            "https://git.nationtech.io/api/v1/repos/nationtech/harmony/releases/tags/${TAG_NAME}" \
            | jq -r '.id // empty')
          if [ -z "$RELEASE_ID" ]; then
            # Create new release
            RESPONSE=$(curl -s -X POST \
              -H "Authorization: token ${{ secrets.GITEATOKEN }}" \
              -H "Content-Type: application/json" \
              -d "{
                \"tag_name\": \"${TAG_NAME}\",
                \"name\": \"${TAG_NAME}\",
                \"body\": \"Release ${TAG_NAME}\",
                \"draft\": false,
                \"prerelease\": true
              }" \
              "https://git.nationtech.io/api/v1/repos/nationtech/harmony/releases")
            RELEASE_ID=$(echo "$RESPONSE" | jq -r '.id')
          fi
          echo "RELEASE_ID=$RELEASE_ID" >> $GITHUB_ENV

      - name: Upload harmony_agent ARM binary
        run: |
          curl -X POST \
            -H "Authorization: token ${{ secrets.GITEATOKEN }}" \
            -H "Content-Type: application/octet-stream" \
            --data-binary "@target/aarch64-unknown-linux-gnu/release/harmony_agent" \
            "https://git.nationtech.io/api/v1/repos/nationtech/harmony/releases/${{ env.RELEASE_ID }}/assets?name=harmony_agent-aarch64-linux"

      - name: Upload harmony_inventory_agent ARM binary
        run: |
          curl -X POST \
            -H "Authorization: token ${{ secrets.GITEATOKEN }}" \
            -H "Content-Type: application/octet-stream" \
            --data-binary "@target/aarch64-unknown-linux-gnu/release/harmony_inventory_agent" \
            "https://git.nationtech.io/api/v1/repos/nationtech/harmony/releases/${{ env.RELEASE_ID }}/assets?name=harmony_inventory_agent-aarch64-linux"

17
Cargo.lock generated
View File

@@ -1262,6 +1262,22 @@ dependencies = [
"url",
]
[[package]]
name = "brocade-switch-oricom-configuration"
version = "0.1.0"
dependencies = [
"async-trait",
"brocade",
"env_logger",
"harmony",
"harmony_cli",
"harmony_macros",
"harmony_types",
"log",
"serde",
"tokio",
]
[[package]]
name = "brotli"
version = "8.0.2"
@@ -3640,7 +3656,6 @@ dependencies = [
"cidr",
"env_logger",
"getrandom 0.3.4",
"harmony",
"harmony_macros",
"harmony_types",
"log",

43
build/cross-arm.sh Executable file
View File

@@ -0,0 +1,43 @@
#!/bin/sh
# Cross-compile the Harmony agent crates for aarch64 (Raspberry Pi).
#
# Prerequisites (Debian/Ubuntu):
#   sudo apt install gcc-aarch64-linux-gnu
#   rustup target add aarch64-unknown-linux-gnu
#
# Prerequisites (Arch Linux):
#   sudo pacman -S aarch64-linux-gnu-gcc
#   rustup target add aarch64-unknown-linux-gnu
set -e

# Run from the repository root regardless of where the script is invoked.
cd "$(dirname "$0")/.."

TARGET="aarch64-unknown-linux-gnu"

echo "=== Cross-compiling for $TARGET ==="

# Fail fast with actionable messages before starting a long build.
# Diagnostics go to stderr so stdout stays clean for build output.
if ! rustup target list --installed | grep -q "$TARGET"; then
    echo "ERROR: Rust target $TARGET not installed. Run: rustup target add $TARGET" >&2
    exit 1
fi

if ! command -v aarch64-linux-gnu-gcc > /dev/null 2>&1; then
    echo "ERROR: aarch64-linux-gnu-gcc not found. Install the cross-compilation toolchain." >&2
    echo "  Debian/Ubuntu: sudo apt install gcc-aarch64-linux-gnu" >&2
    echo "  Arch Linux:    sudo pacman -S aarch64-linux-gnu-gcc" >&2
    exit 1
fi

echo "--- Building harmony_agent ---"
cargo build --release --target "$TARGET" -p harmony_agent

echo "--- Building harmony_inventory_agent ---"
cargo build --release --target "$TARGET" -p harmony_inventory_agent

echo ""
echo "=== Build complete ==="
echo "Binaries:"
echo "  target/$TARGET/release/harmony_agent"
echo "  target/$TARGET/release/harmony_inventory_agent"

View File

@@ -18,16 +18,17 @@ async fn main() {
"openshift_io_alert_source".to_string(),
"platform".to_string(),
);
let mut sel2 = HashMap::new();
sel2.insert("openshift_io_alert_source".to_string(), "".to_string());
let selectors = vec![sel, sel2];
// let mut sel2 = HashMap::new();
// sel2.insert("openshift_io_alert_source".to_string(), "".to_string());
// let selectors = vec![sel, sel2];
let selectors = vec![sel];
harmony_cli::run(
Inventory::autoload(),
K8sAnywhereTopology::from_env(),
vec![Box::new(OpenshiftClusterAlertScore {
receivers: vec![Box::new(DiscordWebhook {
name: K8sName("wills-discord-webhook-example".to_string()),
url: hurl!("https://something.io"),
name: K8sName("sttest0-discord-alerts-webhook-example".to_string()),
url: hurl!("https://discord.com/api/webhooks/1493247909326622953/8kmQAqv1wi4PXGv9LP3MTxAAoSN1Sop2hppNe9FY9Gtqk75eMSfJUJ6YKgq-Hnqcnn9F"),
selectors: selectors,
})],
})],

View File

@@ -600,7 +600,6 @@ fn build_all_scores() -> Result<Vec<Box<dyn Score<OPNSenseFirewall>>>, Box<dyn s
},
],
private_services: vec![],
wan_firewall_ports: vec![],
};
let dhcp_score = DhcpScore::new(

View File

@@ -1,4 +1,4 @@
export HARMONY_SECRET_NAMESPACE=sttest0
export HARMONY_SECRET_STORE=file
export HARMONY_DATABASE_URL=sqlite://harmony_sttest0.sqlite
export RUST_LOG=info
export RUST_LOG=harmony=debug

View File

@@ -1,21 +1,56 @@
mod topology;
use std::collections::HashMap;
use crate::topology::{get_inventory, get_topology};
use harmony::{
config::secret::SshKeyPair,
data::{FileContent, FilePath},
modules::{
inventory::HarmonyDiscoveryStrategy,
monitoring::{
alert_channel::discord_alert_channel::DiscordWebhook,
okd::cluster_monitoring::OpenshiftClusterAlertScore,
},
okd::{installation::OKDInstallationPipeline, ipxe::OKDIpxeScore},
},
score::Score,
topology::HAClusterTopology,
};
use harmony::modules::monitoring::okd::OpenshiftClusterAlertSender;
use harmony::topology::oberservability::monitoring::AlertReceiver;
use harmony_macros::hurl;
use harmony_secret::SecretManager;
use harmony_types::k8s_name::K8sName;
fn create_receivers() -> Vec<Box<dyn AlertReceiver<OpenshiftClusterAlertSender>>> {
let none_receiver = Box::new(DiscordWebhook {
name: K8sName("af-qc1-none".to_string()),
url: hurl!("https://discord.com/api/webhooks/1493267544893689958/Xg-l8cGhbSsC6DAHGpShFtZ6xhvl5rosBGYtlqiFs2ovBQVoKpsT8mbgFOz3vv9VEE1g"),
selectors: vec![HashMap::from([("severity".to_string(), "none".to_string())])],
});
let info_receiver = Box::new(DiscordWebhook {
name: K8sName("af-qc1-info".to_string()),
url: hurl!("https://discord.com/api/webhooks/1493268147933806713/QcIB5diyHw8GPYmMicffXO8V1oMeHdY8bpkZUxMWul1VLGiSbPjLcC-m6u1uWCjb4gkN"),
selectors: vec![HashMap::from([("severity".to_string(), "info".to_string())])],
});
let warn_receiver = Box::new(DiscordWebhook {
name: K8sName("af-qc1-warn".to_string()),
url: hurl!("https://discord.com/api/webhooks/1493268009807249450/L762ILnosGRGsqGLiOEvxncvC6752l3SNYkl7thnMqttkapFUnfjuWhtaQ08AnqFIRKn"),
selectors: vec![HashMap::from([("severity".to_string(), "warning".to_string())])],
});
let crit_receiver = Box::new(DiscordWebhook {
name: K8sName("af-qc1-crit".to_string()),
url: hurl!("https://discord.com/api/webhooks/1493268276497747998/DVLUnI-LbnHPdDbM7SG_5UTMq_CNlsPawD6aqT1teipIO9JjqFKdkNPBU20vsS3P8L14"),
selectors: vec![HashMap::from([("severity".to_string(), "critical".to_string())])],
});
vec![none_receiver, info_receiver, warn_receiver, crit_receiver]
}
#[tokio::main]
async fn main() {
// env_logger::init();
harmony_cli::cli_logger::init();
let inventory = get_inventory();
let topology = get_topology().await;
@@ -35,6 +70,10 @@ async fn main() {
scores
.append(&mut OKDInstallationPipeline::get_all_scores(HarmonyDiscoveryStrategy::MDNS).await);
scores.push(Box::new(OpenshiftClusterAlertScore {
receivers: create_receivers(),
}));
harmony_cli::run(inventory, topology, scores, None)
.await
.unwrap();

View File

@@ -69,6 +69,5 @@ fn build_large_score() -> LoadBalancerScore {
lb_service.clone(),
lb_service.clone(),
],
wan_firewall_ports: vec![],
}
}

View File

@@ -15,7 +15,7 @@ use async_trait::async_trait;
use harmony_types::firewall::VipMode;
use harmony_types::id::Id;
use harmony_types::net::{IpAddress, MacAddress};
use log::info;
use log::{info, warn};
use serde::Serialize;
use crate::config::secret::{OPNSenseApiCredentials, OPNSenseFirewallCredentials};
@@ -60,14 +60,9 @@ impl FirewallPairTopology {
///
/// Credentials are loaded via `SecretManager::get_or_prompt`.
pub async fn opnsense_from_config() -> Self {
// TODO: both firewalls share the same credentials. Named config instances
// are now available in harmony_config (ConfigManager::get_named /
// get_or_prompt_named). To use per-device credentials here, add
// harmony_config as a dependency and impl Config for OPNSenseApiCredentials
// and OPNSenseFirewallCredentials, then replace the calls below with:
// let api_creds = ConfigManager::get_or_prompt_named::<OPNSenseApiCredentials>("fw-primary").await?;
// let backup_api = ConfigManager::get_or_prompt_named::<OPNSenseApiCredentials>("fw-backup").await?;
// See ROADMAP/11-named-config-instances.md for details.
// TODO: both firewalls share the same credentials. Once named config
// instances are available (ROADMAP/11), use per-device credentials:
// ConfigManager::get_named::<OPNSenseApiCredentials>("fw-primary")
let ssh_creds = SecretManager::get_or_prompt::<OPNSenseFirewallCredentials>()
.await
.expect("Failed to get SSH credentials");
@@ -181,8 +176,24 @@ impl DhcpServer for FirewallPairTopology {
}
async fn list_static_mappings(&self) -> Vec<(MacAddress, IpAddress)> {
// Return primary's view, but cross-check it against the backup's and warn on any divergence
self.primary.list_static_mappings().await
let primary_mappings = self.primary.list_static_mappings().await;
let backup_mappings = self.backup.list_static_mappings().await;
let primary_set: std::collections::HashSet<_> = primary_mappings.iter().collect();
let backup_set: std::collections::HashSet<_> = backup_mappings.iter().collect();
let only_primary: Vec<_> = primary_set.difference(&backup_set).collect();
let only_backup: Vec<_> = backup_set.difference(&primary_set).collect();
if !only_primary.is_empty() || !only_backup.is_empty() {
warn!(
"DHCP static mapping mismatch between primary and backup firewalls! \
Only on primary: {:?}, Only on backup: {:?}",
only_primary, only_backup
);
}
primary_mappings
}
/// Returns the primary firewall's IP. In a CARP setup, callers

View File

@@ -489,6 +489,9 @@ impl LoadBalancer for DummyInfra {
async fn reload_restart(&self) -> Result<(), ExecutorError> {
unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA)
}
async fn ensure_wan_access(&self, _port: u16) -> Result<(), ExecutorError> {
unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA)
}
}
#[async_trait]

View File

@@ -37,11 +37,9 @@ pub trait LoadBalancer: Send + Sync {
/// from the WAN interface. Used by load balancers that need to receive
/// external traffic (e.g., OKD ingress on ports 80/443).
///
/// Default implementation is a no-op for topologies that don't manage
/// firewall rules (e.g., cloud environments with security groups).
async fn ensure_wan_access(&self, _port: u16) -> Result<(), ExecutorError> {
Ok(())
}
/// Topologies that don't manage firewall rules (e.g., cloud environments
/// with security groups) should return `Ok(())`.
async fn ensure_wan_access(&self, port: u16) -> Result<(), ExecutorError>;
}
#[derive(Debug, PartialEq, Clone, Serialize)]

View File

@@ -111,12 +111,16 @@ impl LoadBalancer for OPNSenseFirewall {
}
fn haproxy_service_to_harmony(svc: &HaproxyService) -> Option<LoadBalancerService> {
let listening_port = svc.bind.parse().unwrap_or_else(|_| {
panic!(
"HAProxy frontend address should be a valid SocketAddr, got {}",
svc.bind
)
});
let listening_port = match svc.bind.parse() {
Ok(addr) => addr,
Err(e) => {
warn!(
"Skipping HAProxy service: bind address '{}' is not a valid SocketAddr: {e}",
svc.bind
);
return None;
}
};
let backend_servers: Vec<BackendServer> = svc
.servers

View File

@@ -10,6 +10,7 @@ use virt::sys;
use super::error::KvmError;
use super::types::{CdromConfig, NetworkConfig, VmConfig, VmInterface, VmStatus};
use super::xml;
use harmony_types::net::MacAddress;
/// A handle to a libvirt hypervisor.
///
@@ -374,14 +375,15 @@ impl KvmExecutor {
pub async fn set_interface_link(
&self,
vm_name: &str,
mac: &str,
mac: &MacAddress,
up: bool,
) -> Result<(), KvmError> {
let state = if up { "up" } else { "down" };
info!("Setting {vm_name} interface {mac} link {state}");
let mac_str = mac.to_string();
info!("Setting {vm_name} interface {mac_str} link {state}");
let output = tokio::process::Command::new("virsh")
.args(["-c", &self.uri, "domif-setlink", vm_name, mac, state])
.args(["-c", &self.uri, "domif-setlink", vm_name, &mac_str, state])
.output()
.await?;
@@ -420,11 +422,18 @@ impl KvmExecutor {
// virsh domiflist columns: Interface, Type, Source, Model, MAC
let parts: Vec<&str> = line.split_whitespace().collect();
if parts.len() >= 5 {
let mac = match MacAddress::try_from(parts[4].to_string()) {
Ok(m) => m,
Err(e) => {
warn!("Skipping interface with invalid MAC '{}': {e}", parts[4]);
continue;
}
};
interfaces.push(VmInterface {
interface_type: parts[1].to_string(),
source: parts[2].to_string(),
model: parts[3].to_string(),
mac: parts[4].to_string(),
mac,
});
}
}

View File

@@ -1,3 +1,4 @@
use harmony_types::net::MacAddress;
use serde::{Deserialize, Serialize};
/// Information about a VM's network interface, as reported by `virsh domiflist`.
@@ -10,7 +11,7 @@ pub struct VmInterface {
/// Device model (e.g. "virtio")
pub model: String,
/// MAC address
pub mac: String,
pub mac: MacAddress,
}
/// Specifies how a KVM host is accessed.
@@ -95,7 +96,7 @@ pub struct NetworkRef {
pub name: String,
/// Optional fixed MAC address for this interface. When `None`, libvirt
/// assigns one automatically.
pub mac: Option<String>,
pub mac: Option<MacAddress>,
}
impl NetworkRef {
@@ -106,8 +107,8 @@ impl NetworkRef {
}
}
pub fn with_mac(mut self, mac: impl Into<String>) -> Self {
self.mac = Some(mac.into());
pub fn with_mac(mut self, mac: MacAddress) -> Self {
self.mac = Some(mac);
self
}
}
@@ -260,7 +261,7 @@ impl VmConfigBuilder {
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct DhcpHost {
/// MAC address (e.g. `"52:54:00:00:50:01"`).
pub mac: String,
pub mac: MacAddress,
/// IP to assign (e.g. `"10.50.0.2"`).
pub ip: String,
/// Optional hostname.
@@ -356,12 +357,12 @@ impl NetworkConfigBuilder {
/// Add a static DHCP host entry (MAC → fixed IP).
pub fn dhcp_host(
mut self,
mac: impl Into<String>,
mac: MacAddress,
ip: impl Into<String>,
name: Option<String>,
) -> Self {
self.dhcp_hosts.push(DhcpHost {
mac: mac.into(),
mac,
ip: ip.into(),
name,
});

View File

@@ -136,7 +136,7 @@ fn nic_devices(vm: &VmConfig) -> String {
.map(|net| {
let mac_line = net
.mac
.as_deref()
.as_ref()
.map(|m| format!("\n <mac address='{m}'/>"))
.unwrap_or_default();
format!(
@@ -221,6 +221,7 @@ mod tests {
use crate::modules::kvm::types::{
BootDevice, ForwardMode, NetworkConfig, NetworkRef, VmConfig,
};
use harmony_types::net::MacAddress;
// ── Domain XML ──────────────────────────────────────────────────────
@@ -284,12 +285,13 @@ mod tests {
#[test]
fn domain_xml_nic_with_mac_address() {
let mac: MacAddress = "52:54:00:aa:bb:cc".to_string().try_into().unwrap();
let vm = VmConfig::builder("mac-test")
.network(NetworkRef::named("mynet").with_mac("52:54:00:AA:BB:CC"))
.network(NetworkRef::named("mynet").with_mac(mac))
.build();
let xml = domain_xml(&vm, "/tmp");
assert!(xml.contains("mac address='52:54:00:AA:BB:CC'"));
assert!(xml.contains("mac address='52:54:00:aa:bb:cc'"));
}
#[test]
@@ -454,14 +456,11 @@ mod tests {
#[test]
fn network_xml_with_dhcp_host() {
let mac: MacAddress = "52:54:00:00:50:01".to_string().try_into().unwrap();
let cfg = NetworkConfig::builder("hostnet")
.subnet("10.50.0.1", 24)
.dhcp_range("10.50.0.100", "10.50.0.200")
.dhcp_host(
"52:54:00:00:50:01",
"10.50.0.2",
Some("opnsense".to_string()),
)
.dhcp_host(mac, "10.50.0.2", Some("opnsense".to_string()))
.build();
let xml = network_xml(&cfg);

View File

@@ -19,12 +19,6 @@ pub struct LoadBalancerScore {
// (listen_interface, LoadBalancerService) tuples or something like that
// I am not sure what to use as listen_interface, should it be interface name, ip address,
// uuid?
/// TCP ports that must be open for inbound WAN traffic.
///
/// The load balancer interpret will call `ensure_wan_access` for each port
/// before configuring services, so that the load balancer is reachable
/// from outside the LAN.
pub wan_firewall_ports: Vec<u16>,
}
impl<T: Topology + LoadBalancer> Score<T> for LoadBalancerScore {
@@ -66,11 +60,6 @@ impl<T: Topology + LoadBalancer> Interpret<T> for LoadBalancerInterpret {
load_balancer.ensure_initialized().await?
);
for port in &self.score.wan_firewall_ports {
info!("Ensuring WAN access for port {port}");
load_balancer.ensure_wan_access(*port).await?;
}
for service in self.score.public_services.iter() {
info!("Ensuring service exists {service:?}");

View File

@@ -350,13 +350,20 @@ impl OKDSetup02BootstrapInterpret {
&self,
inventory: &Inventory,
) -> Result<(), InterpretError> {
info!("[Stage 02/Bootstrap] Waiting for bootstrap to complete...");
let timeout_minutes: u64 = std::env::var("HARMONY_OKD_BOOTSTRAP_TIMEOUT_MINUTES")
.ok()
.and_then(|v| v.parse().ok())
.unwrap_or(90);
info!(
"[Stage 02/Bootstrap] Waiting for bootstrap to complete (timeout: {timeout_minutes}m)..."
);
info!("[Stage 02/Bootstrap] Running: openshift-install wait-for bootstrap-complete");
let okd_installation_path =
format!("./data/okd/installation_files_{}", inventory.location.name);
let output = Command::new("./data/okd/bin/openshift-install")
let child = Command::new("./data/okd/bin/openshift-install")
.args([
"wait-for",
"bootstrap-complete",
@@ -364,8 +371,17 @@ impl OKDSetup02BootstrapInterpret {
&okd_installation_path,
"--log-level=info",
])
.output()
.output();
let timeout = std::time::Duration::from_secs(timeout_minutes * 60);
let output = tokio::time::timeout(timeout, child)
.await
.map_err(|_| {
InterpretError::new(format!(
"[Stage 02/Bootstrap] bootstrap-complete timed out after {timeout_minutes} minutes. \
Set HARMONY_OKD_BOOTSTRAP_TIMEOUT_MINUTES to increase the timeout and retry."
))
})?
.map_err(|e| {
InterpretError::new(format!(
"[Stage 02/Bootstrap] Failed to run openshift-install wait-for bootstrap-complete: {e}"

View File

@@ -56,7 +56,6 @@ impl OKDBootstrapLoadBalancerScore {
load_balancer_score: LoadBalancerScore {
public_services: vec![],
private_services,
wan_firewall_ports: vec![80, 443],
},
}
}

View File

@@ -114,7 +114,6 @@ impl OKDLoadBalancerScore {
load_balancer_score: LoadBalancerScore {
public_services,
private_services,
wan_firewall_ports: vec![80, 443],
},
}
}
@@ -339,13 +338,6 @@ mod tests {
assert_eq!(private_service_22623.backend_servers.len(), 3);
}
#[test]
fn test_wan_firewall_ports_include_http_and_https() {
let topology = create_test_topology();
let score = OKDLoadBalancerScore::new(&topology);
assert_eq!(score.load_balancer_score.wan_firewall_ports, vec![80, 443]);
}
#[test]
fn test_all_backend_servers_have_correct_port() {
let topology = create_test_topology();

View File

@@ -6,7 +6,6 @@ readme.workspace = true
license.workspace = true
[dependencies]
harmony = { path = "../harmony" }
# harmony_cli = { path = "../harmony_cli" }
harmony_types = { path = "../harmony_types" }
harmony_macros = { path = "../harmony_macros" }

View File

@@ -72,11 +72,6 @@ pub trait ConfigSource: Send + Sync {
}
}
/// Build a composite key for a named config instance: `{base_key}/{name}`.
fn named_key(base_key: &str, name: &str) -> String {
format!("{}/{}", base_key, name)
}
pub struct ConfigManager {
sources: Vec<Arc<dyn ConfigSource>>,
}
@@ -87,62 +82,24 @@ impl ConfigManager {
}
pub async fn get<T: Config>(&self) -> Result<T, ConfigError> {
self.get_by_key(T::KEY).await
}
/// Retrieve a named instance of a config type.
///
/// The storage key becomes `{T::KEY}/{name}`, allowing multiple instances
/// of the same config type (e.g., separate credentials for primary and
/// backup firewalls).
pub async fn get_named<T: Config>(&self, name: &str) -> Result<T, ConfigError> {
let key = named_key(T::KEY, name);
self.get_by_key(&key).await
}
pub async fn get_or_prompt<T: Config>(&self) -> Result<T, ConfigError> {
self.get_or_prompt_by_key(T::KEY).await
}
/// Retrieve a named instance, falling back to interactive prompt if not
/// found in any source. The prompt will display the instance name for
/// clarity.
pub async fn get_or_prompt_named<T: Config>(&self, name: &str) -> Result<T, ConfigError> {
let key = named_key(T::KEY, name);
self.get_or_prompt_by_key(&key).await
}
pub async fn set<T: Config>(&self, config: &T) -> Result<(), ConfigError> {
self.set_by_key(T::KEY, config).await
}
/// Store a named instance of a config type.
pub async fn set_named<T: Config>(&self, name: &str, config: &T) -> Result<(), ConfigError> {
let key = named_key(T::KEY, name);
self.set_by_key(&key, config).await
}
// ── Internal helpers ──────────────────────────────────────────────
async fn get_by_key<T: Config>(&self, key: &str) -> Result<T, ConfigError> {
for source in &self.sources {
if let Some(value) = source.get(key).await? {
if let Some(value) = source.get(T::KEY).await? {
let config: T =
serde_json::from_value(value).map_err(|e| ConfigError::Deserialization {
key: key.to_string(),
key: T::KEY.to_string(),
source: e,
})?;
debug!("Retrieved config for key {} from source", key);
debug!("Retrieved config for key {} from source", T::KEY);
return Ok(config);
}
}
Err(ConfigError::NotFound {
key: key.to_string(),
key: T::KEY.to_string(),
})
}
async fn get_or_prompt_by_key<T: Config>(&self, key: &str) -> Result<T, ConfigError> {
match self.get_by_key::<T>(key).await {
pub async fn get_or_prompt<T: Config>(&self) -> Result<T, ConfigError> {
match self.get::<T>().await {
Ok(config) => Ok(config),
Err(ConfigError::NotFound { .. }) => {
let config =
@@ -150,7 +107,7 @@ impl ConfigManager {
let value =
serde_json::to_value(&config).map_err(|e| ConfigError::Serialization {
key: key.to_string(),
key: T::KEY.to_string(),
source: e,
})?;
@@ -158,7 +115,7 @@ impl ConfigManager {
if !source.should_persist() {
continue;
}
if source.set(key, &value).await.is_ok() {
if source.set(T::KEY, &value).await.is_ok() {
break;
}
}
@@ -169,14 +126,14 @@ impl ConfigManager {
}
}
async fn set_by_key<T: Config>(&self, key: &str, config: &T) -> Result<(), ConfigError> {
pub async fn set<T: Config>(&self, config: &T) -> Result<(), ConfigError> {
let value = serde_json::to_value(config).map_err(|e| ConfigError::Serialization {
key: key.to_string(),
key: T::KEY.to_string(),
source: e,
})?;
for source in &self.sources {
source.set(key, &value).await?;
source.set(T::KEY, &value).await?;
}
Ok(())
@@ -217,33 +174,6 @@ pub async fn set<T: Config>(config: &T) -> Result<(), ConfigError> {
.await
}
pub async fn get_named<T: Config>(name: &str) -> Result<T, ConfigError> {
let manager = CONFIG_MANAGER.lock().await;
manager
.as_ref()
.ok_or(ConfigError::NoSources)?
.get_named::<T>(name)
.await
}
pub async fn get_or_prompt_named<T: Config>(name: &str) -> Result<T, ConfigError> {
let manager = CONFIG_MANAGER.lock().await;
manager
.as_ref()
.ok_or(ConfigError::NoSources)?
.get_or_prompt_named::<T>(name)
.await
}
pub async fn set_named<T: Config>(name: &str, config: &T) -> Result<(), ConfigError> {
let manager = CONFIG_MANAGER.lock().await;
manager
.as_ref()
.ok_or(ConfigError::NoSources)?
.set_named::<T>(name, config)
.await
}
pub fn default_config_dir() -> Option<PathBuf> {
ProjectDirs::from("io", "NationTech", "Harmony").map(|dirs| dirs.data_dir().join("config"))
}
@@ -887,155 +817,4 @@ mod tests {
assert_eq!(result.name, "from_sqlite");
assert_eq!(result.count, 99);
}
// ── Named config instance tests ───────────────────────────────────
#[tokio::test]
async fn test_named_get_returns_value_for_named_key() {
let primary = TestConfig {
name: "primary".to_string(),
count: 1,
};
let mut data = std::collections::HashMap::new();
data.insert(
"TestConfig/primary".to_string(),
serde_json::to_value(&primary).unwrap(),
);
let source = Arc::new(MockSource::with_data(data));
let manager = ConfigManager::new(vec![source]);
let result: TestConfig = manager.get_named("primary").await.unwrap();
assert_eq!(result, primary);
}
#[tokio::test]
async fn test_named_and_unnamed_keys_do_not_collide() {
let unnamed = TestConfig {
name: "unnamed".to_string(),
count: 0,
};
let named_primary = TestConfig {
name: "primary".to_string(),
count: 1,
};
let named_backup = TestConfig {
name: "backup".to_string(),
count: 2,
};
let mut data = std::collections::HashMap::new();
data.insert(
"TestConfig".to_string(),
serde_json::to_value(&unnamed).unwrap(),
);
data.insert(
"TestConfig/primary".to_string(),
serde_json::to_value(&named_primary).unwrap(),
);
data.insert(
"TestConfig/backup".to_string(),
serde_json::to_value(&named_backup).unwrap(),
);
let source = Arc::new(MockSource::with_data(data));
let manager = ConfigManager::new(vec![source]);
let r_unnamed: TestConfig = manager.get().await.unwrap();
let r_primary: TestConfig = manager.get_named("primary").await.unwrap();
let r_backup: TestConfig = manager.get_named("backup").await.unwrap();
assert_eq!(r_unnamed, unnamed);
assert_eq!(r_primary, named_primary);
assert_eq!(r_backup, named_backup);
}
#[tokio::test]
async fn test_named_set_and_get_roundtrip() {
let source = Arc::new(MockSource::new());
let manager = ConfigManager::new(vec![source.clone()]);
let config = TestConfig {
name: "instance_a".to_string(),
count: 42,
};
manager.set_named("instance_a", &config).await.unwrap();
let result: TestConfig = manager.get_named("instance_a").await.unwrap();
assert_eq!(result, config);
// Unnamed get should NOT find the named value
let unnamed: Result<TestConfig, ConfigError> = manager.get().await;
assert!(matches!(unnamed, Err(ConfigError::NotFound { .. })));
}
#[tokio::test]
async fn test_named_resolution_through_source_chain() {
use tempfile::NamedTempFile;
let temp_file = NamedTempFile::new().unwrap();
let sqlite = SqliteSource::open(temp_file.path().to_path_buf())
.await
.unwrap();
let sqlite = Arc::new(sqlite);
// Empty first source, config in sqlite
let source1 = Arc::new(MockSource::new());
let manager = ConfigManager::new(vec![source1.clone(), sqlite.clone()]);
let config = TestConfig {
name: "from_sqlite_named".to_string(),
count: 77,
};
sqlite
.set(
"TestConfig/my-instance",
&serde_json::to_value(&config).unwrap(),
)
.await
.unwrap();
let result: TestConfig = manager.get_named("my-instance").await.unwrap();
assert_eq!(result, config);
// First source was checked but had nothing
assert_eq!(source1.get_call_count(), 1);
}
#[tokio::test]
async fn test_named_env_var_format() {
let _lock = ENV_LOCK.lock().unwrap_or_else(|e| e.into_inner());
let config = TestConfig {
name: "from_env_named".to_string(),
count: 55,
};
// Named key "TestConfig/fw-primary" should map to env var
// HARMONY_CONFIG_TestConfig_fw_primary
let env_key = "HARMONY_CONFIG_TestConfig_fw_primary";
unsafe {
std::env::set_var(env_key, serde_json::to_string(&config).unwrap());
}
let env_source = Arc::new(EnvSource);
let manager = ConfigManager::new(vec![env_source]);
let result: TestConfig = manager.get_named("fw-primary").await.unwrap();
assert_eq!(result, config);
unsafe {
std::env::remove_var(env_key);
}
}
#[tokio::test]
async fn test_named_not_found() {
let source = Arc::new(MockSource::new());
let manager = ConfigManager::new(vec![source]);
let result: Result<TestConfig, ConfigError> = manager.get_named("nonexistent").await;
assert!(matches!(result, Err(ConfigError::NotFound { ref key }) if key == "TestConfig/nonexistent"));
}
}

View File

@@ -4,10 +4,7 @@ use async_trait::async_trait;
pub struct EnvSource;
fn env_key_for(config_key: &str) -> String {
// Replace `/` and `-` with `_` so named keys like "MyConfig/fw-primary"
// become valid env var names: HARMONY_CONFIG_MyConfig_fw_primary
let sanitized = config_key.replace(['/', '-'], "_");
format!("HARMONY_CONFIG_{}", sanitized)
format!("HARMONY_CONFIG_{}", config_key)
}
#[async_trait]

View File

@@ -46,10 +46,9 @@ impl ConfigSource for LocalFileSource {
}
async fn set(&self, key: &str, value: &serde_json::Value) -> Result<(), ConfigError> {
fs::create_dir_all(&self.base_path).await?;
let path = self.file_path_for(key);
if let Some(parent) = path.parent() {
fs::create_dir_all(parent).await?;
}
let contents =
serde_json::to_string_pretty(value).map_err(|e| ConfigError::Serialization {
key: key.to_string(),