From 7c1fedb3039f98c6da465f0dd7c215639a5656d6 Mon Sep 17 00:00:00 2001 From: Jean-Gabriel Gill-Couture Date: Thu, 23 Apr 2026 11:10:10 -0400 Subject: [PATCH] =?UTF-8?q?refactor:=20rebrand=20iot=20=E2=86=92=20fleet,?= =?UTF-8?q?=20operator/agent=20crates=20=E2=86=92=20harmony-fleet-*?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The IoT vocabulary was anchoring the codebase to one customer's domain. The reconciler pattern is generic — operator in k8s, NATS KV as desired-state bus, agents reconciling podman / OKD / KVM / anything that can register. "Fleet" captures that neutrally; IoT stays acknowledged in docs as the first customer use case. Done now, while nothing is deployed. After a partner fleet lands, changing the CRD group alone is a multi-quarter migration. Scope (nothing left over): Paths + crates - iot/ → fleet/ - iot/iot-operator-v0 → fleet/harmony-fleet-operator - iot/iot-agent-v0 → fleet/harmony-fleet-agent - harmony/src/modules/iot → harmony/src/modules/fleet - ROADMAP/iot_platform → ROADMAP/fleet_platform - examples/iot_{vm_setup, load_test, nats_install} → examples/fleet_* - -v0 suffix dropped on the operator + agent crates (semver in Cargo.toml already tracks version) Rust identifiers - enum IotScore (podman score payload) → ReconcileScore - struct IotDeviceSetupScore/Config → FleetDeviceSetupScore/Config - InterpretName::IotDeviceSetup → InterpretName::FleetDeviceSetup - HarmonyIotPool → HarmonyFleetPool (libvirt pool) - HARMONY_IOT_POOL_NAME (default "harmony-iot") → HARMONY_FLEET_POOL_NAME ("harmony-fleet") - IotSshKeypair → FleetSshKeypair - ensure_iot_ssh_keypair / ensure_harmony_iot_pool / check_iot_smoke_preflight_for_arch → fleet-prefixed variants Wire / config surfaces - CRD group `iot.nationtech.io` → `fleet.nationtech.io` - Finalizer `iot.nationtech.io/finalizer` → `fleet.nationtech.io/finalizer` - Shortnames iotdep/iotdevice → fleetdep/fleetdev - Env var IOT_AGENT_CONFIG → FLEET_AGENT_CONFIG - 
Env var IOT_VM_ADMIN_PASSWORD → FLEET_VM_ADMIN_PASSWORD - Binary /usr/local/bin/iot-agent → /usr/local/bin/fleet-agent - Systemd user `iot-agent` → `fleet-agent` - VM admin user `iot-admin` → `fleet-admin` Defaults - Namespaces iot-system/iot-demo/iot-load → fleet-system/fleet-demo/fleet-load - Helm release iot-nats → fleet-nats - Helm release iot-operator-v0 → harmony-fleet-operator - Container image localhost/iot-operator-v0:latest → localhost/harmony-fleet-operator:latest - On-disk cache $HARMONY_DATA_DIR/iot/ → $HARMONY_DATA_DIR/fleet/ (cloud-images, ssh keypairs, libvirt pool) What stayed - harmony-reconciler-contracts — already neutrally named - Wire types (DeviceInfo, DeploymentState, HeartbeatPayload, DeploymentName) — already neutral - KV buckets (device-info, device-state, device-heartbeat, desired-state) — already neutral - CRD kind names (Deployment, Device) — already neutral - NatsBasicScore / NatsHelmChartScore / HelmChart / etc. — framework-scope, unchanged Verification - cargo check --workspace --all-targets: clean - All harmony lib tests (114), fleet-operator (6), fleet-agent (7), harmony-reconciler-contracts (13): green - End-to-end load-test (20 devices / 3 CRs / 20s under fleet/scripts/load-test.sh): PASS. Image built as localhost/harmony-fleet-operator:latest, chart installed as release harmony-fleet-operator in namespace fleet-system, all CR aggregates correct. Zero stragglers: grep across the tree for \biot\b / IOT_ / \bIot[A-Z] returns empty (excluding docs explicitly talking about IoT as the first customer's domain). 
--- Cargo.lock | 118 +++++++++--------- Cargo.toml | 4 +- ROADMAP/12-code-review-april-2026.md | 4 +- .../arm_vm_plan.md | 14 +-- .../chapter_4_aggregation_scale.md | 4 +- .../context_conversation.md | 2 +- .../v0_1_plan.md | 20 +-- .../v0_walking_skeleton.md | 88 ++++++------- .../Cargo.toml | 6 +- .../src/main.rs | 18 +-- .../Cargo.toml | 4 +- .../src/main.rs | 12 +- .../Cargo.toml | 4 +- .../README.md | 12 +- .../src/main.rs | 47 +++---- examples/harmony_apply_deployment/Cargo.toml | 2 +- examples/harmony_apply_deployment/src/main.rs | 18 +-- .../harmony-fleet-agent}/Cargo.toml | 2 +- .../harmony-fleet-agent}/src/config.rs | 0 .../src/fleet_publisher.rs | 0 .../harmony-fleet-agent}/src/main.rs | 8 +- .../harmony-fleet-agent}/src/reconciler.rs | 6 +- .../harmony-fleet-operator}/Cargo.toml | 2 +- .../harmony-fleet-operator}/Dockerfile | 6 +- .../harmony-fleet-operator}/src/chart.rs | 22 ++-- .../harmony-fleet-operator}/src/controller.rs | 2 +- .../harmony-fleet-operator}/src/crd.rs | 10 +- .../src/device_reconciler.rs | 4 +- .../src/fleet_aggregator.rs | 2 +- .../harmony-fleet-operator}/src/install.rs | 2 +- .../harmony-fleet-operator}/src/lib.rs | 0 .../harmony-fleet-operator}/src/main.rs | 12 +- {iot => fleet}/scripts/load-test.sh | 72 +++++------ {iot => fleet}/scripts/smoke-a1.sh | 42 +++---- {iot => fleet}/scripts/smoke-a3-arm.sh | 8 +- {iot => fleet}/scripts/smoke-a3.sh | 20 +-- {iot => fleet}/scripts/smoke-a4.sh | 100 +++++++-------- harmony-reconciler-contracts/src/kv.rs | 2 +- harmony/src/domain/interpret/mod.rs | 4 +- .../src/domain/topology/host_configuration.rs | 2 +- harmony/src/modules/application/helm/mod.rs | 16 +-- harmony/src/modules/{iot => fleet}/assets.rs | 26 ++-- .../modules/{iot => fleet}/libvirt_pool.rs | 22 ++-- harmony/src/modules/fleet/mod.rs | 40 ++++++ .../src/modules/{iot => fleet}/preflight.rs | 10 +- .../src/modules/{iot => fleet}/setup_score.rs | 82 ++++++------ .../src/modules/{iot => fleet}/vm_score.rs | 0 
harmony/src/modules/iot/mod.rs | 33 ----- harmony/src/modules/k8s/bare_topology.rs | 2 +- harmony/src/modules/kvm/cloudinit.rs | 6 +- harmony/src/modules/kvm/topology.rs | 2 +- .../src/modules/linux/ansible_configurator.rs | 4 +- harmony/src/modules/mod.rs | 2 +- harmony/src/modules/nats/helm_chart.rs | 6 +- harmony/src/modules/nats/score_nats_basic.rs | 6 +- harmony/src/modules/podman/mod.rs | 2 +- harmony/src/modules/podman/score.rs | 14 +-- 57 files changed, 499 insertions(+), 479 deletions(-) rename ROADMAP/{iot_platform => fleet_platform}/arm_vm_plan.md (94%) rename ROADMAP/{iot_platform => fleet_platform}/chapter_4_aggregation_scale.md (99%) rename ROADMAP/{iot_platform => fleet_platform}/context_conversation.md (99%) rename ROADMAP/{iot_platform => fleet_platform}/v0_1_plan.md (96%) rename ROADMAP/{iot_platform => fleet_platform}/v0_walking_skeleton.md (91%) rename examples/{iot_load_test => fleet_load_test}/Cargo.toml (81%) rename examples/{iot_load_test => fleet_load_test}/src/main.rs (97%) rename examples/{iot_nats_install => fleet_nats_install}/Cargo.toml (79%) rename examples/{iot_nats_install => fleet_nats_install}/src/main.rs (88%) rename examples/{iot_vm_setup => fleet_vm_setup}/Cargo.toml (86%) rename examples/{iot_vm_setup => fleet_vm_setup}/README.md (84%) rename examples/{iot_vm_setup => fleet_vm_setup}/src/main.rs (87%) rename {iot/iot-agent-v0 => fleet/harmony-fleet-agent}/Cargo.toml (91%) rename {iot/iot-agent-v0 => fleet/harmony-fleet-agent}/src/config.rs (100%) rename {iot/iot-agent-v0 => fleet/harmony-fleet-agent}/src/fleet_publisher.rs (100%) rename {iot/iot-agent-v0 => fleet/harmony-fleet-agent}/src/main.rs (96%) rename {iot/iot-agent-v0 => fleet/harmony-fleet-agent}/src/reconciler.rs (98%) rename {iot/iot-operator-v0 => fleet/harmony-fleet-operator}/Cargo.toml (91%) rename {iot/iot-operator-v0 => fleet/harmony-fleet-operator}/Dockerfile (84%) rename {iot/iot-operator-v0 => fleet/harmony-fleet-operator}/src/chart.rs (94%) rename 
{iot/iot-operator-v0 => fleet/harmony-fleet-operator}/src/controller.rs (98%) rename {iot/iot-operator-v0 => fleet/harmony-fleet-operator}/src/crd.rs (96%) rename {iot/iot-operator-v0 => fleet/harmony-fleet-operator}/src/device_reconciler.rs (97%) rename {iot/iot-operator-v0 => fleet/harmony-fleet-operator}/src/fleet_aggregator.rs (99%) rename {iot/iot-operator-v0 => fleet/harmony-fleet-operator}/src/install.rs (95%) rename {iot/iot-operator-v0 => fleet/harmony-fleet-operator}/src/lib.rs (100%) rename {iot/iot-operator-v0 => fleet/harmony-fleet-operator}/src/main.rs (91%) rename {iot => fleet}/scripts/load-test.sh (80%) rename {iot => fleet}/scripts/smoke-a1.sh (89%) rename {iot => fleet}/scripts/smoke-a3-arm.sh (69%) rename {iot => fleet}/scripts/smoke-a3.sh (92%) rename {iot => fleet}/scripts/smoke-a4.sh (84%) rename harmony/src/modules/{iot => fleet}/assets.rs (93%) rename harmony/src/modules/{iot => fleet}/libvirt_pool.rs (86%) create mode 100644 harmony/src/modules/fleet/mod.rs rename harmony/src/modules/{iot => fleet}/preflight.rs (95%) rename harmony/src/modules/{iot => fleet}/setup_score.rs (82%) rename harmony/src/modules/{iot => fleet}/vm_score.rs (100%) delete mode 100644 harmony/src/modules/iot/mod.rs diff --git a/Cargo.lock b/Cargo.lock index b6f17d45..da364e76 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3166,29 +3166,15 @@ dependencies = [ ] [[package]] -name = "example_harmony_apply_deployment" -version = "0.1.0" -dependencies = [ - "anyhow", - "clap", - "harmony", - "iot-operator-v0", - "k8s-openapi", - "kube", - "serde_json", - "tokio", -] - -[[package]] -name = "example_iot_load_test" +name = "example_fleet_load_test" version = "0.1.0" dependencies = [ "anyhow", "async-nats", "chrono", "clap", + "harmony-fleet-operator", "harmony-reconciler-contracts", - "iot-operator-v0", "k8s-openapi", "kube", "rand 0.9.2", @@ -3199,7 +3185,7 @@ dependencies = [ ] [[package]] -name = "example_iot_nats_install" +name = "example_fleet_nats_install" version = 
"0.1.0" dependencies = [ "anyhow", @@ -3209,7 +3195,7 @@ dependencies = [ ] [[package]] -name = "example_iot_vm_setup" +name = "example_fleet_vm_setup" version = "0.1.0" dependencies = [ "anyhow", @@ -3221,6 +3207,20 @@ dependencies = [ "tokio", ] +[[package]] +name = "example_harmony_apply_deployment" +version = "0.1.0" +dependencies = [ + "anyhow", + "clap", + "harmony", + "harmony-fleet-operator", + "k8s-openapi", + "kube", + "serde_json", + "tokio", +] + [[package]] name = "example_linux_vm" version = "0.1.0" @@ -3733,6 +3733,47 @@ dependencies = [ "walkdir", ] +[[package]] +name = "harmony-fleet-agent" +version = "0.1.0" +dependencies = [ + "anyhow", + "async-nats", + "chrono", + "clap", + "futures-util", + "harmony", + "harmony-reconciler-contracts", + "serde", + "serde_json", + "tokio", + "toml", + "tracing", + "tracing-subscriber", +] + +[[package]] +name = "harmony-fleet-operator" +version = "0.1.0" +dependencies = [ + "anyhow", + "async-nats", + "chrono", + "clap", + "futures-util", + "harmony", + "harmony-reconciler-contracts", + "k8s-openapi", + "kube", + "schemars 0.8.22", + "serde", + "serde_json", + "thiserror 2.0.18", + "tokio", + "tracing", + "tracing-subscriber", +] + [[package]] name = "harmony-k8s" version = "0.1.0" @@ -4755,47 +4796,6 @@ dependencies = [ "thiserror 1.0.69", ] -[[package]] -name = "iot-agent-v0" -version = "0.1.0" -dependencies = [ - "anyhow", - "async-nats", - "chrono", - "clap", - "futures-util", - "harmony", - "harmony-reconciler-contracts", - "serde", - "serde_json", - "tokio", - "toml", - "tracing", - "tracing-subscriber", -] - -[[package]] -name = "iot-operator-v0" -version = "0.1.0" -dependencies = [ - "anyhow", - "async-nats", - "chrono", - "clap", - "futures-util", - "harmony", - "harmony-reconciler-contracts", - "k8s-openapi", - "kube", - "schemars 0.8.22", - "serde", - "serde_json", - "thiserror 2.0.18", - "tokio", - "tracing", - "tracing-subscriber", -] - [[package]] name = "ipnet" version = "2.12.0" diff --git 
a/Cargo.toml b/Cargo.toml index 53e2b62d..92182b4f 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -28,8 +28,8 @@ members = [ "harmony_node_readiness", "harmony-k8s", "harmony_assets", "opnsense-codegen", "opnsense-api", - "iot/iot-operator-v0", - "iot/iot-agent-v0", + "fleet/harmony-fleet-operator", + "fleet/harmony-fleet-agent", "harmony-reconciler-contracts", ] diff --git a/ROADMAP/12-code-review-april-2026.md b/ROADMAP/12-code-review-april-2026.md index 7986aa1e..cbaf7938 100644 --- a/ROADMAP/12-code-review-april-2026.md +++ b/ROADMAP/12-code-review-april-2026.md @@ -99,7 +99,7 @@ Replace `kubectl exec bao ...` shell commands in `openbao/setup.rs` with typed ` `K8sAnywhereTopology` and `HAClusterTopology` have accumulated opinions — cert-manager install, tenant manager setup, helm probes, TLS passthrough, SSO wiring — that make them unfit for narrow, ad-hoc Score execution. Calling `ensure_ready()` on `K8sAnywhereTopology` to apply a single CRD installs a full product stack as a side effect; that's the opposite of what "make me ready" should mean. -Concrete example: `iot/iot-operator-v0/src/install.rs` needed a topology that satisfies `K8sclient` for a single `K8sResourceScore::` apply. `K8sAnywhereTopology` was wrong (too heavy); `HAClusterTopology` was wrong (bare-metal). Work-around: a 30-line inline `InstallTopology` that wraps a pre-built `K8sClient` and has a noop `ensure_ready`. That file flags the architectural smell in its doc comment and points back to this entry. +Concrete example: `fleet/harmony-fleet-operator/src/install.rs` needed a topology that satisfies `K8sclient` for a single `K8sResourceScore::` apply. `K8sAnywhereTopology` was wrong (too heavy); `HAClusterTopology` was wrong (bare-metal). Work-around: a 30-line inline `InstallTopology` that wraps a pre-built `K8sClient` and has a noop `ensure_ready`. That file flags the architectural smell in its doc comment and points back to this entry. 
If every narrow Score ends up vendoring its own ad-hoc topology, we get exactly the proliferation this entry is meant to prevent. @@ -113,4 +113,4 @@ If every narrow Score ends up vendoring its own ad-hoc topology, we get exactly - Adding a new ad-hoc Score against k8s doesn't require inventing a new topology. - `K8sAnywhereTopology` stops being the default reach and starts being a deliberate product choice. -- Test: can we delete the inline `InstallTopology` in `iot/iot-operator-v0/src/install.rs` by replacing it with a one-liner `K8sBareTopology::from_env()`? That's the smoke test for "we fixed the proliferation." +- Test: can we delete the inline `InstallTopology` in `fleet/harmony-fleet-operator/src/install.rs` by replacing it with a one-liner `K8sBareTopology::from_env()`? That's the smoke test for "we fixed the proliferation." diff --git a/ROADMAP/iot_platform/arm_vm_plan.md b/ROADMAP/fleet_platform/arm_vm_plan.md similarity index 94% rename from ROADMAP/iot_platform/arm_vm_plan.md rename to ROADMAP/fleet_platform/arm_vm_plan.md index b4118cc8..653f2b67 100644 --- a/ROADMAP/iot_platform/arm_vm_plan.md +++ b/ROADMAP/fleet_platform/arm_vm_plan.md @@ -15,7 +15,7 @@ for CI) so: - the VM runs the same Ubuntu 24.04 arm64 cloud image customers will eventually flash onto a Pi; -- the iot-agent shipped to it is a real aarch64 binary produced by +- the fleet-agent shipped to it is a real aarch64 binary produced by our existing cross-compile toolchain; - apt/systemd/podman on the VM are the actual arm64 packages; and - smoke-a3 exercises all of it end-to-end. @@ -126,11 +126,11 @@ In `modules/iot/preflight.rs`, when the caller asks for arm64 VMs ### 6. Cross-compiled agent smoke-a3.sh phase 2 currently does native `cargo build --release --p iot-agent-v0`. When arch=aarch64: +-p harmony-fleet-agent`. 
When arch=aarch64: - `cargo build --release --target aarch64-unknown-linux-gnu - -p iot-agent-v0` + -p harmony-fleet-agent` - AGENT_BINARY points at `target/aarch64-unknown-linux-gnu/release/ - iot-agent-v0` + harmony-fleet-agent` Opt-in via `--arch aarch64` CLI flag on both `example_iot_vm_setup` and `smoke-a3.sh`. Default stays x86_64. @@ -152,9 +152,9 @@ arch=aarch64. Smoke-a3's phase 5 reboot gate also lengthens. | `harmony/src/modules/kvm/topology.rs` | Copy per-VM NVRAM template on ensure_vm; thread arch through to XML. | | `harmony/src/modules/iot/assets.rs` | `ensure_ubuntu_2404_cloud_image_for_arch(arch)`; pin arm64 URL+sha256. | | `harmony/src/modules/iot/preflight.rs` | Arch-aware preflight; qemu-system-aarch64 + firmware + qemu-version. | -| `examples/iot_vm_setup/src/main.rs` | `--arch x86_64|aarch64` CLI flag; resolve matching cloud image. | -| `iot/scripts/smoke-a3.sh` | Arch flag plumbing; cross-compile; extended timeouts; preflight. | -| `iot/scripts/smoke-a3-arm.sh` (new) | Dedicated arm smoke as the CI hook — `ARCH=aarch64 ./smoke-a3.sh`. | +| `examples/fleet_vm_setup/src/main.rs` | `--arch x86_64|aarch64` CLI flag; resolve matching cloud image. | +| `fleet/scripts/smoke-a3.sh` | Arch flag plumbing; cross-compile; extended timeouts; preflight. | +| `fleet/scripts/smoke-a3-arm.sh` (new) | Dedicated arm smoke as the CI hook — `ARCH=aarch64 ./smoke-a3.sh`. | ## Out of scope diff --git a/ROADMAP/iot_platform/chapter_4_aggregation_scale.md b/ROADMAP/fleet_platform/chapter_4_aggregation_scale.md similarity index 99% rename from ROADMAP/iot_platform/chapter_4_aggregation_scale.md rename to ROADMAP/fleet_platform/chapter_4_aggregation_scale.md index 5657a1bf..6647c735 100644 --- a/ROADMAP/iot_platform/chapter_4_aggregation_scale.md +++ b/ROADMAP/fleet_platform/chapter_4_aggregation_scale.md @@ -42,8 +42,8 @@ > **Where to look now:** > > - Shipped design: `v0_1_plan.md` Chapter 2 (marked SHIPPED 2026-04-23). 
-> - Source of truth: `iot/iot-operator-v0/src/fleet_aggregator.rs`, -> `iot/iot-operator-v0/src/device_reconciler.rs`, +> - Source of truth: `fleet/harmony-fleet-operator/src/fleet_aggregator.rs`, +> `fleet/harmony-fleet-operator/src/device_reconciler.rs`, > `harmony-reconciler-contracts/src/{fleet,kv,status}.rs`. > > Everything below is preserved verbatim as the decision trail of a diff --git a/ROADMAP/iot_platform/context_conversation.md b/ROADMAP/fleet_platform/context_conversation.md similarity index 99% rename from ROADMAP/iot_platform/context_conversation.md rename to ROADMAP/fleet_platform/context_conversation.md index 8c8f588b..2a44d003 100644 --- a/ROADMAP/iot_platform/context_conversation.md +++ b/ROADMAP/fleet_platform/context_conversation.md @@ -183,7 +183,7 @@ Drawing these out as they're load-bearing for judgment calls: 8. **The partner relationship is strategic.** Tuesday demo conversation is half the Tuesday deliverable. Framing the v0.1/v0.2/v0.3 roadmap to them matters as much as the running code. -9. **End-customer debuggability is a UX constraint.** Mechanical/electrical/chemical engineers will touch these devices. `systemctl status iot-agent` must tell them what's happening. `journalctl -u iot-agent` must be parseable by humans. Error messages must be understandable without Kubernetes knowledge. +9. **End-customer debuggability is a UX constraint.** Mechanical/electrical/chemical engineers will touch these devices. `systemctl status fleet-agent` must tell them what's happening. `journalctl -u fleet-agent` must be parseable by humans. Error messages must be understandable without Kubernetes knowledge. 10. **NATS is the long-term architectural commitment.** Everything on NATS — not as a queue, as a coordination fabric. The "decentralized cluster management" future depends on this choice. Implementation decisions that weaken this (e.g., "let's just put a database in the middle") should be pushed back on. 
diff --git a/ROADMAP/iot_platform/v0_1_plan.md b/ROADMAP/fleet_platform/v0_1_plan.md similarity index 96% rename from ROADMAP/iot_platform/v0_1_plan.md rename to ROADMAP/fleet_platform/v0_1_plan.md index a0aa67f5..5bf663fc 100644 --- a/ROADMAP/iot_platform/v0_1_plan.md +++ b/ROADMAP/fleet_platform/v0_1_plan.md @@ -11,7 +11,7 @@ five chapters in execution order. - CRD → operator → NATS JetStream KV write path (`smoke-a1.sh`). - Agent watches KV, reconciles podman containers (`smoke-a1.sh`). -- VM-as-device provisioning: cloud-init + iot-agent install + NATS +- VM-as-device provisioning: cloud-init + fleet-agent install + NATS smoke (`smoke-a3.sh`), x86_64 (native KVM) and aarch64 (TCG). - Power-cycle / reboot resilience (`smoke-a3.sh` phase 5). - aarch64 cross-compile of the agent (no Harmony modules need to @@ -53,7 +53,7 @@ have to re-litigate): serialized score payloads; drift triggers re-reconcile. `PodmanTopology::ensure_service_running` removes then re-creates containers on spec drift. No "stale + new" window. -- **The polymorphism stays.** `IotScore` is an externally-tagged +- **The polymorphism stays.** `ReconcileScore` is an externally-tagged enum; adding `OkdApplyV0` later is additive. **Surprises since v0 started** (for context, none architectural): @@ -141,7 +141,7 @@ the workstation. ### Command menu at hand-off -- `kubectl get deployments.iot.nationtech.io -A -w` — watch CR +- `kubectl get deployments.fleet.nationtech.io -A -w` — watch CR reconcile reactively. - `cargo run -q -p example_harmony_apply_deployment -- --image nginx:latest --target-device $TARGET_DEVICE` — apply an nginx @@ -149,7 +149,7 @@ the workstation. - `cargo run -q -p example_harmony_apply_deployment -- --print --image nginx:latest --target-device $TARGET_DEVICE | kubectl apply -f -` — same thing, through kubectl. -- `ssh -i $SSH_KEY iot-admin@$VM_IP` — connect to the VM. +- `ssh -i $SSH_KEY fleet-admin@$VM_IP` — connect to the VM. 
- `virsh console $VM_NAME --force` — serial console alternative. - `podman --url unix://$VM_IP:... ps` or ssh + `podman ps` — list containers on the VM from the workstation. @@ -173,10 +173,10 @@ the workstation. - **NEW** `examples/harmony_apply_deployment/Cargo.toml` + `src/main.rs` — typed applier. -- **NEW** `iot/scripts/smoke-a4.sh`. +- **NEW** `fleet/scripts/smoke-a4.sh`. - **NO yaml fixtures.** Rust CLI flags cover the shape. - Optional: factor shared smoke phases (NATS up, k3d up, operator - spawn, VM provision) into `iot/scripts/lib/` if the duplication + spawn, VM provision) into `fleet/scripts/lib/` if the duplication across a1/a3/a4 becomes obvious. Don't force it. ### NATS exposure — implementation-time notes @@ -190,9 +190,9 @@ the workstation. ### Verification -- Fresh host: `ARCH=aarch64 ./iot/scripts/smoke-a4.sh` completes +- Fresh host: `ARCH=aarch64 ./fleet/scripts/smoke-a4.sh` completes in 8-15 min, prints the command menu. -- `ARCH=aarch64 ./iot/scripts/smoke-a4.sh --auto` PASSes +- `ARCH=aarch64 ./fleet/scripts/smoke-a4.sh --auto` PASSes end-to-end including upgrade id-change assertion. - x86_64 (`ARCH=x86-64`) completes in 2-5 min. @@ -262,7 +262,7 @@ out of this chapter; follow-up item). ### Scale proof -`iot/scripts/load-test.sh` + `examples/iot_load_test` simulate N +`fleet/scripts/load-test.sh` + `examples/fleet_load_test` simulate N devices across M Deployments, driving `device-state` KV updates at a configurable cadence while the full operator stack runs against a local k3d apiserver. Verified: @@ -307,7 +307,7 @@ concern downstream. ### Sketch -- Chart location: `iot/iot-operator-v0/chart/` (or sibling repo — +- Chart location: `fleet/harmony-fleet-operator/chart/` (or sibling repo — defer decision to implementation time). - Templates: Namespace, SA, ClusterRole, ClusterRoleBinding, Deployment (operator pod), CRD. 
diff --git a/ROADMAP/iot_platform/v0_walking_skeleton.md b/ROADMAP/fleet_platform/v0_walking_skeleton.md similarity index 91% rename from ROADMAP/iot_platform/v0_walking_skeleton.md rename to ROADMAP/fleet_platform/v0_walking_skeleton.md index 110ff09e..1380f0c3 100644 --- a/ROADMAP/iot_platform/v0_walking_skeleton.md +++ b/ROADMAP/fleet_platform/v0_walking_skeleton.md @@ -13,7 +13,7 @@ > than kubectl-apply-a-yaml. See smoke-a1, smoke-a3, smoke-a3-arm for the > executable proof. > -> **Forward plan lives in `ROADMAP/iot_platform/v0_1_plan.md`** — five +> **Forward plan lives in `ROADMAP/fleet_platform/v0_1_plan.md`** — five > chapters covering hands-on demo, status reflect-back, helm chart, SSO/ > secrets, and frontend. When a chapter grows scope it may move into its > own `chapter_N_*.md`. @@ -134,11 +134,11 @@ iot-workload-hello/ `deployment.yaml`: ```yaml -apiVersion: iot.nationtech.io/v1alpha1 +apiVersion: fleet.nationtech.io/v1alpha1 kind: Deployment metadata: name: hello-world - namespace: iot-demo + namespace: fleet-demo spec: targetDevices: - pi-demo-01 @@ -156,10 +156,10 @@ spec: ### 5.2 Central cluster setup Existing k8s cluster. Namespaces: -- `iot-system` — operator, NATS (single-node for v0) -- `iot-demo` — `Deployment` CRs +- `fleet-system` — operator, NATS (single-node for v0) +- `fleet-demo` — `Deployment` CRs -ArgoCD application pre-configured to sync `iot-workload-hello` repo into `iot-demo` namespace. +ArgoCD application pre-configured to sync `iot-workload-hello` repo into `fleet-demo` namespace. ### 5.3 Raspberry Pi 5 setup @@ -169,9 +169,9 @@ Base OS: **Ubuntu Server 24.04 LTS ARM64** (ships Podman 4.9 in repos). 
Raspberr Installed: - `podman` (4.4+, ARM64) with `systemctl --user enable --now podman.socket` (required for `podman-api` crate) -- `iot-agent` binary (cross-compiled to aarch64 via existing Harmony aarch64 toolchain) -- `/etc/iot-agent/config.toml` with NATS URL + shared credential -- systemd unit `iot-agent.service` +- `fleet-agent` binary (cross-compiled to aarch64 via existing Harmony aarch64 toolchain) +- `/etc/fleet-agent/config.toml` with NATS URL + shared credential +- systemd unit `fleet-agent.service` ### 5.4 What the code does @@ -245,7 +245,7 @@ trait CredentialSource: Send + Sync { } ``` -v0: `TomlFileCredentialSource` reading `/etc/iot-agent/config.toml`. +v0: `TomlFileCredentialSource` reading `/etc/fleet-agent/config.toml`. v0.2: `ZitadelBootstrappedCredentialSource` — same trait, swapped via config. 30 minutes Friday. Saves 3 hours of refactor in v0.2. @@ -276,7 +276,7 @@ device_id = "pi-demo-01" [credentials] type = "toml-shared" -nats_user = "iot-agent" +nats_user = "fleet-agent" nats_pass = "dev-shared-password" [nats] @@ -324,9 +324,9 @@ Document findings in the Friday night log regardless of outcome. v0.1 work inclu - Write 1-page `v0-demo.md`: demo script, success criteria, fallback plan. - Decide Pi OS: Ubuntu 24.04 ARM64 (default) vs Raspberry Pi OS 64-bit. Don't agonize beyond 10 min. -*Dispatch agent A1 (operator):* "Create Rust crate `iot/iot-operator-v0/` using `kube-rs` implementing a Deployment CRD controller that writes to NATS KV. Exact spec in task card §9.A1. Self-verify: `kubectl apply` → `nats kv get` shows entry. Under 300 lines main.rs. No auth." +*Dispatch agent A1 (operator):* "Create Rust crate `fleet/harmony-fleet-operator/` using `kube-rs` implementing a Deployment CRD controller that writes to NATS KV. Exact spec in task card §9.A1. Self-verify: `kubectl apply` → `nats kv get` shows entry. Under 300 lines main.rs. No auth." 
-*Dispatch agent A2 (Pi provisioning, fallback-aware):* "Attempt Harmony-based Raspberry Pi 5 provisioning Score. Target: fresh Pi flashed via SD card, boots, static IP, Ubuntu 24.04 ARM64 with Podman 4.9, podman user socket enabled, user `iot-agent` with linger enabled, `/etc/iot-agent/` ready. If Harmony doesn't have Pi primitives, document the gap and produce a manual provisioning runbook instead (rpi-imager + cloud-init). Hard time limit: 90 min. Self-verify: `ssh iot-agent@ 'podman --version'` returns 4.4+." +*Dispatch agent A2 (Pi provisioning, fallback-aware):* "Attempt Harmony-based Raspberry Pi 5 provisioning Score. Target: fresh Pi flashed via SD card, boots, static IP, Ubuntu 24.04 ARM64 with Podman 4.9, podman user socket enabled, user `fleet-agent` with linger enabled, `/etc/fleet-agent/` ready. If Harmony doesn't have Pi primitives, document the gap and produce a manual provisioning runbook instead (rpi-imager + cloud-init). Hard time limit: 90 min. Self-verify: `ssh fleet-agent@ 'podman --version'` returns 4.4+." **Hour 2 — your work: agent crate** @@ -342,8 +342,8 @@ Crate in `harmony/src/modules/iot_agent/` or a new binary in the Harmony workspa **Hour 3 — local integration** -- Review agent A1's operator. Deploy to central cluster `iot-system` namespace. -- Deploy NATS to `iot-system` if not already (single-node JetStream). +- Review agent A1's operator. Deploy to central cluster `fleet-system` namespace. +- Deploy NATS to `fleet-system` if not already (single-node JetStream). - Review agent A2's Pi provisioning. If Harmony Score succeeded, note for demo; if manual runbook, accept and move on. - Agent compiles on laptop. Connects to central NATS. @@ -398,7 +398,7 @@ Named subsection: the most important class of failures for Pi-in-field deploymen **Hour 3-4 — demo polish:** - `./demo.sh` is one command, no manual steps. - Output is clean: clear PASS/FAIL with per-phase timings. -- `kubectl get deployments.iot.nationtech.io` output is readable. 
+`kubectl get deployments.fleet.nationtech.io` output is readable. **Hour 5-6 — partner-facing polish:** - README in workload repo: 4 lines. "Edit this, git push, done." @@ -439,8 +439,8 @@ Each card is self-contained. Hand the entire card to an agent. # Note: harmony is built with --no-default-features to exclude KVM (libvirt cannot cross-compile to aarch64). # The 5 KVM examples (kvm_vm_examples, kvm_okd_ha_cluster, opnsense_vm_integration, # opnsense_pair_integration, example_linux_vm) are x86_64-only by design. -cargo build --target x86_64-unknown-linux-gnu -p harmony -p harmony_agent -p iot-agent-v0 -p iot-operator-v0 -cargo build --target aarch64-unknown-linux-gnu -p harmony --no-default-features -p harmony_agent -p iot-agent-v0 -p iot-operator-v0 +cargo build --target x86_64-unknown-linux-gnu -p harmony -p harmony_agent -p harmony-fleet-agent -p harmony-fleet-operator +cargo build --target aarch64-unknown-linux-gnu -p harmony --no-default-features -p harmony_agent -p harmony-fleet-agent -p harmony-fleet-operator ``` All three must exit 0. Note: `cargo test --target aarch64-unknown-linux-gnu` cannot run on x86_64 (exec format error) — that's expected. Test execution is only for the host architecture via `./build/check.sh`. If any check fails, fix the issue before marking the task complete. Include the output in the PR description. @@ -449,11 +449,11 @@ All three must exit 0. Note: `cargo test --target aarch64-unknown-linux-gnu` can **Goal:** `kube-rs` operator that watches `Deployment` CRs and writes the Score to NATS KV. -**Deliverable:** Crate `iot/iot-operator-v0/`: +**Deliverable:** Crate `fleet/harmony-fleet-operator/`: - `Cargo.toml`: `kube`, `k8s-openapi`, `async-nats`, `serde`, `serde_yaml`, `serde_json`, `tokio`, `tracing`, `tracing-subscriber`, `anyhow`. - `src/main.rs` under 300 lines. - `deploy/operator.yaml` — Deployment, ServiceAccount, ClusterRole, ClusterRoleBinding. - `deploy/crd.yaml` — `Deployment` CRD for `iot.nationtech.io/v1alpha1`. 
+- `deploy/crd.yaml` — `Deployment` CRD for `fleet.nationtech.io/v1alpha1`. **Behavior:** 1. Connect to NATS on startup (`NATS_URL` env, no auth). @@ -480,7 +480,7 @@ status: **Self-verification:** ```bash -cd iot/iot-operator-v0 +cd fleet/harmony-fleet-operator cargo build && cargo clippy -- -D warnings # Test against k3d: @@ -492,7 +492,7 @@ OP_PID=$! sleep 3 kubectl apply -f - < 'podman --version' +ssh fleet-agent@ 'podman --version' # Must be 4.4+ (target 4.9+) -ssh iot-agent@ 'systemctl --user is-active podman.socket' +ssh fleet-agent@ 'systemctl --user is-active podman.socket' # Must print "active" -ssh iot-agent@ 'loginctl show-user iot-agent | grep Linger=yes' -ssh iot-agent@ 'uname -m' +ssh fleet-agent@ 'loginctl show-user fleet-agent | grep Linger=yes' +ssh fleet-agent@ 'uname -m' # Must print aarch64 ``` @@ -568,13 +568,13 @@ ssh iot-agent@ 'uname -m' **Prerequisites:** Agent binary exists (Sylvain writes Friday). -**Deliverable:** `iot/iot-agent-v0/scripts/install.sh`: +**Deliverable:** `iot/fleet-agent-v0/scripts/install.sh`: 1. Args: `--host `, `--device-id `, `--nats-url `, `--nats-user `, `--nats-pass

`. 2. Cross-builds for aarch64 using existing Harmony aarch64 toolchain. -3. `scp` binary to Pi, `sudo mv` to `/usr/local/bin/iot-agent`. -4. Templates `/etc/iot-agent/config.toml` from args. -5. Installs `/etc/systemd/system/iot-agent.service`. -6. `systemctl daemon-reload && systemctl enable --now iot-agent`. +3. `scp` binary to Pi, `sudo mv` to `/usr/local/bin/fleet-agent`. +4. Templates `/etc/fleet-agent/config.toml` from args. +5. Installs `/etc/systemd/system/fleet-agent.service`. +6. `systemctl daemon-reload && systemctl enable --now fleet-agent`. 7. Waits up to 15s for "connected to NATS" in journal. **systemd unit:** @@ -586,8 +586,8 @@ Wants=network-online.target [Service] Type=simple -User=iot-agent -ExecStart=/usr/local/bin/iot-agent +User=fleet-agent +ExecStart=/usr/local/bin/fleet-agent Restart=on-failure RestartSec=5 StandardOutput=journal @@ -602,9 +602,9 @@ WantedBy=multi-user.target ```bash ./install.sh --host --device-id pi-demo-01 \ --nats-url nats://central:4222 \ - --nats-user iot-agent --nats-pass dev-shared-password -ssh iot-agent@ 'sudo systemctl status iot-agent' # active (running) -ssh iot-agent@ 'sudo journalctl -u iot-agent --since "2 minutes ago"' | grep "connected to NATS" + --nats-user fleet-agent --nats-pass dev-shared-password +ssh fleet-agent@ 'sudo systemctl status fleet-agent' # active (running) +ssh fleet-agent@ 'sudo journalctl -u fleet-agent --since "2 minutes ago"' | grep "connected to NATS" ``` **Time limit:** 2 hours agent time. @@ -613,7 +613,7 @@ ssh iot-agent@ 'sudo journalctl -u iot-agent --since "2 minutes ago"' | g **Goal:** One command runs full demo flow. -**Deliverable:** `iot/scripts/demo.sh`: +**Deliverable:** `fleet/scripts/demo.sh`: 1. Verifies Pi reachable + agent running. 2. Applies `scripts/demo-deployment.yaml`. 3. Waits up to 120s for container on Pi (ssh + `podman ps`). 
@@ -624,7 +624,7 @@ ssh iot-agent@ 'sudo journalctl -u iot-agent --since "2 minutes ago"' | g **Self-verification:** ```bash -./iot/scripts/demo.sh +./fleet/scripts/demo.sh # Ends with "PASS", total < 5 min ``` diff --git a/examples/iot_load_test/Cargo.toml b/examples/fleet_load_test/Cargo.toml similarity index 81% rename from examples/iot_load_test/Cargo.toml rename to examples/fleet_load_test/Cargo.toml index e83db8da..7456f570 100644 --- a/examples/iot_load_test/Cargo.toml +++ b/examples/fleet_load_test/Cargo.toml @@ -1,16 +1,16 @@ [package] -name = "example_iot_load_test" +name = "example_fleet_load_test" version.workspace = true edition = "2024" license.workspace = true [[bin]] -name = "iot_load_test" +name = "fleet_load_test" path = "src/main.rs" [dependencies] harmony-reconciler-contracts = { path = "../../harmony-reconciler-contracts" } -iot-operator-v0 = { path = "../../iot/iot-operator-v0" } +harmony-fleet-operator = { path = "../../fleet/harmony-fleet-operator" } async-nats = { workspace = true } chrono = { workspace = true } kube = { workspace = true, features = ["runtime", "derive"] } diff --git a/examples/iot_load_test/src/main.rs b/examples/fleet_load_test/src/main.rs similarity index 97% rename from examples/iot_load_test/src/main.rs rename to examples/fleet_load_test/src/main.rs index b3e89d8f..0761f3dd 100644 --- a/examples/iot_load_test/src/main.rs +++ b/examples/fleet_load_test/src/main.rs @@ -13,13 +13,13 @@ //! - k8s cluster with the operator's CRD installed (KUBECONFIG) //! - the operator process running against the same NATS + cluster //! -//! The `iot/scripts/smoke-a4.sh` script brings all three up — pass +//! The `fleet/scripts/smoke-a4.sh` script brings all three up — pass //! `--hold` to leave them running, then run this binary. //! //! Typical invocation: //! -//! cargo run -q -p example_iot_load_test -- \ -//! --namespace iot-load \ +//! cargo run -q -p example_fleet_load_test -- \ +//! --namespace fleet-load \ //! 
--groups 55,5,5,5,5,5,5,5,5,5 \ //! --tick-ms 1000 \ //! --duration-s 60 @@ -28,12 +28,14 @@ use anyhow::{Context, Result}; use async_nats::jetstream::{self, kv}; use chrono::Utc; use clap::Parser; +use harmony_fleet_operator::crd::{ + Deployment, DeploymentSpec, Rollout, RolloutStrategy, ScorePayload, +}; use harmony_reconciler_contracts::{ BUCKET_DEVICE_HEARTBEAT, BUCKET_DEVICE_INFO, BUCKET_DEVICE_STATE, DeploymentName, DeploymentState, DeviceInfo, HeartbeatPayload, Id, Phase, device_heartbeat_key, device_info_key, device_state_key, }; -use iot_operator_v0::crd::{Deployment, DeploymentSpec, Rollout, RolloutStrategy, ScorePayload}; use k8s_openapi::api::core::v1::Namespace; use k8s_openapi::apimachinery::pkg::apis::meta::v1::LabelSelector; use kube::Client; @@ -47,7 +49,7 @@ use tokio::task::JoinSet; #[derive(Parser, Debug, Clone)] #[command( - name = "iot_load_test", + name = "fleet_load_test", about = "Synthetic load for the IoT operator's fleet_aggregator" )] struct Cli { @@ -57,7 +59,7 @@ struct Cli { /// k8s namespace for the load-test Deployment CRs. Created if /// missing. - #[arg(long, default_value = "iot-load")] + #[arg(long, default_value = "fleet-load")] namespace: String, /// Group shape — comma-separated device counts, one per CR. 
@@ -112,7 +114,7 @@ async fn main() -> Result<()> { shape = ?group_sizes, tick_ms = cli.tick_ms, duration_s = cli.duration_s, - "iot_load_test starting" + "fleet_load_test starting" ); // --- NATS setup ---------------------------------------------------------- @@ -361,7 +363,7 @@ async fn ensure_namespace(client: &Client, name: &str) -> Result<()> { } async fn apply_crs(api: &Api, plan: &Plan) -> Result<()> { - let params = PatchParams::apply("iot-load-test").force(); + let params = PatchParams::apply("fleet-load-test").force(); let started = Instant::now(); // Cap concurrency so we don't overwhelm the apiserver on large diff --git a/examples/iot_nats_install/Cargo.toml b/examples/fleet_nats_install/Cargo.toml similarity index 79% rename from examples/iot_nats_install/Cargo.toml rename to examples/fleet_nats_install/Cargo.toml index 428f62a6..8a5bfd4b 100644 --- a/examples/iot_nats_install/Cargo.toml +++ b/examples/fleet_nats_install/Cargo.toml @@ -1,11 +1,11 @@ [package] -name = "example_iot_nats_install" +name = "example_fleet_nats_install" version.workspace = true edition = "2024" license.workspace = true [[bin]] -name = "iot_nats_install" +name = "fleet_nats_install" path = "src/main.rs" [dependencies] diff --git a/examples/iot_nats_install/src/main.rs b/examples/fleet_nats_install/src/main.rs similarity index 88% rename from examples/iot_nats_install/src/main.rs rename to examples/fleet_nats_install/src/main.rs index 135dbb68..8270abca 100644 --- a/examples/iot_nats_install/src/main.rs +++ b/examples/fleet_nats_install/src/main.rs @@ -4,8 +4,8 @@ //! This binary is the glue between the smoke harness (`smoke-a4.sh`) //! and the framework Score. Typical usage from a demo script: //! -//! KUBECONFIG=$KUBECFG cargo run -q -p example_iot_nats_install \ -//! -- --namespace iot-system --name iot-nats --node-port 4222 +//! KUBECONFIG=$KUBECFG cargo run -q -p example_fleet_nats_install \ +//! -- --namespace fleet-system --name fleet-nats --node-port 4222 //! //! 
Behaviour: //! - Ensures the target namespace exists @@ -25,15 +25,15 @@ use harmony::score::Score; #[derive(Parser, Debug)] #[command( - name = "iot_nats_install", + name = "fleet_nats_install", about = "Install single-node NATS (JetStream) via NatsBasicScore" )] struct Cli { /// Target namespace. Created if missing. - #[arg(long, default_value = "iot-system")] + #[arg(long, default_value = "fleet-system")] namespace: String, /// Resource name for the NATS Deployment + Service. - #[arg(long, default_value = "iot-nats")] + #[arg(long, default_value = "fleet-nats")] name: String, /// Service exposure mode. `load-balancer` pairs with k3d's /// `-p PORT:PORT@loadbalancer` port mapping (direct service- @@ -62,7 +62,7 @@ enum ExposeMode { async fn main() -> Result<()> { let cli = Cli::parse(); - let topology = K8sBareTopology::from_kubeconfig("iot-nats-install") + let topology = K8sBareTopology::from_kubeconfig("fleet-nats-install") .await .map_err(|e| anyhow::anyhow!(e)) .context("building K8sBareTopology from KUBECONFIG")?; diff --git a/examples/iot_vm_setup/Cargo.toml b/examples/fleet_vm_setup/Cargo.toml similarity index 86% rename from examples/iot_vm_setup/Cargo.toml rename to examples/fleet_vm_setup/Cargo.toml index 7bc93e10..1f495e17 100644 --- a/examples/iot_vm_setup/Cargo.toml +++ b/examples/fleet_vm_setup/Cargo.toml @@ -1,11 +1,11 @@ [package] -name = "example_iot_vm_setup" +name = "example_fleet_vm_setup" version.workspace = true edition = "2024" license.workspace = true [[bin]] -name = "iot_vm_setup" +name = "fleet_vm_setup" path = "src/main.rs" [dependencies] diff --git a/examples/iot_vm_setup/README.md b/examples/fleet_vm_setup/README.md similarity index 84% rename from examples/iot_vm_setup/README.md rename to examples/fleet_vm_setup/README.md index ab44915f..a5b57087 100644 --- a/examples/iot_vm_setup/README.md +++ b/examples/fleet_vm_setup/README.md @@ -6,8 +6,8 @@ Harmony Scores in sequence: 1. 
**`KvmVmScore`** — provision a libvirt VM from an Ubuntu 24.04 cloud image with a cloud-init seed ISO that authorizes one SSH key. Returns the booted VM's IP. -2. **`IotDeviceSetupScore`** — SSH into the VM (via the Ansible-backed - `HostConfigurationProvider`) and install podman + the `iot-agent` +2. **`FleetDeviceSetupScore`** — SSH into the VM (via the Ansible-backed + `HostConfigurationProvider`) and install podman + the `fleet-agent` binary, drop the TOML config, bring up the systemd unit. After a successful run, the VM is a fleet member reporting to NATS under @@ -42,21 +42,21 @@ sudo virsh net-autostart default ## Run ```bash -cargo build -p iot-agent-v0 +cargo build -p harmony-fleet-agent -cargo run -p example_iot_vm_setup -- \ +cargo run -p example_fleet_vm_setup -- \ --base-image /var/tmp/harmony-iot-smoke/ubuntu-24.04-server-cloudimg-amd64.img \ --ssh-pubkey /var/tmp/harmony-iot-smoke/ssh/id_ed25519.pub \ --ssh-privkey /var/tmp/harmony-iot-smoke/ssh/id_ed25519 \ --work-dir /var/tmp/harmony-iot-smoke \ - --agent-binary target/debug/iot-agent-v0 \ + --agent-binary target/debug/harmony-fleet-agent \ --nats-url nats://192.168.122.1:4222 ``` ## Changing groups Re-running with a different `--group` rewrites -`/etc/iot-agent/config.toml` on the VM and restarts the agent. The VM +`/etc/fleet-agent/config.toml` on the VM and restarts the agent. The VM itself is untouched. ```bash @@ -65,5 +65,5 @@ cargo run -p example_iot_vm_setup -- ... --group group-b ## Full end-to-end via smoke test -See `iot/scripts/smoke-a3.sh` — stands up NATS in a podman container, +See `fleet/scripts/smoke-a3.sh` — stands up NATS in a podman container, runs this example, asserts the agent's status lands in NATS. diff --git a/examples/iot_vm_setup/src/main.rs b/examples/fleet_vm_setup/src/main.rs similarity index 87% rename from examples/iot_vm_setup/src/main.rs rename to examples/fleet_vm_setup/src/main.rs index d5499cd0..2610047f 100644 --- a/examples/iot_vm_setup/src/main.rs +++ b/examples/fleet_vm_setup/src/main.rs @@ -5,15 +5,15 @@ //!
capability. Here we satisfy it with `KvmVirtualMachineHost` //! (libvirt). Swapping to VMware/Proxmox/cloud would be a //! different topology injection with the same Score code. -//! 2. `IotDeviceSetupScore` — SSHes into the booted VM and installs -//! podman + iot-agent via the split Linux-host capabilities. +//! 2. `FleetDeviceSetupScore` — SSHes into the booted VM and installs +//! podman + fleet-agent via the split Linux-host capabilities. use anyhow::{Context, Result}; use clap::Parser; use harmony::inventory::Inventory; -use harmony::modules::iot::{ - IotDeviceSetupConfig, IotDeviceSetupScore, ProvisionVmScore, - check_iot_smoke_preflight_for_arch, ensure_iot_ssh_keypair, +use harmony::modules::fleet::{ + FleetDeviceSetupConfig, FleetDeviceSetupScore, ProvisionVmScore, + check_fleet_smoke_preflight_for_arch, ensure_fleet_ssh_keypair, }; use harmony::modules::kvm::KvmVirtualMachineHost; use harmony::modules::kvm::config::init_executor; @@ -42,7 +42,7 @@ impl From for VmArchitecture { #[derive(Parser, Debug)] #[command( - name = "iot_vm_setup", + name = "fleet_vm_setup", about = "Provision one VM + onboard it into the IoT fleet" )] struct Cli { @@ -51,7 +51,7 @@ struct Cli { #[arg(long, value_enum, default_value_t = CliArch::X86_64)] arch: CliArch, /// libvirt domain name for the VM. - #[arg(long, default_value = "iot-vm-01")] + #[arg(long, default_value = "fleet-vm-01")] vm_name: String, /// Device id the agent will announce to NATS. Defaults to a /// fresh `Id` (hex timestamp + random suffix). @@ -69,16 +69,16 @@ struct Cli { #[arg(long, default_value = "default")] network: String, /// Admin username created on first boot. - #[arg(long, default_value = "iot-admin")] + #[arg(long, default_value = "fleet-admin")] admin_user: String, /// Optional plaintext password for the admin user. Enables SSH /// password auth on the guest — intended for interactive /// debugging / reliability-testing sessions where the operator /// wants to break things on purpose. 
Leave unset for key-only /// auth (production default). - #[arg(long, env = "IOT_VM_ADMIN_PASSWORD")] + #[arg(long, env = "FLEET_VM_ADMIN_PASSWORD")] admin_password: Option, - /// Path to the cross-compiled iot-agent binary. + /// Path to the cross-compiled fleet-agent binary. /// Required unless `--bootstrap-only` is set. #[arg(long)] agent_binary: Option, @@ -111,7 +111,7 @@ async fn main() -> Result<()> { let cli = Cli::parse(); let arch: VmArchitecture = cli.arch.into(); - check_iot_smoke_preflight_for_arch(arch) + check_fleet_smoke_preflight_for_arch(arch) .await .map_err(|e| anyhow::anyhow!("{e}"))?; @@ -119,13 +119,13 @@ async fn main() -> Result<()> { harmony::modules::linux::ensure_ansible_venv() .await .map_err(|e| anyhow::anyhow!("ansible venv: {e}"))?; - harmony::modules::iot::ensure_ubuntu_2404_cloud_image_for_arch(arch) + harmony::modules::fleet::ensure_ubuntu_2404_cloud_image_for_arch(arch) .await .map_err(|e| anyhow::anyhow!("cloud image: {e}"))?; - ensure_iot_ssh_keypair() + ensure_fleet_ssh_keypair() .await .map_err(|e| anyhow::anyhow!("ssh keypair: {e}"))?; - harmony::modules::iot::ensure_harmony_iot_pool() + harmony::modules::fleet::ensure_harmony_fleet_pool() .await .map_err(|e| anyhow::anyhow!("libvirt pool: {e}"))?; println!("bootstrap complete"); @@ -133,16 +133,16 @@ async fn main() -> Result<()> { } // --- Step 1: provision the VM --- - let base_image = harmony::modules::iot::ensure_ubuntu_2404_cloud_image_for_arch(arch) + let base_image = harmony::modules::fleet::ensure_ubuntu_2404_cloud_image_for_arch(arch) .await .map_err(|e| anyhow::anyhow!("cloud image: {e}"))?; - let pool = harmony::modules::iot::ensure_harmony_iot_pool() + let pool = harmony::modules::fleet::ensure_harmony_fleet_pool() .await .map_err(|e| anyhow::anyhow!("libvirt pool: {e}"))?; - let ssh = ensure_iot_ssh_keypair() + let ssh = ensure_fleet_ssh_keypair() .await .map_err(|e| anyhow::anyhow!("ssh keypair: {e}"))?; - let authorized_key = 
harmony::modules::iot::read_public_key(&ssh) + let authorized_key = harmony::modules::fleet::read_public_key(&ssh) .await .map_err(|e| anyhow::anyhow!("read ssh pubkey: {e}"))?; @@ -182,7 +182,7 @@ async fn main() -> Result<()> { let agent_binary = cli .agent_binary .clone() - .context("--agent-binary is required (e.g. target/release/iot-agent-v0)")?; + .context("--agent-binary is required (e.g. target/release/harmony-fleet-agent)")?; let device_id = cli .device_id .clone() @@ -206,7 +206,7 @@ async fn main() -> Result<()> { .collect::>() .join(","); - let setup_score = IotDeviceSetupScore::new(IotDeviceSetupConfig { + let setup_score = FleetDeviceSetupScore::new(FleetDeviceSetupConfig { device_id: device_id.clone(), labels, nats_urls: vec![cli.nats_url.clone()], @@ -262,14 +262,17 @@ async fn main() -> Result<()> { anyhow::bail!("ProvisionVmScore finished without reporting an IP: {outcome:?}") } -async fn run_setup_score(score: &IotDeviceSetupScore, topology: &LinuxHostTopology) -> Result<()> { +async fn run_setup_score( + score: &FleetDeviceSetupScore, + topology: &LinuxHostTopology, +) -> Result<()> { use harmony::score::Score; let inventory = Inventory::empty(); let interpret = Score::::create_interpret(score); let outcome = interpret .execute(&inventory, topology) .await - .map_err(|e| anyhow::anyhow!("IotDeviceSetupScore execute: {e}"))?; + .map_err(|e| anyhow::anyhow!("FleetDeviceSetupScore execute: {e}"))?; println!("setup: {} ({:?})", outcome.message, outcome.details); Ok(()) } diff --git a/examples/harmony_apply_deployment/Cargo.toml b/examples/harmony_apply_deployment/Cargo.toml index 5fa20e32..d0736fe2 100644 --- a/examples/harmony_apply_deployment/Cargo.toml +++ b/examples/harmony_apply_deployment/Cargo.toml @@ -10,7 +10,7 @@ path = "src/main.rs" [dependencies] harmony = { path = "../../harmony", default-features = false, features = ["podman"] } -iot-operator-v0 = { path = "../../iot/iot-operator-v0" } +harmony-fleet-operator = { path = 
"../../fleet/harmony-fleet-operator" } kube = { workspace = true, features = ["runtime", "derive"] } k8s-openapi = { workspace = true } serde_json.workspace = true diff --git a/examples/harmony_apply_deployment/src/main.rs b/examples/harmony_apply_deployment/src/main.rs index bdd0b3aa..904e74be 100644 --- a/examples/harmony_apply_deployment/src/main.rs +++ b/examples/harmony_apply_deployment/src/main.rs @@ -12,7 +12,7 @@ //! (not `iot_`-anything), in line with the review call to position //! the operator as a generic fleet/reconcile tool. //! -//! The CRD types live in `iot_operator_v0::crd`; the score types +//! The CRD types live in `harmony_fleet_operator::crd`; the score types //! live in `harmony::modules::podman` (PodmanV0 being the first //! reconciler variant — future variants drop in alongside). //! @@ -20,17 +20,17 @@ //! //! # apply an nginx deployment //! cargo run -q -p example_harmony_apply_deployment -- \ -//! --target-device iot-smoke-vm-arm \ +//! --target-device fleet-smoke-vm-arm \ //! --image nginx:latest //! //! # print the CR JSON (lets the user kubectl-apply it manually) //! cargo run -q -p example_harmony_apply_deployment -- \ -//! --target-device iot-smoke-vm-arm \ +//! --target-device fleet-smoke-vm-arm \ //! --image nginx:latest --print | kubectl apply -f - //! //! # upgrade the same deployment to a newer image //! cargo run -q -p example_harmony_apply_deployment -- \ -//! --target-device iot-smoke-vm-arm \ +//! --target-device fleet-smoke-vm-arm \ //! --image nginx:1.26 //! //! 
# delete the deployment @@ -39,7 +39,9 @@ use anyhow::{Context, Result}; use clap::Parser; use harmony::modules::podman::{PodmanService, PodmanV0Score}; -use iot_operator_v0::crd::{Deployment, DeploymentSpec, Rollout, RolloutStrategy, ScorePayload}; +use harmony_fleet_operator::crd::{ + Deployment, DeploymentSpec, Rollout, RolloutStrategy, ScorePayload, +}; use k8s_openapi::apimachinery::pkg::apis::meta::v1::LabelSelector; use kube::Client; use kube::api::{Api, DeleteParams, Patch, PatchParams}; @@ -52,7 +54,7 @@ use std::collections::BTreeMap; )] struct Cli { /// Kubernetes namespace for the Deployment CR. - #[arg(long, default_value = "iot-demo")] + #[arg(long, default_value = "fleet-demo")] namespace: String, /// Deployment CR name. Also used as the KV key suffix and /// podman container name on the device. @@ -62,7 +64,7 @@ struct Cli { /// `--selector device-id=` — the agent publishes /// a `device-id=` label on its DeviceInfo by default so this /// works without any cluster-side label pre-wiring. - #[arg(long, default_value = "iot-smoke-vm")] + #[arg(long, default_value = "fleet-smoke-vm")] target_device: String, /// Repeatable `key=value` label selector. Takes precedence over /// `--target-device` when provided. All pairs AND together. @@ -143,7 +145,7 @@ fn build_cr(cli: &Cli) -> Deployment { type_: "PodmanV0".to_string(), // `ScorePayload::data` is `serde_json::Value` by design // (opaque payload routed to the agent). Serialize the typed - // score through serde_json — the agent's `IotScore` enum + // score through serde_json — the agent's `ReconcileScore` enum // accepts exactly this shape via `#[serde(tag, content)]`. 
data: serde_json::to_value(&score).expect("PodmanV0Score is JSON-clean"), }; diff --git a/iot/iot-agent-v0/Cargo.toml b/fleet/harmony-fleet-agent/Cargo.toml similarity index 91% rename from iot/iot-agent-v0/Cargo.toml rename to fleet/harmony-fleet-agent/Cargo.toml index f90e9e65..8cd98369 100644 --- a/iot/iot-agent-v0/Cargo.toml +++ b/fleet/harmony-fleet-agent/Cargo.toml @@ -1,5 +1,5 @@ [package] -name = "iot-agent-v0" +name = "harmony-fleet-agent" version = "0.1.0" edition = "2024" rust-version = "1.85" diff --git a/iot/iot-agent-v0/src/config.rs b/fleet/harmony-fleet-agent/src/config.rs similarity index 100% rename from iot/iot-agent-v0/src/config.rs rename to fleet/harmony-fleet-agent/src/config.rs diff --git a/iot/iot-agent-v0/src/fleet_publisher.rs b/fleet/harmony-fleet-agent/src/fleet_publisher.rs similarity index 100% rename from iot/iot-agent-v0/src/fleet_publisher.rs rename to fleet/harmony-fleet-agent/src/fleet_publisher.rs diff --git a/iot/iot-agent-v0/src/main.rs b/fleet/harmony-fleet-agent/src/main.rs similarity index 96% rename from iot/iot-agent-v0/src/main.rs rename to fleet/harmony-fleet-agent/src/main.rs index b8546847..3b388349 100644 --- a/iot/iot-agent-v0/src/main.rs +++ b/fleet/harmony-fleet-agent/src/main.rs @@ -23,12 +23,12 @@ use crate::reconciler::Reconciler; const RECONCILE_INTERVAL: Duration = Duration::from_secs(30); #[derive(Parser)] -#[command(name = "iot-agent-v0", about = "IoT agent for Raspberry Pi devices")] +#[command(name = "harmony-fleet-agent", about = "IoT agent for Raspberry Pi devices")] struct Cli { #[arg( long, - env = "IOT_AGENT_CONFIG", - default_value = "/etc/iot-agent/config.toml" + env = "FLEET_AGENT_CONFIG", + default_value = "/etc/fleet-agent/config.toml" )] config: std::path::PathBuf, } @@ -138,7 +138,7 @@ async fn main() -> Result<()> { let cli = Cli::parse(); let cfg = config::load_config(&cli.config)?; - tracing::info!(device_id = %cfg.agent.device_id, "iot-agent-v0 starting"); + tracing::info!(device_id = 
%cfg.agent.device_id, "harmony-fleet-agent starting"); let device_id = cfg.agent.device_id.clone(); diff --git a/iot/iot-agent-v0/src/reconciler.rs b/fleet/harmony-fleet-agent/src/reconciler.rs similarity index 98% rename from iot/iot-agent-v0/src/reconciler.rs rename to fleet/harmony-fleet-agent/src/reconciler.rs index c46d862a..619d9bf0 100644 --- a/iot/iot-agent-v0/src/reconciler.rs +++ b/fleet/harmony-fleet-agent/src/reconciler.rs @@ -8,7 +8,7 @@ use harmony_reconciler_contracts::{DeploymentName, DeploymentState, Id, Phase}; use tokio::sync::Mutex; use harmony::inventory::Inventory; -use harmony::modules::podman::{IotScore, PodmanTopology, PodmanV0Score}; +use harmony::modules::podman::{PodmanTopology, PodmanV0Score, ReconcileScore}; use harmony::score::Score; use crate::fleet_publisher::FleetPublisher; @@ -107,8 +107,8 @@ impl Reconciler { /// key. pub async fn apply(&self, key: &str, value: &[u8]) -> Result<()> { let deployment = deployment_from_key(key); - let incoming = match serde_json::from_slice::(value) { - Ok(IotScore::PodmanV0(s)) => s, + let incoming = match serde_json::from_slice::(value) { + Ok(ReconcileScore::PodmanV0(s)) => s, Err(e) => { tracing::warn!(key, error = %e, "failed to deserialize score"); if let Some(name) = &deployment { diff --git a/iot/iot-operator-v0/Cargo.toml b/fleet/harmony-fleet-operator/Cargo.toml similarity index 91% rename from iot/iot-operator-v0/Cargo.toml rename to fleet/harmony-fleet-operator/Cargo.toml index dafc5fbe..3fe5a2d4 100644 --- a/iot/iot-operator-v0/Cargo.toml +++ b/fleet/harmony-fleet-operator/Cargo.toml @@ -1,5 +1,5 @@ [package] -name = "iot-operator-v0" +name = "harmony-fleet-operator" version = "0.1.0" edition = "2024" rust-version = "1.85" diff --git a/iot/iot-operator-v0/Dockerfile b/fleet/harmony-fleet-operator/Dockerfile similarity index 84% rename from iot/iot-operator-v0/Dockerfile rename to fleet/harmony-fleet-operator/Dockerfile index 4cfc61b5..0eb0b632 100644 --- a/iot/iot-operator-v0/Dockerfile +++ 
b/fleet/harmony-fleet-operator/Dockerfile @@ -1,5 +1,5 @@ # Minimal runtime container for the IoT operator. Assumes -# `target/release/iot-operator-v0` has already been built on the +# `target/release/harmony-fleet-operator` has already been built on the # host (the load-test harness does this). Base image is # archlinux:base to guarantee the host's glibc (ABI-matched) — # debian:bookworm-slim and similar distros ship older glibcs and @@ -10,7 +10,7 @@ # toolchain image. FROM docker.io/library/archlinux:base -COPY target/release/iot-operator-v0 /usr/local/bin/iot-operator-v0 +COPY target/release/harmony-fleet-operator /usr/local/bin/harmony-fleet-operator # Non-root runtime. Pairs with the Pod's `securityContext. # runAsNonRoot: true` in the helm chart — k8s admission rejects @@ -23,4 +23,4 @@ COPY target/release/iot-operator-v0 /usr/local/bin/iot-operator-v0 # arbitrary but safe — no overlap with typical system UIDs. USER 65532:65532 -ENTRYPOINT ["/usr/local/bin/iot-operator-v0"] +ENTRYPOINT ["/usr/local/bin/harmony-fleet-operator"] diff --git a/iot/iot-operator-v0/src/chart.rs b/fleet/harmony-fleet-operator/src/chart.rs similarity index 94% rename from iot/iot-operator-v0/src/chart.rs rename to fleet/harmony-fleet-operator/src/chart.rs index 26ce32ae..a8e4138c 100644 --- a/iot/iot-operator-v0/src/chart.rs +++ b/fleet/harmony-fleet-operator/src/chart.rs @@ -39,7 +39,7 @@ use crate::crd::{Deployment, Device}; /// local-dev k3d install; override via the `chart` subcommand flags. pub struct ChartOptions { /// Where to write the chart directory. The chart is created as a - /// subdirectory `iot-operator-v0` inside this path. + /// subdirectory `harmony-fleet-operator` inside this path. pub output_dir: PathBuf, /// Container image tag the operator Deployment should pull. For /// k3d with sideloaded images, `IfNotPresent` + a tag that's @@ -55,7 +55,7 @@ pub struct ChartOptions { /// reusable across namespaces. 
pub namespace: String, /// NATS URL the operator connects to. For in-cluster NATS at - /// `iot-nats.iot-system` the default `nats://iot-nats.iot-system:4222` + /// `fleet-nats.fleet-system` the default `nats://fleet-nats.fleet-system:4222` /// works with no config. pub nats_url: String, /// `RUST_LOG` value for the operator process. @@ -65,20 +65,20 @@ pub struct ChartOptions { impl Default for ChartOptions { fn default() -> Self { Self { - output_dir: PathBuf::from("/tmp/iot-load-test/chart"), - image: "localhost/iot-operator-v0:latest".to_string(), + output_dir: PathBuf::from("/tmp/fleet-load-test/chart"), + image: "localhost/harmony-fleet-operator:latest".to_string(), image_pull_policy: "IfNotPresent".to_string(), - namespace: "iot-system".to_string(), - nats_url: "nats://iot-nats.iot-system:4222".to_string(), + namespace: "fleet-system".to_string(), + nats_url: "nats://fleet-nats.fleet-system:4222".to_string(), log_level: "info,kube_runtime=warn".to_string(), } } } -const RELEASE_NAME: &str = "iot-operator-v0"; -const SERVICE_ACCOUNT: &str = "iot-operator-v0"; -const CLUSTER_ROLE: &str = "iot-operator-v0"; -const CLUSTER_ROLE_BINDING: &str = "iot-operator-v0"; +const RELEASE_NAME: &str = "harmony-fleet-operator"; +const SERVICE_ACCOUNT: &str = "harmony-fleet-operator"; +const CLUSTER_ROLE: &str = "harmony-fleet-operator"; +const CLUSTER_ROLE_BINDING: &str = "harmony-fleet-operator"; /// Build + write the chart to `opts.output_dir`. Returns the full /// path to the generated chart directory (which is what `helm @@ -143,7 +143,7 @@ fn service_account(namespace: &str) -> ServiceAccount { /// Verbs the operator actually uses — nothing aspirational. Tightening /// later is a matter of deleting a line. 
fn cluster_role() -> ClusterRole { - let group = "iot.nationtech.io".to_string(); + let group = "fleet.nationtech.io".to_string(); ClusterRole { metadata: ObjectMeta { name: Some(CLUSTER_ROLE.to_string()), diff --git a/iot/iot-operator-v0/src/controller.rs b/fleet/harmony-fleet-operator/src/controller.rs similarity index 98% rename from iot/iot-operator-v0/src/controller.rs rename to fleet/harmony-fleet-operator/src/controller.rs index 32fa5ccb..340da116 100644 --- a/iot/iot-operator-v0/src/controller.rs +++ b/fleet/harmony-fleet-operator/src/controller.rs @@ -36,7 +36,7 @@ use kube::{Api, Client, ResourceExt}; use crate::crd::Deployment; -const FINALIZER: &str = "iot.nationtech.io/finalizer"; +const FINALIZER: &str = "fleet.nationtech.io/finalizer"; #[derive(Debug, thiserror::Error)] pub enum Error { diff --git a/iot/iot-operator-v0/src/crd.rs b/fleet/harmony-fleet-operator/src/crd.rs similarity index 96% rename from iot/iot-operator-v0/src/crd.rs rename to fleet/harmony-fleet-operator/src/crd.rs index 54dd5121..0399af82 100644 --- a/iot/iot-operator-v0/src/crd.rs +++ b/fleet/harmony-fleet-operator/src/crd.rs @@ -13,11 +13,11 @@ use serde::{Deserialize, Serialize}; /// `Device` CRs at reconcile time; no list of device ids on spec. #[derive(CustomResource, Serialize, Deserialize, Clone, Debug, JsonSchema)] #[kube( - group = "iot.nationtech.io", + group = "fleet.nationtech.io", version = "v1alpha1", kind = "Deployment", plural = "deployments", - shortname = "iotdep", + shortname = "fleetdep", namespaced, status = "DeploymentStatus" )] @@ -43,7 +43,7 @@ pub struct ScorePayload { /// /// 1. `x-kubernetes-preserve-unknown-fields: true` on `data` — the payload /// is routed opaquely; its shape is enforced on-device by the agent's -/// typed `IotScore` deserialization, not by the apiserver. +/// typed `ReconcileScore` deserialization, not by the apiserver. /// 2. 
An `x-kubernetes-validations` CEL rule on the enclosing `score` object /// requiring `type` to be a valid Rust identifier, so typos (`"pdoman"`) /// are rejected at `kubectl apply` time rather than silently reaching @@ -155,11 +155,11 @@ pub struct AggregateLastError { /// rather than sitting here as speculative surface. #[derive(CustomResource, Serialize, Deserialize, Clone, Debug, JsonSchema)] #[kube( - group = "iot.nationtech.io", + group = "fleet.nationtech.io", version = "v1alpha1", kind = "Device", plural = "devices", - shortname = "iotdevice" + shortname = "fleetdev" )] #[serde(rename_all = "camelCase")] pub struct DeviceSpec { diff --git a/iot/iot-operator-v0/src/device_reconciler.rs b/fleet/harmony-fleet-operator/src/device_reconciler.rs similarity index 97% rename from iot/iot-operator-v0/src/device_reconciler.rs rename to fleet/harmony-fleet-operator/src/device_reconciler.rs index a5b10e94..6f2dba9b 100644 --- a/iot/iot-operator-v0/src/device_reconciler.rs +++ b/fleet/harmony-fleet-operator/src/device_reconciler.rs @@ -20,7 +20,7 @@ use std::collections::BTreeMap; use crate::crd::{Device, DeviceSpec}; -const FIELD_MANAGER: &str = "iot-operator-device-reconciler"; +const FIELD_MANAGER: &str = "harmony-fleet-operator-device-reconciler"; pub async fn run(client: Client, js: async_nats::jetstream::Context) -> Result<()> { let bucket = js @@ -150,7 +150,7 @@ mod tests { fn label_cleaner_accepts_common_cases() { assert!(is_label_key("group")); assert!(is_label_key("arch")); - assert!(is_label_key("iot.nationtech.io/region")); + assert!(is_label_key("fleet.nationtech.io/region")); assert!(is_label_value("aarch64")); assert!(is_label_value("site-01")); } diff --git a/iot/iot-operator-v0/src/fleet_aggregator.rs b/fleet/harmony-fleet-operator/src/fleet_aggregator.rs similarity index 99% rename from iot/iot-operator-v0/src/fleet_aggregator.rs rename to fleet/harmony-fleet-operator/src/fleet_aggregator.rs index a7f5613a..e333865a 100644 --- 
a/iot/iot-operator-v0/src/fleet_aggregator.rs +++ b/fleet/harmony-fleet-operator/src/fleet_aggregator.rs @@ -776,7 +776,7 @@ mod tests { #[test] fn compute_aggregate_counts_matched_devices() { - let cached = cached("iot-demo", "hello", "group", "edge-a"); + let cached = cached("fleet-demo", "hello", "group", "edge-a"); let key = cached.key.clone(); let mut s = FleetState::default(); diff --git a/iot/iot-operator-v0/src/install.rs b/fleet/harmony-fleet-operator/src/install.rs similarity index 95% rename from iot/iot-operator-v0/src/install.rs rename to fleet/harmony-fleet-operator/src/install.rs index 1e733999..57b44e1e 100644 --- a/iot/iot-operator-v0/src/install.rs +++ b/fleet/harmony-fleet-operator/src/install.rs @@ -22,7 +22,7 @@ use crate::crd::{Deployment, Device}; /// (e.g. with `kubectl wait --for=condition=Established`) if it /// cares. pub async fn install_crds() -> Result<()> { - let topology = K8sBareTopology::from_kubeconfig("iot-operator-install") + let topology = K8sBareTopology::from_kubeconfig("harmony-fleet-operator-install") .await .map_err(|e| anyhow::anyhow!(e)) .context("building K8sBareTopology from KUBECONFIG")?; diff --git a/iot/iot-operator-v0/src/lib.rs b/fleet/harmony-fleet-operator/src/lib.rs similarity index 100% rename from iot/iot-operator-v0/src/lib.rs rename to fleet/harmony-fleet-operator/src/lib.rs diff --git a/iot/iot-operator-v0/src/main.rs b/fleet/harmony-fleet-operator/src/main.rs similarity index 91% rename from iot/iot-operator-v0/src/main.rs rename to fleet/harmony-fleet-operator/src/main.rs index a589c5a0..0e0bd347 100644 --- a/iot/iot-operator-v0/src/main.rs +++ b/fleet/harmony-fleet-operator/src/main.rs @@ -2,7 +2,7 @@ mod chart; mod controller; mod install; -use iot_operator_v0::{crd, device_reconciler, fleet_aggregator}; +use harmony_fleet_operator::{crd, device_reconciler, fleet_aggregator}; use anyhow::Result; use async_nats::jetstream; @@ -13,7 +13,7 @@ use std::path::PathBuf; #[derive(Parser)] #[command( - name = 
"iot-operator-v0", + name = "harmony-fleet-operator", about = "IoT operator — Deployment CRD → NATS KV" )] struct Cli { @@ -49,15 +49,15 @@ enum Command { /// chart path on success; `helm install ` takes it from /// there. No registry publish — the chart lives on disk. Chart { - #[arg(long, default_value = "/tmp/iot-load-test/chart")] + #[arg(long, default_value = "/tmp/fleet-load-test/chart")] output: PathBuf, - #[arg(long, default_value = "localhost/iot-operator-v0:latest")] + #[arg(long, default_value = "localhost/harmony-fleet-operator:latest")] image: String, #[arg(long, default_value = "IfNotPresent")] image_pull_policy: String, - #[arg(long, default_value = "iot-system")] + #[arg(long, default_value = "fleet-system")] namespace: String, - #[arg(long, default_value = "nats://iot-nats.iot-system:4222")] + #[arg(long, default_value = "nats://fleet-nats.fleet-system:4222")] nats_url: String, #[arg(long, default_value = "info,kube_runtime=warn")] log_level: String, diff --git a/iot/scripts/load-test.sh b/fleet/scripts/load-test.sh similarity index 80% rename from iot/scripts/load-test.sh rename to fleet/scripts/load-test.sh index f32c9bb3..b5ceb9f9 100755 --- a/iot/scripts/load-test.sh +++ b/fleet/scripts/load-test.sh @@ -1,25 +1,25 @@ #!/usr/bin/env bash -# Load-test harness for the IoT operator's fleet_aggregator. +# Load-test harness for the Harmony fleet operator's fleet_aggregator. # # Brings up the minimum stack (k3d + in-cluster NATS + CRD + operator) -# with no VM or real agent, then runs the `iot_load_test` binary +# with no VM or real agent, then runs the `fleet_load_test` binary # which simulates N devices pushing DeploymentState to NATS. # -# All stable paths under $WORK_DIR (default /tmp/iot-load-test) so you +# All stable paths under $WORK_DIR (default /tmp/fleet-load-test) so you # can point kubectl / tail at them while the test is running. 
# # Quick usage: -# iot/scripts/load-test.sh # 100-device default (55 + 9×5) -# HOLD=1 iot/scripts/load-test.sh # leave stack running for exploration +# fleet/scripts/load-test.sh # 100-device default (55 + 9×5) +# HOLD=1 fleet/scripts/load-test.sh # leave stack running for exploration # DEVICES=10000 GROUP_SIZES=5500,500,500,500,500,500,500,500,500,500 \ -# DURATION=90 iot/scripts/load-test.sh +# DURATION=90 fleet/scripts/load-test.sh # # While it's running, in another terminal: -# export KUBECONFIG=/tmp/iot-load-test/kubeconfig -# kubectl get deployments.iot.nationtech.io -A -w -# kubectl get deployments.iot.nationtech.io -A \ +# export KUBECONFIG=/tmp/fleet-load-test/kubeconfig +# kubectl get deployments.fleet.nationtech.io -A -w +# kubectl get deployments.fleet.nationtech.io -A \ # -o custom-columns=NAME:.metadata.name,RUN:.status.aggregate.succeeded,FAIL:.status.aggregate.failed,PEND:.status.aggregate.pending -# tail -f /tmp/iot-load-test/operator.log +# tail -f /tmp/fleet-load-test/operator.log # # Set DEBUG=1 to bump RUST_LOG so the operator logs every status patch. @@ -27,14 +27,14 @@ set -euo pipefail SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" REPO_ROOT="$(cd "$SCRIPT_DIR/../.." 
&& pwd)" -OPERATOR_DIR="$REPO_ROOT/iot/iot-operator-v0" +OPERATOR_DIR="$REPO_ROOT/fleet/harmony-fleet-operator" # ---- config ----------------------------------------------------------------- K3D_BIN="${K3D_BIN:-$HOME/.local/share/harmony/k3d/k3d}" -CLUSTER_NAME="${CLUSTER_NAME:-iot-load}" -NATS_NAMESPACE="${NATS_NAMESPACE:-iot-system}" -NATS_NAME="${NATS_NAME:-iot-nats}" +CLUSTER_NAME="${CLUSTER_NAME:-fleet-load}" +NATS_NAMESPACE="${NATS_NAMESPACE:-fleet-system}" +NATS_NAME="${NATS_NAME:-fleet-nats}" NATS_NODE_PORT="${NATS_NODE_PORT:-4222}" NATS_IMAGE="${NATS_IMAGE:-docker.io/library/nats:2.10-alpine}" @@ -42,22 +42,22 @@ DEVICES="${DEVICES:-100}" GROUP_SIZES="${GROUP_SIZES:-55,5,5,5,5,5,5,5,5,5}" TICK_MS="${TICK_MS:-1000}" DURATION="${DURATION:-60}" -NAMESPACE="${NAMESPACE:-iot-load}" +NAMESPACE="${NAMESPACE:-fleet-load}" # Keep the stack alive after the test completes so the user can poke # at CRs + NATS interactively. Ctrl-C to tear everything down. HOLD="${HOLD:-0}" # Stable working dir so kubectl + tail targets are predictable. -WORK_DIR="${WORK_DIR:-/tmp/iot-load-test}" +WORK_DIR="${WORK_DIR:-/tmp/fleet-load-test}" mkdir -p "$WORK_DIR" KUBECONFIG_FILE="$WORK_DIR/kubeconfig" OPERATOR_LOG="$WORK_DIR/operator.log" CHART_DIR="$WORK_DIR/chart" -OPERATOR_IMAGE="${OPERATOR_IMAGE:-localhost/iot-operator-v0:latest}" -OPERATOR_NAMESPACE="${OPERATOR_NAMESPACE:-iot-system}" -OPERATOR_RELEASE="${OPERATOR_RELEASE:-iot-operator-v0}" +OPERATOR_IMAGE="${OPERATOR_IMAGE:-localhost/harmony-fleet-operator:latest}" +OPERATOR_NAMESPACE="${OPERATOR_NAMESPACE:-fleet-system}" +OPERATOR_RELEASE="${OPERATOR_RELEASE:-harmony-fleet-operator}" OPERATOR_PID="" # unused in the helm path; kept so older trap-cleanup logic doesn't choke. 
log() { printf '\033[1;34m[load-test]\033[0m %s\n' "$*"; } @@ -123,7 +123,7 @@ fi log "phase 2b: install NATS via NatsBasicScore" ( cd "$REPO_ROOT" - cargo run -q --release -p example_iot_nats_install -- \ + cargo run -q --release -p example_fleet_nats_install -- \ --namespace "$NATS_NAMESPACE" \ --name "$NATS_NAME" \ --expose load-balancer @@ -147,7 +147,7 @@ done log "phase 3a: build operator release binary" ( cd "$REPO_ROOT" - cargo build -q --release -p iot-operator-v0 + cargo build -q --release -p harmony-fleet-operator ) log "phase 3b: build container image $OPERATOR_IMAGE" @@ -158,12 +158,12 @@ log "phase 3b: build container image $OPERATOR_IMAGE" IMAGE_CTX="$WORK_DIR/image-ctx" rm -rf "$IMAGE_CTX" mkdir -p "$IMAGE_CTX/target/release" -cp "$REPO_ROOT/target/release/iot-operator-v0" "$IMAGE_CTX/target/release/iot-operator-v0" -cp "$REPO_ROOT/iot/iot-operator-v0/Dockerfile" "$IMAGE_CTX/Dockerfile" +cp "$REPO_ROOT/target/release/harmony-fleet-operator" "$IMAGE_CTX/target/release/harmony-fleet-operator" +cp "$REPO_ROOT/fleet/harmony-fleet-operator/Dockerfile" "$IMAGE_CTX/Dockerfile" podman build -q -t "$OPERATOR_IMAGE" "$IMAGE_CTX" >/dev/null log "phase 3c: sideload operator image into k3d cluster" -tmptar="$(mktemp -t iot-operator-image.XXXXXX.tar)" +tmptar="$(mktemp -t harmony-fleet-operator-image.XXXXXX.tar)" podman save "$OPERATOR_IMAGE" -o "$tmptar" >/dev/null docker load -i "$tmptar" >/dev/null rm -f "$tmptar" @@ -197,9 +197,9 @@ helm upgrade --install "$OPERATOR_RELEASE" "$CHART_DIR/$OPERATOR_RELEASE" \ --wait --timeout 120s >/dev/null kubectl wait --for=condition=Established \ - "crd/deployments.iot.nationtech.io" --timeout=30s >/dev/null + "crd/deployments.fleet.nationtech.io" --timeout=30s >/dev/null kubectl wait --for=condition=Established \ - "crd/devices.iot.nationtech.io" --timeout=30s >/dev/null + "crd/devices.fleet.nationtech.io" --timeout=30s >/dev/null kubectl -n "$OPERATOR_NAMESPACE" wait --for=condition=Available \ 
"deployment/$OPERATOR_RELEASE" --timeout=120s >/dev/null @@ -218,22 +218,22 @@ $(printf '\033[1;32m[load-test]\033[0m stack ready. In another terminal:') export KUBECONFIG=$KUBECONFIG_FILE $(printf '\033[1mWatch CRs as they update:\033[0m') - kubectl -n $NAMESPACE get deployments.iot.nationtech.io -w + kubectl -n $NAMESPACE get deployments.fleet.nationtech.io -w $(printf '\033[1mSnapshot aggregate columns:\033[0m') - kubectl -n $NAMESPACE get deployments.iot.nationtech.io \\ + kubectl -n $NAMESPACE get deployments.fleet.nationtech.io \\ -o custom-columns=NAME:.metadata.name,MATCHED:.status.aggregate.matchedDeviceCount,OK:.status.aggregate.succeeded,FAIL:.status.aggregate.failed,PEND:.status.aggregate.pending,LAST_ERR:.status.aggregate.lastError.message $(printf '\033[1mInspect a Deployment spec (no device list — selector only):\033[0m') - kubectl -n $NAMESPACE get deployments.iot.nationtech.io/load-group-00 -o jsonpath='{.spec}' | jq + kubectl -n $NAMESPACE get deployments.fleet.nationtech.io/load-group-00 -o jsonpath='{.spec}' | jq $(printf '\033[1mFull CR status JSON for one CR:\033[0m') - kubectl -n $NAMESPACE get deployments.iot.nationtech.io/load-group-00 -o jsonpath='{.status.aggregate}' | jq + kubectl -n $NAMESPACE get deployments.fleet.nationtech.io/load-group-00 -o jsonpath='{.status.aggregate}' | jq $(printf '\033[1mList Devices + filter by label:\033[0m') - kubectl get devices.iot.nationtech.io | head -20 - kubectl get devices.iot.nationtech.io -l group=load-group-00 | head -10 - kubectl get device.iot.nationtech.io load-dev-00001 -o yaml + kubectl get devices.fleet.nationtech.io | head -20 + kubectl get devices.fleet.nationtech.io -l group=load-group-00 | head -10 + kubectl get device.fleet.nationtech.io load-dev-00001 -o yaml $(printf '\033[1mOperator log (in-cluster pod):\033[0m') kubectl -n $OPERATOR_NAMESPACE logs -f deployment/$OPERATOR_RELEASE @@ -254,10 +254,10 @@ print_banner # ---- phase 5: load test 
------------------------------------------------------ -log "phase 5: run iot_load_test (devices=$DEVICES, tick=${TICK_MS}ms, duration=${DURATION}s)" +log "phase 5: run fleet_load_test (devices=$DEVICES, tick=${TICK_MS}ms, duration=${DURATION}s)" ( cd "$REPO_ROOT" - cargo build -q --release -p example_iot_load_test + cargo build -q --release -p example_fleet_load_test ) # `--no-cleanup` keeps the CRs + KV entries around after the run so @@ -273,7 +273,7 @@ if [[ "$HOLD" == "1" ]]; then LOAD_ARGS+=(--keep) fi -RUST_LOG="info" "$REPO_ROOT/target/release/iot_load_test" "${LOAD_ARGS[@]}" +RUST_LOG="info" "$REPO_ROOT/target/release/fleet_load_test" "${LOAD_ARGS[@]}" # ---- phase 6: operator log stats -------------------------------------------- diff --git a/iot/scripts/smoke-a1.sh b/fleet/scripts/smoke-a1.sh similarity index 89% rename from iot/scripts/smoke-a1.sh rename to fleet/scripts/smoke-a1.sh index 5b8d60f8..2b13befa 100755 --- a/iot/scripts/smoke-a1.sh +++ b/fleet/scripts/smoke-a1.sh @@ -1,5 +1,5 @@ #!/usr/bin/env bash -# End-to-end smoke test for the IoT walking skeleton (ROADMAP/iot_platform/ +# End-to-end smoke test for the IoT walking skeleton (ROADMAP/fleet_platform/ # v0_walking_skeleton.md §9.A1 and §5.4 agent dispatch). # # Deployment CR ─apply─▶ operator ─KV put─▶ NATS ◀─watch─ agent ─podman─▶ nginx @@ -22,25 +22,25 @@ set -euo pipefail SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" REPO_ROOT="$(cd "$SCRIPT_DIR/../.." 
&& pwd)" -OPERATOR_DIR="$REPO_ROOT/iot/iot-operator-v0" -AGENT_DIR="$REPO_ROOT/iot/iot-agent-v0" +OPERATOR_DIR="$REPO_ROOT/fleet/harmony-fleet-operator" +AGENT_DIR="$REPO_ROOT/fleet/harmony-fleet-agent" K3D_BIN="${K3D_BIN:-$HOME/.local/share/harmony/k3d/k3d}" -CLUSTER_NAME="${CLUSTER_NAME:-iot-smoke}" -NATS_CONTAINER="${NATS_CONTAINER:-iot-smoke-nats}" -NATS_NET_NAME="${NATS_NET_NAME:-iot-smoke-net}" +CLUSTER_NAME="${CLUSTER_NAME:-fleet-smoke}" +NATS_CONTAINER="${NATS_CONTAINER:-fleet-smoke-nats}" +NATS_NET_NAME="${NATS_NET_NAME:-fleet-smoke-net}" NATS_IMAGE="${NATS_IMAGE:-docker.io/library/nats:2.10-alpine}" NATSBOX_IMAGE="${NATSBOX_IMAGE:-docker.io/natsio/nats-box:latest}" NATS_PORT="${NATS_PORT:-4222}" TARGET_DEVICE="${TARGET_DEVICE:-pi-demo-01}" DEPLOY_NAME="${DEPLOY_NAME:-hello-world}" -DEPLOY_NS="${DEPLOY_NS:-iot-demo}" +DEPLOY_NS="${DEPLOY_NS:-fleet-demo}" HELLO_CONTAINER="${HELLO_CONTAINER:-hello}" HELLO_PORT="${HELLO_PORT:-8080}" -OPERATOR_LOG="$(mktemp -t iot-operator.XXXXXX.log)" +OPERATOR_LOG="$(mktemp -t harmony-fleet-operator.XXXXXX.log)" OPERATOR_PID="" -AGENT_LOG="$(mktemp -t iot-agent.XXXXXX.log)" +AGENT_LOG="$(mktemp -t fleet-agent.XXXXXX.log)" AGENT_PID="" AGENT_CONFIG_FILE="" KUBECONFIG_FILE="" @@ -126,13 +126,13 @@ log "phase 2: create k3d cluster '$CLUSTER_NAME'" "$K3D_BIN" cluster delete "$CLUSTER_NAME" >/dev/null 2>&1 || true "$K3D_BIN" cluster create "$CLUSTER_NAME" --wait --timeout 90s >/dev/null -KUBECONFIG_FILE="$(mktemp -t iot-smoke-kubeconfig.XXXXXX)" +KUBECONFIG_FILE="$(mktemp -t fleet-smoke-kubeconfig.XXXXXX)" "$K3D_BIN" kubeconfig get "$CLUSTER_NAME" > "$KUBECONFIG_FILE" export KUBECONFIG="$KUBECONFIG_FILE" log "install CRD via operator's install subcommand (typed Rust — no yaml, no kubectl apply)" ( cd "$OPERATOR_DIR" && cargo run -q -- install ) >/dev/null -kubectl wait --for=condition=Established "crd/deployments.iot.nationtech.io" --timeout=30s >/dev/null +kubectl wait --for=condition=Established 
"crd/deployments.fleet.nationtech.io" --timeout=30s >/dev/null kubectl get ns "$DEPLOY_NS" >/dev/null 2>&1 || kubectl create namespace "$DEPLOY_NS" >/dev/null @@ -142,7 +142,7 @@ kubectl get ns "$DEPLOY_NS" >/dev/null 2>&1 || kubectl create namespace "$DEPLOY ############################################################################### log "phase 2b: apiserver rejects invalid score.type" BAD_CR=$(cat </dev/null 2>&1; then - kubectl -n "$DEPLOY_NS" delete deployment.iot.nationtech.io bad-discriminator >/dev/null 2>&1 || true +if kubectl -n "$DEPLOY_NS" get deployment.fleet.nationtech.io bad-discriminator >/dev/null 2>&1; then + kubectl -n "$DEPLOY_NS" delete deployment.fleet.nationtech.io bad-discriminator >/dev/null 2>&1 || true fail "apiserver should have rejected 'bad-discriminator' but it was persisted" fi @@ -179,7 +179,7 @@ log "phase 3: start operator" NATS_URL="nats://127.0.0.1:$NATS_PORT" \ KV_BUCKET="desired-state" \ RUST_LOG="info,kube_runtime=warn" \ - "$REPO_ROOT/target/debug/iot-operator-v0" \ + "$REPO_ROOT/target/debug/harmony-fleet-operator" \ >"$OPERATOR_LOG" 2>&1 & OPERATOR_PID=$! log "operator pid=$OPERATOR_PID (log: $OPERATOR_LOG)" @@ -207,7 +207,7 @@ log "phase 3b: build + start agent" # doesn't occupy the host port before we even start. podman rm -f "$HELLO_CONTAINER" >/dev/null 2>&1 || true -AGENT_CONFIG_FILE="$(mktemp -t iot-agent-config.XXXXXX.toml)" +AGENT_CONFIG_FILE="$(mktemp -t fleet-agent-config.XXXXXX.toml)" cat >"$AGENT_CONFIG_FILE" <"$AGENT_LOG" 2>&1 & AGENT_PID=$! 
log "agent pid=$AGENT_PID (log: $AGENT_LOG)" @@ -241,7 +241,7 @@ grep -q "watching KV keys" "$AGENT_LOG" \ ############################################################################### log "phase 4: apply Deployment CR" cat </dev/null -apiVersion: iot.nationtech.io/v1alpha1 +apiVersion: fleet.nationtech.io/v1alpha1 kind: Deployment metadata: name: $DEPLOY_NAME @@ -276,7 +276,7 @@ echo "$KV_VALUE" | grep -q '"image":"docker.io/library/nginx:alpine"' \ log "wait for .status.observedScoreString" OBSERVED="" for _ in $(seq 1 30); do - OBSERVED="$(kubectl -n "$DEPLOY_NS" get deployment.iot.nationtech.io "$DEPLOY_NAME" \ + OBSERVED="$(kubectl -n "$DEPLOY_NS" get deployment.fleet.nationtech.io "$DEPLOY_NAME" \ -o jsonpath='{.status.observedScoreString}' 2>/dev/null || true)" [[ -n "$OBSERVED" ]] && break sleep 1 @@ -315,7 +315,7 @@ log "nginx responded" # phase 5 — delete CR, expect cleanup via finalizer + agent ############################################################################### log "phase 5: delete Deployment CR — finalizer + agent should remove KV and container" -kubectl -n "$DEPLOY_NS" delete deployment.iot.nationtech.io "$DEPLOY_NAME" --wait=true >/dev/null +kubectl -n "$DEPLOY_NS" delete deployment.fleet.nationtech.io "$DEPLOY_NAME" --wait=true >/dev/null log "wait for KV key removal" for _ in $(seq 1 30); do diff --git a/iot/scripts/smoke-a3-arm.sh b/fleet/scripts/smoke-a3-arm.sh similarity index 69% rename from iot/scripts/smoke-a3-arm.sh rename to fleet/scripts/smoke-a3-arm.sh index 49812d5a..8cbcc6b7 100755 --- a/iot/scripts/smoke-a3-arm.sh +++ b/fleet/scripts/smoke-a3-arm.sh @@ -4,7 +4,7 @@ # native KVM when the host is already arm64). # # This is exactly equivalent to: -# ARCH=aarch64 VM_NAME=iot-smoke-vm-arm ./smoke-a3.sh +# ARCH=aarch64 VM_NAME=fleet-smoke-vm-arm ./smoke-a3.sh # with the VM name defaulted so it can live alongside an x86-64 # smoke run on the same host without clobbering libvirt state. 
@@ -13,9 +13,9 @@ set -euo pipefail SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" export ARCH=aarch64 -export VM_NAME="${VM_NAME:-iot-smoke-vm-arm}" +export VM_NAME="${VM_NAME:-fleet-smoke-vm-arm}" export DEVICE_ID="${DEVICE_ID:-$VM_NAME}" -export NATS_CONTAINER="${NATS_CONTAINER:-iot-smoke-nats-a3-arm}" -export NATS_NET_NAME="${NATS_NET_NAME:-iot-smoke-net-a3-arm}" +export NATS_CONTAINER="${NATS_CONTAINER:-fleet-smoke-nats-a3-arm}" +export NATS_NET_NAME="${NATS_NET_NAME:-fleet-smoke-net-a3-arm}" exec "$SCRIPT_DIR/smoke-a3.sh" "$@" diff --git a/iot/scripts/smoke-a3.sh b/fleet/scripts/smoke-a3.sh similarity index 92% rename from iot/scripts/smoke-a3.sh rename to fleet/scripts/smoke-a3.sh index 2565bfda..b0533c87 100755 --- a/iot/scripts/smoke-a3.sh +++ b/fleet/scripts/smoke-a3.sh @@ -6,7 +6,7 @@ # ssh+Ansible ◀────┘ # │ # ▼ -# IotDeviceSetupScore ──▶ podman + iot-agent on VM +# FleetDeviceSetupScore ──▶ podman + fleet-agent on VM # │ # ▼ # existing operator ──NATS────────┘ (agent joins fleet, reconciles CR) @@ -32,7 +32,7 @@ set -euo pipefail SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" REPO_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)" -VM_NAME="${VM_NAME:-iot-smoke-vm}" +VM_NAME="${VM_NAME:-fleet-smoke-vm}" DEVICE_ID="${DEVICE_ID:-$VM_NAME}" GROUP="${GROUP:-group-a}" LIBVIRT_URI="${LIBVIRT_URI:-qemu:///system}" @@ -43,8 +43,8 @@ LIBVIRT_URI="${LIBVIRT_URI:-qemu:///system}" # target, phase 4 timeout. 
ARCH="${ARCH:-x86-64}" -NATS_CONTAINER="${NATS_CONTAINER:-iot-smoke-nats-a3}" -NATS_NET_NAME="${NATS_NET_NAME:-iot-smoke-net-a3}" +NATS_CONTAINER="${NATS_CONTAINER:-fleet-smoke-nats-a3}" +NATS_NET_NAME="${NATS_NET_NAME:-fleet-smoke-net-a3}" NATS_IMAGE="${NATS_IMAGE:-docker.io/library/nats:2.10-alpine}" NATS_PORT="${NATS_PORT:-4222}" @@ -99,20 +99,20 @@ NAT_GW="$(virsh --connect "$LIBVIRT_URI" net-dumpxml default \ log "libvirt network gateway = $NAT_GW (VM will dial NATS at nats://$NAT_GW:$NATS_PORT)" # ---------------------------- phase 2: build --------------------------- -log "phase 2: build iot-agent-v0 for guest arch=$ARCH (release — debug binary fills cloud rootfs)" +log "phase 2: build harmony-fleet-agent for guest arch=$ARCH (release — debug binary fills cloud rootfs)" ( cd "$REPO_ROOT" if [[ -n "$AGENT_TARGET" ]]; then rustup target add "$AGENT_TARGET" >/dev/null - cargo build -q --release --target "$AGENT_TARGET" -p iot-agent-v0 + cargo build -q --release --target "$AGENT_TARGET" -p harmony-fleet-agent else - cargo build -q --release -p iot-agent-v0 + cargo build -q --release -p harmony-fleet-agent fi ) if [[ -n "$AGENT_TARGET" ]]; then - AGENT_BINARY="$REPO_ROOT/target/$AGENT_TARGET/release/iot-agent-v0" + AGENT_BINARY="$REPO_ROOT/target/$AGENT_TARGET/release/harmony-fleet-agent" else - AGENT_BINARY="$REPO_ROOT/target/release/iot-agent-v0" + AGENT_BINARY="$REPO_ROOT/target/release/harmony-fleet-agent" fi [[ -f "$AGENT_BINARY" ]] || fail "agent binary missing after build: $AGENT_BINARY" @@ -120,7 +120,7 @@ fi log "phase 3: bootstrap assets + provision VM + onboard device (arch=$EXAMPLE_ARCH)" ( cd "$REPO_ROOT" - cargo run -q --release -p example_iot_vm_setup -- \ + cargo run -q --release -p example_fleet_vm_setup -- \ --arch "$EXAMPLE_ARCH" \ --vm-name "$VM_NAME" \ --device-id "$DEVICE_ID" \ diff --git a/iot/scripts/smoke-a4.sh b/fleet/scripts/smoke-a4.sh similarity index 84% rename from iot/scripts/smoke-a4.sh rename to fleet/scripts/smoke-a4.sh index 
2ca7f10b..b57d590b 100755 --- a/iot/scripts/smoke-a4.sh +++ b/fleet/scripts/smoke-a4.sh @@ -3,14 +3,14 @@ # # [k3d cluster] # ├── NATS (single-node, NodePort 4222) -# └── CRD: iot.nationtech.io/v1alpha1/Deployment +# └── CRD: fleet.nationtech.io/v1alpha1/Deployment # ▲ # │ kubectl apply / harmony_apply_deployment # │ # [host] # ├── operator (cargo run) ──▶ NATS KV desired-state # └── libvirt VM -# └── iot-agent ──▶ NATS KV (watch) ──▶ podman container +# └── fleet-agent ──▶ NATS KV (watch) ──▶ podman container # # By default the script brings the whole stack up, applies no # Deployment CR, prints a "command menu" of user-runnable one-liners, @@ -31,24 +31,24 @@ set -euo pipefail SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" REPO_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)" -OPERATOR_DIR="$REPO_ROOT/iot/iot-operator-v0" +OPERATOR_DIR="$REPO_ROOT/fleet/harmony-fleet-operator" # ---- config ----------------------------------------------------------------- K3D_BIN="${K3D_BIN:-$HOME/.local/share/harmony/k3d/k3d}" -CLUSTER_NAME="${CLUSTER_NAME:-iot-demo}" +CLUSTER_NAME="${CLUSTER_NAME:-fleet-demo}" ARCH="${ARCH:-x86-64}" -VM_NAME="${VM_NAME:-iot-demo-vm}" +VM_NAME="${VM_NAME:-fleet-demo-vm}" DEVICE_ID="${DEVICE_ID:-$VM_NAME}" GROUP="${GROUP:-group-a}" LIBVIRT_URI="${LIBVIRT_URI:-qemu:///system}" -NATS_NAMESPACE="${NATS_NAMESPACE:-iot-system}" -NATS_NAME="${NATS_NAME:-iot-nats}" +NATS_NAMESPACE="${NATS_NAMESPACE:-fleet-system}" +NATS_NAME="${NATS_NAME:-fleet-nats}" NATS_NODE_PORT="${NATS_NODE_PORT:-4222}" -DEPLOY_NS="${DEPLOY_NS:-iot-demo}" +DEPLOY_NS="${DEPLOY_NS:-fleet-demo}" DEPLOY_NAME="${DEPLOY_NAME:-hello-world}" DEPLOY_PORT="${DEPLOY_PORT:-8080:80}" @@ -62,7 +62,7 @@ SRC_IMAGE="${SRC_IMAGE:-docker.io/library/nginx:alpine}" AUTO=0 [[ "${1:-}" == "--auto" ]] && AUTO=1 -OPERATOR_LOG="$(mktemp -t iot-operator.XXXXXX.log)" +OPERATOR_LOG="$(mktemp -t harmony-fleet-operator.XXXXXX.log)" OPERATOR_PID="" KUBECONFIG_FILE="" @@ -133,7 +133,7 @@ log "phase 1: create 
k3d cluster '$CLUSTER_NAME' (host port $NATS_NODE_PORT → --wait --timeout 90s \ -p "${NATS_NODE_PORT}:${NATS_NODE_PORT}@loadbalancer" \ >/dev/null -KUBECONFIG_FILE="$(mktemp -t iot-demo-kubeconfig.XXXXXX)" +KUBECONFIG_FILE="$(mktemp -t fleet-demo-kubeconfig.XXXXXX)" "$K3D_BIN" kubeconfig get "$CLUSTER_NAME" > "$KUBECONFIG_FILE" export KUBECONFIG="$KUBECONFIG_FILE" @@ -162,7 +162,7 @@ fi log "phase 2b: install NATS in-cluster via NatsBasicScore (namespace=$NATS_NAMESPACE, expose=load-balancer)" ( cd "$REPO_ROOT" - cargo run -q --release -p example_iot_nats_install -- \ + cargo run -q --release -p example_fleet_nats_install -- \ --namespace "$NATS_NAMESPACE" \ --name "$NATS_NAME" \ --expose load-balancer @@ -194,7 +194,7 @@ log "phase 3: install Deployment CRD via operator \`install\` subcommand" cargo run -q -- install ) kubectl wait --for=condition=Established \ - "crd/deployments.iot.nationtech.io" --timeout=30s >/dev/null + "crd/deployments.fleet.nationtech.io" --timeout=30s >/dev/null kubectl get ns "$DEPLOY_NS" >/dev/null 2>&1 || \ kubectl create namespace "$DEPLOY_NS" >/dev/null @@ -209,7 +209,7 @@ log "phase 4: start operator (host-side) connected to nats://localhost:$NATS_NOD NATS_URL="nats://localhost:$NATS_NODE_PORT" \ KV_BUCKET="desired-state" \ RUST_LOG="info,kube_runtime=warn" \ - "$REPO_ROOT/target/release/iot-operator-v0" \ + "$REPO_ROOT/target/release/harmony-fleet-operator" \ >"$OPERATOR_LOG" 2>&1 & OPERATOR_PID=$! 
log "operator pid=$OPERATOR_PID (log: $OPERATOR_LOG)" @@ -269,37 +269,37 @@ fi V1_IMAGE="localdev/nginx:v1" V2_IMAGE="localdev/nginx:v2" -IMAGE_TARBALL="$(mktemp -t iot-demo-images.XXXXXX.tar)" +IMAGE_TARBALL="$(mktemp -t fleet-demo-images.XXXXXX.tar)" podman save -o "$IMAGE_TARBALL" "$SRC_IMAGE" >/dev/null \ || fail "podman save failed" log "exported $SRC_IMAGE → $IMAGE_TARBALL ($(du -h "$IMAGE_TARBALL" | cut -f1))" # ---- phase 5: provision VM + install agent ---------------------------------- -log "phase 5: build iot-agent-v0 for arch=$ARCH + provision VM" +log "phase 5: build harmony-fleet-agent for arch=$ARCH + provision VM" ( cd "$REPO_ROOT" if [[ -n "$AGENT_TARGET" ]]; then rustup target add "$AGENT_TARGET" >/dev/null - cargo build -q --release --target "$AGENT_TARGET" -p iot-agent-v0 + cargo build -q --release --target "$AGENT_TARGET" -p harmony-fleet-agent else - cargo build -q --release -p iot-agent-v0 + cargo build -q --release -p harmony-fleet-agent fi ) if [[ -n "$AGENT_TARGET" ]]; then - AGENT_BINARY="$REPO_ROOT/target/$AGENT_TARGET/release/iot-agent-v0" + AGENT_BINARY="$REPO_ROOT/target/$AGENT_TARGET/release/harmony-fleet-agent" else - AGENT_BINARY="$REPO_ROOT/target/release/iot-agent-v0" + AGENT_BINARY="$REPO_ROOT/target/release/harmony-fleet-agent" fi [[ -f "$AGENT_BINARY" ]] || fail "agent binary missing: $AGENT_BINARY" ( cd "$REPO_ROOT" - # Pass through IOT_VM_ADMIN_PASSWORD if set so the VM admin user + # Pass through FLEET_VM_ADMIN_PASSWORD if set so the VM admin user # accepts SSH password auth. Useful for chaos / reliability # testing sessions where the operator wants to log in and break # things on purpose. Unset by default = key-only auth. 
- cargo run -q --release -p example_iot_vm_setup -- \ + cargo run -q --release -p example_fleet_vm_setup -- \ --arch "$EXAMPLE_ARCH" \ --vm-name "$VM_NAME" \ --device-id "$DEVICE_ID" \ @@ -312,29 +312,29 @@ VM_IP="$(virsh --connect "$LIBVIRT_URI" domifaddr "$VM_NAME" \ | awk '/ipv4/ { print $4 }' | head -1 | cut -d/ -f1)" [[ -n "$VM_IP" ]] || fail "couldn't resolve VM IP" -# ---- phase 5c: sideload workload images into iot-agent's podman ------------- +# ---- phase 5c: sideload workload images into fleet-agent's podman ------------- -log "phase 5c: sideload $V1_IMAGE + $V2_IMAGE into iot-agent's podman on VM" +log "phase 5c: sideload $V1_IMAGE + $V2_IMAGE into fleet-agent's podman on VM" # scp the tarball (ssh as the admin user, the only one with sshd -# access), then `podman load` inside an iot-agent user session. -# Post-load the iot-agent's podman has both tags locally, so +# access), then `podman load` inside a fleet-agent user session. +# Post-load the fleet-agent's podman has both tags locally, so # `ensure_image_present` in harmony's PodmanTopology takes the # "already present, skip pull" branch — no Docker Hub hit.
scp -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null \ - -i "$HOME/.local/share/harmony/iot/ssh/id_ed25519" \ - "$IMAGE_TARBALL" "iot-admin@$VM_IP:/tmp/iot-demo-images.tar" >/dev/null \ + -i "$HOME/.local/share/harmony/fleet/ssh/id_ed25519" \ + "$IMAGE_TARBALL" "fleet-admin@$VM_IP:/tmp/fleet-demo-images.tar" >/dev/null \ || fail "scp image tarball to VM failed" ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null \ - -i "$HOME/.local/share/harmony/iot/ssh/id_ed25519" \ - "iot-admin@$VM_IP" -- \ - "sudo chown iot-agent:iot-agent /tmp/iot-demo-images.tar && \ - sudo su - iot-agent -c 'XDG_RUNTIME_DIR=/run/user/\$(id -u) podman load -i /tmp/iot-demo-images.tar' && \ - sudo su - iot-agent -c 'XDG_RUNTIME_DIR=/run/user/\$(id -u) podman tag $SRC_IMAGE $V1_IMAGE' && \ - sudo su - iot-agent -c 'XDG_RUNTIME_DIR=/run/user/\$(id -u) podman tag $SRC_IMAGE $V2_IMAGE' && \ - sudo rm -f /tmp/iot-demo-images.tar" >/dev/null \ + -i "$HOME/.local/share/harmony/fleet/ssh/id_ed25519" \ + "fleet-admin@$VM_IP" -- \ + "sudo chown fleet-agent:fleet-agent /tmp/fleet-demo-images.tar && \ + sudo su - fleet-agent -c 'XDG_RUNTIME_DIR=/run/user/\$(id -u) podman load -i /tmp/fleet-demo-images.tar' && \ + sudo su - fleet-agent -c 'XDG_RUNTIME_DIR=/run/user/\$(id -u) podman tag $SRC_IMAGE $V1_IMAGE' && \ + sudo su - fleet-agent -c 'XDG_RUNTIME_DIR=/run/user/\$(id -u) podman tag $SRC_IMAGE $V2_IMAGE' && \ + sudo rm -f /tmp/fleet-demo-images.tar" >/dev/null \ || fail "podman load + tag on VM failed" rm -f "$IMAGE_TARBALL" -log "sideload complete — iot-agent's podman has $V1_IMAGE + $V2_IMAGE" +log "sideload complete — fleet-agent's podman has $V1_IMAGE + $V2_IMAGE" # ---- phase 6: sanity -------------------------------------------------------- @@ -385,9 +385,9 @@ if [[ "$AUTO" == "1" ]]; then CONTAINER_ID_V1="" for _ in $(seq 1 "$CONTAINER_WAIT_STEPS"); do id="$(ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null \ - -i 
"$HOME/.local/share/harmony/iot/ssh/id_ed25519" \ - "iot-admin@$VM_IP" -- \ - "sudo su - iot-agent -c 'XDG_RUNTIME_DIR=/run/user/\$(id -u) podman ps -q --filter name=$DEPLOY_NAME'" \ + -i "$HOME/.local/share/harmony/fleet/ssh/id_ed25519" \ + "fleet-admin@$VM_IP" -- \ + "sudo su - fleet-agent -c 'XDG_RUNTIME_DIR=/run/user/\$(id -u) podman ps -q --filter name=$DEPLOY_NAME'" \ 2>/dev/null | head -1)" || true if [[ -n "$id" ]]; then CONTAINER_ID_V1="$id"; break; fi sleep 2 @@ -405,7 +405,7 @@ if [[ "$AUTO" == "1" ]]; then log "waiting for operator to aggregate .status.aggregate.succeeded == 1" for _ in $(seq 1 30); do - got="$(kubectl -n "$DEPLOY_NS" get deployment.iot.nationtech.io "$DEPLOY_NAME" \ + got="$(kubectl -n "$DEPLOY_NS" get deployment.fleet.nationtech.io "$DEPLOY_NAME" \ -o jsonpath='{.status.aggregate.succeeded}' 2>/dev/null || true)" if [[ "$got" == "1" ]]; then log ".status.aggregate.succeeded = 1 — aggregator reflected agent state" @@ -413,7 +413,7 @@ if [[ "$AUTO" == "1" ]]; then fi sleep 2 done - got="$(kubectl -n "$DEPLOY_NS" get deployment.iot.nationtech.io "$DEPLOY_NAME" \ + got="$(kubectl -n "$DEPLOY_NS" get deployment.fleet.nationtech.io "$DEPLOY_NAME" \ -o jsonpath='{.status.aggregate.succeeded}' 2>/dev/null || true)" [[ "$got" == "1" ]] || fail ".status.aggregate.succeeded never reached 1 (got '$got')" @@ -431,9 +431,9 @@ if [[ "$AUTO" == "1" ]]; then CONTAINER_ID_V2="" for _ in $(seq 1 "$CONTAINER_WAIT_STEPS"); do id="$(ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null \ - -i "$HOME/.local/share/harmony/iot/ssh/id_ed25519" \ - "iot-admin@$VM_IP" -- \ - "sudo su - iot-agent -c 'XDG_RUNTIME_DIR=/run/user/\$(id -u) podman ps -q --filter name=$DEPLOY_NAME'" \ + -i "$HOME/.local/share/harmony/fleet/ssh/id_ed25519" \ + "fleet-admin@$VM_IP" -- \ + "sudo su - fleet-agent -c 'XDG_RUNTIME_DIR=/run/user/\$(id -u) podman ps -q --filter name=$DEPLOY_NAME'" \ 2>/dev/null | head -1)" || true if [[ -n "$id" && "$id" != "$CONTAINER_ID_V1" ]]; 
then CONTAINER_ID_V2="$id"; break @@ -454,8 +454,8 @@ if [[ "$AUTO" == "1" ]]; then ) for _ in $(seq 1 60); do if ! ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null \ - -i "$HOME/.local/share/harmony/iot/ssh/id_ed25519" \ - "iot-admin@$VM_IP" -- podman ps -q --filter "name=$DEPLOY_NAME" 2>/dev/null \ + -i "$HOME/.local/share/harmony/fleet/ssh/id_ed25519" \ + "fleet-admin@$VM_IP" -- podman ps -q --filter "name=$DEPLOY_NAME" 2>/dev/null \ | grep -q .; then log "container removed from VM" break @@ -469,7 +469,7 @@ fi # ---- hand-off mode ---------------------------------------------------------- -SSH_KEY="$HOME/.local/share/harmony/iot/ssh/id_ed25519" +SSH_KEY="$HOME/.local/share/harmony/fleet/ssh/id_ed25519" cat < f.write_str("K8sIngress"), InterpretName::PodmanV0 => f.write_str("PodmanV0"), InterpretName::KvmVm => f.write_str("KvmVm"), - InterpretName::IotDeviceSetup => f.write_str("IotDeviceSetup"), + InterpretName::FleetDeviceSetup => f.write_str("FleetDeviceSetup"), } } } diff --git a/harmony/src/domain/topology/host_configuration.rs b/harmony/src/domain/topology/host_configuration.rs index 0a8c6710..efbeb447 100644 --- a/harmony/src/domain/topology/host_configuration.rs +++ b/harmony/src/domain/topology/host_configuration.rs @@ -89,7 +89,7 @@ pub trait SystemdManager: Send + Sync { ) -> Result; /// Enable+start a user-scoped unit (e.g. `podman.socket` under - /// `iot-agent`). Assumes [`UnixUserManager::ensure_linger`] has + /// `fleet-agent`). Assumes [`UnixUserManager::ensure_linger`] has /// already been called for the user. 
async fn ensure_user_unit_active( &self, diff --git a/harmony/src/modules/application/helm/mod.rs b/harmony/src/modules/application/helm/mod.rs index 15e3956b..6d2a9e07 100644 --- a/harmony/src/modules/application/helm/mod.rs +++ b/harmony/src/modules/application/helm/mod.rs @@ -498,22 +498,22 @@ mod tests { fn typed_variants_have_unique_filenames() { let ns = Namespace { metadata: ObjectMeta { - name: Some("iot-system".to_string()), + name: Some("fleet-system".to_string()), ..Default::default() }, ..Default::default() }; let sa = ServiceAccount { metadata: ObjectMeta { - name: Some("iot-operator".to_string()), - namespace: Some("iot-system".to_string()), + name: Some("harmony-fleet-operator".to_string()), + namespace: Some("fleet-system".to_string()), ..Default::default() }, ..Default::default() }; let cr = ClusterRole { metadata: ObjectMeta { - name: Some("iot-operator".to_string()), + name: Some("harmony-fleet-operator".to_string()), ..Default::default() }, rules: None, @@ -521,13 +521,13 @@ mod tests { }; let crb = ClusterRoleBinding { metadata: ObjectMeta { - name: Some("iot-operator".to_string()), + name: Some("harmony-fleet-operator".to_string()), ..Default::default() }, role_ref: k8s_openapi::api::rbac::v1::RoleRef { api_group: "rbac.authorization.k8s.io".to_string(), kind: "ClusterRole".to_string(), - name: "iot-operator".to_string(), + name: "harmony-fleet-operator".to_string(), }, subjects: None, }; @@ -560,14 +560,14 @@ mod tests { fn crd_filename_carries_crd_name() { let crd = CustomResourceDefinition { metadata: ObjectMeta { - name: Some("deployments.iot.nationtech.io".to_string()), + name: Some("deployments.fleet.nationtech.io".to_string()), ..Default::default() }, ..Default::default() }; assert_eq!( HelmResourceKind::Crd(crd).filename(), - "crd-deployments.iot.nationtech.io.yaml" + "crd-deployments.fleet.nationtech.io.yaml" ); } } diff --git a/harmony/src/modules/iot/assets.rs b/harmony/src/modules/fleet/assets.rs similarity index 93% rename from 
harmony/src/modules/iot/assets.rs rename to harmony/src/modules/fleet/assets.rs index dcbe1bf9..49900a10 100644 --- a/harmony/src/modules/iot/assets.rs +++ b/harmony/src/modules/fleet/assets.rs @@ -1,7 +1,7 @@ //! Bootstrapped assets shared across IoT workflows. //! //! Everything here follows the `ensure_*` pattern — idempotent, caches -//! results under [`HARMONY_DATA_DIR`]`/iot/…`, and runs at most once per +//! results under [`HARMONY_DATA_DIR`]`/fleet/…`, and runs at most once per //! process (enforced by a `tokio::sync::OnceCell`). The goal is that an //! operator can run the IoT smoke test against a freshly-installed host //! with nothing but `libvirt + qemu + xorriso + python3 + cargo + @@ -127,7 +127,7 @@ async fn ensure_cloud_image( return Err(exec(format!( "downloaded image sha256 mismatch: expected {expected_sha256}, got {actual}. \ Ubuntu may have rotated the 'current release' pointer — bump the pin in \ - modules::iot::assets.rs." + modules::fleet::assets.rs." ))); } // World-readable so libvirt-qemu can open it without a chmod ritual. @@ -195,7 +195,7 @@ async fn sha256_of_file(path: &Path) -> Result { } fn cloud_images_dir() -> PathBuf { - HARMONY_DATA_DIR.join("iot").join("cloud-images") + HARMONY_DATA_DIR.join("fleet").join("cloud-images") } // --------------------------------------------------------------------- @@ -206,20 +206,20 @@ fn cloud_images_dir() -> PathBuf { /// same key identifies every VM we provision for smoke/integration /// testing — cheap to reuse, easy to discard (just `rm -rf` the dir). #[derive(Debug, Clone)] -pub struct IotSshKeypair { +pub struct FleetSshKeypair { pub private_key: PathBuf, pub public_key: PathBuf, } -/// Ensure `$HARMONY_DATA_DIR/iot/ssh/id_ed25519[.pub]` exists. Runs +/// Ensure `$HARMONY_DATA_DIR/fleet/ssh/id_ed25519[.pub]` exists. Runs /// `ssh-keygen` once; subsequent calls return the existing paths. 
-pub async fn ensure_iot_ssh_keypair() -> Result { - static CELL: OnceCell = OnceCell::const_new(); +pub async fn ensure_fleet_ssh_keypair() -> Result { + static CELL: OnceCell = OnceCell::const_new(); CELL.get_or_try_init(provision_ssh_keypair).await.cloned() } -async fn provision_ssh_keypair() -> Result { - let dir = HARMONY_DATA_DIR.join("iot").join("ssh"); +async fn provision_ssh_keypair() -> Result { + let dir = HARMONY_DATA_DIR.join("fleet").join("ssh"); tokio::fs::create_dir_all(&dir) .await .map_err(|e| exec(format!("create ssh dir {dir:?}: {e}")))?; @@ -231,7 +231,7 @@ async fn provision_ssh_keypair() -> Result { let pub_path = dir.join("id_ed25519.pub"); if priv_path.exists() && pub_path.exists() { info!("ssh keypair cache hit at {priv_path:?}"); - return Ok(IotSshKeypair { + return Ok(FleetSshKeypair { private_key: priv_path, public_key: pub_path, }); @@ -248,7 +248,7 @@ async fn provision_ssh_keypair() -> Result { "-N", "", // no passphrase "-C", - "harmony-iot-smoke", + "harmony-fleet-smoke", "-f", ]) .arg(&priv_path) // PathBuf — kept separate so we don't force &str conversion @@ -263,7 +263,7 @@ async fn provision_ssh_keypair() -> Result { String::from_utf8_lossy(&status.stderr).trim() ))); } - Ok(IotSshKeypair { + Ok(FleetSshKeypair { private_key: priv_path, public_key: pub_path, }) @@ -271,7 +271,7 @@ async fn provision_ssh_keypair() -> Result { /// Read the generated public key (one line, openssh format) into a string /// suitable for cloud-init's `authorized_keys`. 
-pub async fn read_public_key(kp: &IotSshKeypair) -> Result { +pub async fn read_public_key(kp: &FleetSshKeypair) -> Result { let content = tokio::fs::read_to_string(&kp.public_key) .await .map_err(|e| exec(format!("read {:?}: {e}", kp.public_key)))?; diff --git a/harmony/src/modules/iot/libvirt_pool.rs b/harmony/src/modules/fleet/libvirt_pool.rs similarity index 86% rename from harmony/src/modules/iot/libvirt_pool.rs rename to harmony/src/modules/fleet/libvirt_pool.rs index e893d6b0..9df29bd5 100644 --- a/harmony/src/modules/iot/libvirt_pool.rs +++ b/harmony/src/modules/fleet/libvirt_pool.rs @@ -4,14 +4,14 @@ //! writable place to drop per-VM overlay disks + cloud-init seed ISOs. //! Rather than ask the operator to set that up, we create a user- //! owned dir-backed libvirt pool at -//! `$HARMONY_DATA_DIR/iot/kvm/pool/` and let libvirt handle: +//! `$HARMONY_DATA_DIR/fleet/kvm/pool/` and let libvirt handle: //! //! - **Perms**: dir contents get chowned to libvirt-qemu on VM start //! via dynamic-ownership (default-on), and back to us on VM stop //! (via remember_owner, also default-on). No `chmod 644` gymnastics. -//! - **Visibility**: `virsh vol-list harmony-iot` shows every +//! - **Visibility**: `virsh vol-list harmony-fleet` shows every //! artifact we've created. -//! - **Cleanup**: `virsh vol-delete harmony-iot` removes +//! - **Cleanup**: `virsh vol-delete harmony-fleet` removes //! managed volumes alongside `virsh undefine --remove-all-storage`. //! //! We *don't* rewrite the VM XML to use `` @@ -30,11 +30,11 @@ use virt::storage_pool::StoragePool; use crate::domain::config::HARMONY_DATA_DIR; use crate::executors::ExecutorError; -pub const HARMONY_IOT_POOL_NAME: &str = "harmony-iot"; +pub const HARMONY_FLEET_POOL_NAME: &str = "harmony-fleet"; /// Filesystem path + libvirt name of the managed pool. 
#[derive(Debug, Clone)] -pub struct HarmonyIotPool { +pub struct HarmonyFleetPool { pub name: String, pub path: PathBuf, } @@ -46,13 +46,13 @@ pub struct HarmonyIotPool { /// **Requires libvirt-group membership**. When the user isn't in the /// group, libvirt rejects the `qemu:///system` connection — the /// preflight check catches that upstream. -pub async fn ensure_harmony_iot_pool() -> Result { - static CELL: OnceCell = OnceCell::const_new(); +pub async fn ensure_harmony_fleet_pool() -> Result { + static CELL: OnceCell = OnceCell::const_new(); CELL.get_or_try_init(provision_pool).await.cloned() } -async fn provision_pool() -> Result { - let pool_dir = HARMONY_DATA_DIR.join("iot").join("kvm").join("pool"); +async fn provision_pool() -> Result { + let pool_dir = HARMONY_DATA_DIR.join("fleet").join("kvm").join("pool"); tokio::fs::create_dir_all(&pool_dir) .await .map_err(|e| exec(format!("create pool dir {pool_dir:?}: {e}")))?; @@ -66,7 +66,7 @@ async fn provision_pool() -> Result { .map_err(|e| exec(format!("chmod pool dir: {e}")))?; let pool_path = pool_dir.clone(); - let pool_name = HARMONY_IOT_POOL_NAME.to_string(); + let pool_name = HARMONY_FLEET_POOL_NAME.to_string(); // virt-rs is blocking C bindings — bounce into spawn_blocking. let pool_name_blocking = pool_name.clone(); @@ -106,7 +106,7 @@ async fn provision_pool() -> Result { .await .map_err(|e| exec(format!("spawn_blocking pool setup: {e}")))??; - Ok(HarmonyIotPool { + Ok(HarmonyFleetPool { name: pool_name, path: pool_path, }) diff --git a/harmony/src/modules/fleet/mod.rs b/harmony/src/modules/fleet/mod.rs new file mode 100644 index 00000000..2e42849d --- /dev/null +++ b/harmony/src/modules/fleet/mod.rs @@ -0,0 +1,40 @@ +//! Harmony-side Scores for fleet device onboarding. +//! +//! Today this module exposes [`FleetDeviceSetupScore`] — a customer +//! runs it against a freshly-booted device (Pi, VM, bare-metal node +//! later) to install podman, place the `fleet-agent` binary, drop +//! 
the TOML config, and bring up the agent under systemd. Re-running +//! with a changed config (different labels, new NATS URL, new +//! credentials) is how a device is moved between fleet partitions. +//! +//! The operator + agent crates live outside `harmony/` under +//! `fleet/harmony-fleet-operator/` and `fleet/harmony-fleet-agent/`. +//! What belongs here is the harmony-framework side: the Scores a +//! customer runs through `harmony_cli::run` to provision devices +//! before they ever talk to NATS. +//! +//! "Fleet" is deliberately domain-agnostic — IoT was the first +//! customer's use case but the reconciler pattern (operator → NATS +//! KV → agent → target) applies equally to Pi podman, OKD apply, +//! KVM VMs, etc. + +pub mod assets; +#[cfg(feature = "kvm")] +pub mod libvirt_pool; +pub mod preflight; +mod setup_score; +#[cfg(feature = "kvm")] +mod vm_score; + +pub use assets::{ + FleetSshKeypair, UBUNTU_2404_CLOUDIMG_ARM64_FILENAME, UBUNTU_2404_CLOUDIMG_ARM64_SHA256, + UBUNTU_2404_CLOUDIMG_ARM64_URL, UBUNTU_2404_CLOUDIMG_FILENAME, UBUNTU_2404_CLOUDIMG_SHA256, + UBUNTU_2404_CLOUDIMG_URL, ensure_fleet_ssh_keypair, ensure_ubuntu_2404_cloud_image, + ensure_ubuntu_2404_cloud_image_for_arch, read_public_key, +}; +#[cfg(feature = "kvm")] +pub use libvirt_pool::{HARMONY_FLEET_POOL_NAME, HarmonyFleetPool, ensure_harmony_fleet_pool}; +pub use preflight::{check_fleet_smoke_preflight, check_fleet_smoke_preflight_for_arch}; +pub use setup_score::{FleetDeviceSetupConfig, FleetDeviceSetupScore}; +#[cfg(feature = "kvm")] +pub use vm_score::ProvisionVmScore; diff --git a/harmony/src/modules/iot/preflight.rs b/harmony/src/modules/fleet/preflight.rs similarity index 95% rename from harmony/src/modules/iot/preflight.rs rename to harmony/src/modules/fleet/preflight.rs index f15b4750..93b08f81 100644 --- a/harmony/src/modules/iot/preflight.rs +++ b/harmony/src/modules/fleet/preflight.rs @@ -19,18 +19,20 @@ use crate::executors::ExecutorError; use 
crate::modules::kvm::firmware::discover_aarch64_firmware; /// Run every preflight check for an x86_64 smoke run — equivalent -/// to [`check_iot_smoke_preflight_for_arch`] with +/// to [`check_fleet_smoke_preflight_for_arch`] with /// [`VmArchitecture::X86_64`]. Kept as a distinct function so /// existing callers don't need to thread an arch through yet. -pub async fn check_iot_smoke_preflight() -> Result<(), ExecutorError> { - check_iot_smoke_preflight_for_arch(VmArchitecture::X86_64).await +pub async fn check_fleet_smoke_preflight() -> Result<(), ExecutorError> { + check_fleet_smoke_preflight_for_arch(VmArchitecture::X86_64).await } /// Arch-aware preflight. On top of the host-generic checks /// (virsh, qemu-img, xorriso, python3, ssh-keygen, libvirt group, /// default network), an aarch64 target requires /// `qemu-system-aarch64` and a usable AAVMF firmware pair. -pub async fn check_iot_smoke_preflight_for_arch(arch: VmArchitecture) -> Result<(), ExecutorError> { +pub async fn check_fleet_smoke_preflight_for_arch( + arch: VmArchitecture, +) -> Result<(), ExecutorError> { check_tool_on_path("virsh", "libvirt client").await?; check_tool_on_path("qemu-img", "qemu-utils").await?; check_tool_on_path("xorriso", "ISO image builder").await?; diff --git a/harmony/src/modules/iot/setup_score.rs b/harmony/src/modules/fleet/setup_score.rs similarity index 82% rename from harmony/src/modules/iot/setup_score.rs rename to harmony/src/modules/fleet/setup_score.rs index 6b959625..35ee960a 100644 --- a/harmony/src/modules/iot/setup_score.rs +++ b/harmony/src/modules/fleet/setup_score.rs @@ -1,4 +1,4 @@ -//! [`IotDeviceSetupScore`] — install podman + the iot-agent, wire the +//! [`FleetDeviceSetupScore`] — install podman + the fleet-agent, wire the //! agent's TOML config, enable the systemd unit. Idempotent: re-running //! with a changed config (different labels, new NATS url, etc.) updates //! only what differs and restarts the agent once. 
@@ -35,7 +35,7 @@ use crate::score::Score; /// regenerated, byte-compare idempotency fires, the agent restarts, /// new labels propagate. #[derive(Debug, Clone, Serialize, Deserialize)] -pub struct IotDeviceSetupConfig { +pub struct FleetDeviceSetupConfig { /// Stable device identifier. Written into the agent's TOML and /// used as the KV key prefix (`.`). Harmony /// `Id` values are sortable-by-creation-time and collision-safe @@ -52,15 +52,15 @@ pub struct IotDeviceSetupConfig { /// Shared v0 credentials (Zitadel-issued per-device tokens in v0.2). pub nats_user: String, pub nats_pass: String, - /// Local filesystem path to the cross-compiled `iot-agent-v0` + /// Local filesystem path to the cross-compiled `fleet-agent-v0` /// binary. The Score uploads it to the device and installs to - /// `/usr/local/bin/iot-agent`. Future v0.1: this becomes a + /// `/usr/local/bin/fleet-agent`. Future v0.1: this becomes a /// `DownloadableAsset` pointing at CI-published artifacts. pub agent_binary_path: PathBuf, } -impl IotDeviceSetupConfig { - /// Render the agent's `/etc/iot-agent/config.toml` content. +impl FleetDeviceSetupConfig { + /// Render the agent's `/etc/fleet-agent/config.toml` content. pub fn render_toml(&self) -> String { // Raw-string template with format! 
— the TOML escape rules for // double-quoted strings are just `\` and `"`, handled by @@ -110,10 +110,10 @@ Wants=network-online.target [Service] Type=simple -User=iot-agent -Environment=IOT_AGENT_CONFIG=/etc/iot-agent/config.toml +User=fleet-agent +Environment=FLEET_AGENT_CONFIG=/etc/fleet-agent/config.toml Environment=RUST_LOG=info -ExecStart=/usr/local/bin/iot-agent +ExecStart=/usr/local/bin/fleet-agent Restart=on-failure RestartSec=5 StandardOutput=journal @@ -130,23 +130,23 @@ fn toml_escape(s: &str) -> String { } #[derive(Debug, Clone, Serialize, Deserialize)] -pub struct IotDeviceSetupScore { - pub config: IotDeviceSetupConfig, +pub struct FleetDeviceSetupScore { + pub config: FleetDeviceSetupConfig, } -impl IotDeviceSetupScore { - pub fn new(config: IotDeviceSetupConfig) -> Self { +impl FleetDeviceSetupScore { + pub fn new(config: FleetDeviceSetupConfig) -> Self { Self { config } } } -impl Score for IotDeviceSetupScore { +impl Score for FleetDeviceSetupScore { fn name(&self) -> String { - format!("IotDeviceSetupScore({})", self.config.device_id) + format!("FleetDeviceSetupScore({})", self.config.device_id) } fn create_interpret(&self) -> Box> { - Box::new(IotDeviceSetupInterpret { + Box::new(FleetDeviceSetupInterpret { config: self.config.clone(), version: Version::from("0.1.0").expect("static version"), status: InterpretStatus::QUEUED, @@ -155,16 +155,16 @@ impl Score for IotDeviceSetupScore { } #[derive(Debug)] -struct IotDeviceSetupInterpret { - config: IotDeviceSetupConfig, +struct FleetDeviceSetupInterpret { + config: FleetDeviceSetupConfig, version: Version, status: InterpretStatus, } #[async_trait] -impl Interpret for IotDeviceSetupInterpret { +impl Interpret for FleetDeviceSetupInterpret { fn get_name(&self) -> InterpretName { - InterpretName::IotDeviceSetup + InterpretName::FleetDeviceSetup } fn get_version(&self) -> Version { self.version.clone() @@ -194,7 +194,7 @@ impl Interpret for IotDeviceSetupInterp log_change(&mut change_log, 
format!("package:{pkg}"), r); } - // 2. iot-agent user. Not `--system`: Ubuntu's useradd skips + // 2. fleet-agent user. Not `--system`: Ubuntu's useradd skips // subuid/subgid auto-allocation for system users on the // assumption that service accounts don't run user namespaces. // Rootless podman needs those ranges in /etc/subuid + @@ -206,7 +206,7 @@ impl Interpret for IotDeviceSetupInterp // Lingered so the user-systemd instance survives logout — // required for the user podman.socket we enable below. let user_spec = UserSpec { - name: "iot-agent".to_string(), + name: "fleet-agent".to_string(), group: None, supplementary_groups: vec![], shell: Some("/bin/bash".to_string()), @@ -216,16 +216,16 @@ impl Interpret for IotDeviceSetupInterp let r = UnixUserManager::ensure_user(topology, &user_spec) .await .map_err(wrap)?; - log_change(&mut change_log, "user:iot-agent", r); + log_change(&mut change_log, "user:fleet-agent", r); - let r = UnixUserManager::ensure_linger(topology, "iot-agent") + let r = UnixUserManager::ensure_linger(topology, "fleet-agent") .await .map_err(wrap)?; - log_change(&mut change_log, "linger:iot-agent", r); + log_change(&mut change_log, "linger:fleet-agent", r); // 3. User-scoped podman socket. Required by `PodmanTopology` on // the agent so it reaches /run/user//podman/podman.sock. 
- let r = SystemdManager::ensure_user_unit_active(topology, "iot-agent", "podman.socket") + let r = SystemdManager::ensure_user_unit_active(topology, "fleet-agent", "podman.socket") .await .map_err(wrap)?; log_change(&mut change_log, "user-unit:podman.socket", r); @@ -238,7 +238,7 @@ impl Interpret for IotDeviceSetupInterp let binary_r = FileDelivery::ensure_file( topology, &FileSpec { - path: "/usr/local/bin/iot-agent".to_string(), + path: "/usr/local/bin/fleet-agent".to_string(), source: FileSource::LocalPath(cfg.agent_binary_path.clone()), owner: Some("root".to_string()), group: Some("root".to_string()), @@ -247,25 +247,25 @@ impl Interpret for IotDeviceSetupInterp ) .await .map_err(wrap)?; - log_change(&mut change_log, "file:/usr/local/bin/iot-agent", binary_r); + log_change(&mut change_log, "file:/usr/local/bin/fleet-agent", binary_r); - // 5. /etc/iot-agent/ + config.toml + // 5. /etc/fleet-agent/ + config.toml let config_toml = cfg.render_toml(); let toml_spec = FileSpec { - path: "/etc/iot-agent/config.toml".to_string(), + path: "/etc/fleet-agent/config.toml".to_string(), source: FileSource::Content(config_toml), - owner: Some("iot-agent".to_string()), - group: Some("iot-agent".to_string()), + owner: Some("fleet-agent".to_string()), + group: Some("fleet-agent".to_string()), mode: Some(0o600), }; let toml_r = FileDelivery::ensure_file(topology, &toml_spec) .await .map_err(wrap)?; - log_change(&mut change_log, "file:/etc/iot-agent/config.toml", toml_r); + log_change(&mut change_log, "file:/etc/fleet-agent/config.toml", toml_r); // 6. systemd unit for the agent itself. 
let unit = SystemdUnitSpec { - name: "iot-agent".to_string(), + name: "fleet-agent".to_string(), unit_content: cfg.render_systemd_unit().to_string(), scope: SystemdScope::System, start_immediately: true, @@ -273,18 +273,18 @@ impl Interpret for IotDeviceSetupInterp let unit_r = SystemdManager::ensure_systemd_unit(topology, &unit) .await .map_err(wrap)?; - log_change(&mut change_log, "unit:iot-agent", unit_r); + log_change(&mut change_log, "unit:fleet-agent", unit_r); // 7. Restart the agent iff anything that affects it changed. let needs_restart = toml_r.changed || unit_r.changed || binary_r.changed; if needs_restart { - SystemdManager::restart_service(topology, "iot-agent", SystemdScope::System) + SystemdManager::restart_service(topology, "fleet-agent", SystemdScope::System) .await .map_err(wrap)?; - change_log.push("restart:iot-agent".to_string()); - info!("iot-agent restarted to pick up config/unit change"); + change_log.push("restart:fleet-agent".to_string()); + info!("fleet-agent restarted to pick up config/unit change"); } else { - debug!("iot-agent config + unit unchanged; no restart"); + debug!("fleet-agent config + unit unchanged; no restart"); } let outcome = if change_log.is_empty() { @@ -317,8 +317,8 @@ fn log_change(change_log: &mut Vec, what: impl Into, r: ChangeRe mod tests { use super::*; - fn base_config(labels: BTreeMap) -> IotDeviceSetupConfig { - IotDeviceSetupConfig { + fn base_config(labels: BTreeMap) -> FleetDeviceSetupConfig { + FleetDeviceSetupConfig { device_id: Id::from("pi-42".to_string()), labels, nats_urls: vec!["nats://nats:4222".to_string()], diff --git a/harmony/src/modules/iot/vm_score.rs b/harmony/src/modules/fleet/vm_score.rs similarity index 100% rename from harmony/src/modules/iot/vm_score.rs rename to harmony/src/modules/fleet/vm_score.rs diff --git a/harmony/src/modules/iot/mod.rs b/harmony/src/modules/iot/mod.rs deleted file mode 100644 index 23ec2987..00000000 --- a/harmony/src/modules/iot/mod.rs +++ /dev/null @@ -1,33 +0,0 
@@ -//! IoT fleet primitives exposed to customers. -//! -//! Right now that's the single [`IotDeviceSetupScore`] — a customer runs -//! it against a freshly-booted device (Pi or VM) to install podman, -//! place the iot-agent binary, drop the TOML config, and bring up the -//! agent under systemd. Re-running with a different config (e.g. -//! different `group`) is what moves a device between fleet partitions. -//! -//! The operator + agent crates live outside of `harmony/` in `iot/`. -//! This module is where *Harmony Scores* that target IoT fleets live — -//! they run inside the Harmony framework proper, driven by the same -//! `harmony_cli::run` story every other Score uses. - -pub mod assets; -#[cfg(feature = "kvm")] -pub mod libvirt_pool; -pub mod preflight; -mod setup_score; -#[cfg(feature = "kvm")] -mod vm_score; - -pub use assets::{ - IotSshKeypair, UBUNTU_2404_CLOUDIMG_ARM64_FILENAME, UBUNTU_2404_CLOUDIMG_ARM64_SHA256, - UBUNTU_2404_CLOUDIMG_ARM64_URL, UBUNTU_2404_CLOUDIMG_FILENAME, UBUNTU_2404_CLOUDIMG_SHA256, - UBUNTU_2404_CLOUDIMG_URL, ensure_iot_ssh_keypair, ensure_ubuntu_2404_cloud_image, - ensure_ubuntu_2404_cloud_image_for_arch, read_public_key, -}; -#[cfg(feature = "kvm")] -pub use libvirt_pool::{HARMONY_IOT_POOL_NAME, HarmonyIotPool, ensure_harmony_iot_pool}; -pub use preflight::{check_iot_smoke_preflight, check_iot_smoke_preflight_for_arch}; -pub use setup_score::{IotDeviceSetupConfig, IotDeviceSetupScore}; -#[cfg(feature = "kvm")] -pub use vm_score::ProvisionVmScore; diff --git a/harmony/src/modules/k8s/bare_topology.rs b/harmony/src/modules/k8s/bare_topology.rs index dfeac545..e6e9c58d 100644 --- a/harmony/src/modules/k8s/bare_topology.rs +++ b/harmony/src/modules/k8s/bare_topology.rs @@ -17,7 +17,7 @@ //! nothing more. //! //! History: this type is the promotion of a three-dozen-line -//! `InstallTopology` that lived inside `iot-operator-v0`'s +//! `InstallTopology` that lived inside `harmony-fleet-operator`'s //! `install.rs`. 
When the NATS single-node install work added a //! second consumer wanting the same shape, the extraction became //! obvious (see ROADMAP/12-code-review-april-2026.md §12.6). diff --git a/harmony/src/modules/kvm/cloudinit.rs b/harmony/src/modules/kvm/cloudinit.rs index 0e7d6dd5..496514ba 100644 --- a/harmony/src/modules/kvm/cloudinit.rs +++ b/harmony/src/modules/kvm/cloudinit.rs @@ -225,7 +225,7 @@ mod tests { let cfg = CloudInitSeedConfig { hostname: "pi-01", authorized_key: "ssh-ed25519 AAAA test", - user: "iot-admin", + user: "fleet-admin", admin_password: None, extra_runcmd: vec![], }; @@ -243,7 +243,7 @@ mod tests { let cfg = CloudInitSeedConfig { hostname: "pi-01", authorized_key: "ssh-ed25519 AAAA test", - user: "iot-admin", + user: "fleet-admin", admin_password: Some("break-things-123"), extra_runcmd: vec![], }; @@ -261,7 +261,7 @@ mod tests { let cfg = CloudInitSeedConfig { hostname: "pi-01", authorized_key: "ssh-ed25519 AAAA", - user: "iot-admin", + user: "fleet-admin", admin_password: Some("he said \"hi\""), extra_runcmd: vec![], }; diff --git a/harmony/src/modules/kvm/topology.rs b/harmony/src/modules/kvm/topology.rs index 1d7e44ce..4d780d58 100644 --- a/harmony/src/modules/kvm/topology.rs +++ b/harmony/src/modules/kvm/topology.rs @@ -35,7 +35,7 @@ pub const DEFAULT_ADMIN_USER: &str = "harmony-admin"; /// /// Composes with a caller-chosen storage pool directory where per-VM /// overlays + seed ISOs are placed. Harmony's IoT workflows use -/// [`crate::modules::iot::ensure_harmony_iot_pool`] to populate that +/// [`crate::modules::fleet::ensure_harmony_fleet_pool`] to populate that /// dir; other callers can point at any user-owned libvirt pool root. 
pub struct KvmVirtualMachineHost { name: String, diff --git a/harmony/src/modules/linux/ansible_configurator.rs b/harmony/src/modules/linux/ansible_configurator.rs index 3ee9087c..af78bc03 100644 --- a/harmony/src/modules/linux/ansible_configurator.rs +++ b/harmony/src/modules/linux/ansible_configurator.rs @@ -57,7 +57,7 @@ impl AnsibleHostConfigurator { // encapsulation we want. Callers say "install podman"; we // pick apt/dnf/pacman/apk. Debian-family is the only dispatch // currently wired because it's our first concrete target (IoT - // runs on Raspbian/Ubuntu per ROADMAP/iot_platform/ + // runs on Raspbian/Ubuntu per ROADMAP/fleet_platform/ // v0_walking_skeleton.md §5.3). Extending to RHEL/Fedora/ // Alpine is a matter of detecting the family here and picking // `ansible.builtin.dnf` / `community.general.pacman` / @@ -112,7 +112,7 @@ impl AnsibleHostConfigurator { spec: &FileSpec, ) -> Result { // Ansible's `copy` module doesn't auto-create parent dirs, so - // writes into fresh paths like `/etc/iot-agent/config.toml` + // writes into fresh paths like `/etc/fleet-agent/config.toml` // fail with "Destination directory … does not exist". Create // the parent first via the `file` module; state=directory is // idempotent so this is a cheap noop on re-run. 
diff --git a/harmony/src/modules/mod.rs b/harmony/src/modules/mod.rs index db62415e..86e1e338 100644 --- a/harmony/src/modules/mod.rs +++ b/harmony/src/modules/mod.rs @@ -5,10 +5,10 @@ pub mod cert_manager; pub mod dhcp; pub mod dns; pub mod dummy; +pub mod fleet; pub mod helm; pub mod http; pub mod inventory; -pub mod iot; pub mod k3d; pub mod k8s; #[cfg(feature = "kvm")] diff --git a/harmony/src/modules/nats/helm_chart.rs b/harmony/src/modules/nats/helm_chart.rs index 7ec37f7b..5a1f17b5 100644 --- a/harmony/src/modules/nats/helm_chart.rs +++ b/harmony/src/modules/nats/helm_chart.rs @@ -160,7 +160,11 @@ mod tests { #[test] fn into_helm_chart_score_pins_chart_and_repo() { - let s = NatsHelmChartScore::new("iot-nats", "iot-system", "replicaCount: 1\n".to_string()); + let s = NatsHelmChartScore::new( + "fleet-nats", + "fleet-system", + "replicaCount: 1\n".to_string(), + ); let hc = s.into_helm_chart_score(); assert_eq!(hc.chart_name.to_string(), CHART_NAME); let repo = hc.repository.expect("repo must be pinned"); diff --git a/harmony/src/modules/nats/score_nats_basic.rs b/harmony/src/modules/nats/score_nats_basic.rs index 7b06ee32..368d02a5 100644 --- a/harmony/src/modules/nats/score_nats_basic.rs +++ b/harmony/src/modules/nats/score_nats_basic.rs @@ -19,7 +19,7 @@ //! use harmony::inventory::Inventory; //! //! let topology = K8sBareTopology::from_kubeconfig("nats-install").await?; -//! let score = NatsBasicScore::new("iot-nats", "iot-system").load_balancer(); +//! let score = NatsBasicScore::new("fleet-nats", "fleet-system").load_balancer(); //! score.create_interpret().execute(&Inventory::empty(), &topology).await?; //! 
``` @@ -229,8 +229,8 @@ mod tests { #[test] fn render_values_includes_fullname_and_replica() { - let y = NatsBasicScore::new("iot-nats", "iot-system").render_values(); - assert!(y.contains("fullnameOverride: iot-nats")); + let y = NatsBasicScore::new("fleet-nats", "fleet-system").render_values(); + assert!(y.contains("fullnameOverride: fleet-nats")); assert!(y.contains("replicaCount: 1")); // cluster.enabled stays false for a single-node shape. assert!(y.contains("cluster:\n enabled: false")); diff --git a/harmony/src/modules/podman/mod.rs b/harmony/src/modules/podman/mod.rs index b25ab85c..7d786ff8 100644 --- a/harmony/src/modules/podman/mod.rs +++ b/harmony/src/modules/podman/mod.rs @@ -3,5 +3,5 @@ mod score; mod topology; pub use interpret::PodmanV0Interpret; -pub use score::{IotScore, PodmanService, PodmanV0Score}; +pub use score::{PodmanService, PodmanV0Score, ReconcileScore}; pub use topology::PodmanTopology; diff --git a/harmony/src/modules/podman/score.rs b/harmony/src/modules/podman/score.rs index e795cf0c..c1ea95a1 100644 --- a/harmony/src/modules/podman/score.rs +++ b/harmony/src/modules/podman/score.rs @@ -55,7 +55,7 @@ impl PodmanV0Score { /// log-and-skip the unknown tag. 
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] #[serde(tag = "type", content = "data")] -pub enum IotScore { +pub enum ReconcileScore { PodmanV0(PodmanV0Score), } @@ -69,16 +69,16 @@ impl Score for PodmanV0Score { } } -impl Score for IotScore { +impl Score for ReconcileScore { fn create_interpret(&self) -> Box> { match self { - IotScore::PodmanV0(score) => score.create_interpret(), + ReconcileScore::PodmanV0(score) => score.create_interpret(), } } fn name(&self) -> String { match self { - IotScore::PodmanV0(_) => "PodmanV0Score".to_string(), + ReconcileScore::PodmanV0(_) => "PodmanV0Score".to_string(), } } } @@ -89,7 +89,7 @@ mod tests { #[test] fn podman_v0_score_serializes_with_adjacent_tag() { - let score = IotScore::PodmanV0(PodmanV0Score { + let score = ReconcileScore::PodmanV0(PodmanV0Score { services: vec![PodmanService { name: "web".to_string(), image: "nginx:latest".to_string(), @@ -103,7 +103,7 @@ mod tests { #[test] fn podman_v0_score_roundtrip() { - let score = IotScore::PodmanV0(PodmanV0Score { + let score = ReconcileScore::PodmanV0(PodmanV0Score { services: vec![ PodmanService { name: "web".to_string(), @@ -118,7 +118,7 @@ mod tests { ], }); let serialized = serde_json::to_string(&score).unwrap(); - let deserialized: IotScore = serde_json::from_str(&serialized).unwrap(); + let deserialized: ReconcileScore = serde_json::from_str(&serialized).unwrap(); assert_eq!(score, deserialized); } -- 2.39.5