Compare commits
340 Commits
2950235d23
...
feat/multi
| Author | SHA1 | Date | |
|---|---|---|---|
| ec794f076e | |||
| 665ed24f65 | |||
| 3d088b709f | |||
| da5a869771 | |||
| fedb346548 | |||
| 6ea5630d30 | |||
| b42815f79c | |||
| ed70bfd236 | |||
| 0a324184ad | |||
| ad2ae2e4f8 | |||
|
|
0a5da43c76 | ||
| b6be44202e | |||
| c372e781d8 | |||
| 56c181fc3d | |||
| 55bfe306ad | |||
| 62fa3c2b10 | |||
| ea1380f98a | |||
| 701d8cfab9 | |||
| f9906cb419 | |||
| cb4382fbb5 | |||
| 1eca2cc1a9 | |||
| 269f13ae9b | |||
| ec277bc13d | |||
| a9f8cd16ea | |||
| c542a935e3 | |||
| 0395d11e98 | |||
| 05e7b8075c | |||
| b857412151 | |||
| 7bb3602ab8 | |||
| 78b80c2169 | |||
| 0876f4e4f0 | |||
| 6ac0e095a3 | |||
| ff2efc0a66 | |||
|
|
f180cc4c80 | ||
| 3ca31179d0 | |||
| a9fe4ab267 | |||
| 65cc9befeb | |||
| d456a1f9ee | |||
| 5895f867cf | |||
| 8cc7adf196 | |||
| a1ab5d40fb | |||
| 6c92dd24f7 | |||
| c805d7e018 | |||
| b33615b969 | |||
| 0f59f29ac4 | |||
| 361f240762 | |||
| 57c3b01e66 | |||
| 94ddf027dd | |||
| 06a2be4496 | |||
| e2a09efdee | |||
| d36c574590 | |||
| 2618441de3 | |||
| da6610c625 | |||
| e956772593 | |||
| 27c51e0ec5 | |||
| bfca9cf163 | |||
| 597dcbc848 | |||
| cd3ea6fc10 | |||
| a53e8552e9 | |||
| 89eb88d10e | |||
| 72fb05b5cc | |||
| 6685b05cc5 | |||
| 07116eb8a6 | |||
| 3f34f868eb | |||
| bc6f7336d2 | |||
| 01da8631da | |||
| 67b5c2df07 | |||
| 1eaf63417b | |||
| 5e7803d2ba | |||
| 9a610661c7 | |||
| 70a65ed5d0 | |||
| 26e8e386b9 | |||
| 19cb7f73bc | |||
| 84f38974b1 | |||
| 7d027bcfc4 | |||
| d1a274b705 | |||
| b43ca7c740 | |||
| 2a6a233fb2 | |||
|
|
610ce84280 | ||
|
|
8bb4a9d3f6 | ||
|
|
67f3a23071 | ||
| d86970f81b | |||
| 623a3f019b | |||
| fd8f643a8f | |||
|
|
bd214f8fb8 | ||
| f0ed548755 | |||
| 1de96027a1 | |||
| 0812937a67 | |||
| 29a261575b | |||
| dcf8335240 | |||
|
|
f876b5e67b | ||
| 440c1bce12 | |||
| 024084859e | |||
| 54990cd1a5 | |||
| 06aab1f57f | |||
| 1ab66af718 | |||
|
|
0fff4ef566 | ||
| d95e84d6fc | |||
| a47be890de | |||
| ee8dfa4a93 | |||
| 5d41cc8380 | |||
| cef745b642 | |||
| d9959378a6 | |||
|
|
07f1151e4c | ||
|
|
f7625f0484 | ||
|
|
537da5800f | ||
| 3be2fa246c | |||
| 9452cf5616 | |||
| 9b7456e148 | |||
| 98f3f82ad5 | |||
| 3eca409f8d | |||
| c11a31c7a9 | |||
| 1a6d72dc17 | |||
| df9e21807e | |||
| b1bf4fd4d5 | |||
| f702ecd8c9 | |||
| a19b52e690 | |||
| b73f2e76d0 | |||
| b4534c6ee0 | |||
| 6149249a6c | |||
| d9935e20cb | |||
| 7b0f3b79b1 | |||
| e6612245a5 | |||
| b4f5b91a57 | |||
| d317c0ba76 | |||
| 539b8299ae | |||
| 5a89495c61 | |||
| fb7849c010 | |||
| 6371009c6f | |||
| a4aa685a4f | |||
| 6bf10b093c | |||
| 3eecc2f590 | |||
| 3959c07261 | |||
| e50c01c0b3 | |||
| 286460d59e | |||
| 4baa3ae707 | |||
| 82119076cf | |||
| f2a350fae6 | |||
| 197770a603 | |||
| ab69a2c264 | |||
| e857efa92f | |||
| 2ff3f4afa9 | |||
| 2f6a11ead7 | |||
| 7de9860dcf | |||
| 6e884cff3a | |||
| c74c51090a | |||
| 8ae0d6b548 | |||
| ee02906ce9 | |||
| 284cc6afd7 | |||
| 9bf6aac82e | |||
| 460c8b59e1 | |||
| 8e857bc72a | |||
| e8d55d27e4 | |||
| fea7e9ddb9 | |||
| 7ec89cdac5 | |||
| 55143dcad4 | |||
| 17ad92402d | |||
| 29e74a2712 | |||
| e16f8fa82e | |||
| c21f3084dc | |||
| 2c706225a1 | |||
| acfb93f1a2 | |||
| f437c40428 | |||
| e06548ac44 | |||
| 155e9bac28 | |||
| 7bebc58615 | |||
| 246d6718c3 | |||
| d776042e20 | |||
| 86c681be70 | |||
| b94dd1e595 | |||
| ef5ec4a131 | |||
| a8eb06f686 | |||
| d1678b529e | |||
| 1451260d4d | |||
| 415488ba39 | |||
| bf7a6d590c | |||
| 8d8120bbfd | |||
| 6cf61ae67c | |||
| 8c65aef127 | |||
| 00e71b97f6 | |||
| ee2bba5623 | |||
| 118d34db55 | |||
| 24e466fadd | |||
| 14fc4345c1 | |||
| 8e472e4c65 | |||
| ec17ccc246 | |||
| 5127f44ab3 | |||
| 2ff70db0b1 | |||
| e17ac1af83 | |||
| 31e59937dc | |||
| 12eb4ae31f | |||
| a2be9457b9 | |||
| 0d56fbc09d | |||
| 56dc1e93c1 | |||
| 691540fe64 | |||
| 7e3f1b1830 | |||
| b631e8ccbb | |||
| 60f2f31d6c | |||
| 045954f8d3 | |||
| 27f1a9dbdd | |||
| 7c809bf18a | |||
| 6490e5e82a | |||
| 5e51f7490c | |||
| 97fba07f4e | |||
| 624e4330bb | |||
| e7917843bc | |||
| 7cd541bdd8 | |||
| 270dd49567 | |||
| 0187300473 | |||
| bf16566b4e | |||
| 895fb02f4e | |||
| 88d6af9815 | |||
| 5aa9dc701f | |||
| f4ef895d2e | |||
| 6e7148a945 | |||
| 83453273c6 | |||
| 76ae5eb747 | |||
| 9c51040f3b | |||
| e1a8ee1c15 | |||
| 44b2b092a8 | |||
| 19bd47a545 | |||
| 2b6d2e8606 | |||
| 7fc2b1ebfe | |||
| e80752ea3f | |||
| bae7222d64 | |||
| f7d3da3ac9 | |||
| eb8a8a2e04 | |||
| b4c6848433 | |||
| 0d94c537a0 | |||
| 861f266c4e | |||
| 51724d0e55 | |||
| c2d1cb9b76 | |||
|
|
c84a02c8ec | ||
| 8d3d167848 | |||
| 94f6cc6942 | |||
| 4a9b95acad | |||
| ef9c1cce77 | |||
| df65ac3439 | |||
| e5ddd296db | |||
| 4be008556e | |||
| 78e9893341 | |||
| d9921b857b | |||
| e62ef001ed | |||
| 1fb7132c64 | |||
| 2d74c66fc6 | |||
| 8a199b64f5 | |||
| b7fe62fcbb | |||
| cd8542258c | |||
| 472a3c1051 | |||
| 88270ece61 | |||
| e7cfbf914a | |||
| fbd466a85c | |||
| 2f8e150f41 | |||
| 764fd6d451 | |||
| 78fffcd725 | |||
| e1133ea114 | |||
| d8e8a49745 | |||
| a7ba9be486 | |||
| 1c3669cb47 | |||
| 90b80b24bc | |||
| c879ca143f | |||
| bc2bd2f2f4 | |||
| 28978299c9 | |||
| 87f6afc249 | |||
| 254f392cb5 | |||
| a6bcaade46 | |||
| 6c145f1100 | |||
| 40cd765019 | |||
| db9c8d83e6 | |||
| 20551b4a80 | |||
| 5c026ae6dd | |||
| 76c0cacc1b | |||
| f17948397f | |||
| 16a665241e | |||
| 065e3904b8 | |||
| 22752960f9 | |||
| 23971ecd7c | |||
| fbcd3e4f7f | |||
| d307893f15 | |||
| 00c0566533 | |||
| f5e3f1aaea | |||
| 508b97ca7c | |||
| 80bdd0ee8a | |||
| 6c06a4ae07 | |||
| ad1aa897b1 | |||
| dccc9c04f5 | |||
| 9345e63a32 | |||
| ff830486af | |||
| da83019d85 | |||
| 53aa47f91e | |||
| 8f470278a7 | |||
| 213fb25686 | |||
| 45668638e1 | |||
| 0857aba039 | |||
| 452ebc2614 | |||
| 9e456bb4f5 | |||
| 83ba0e1044 | |||
| 2229e9d7af | |||
| 15785dd219 | |||
| 847d84b46f | |||
| 3f6f1fa0d4 | |||
| 6812d05849 | |||
| 027114c48c | |||
| eeafa086f3 | |||
| abd20b96a2 | |||
| 0ba7f2536c | |||
| 3097e6af67 | |||
| 606ea43b51 | |||
| 31ae8365a6 | |||
| 1cbf4de2a1 | |||
| b4cc5cff4f | |||
| ab9b7476a4 | |||
| e6384da57e | |||
| 79213ba8d7 | |||
| 8a1627e728 | |||
| fc718f11cf | |||
| f7dc15cbf0 | |||
| 6e9bf3a4be | |||
| fda007f014 | |||
| d7897f29c4 | |||
| 3962238f0d | |||
| 2433c02de9 | |||
| 3d6f646460 | |||
| 35fcc295aa | |||
| 7291db7ca3 | |||
| fbc18d2fad | |||
| fe42ebd347 | |||
| c8547e38f2 | |||
| bfc79abfb6 | |||
| 7697a170bd | |||
| 941c9bc0b0 | |||
| 51aeea1ec9 | |||
| 8118df85ee | |||
| 7af83910ef | |||
| 1475f4af0c | |||
| a3a61c734f | |||
| 3f77bc7aef | |||
| d5125dd811 | |||
| 1ca316c085 | |||
| e390f1edb3 |
5
.cargo/config.toml
Normal file
5
.cargo/config.toml
Normal file
@@ -0,0 +1,5 @@
|
||||
[target.x86_64-pc-windows-msvc]
|
||||
rustflags = ["-C", "link-arg=/STACK:8000000"]
|
||||
|
||||
[target.x86_64-pc-windows-gnu]
|
||||
rustflags = ["-C", "link-arg=-Wl,--stack,8000000"]
|
||||
2
.dockerignore
Normal file
2
.dockerignore
Normal file
@@ -0,0 +1,2 @@
|
||||
target/
|
||||
Dockerfile
|
||||
2
.gitattributes
vendored
2
.gitattributes
vendored
@@ -2,3 +2,5 @@ bootx64.efi filter=lfs diff=lfs merge=lfs -text
|
||||
grubx64.efi filter=lfs diff=lfs merge=lfs -text
|
||||
initrd filter=lfs diff=lfs merge=lfs -text
|
||||
linux filter=lfs diff=lfs merge=lfs -text
|
||||
data/okd/bin/* filter=lfs diff=lfs merge=lfs -text
|
||||
data/okd/installer_image/* filter=lfs diff=lfs merge=lfs -text
|
||||
|
||||
18
.gitea/workflows/check.yml
Normal file
18
.gitea/workflows/check.yml
Normal file
@@ -0,0 +1,18 @@
|
||||
name: Run Check Script
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- master
|
||||
pull_request:
|
||||
|
||||
jobs:
|
||||
check:
|
||||
runs-on: docker
|
||||
container:
|
||||
image: hub.nationtech.io/harmony/harmony_composer:latest
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Run check script
|
||||
run: bash check.sh
|
||||
95
.gitea/workflows/harmony_composer.yaml
Normal file
95
.gitea/workflows/harmony_composer.yaml
Normal file
@@ -0,0 +1,95 @@
|
||||
name: Compile and package harmony_composer
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- master
|
||||
|
||||
jobs:
|
||||
package_harmony_composer:
|
||||
container:
|
||||
image: hub.nationtech.io/harmony/harmony_composer:latest
|
||||
runs-on: dind
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Build for Linux x86_64
|
||||
run: cargo build --release --bin harmony_composer --target x86_64-unknown-linux-gnu
|
||||
|
||||
- name: Build for Windows x86_64 GNU
|
||||
run: cargo build --release --bin harmony_composer --target x86_64-pc-windows-gnu
|
||||
|
||||
- name: Setup log into hub.nationtech.io
|
||||
uses: docker/login-action@v3
|
||||
with:
|
||||
registry: hub.nationtech.io
|
||||
username: ${{ secrets.HUB_BOT_USER }}
|
||||
password: ${{ secrets.HUB_BOT_PASSWORD }}
|
||||
|
||||
# TODO: build ARM images and MacOS binaries (or other targets) too
|
||||
|
||||
- name: Update snapshot-latest tag
|
||||
run: |
|
||||
git config user.name "Gitea CI"
|
||||
git config user.email "ci@nationtech.io"
|
||||
git tag -f snapshot-latest
|
||||
git push origin snapshot-latest --force
|
||||
|
||||
- name: Install jq
|
||||
run: apt install -y jq # The current image includes apt lists so we don't have to apt update and rm /var/lib/apt... every time. But if the image is optimized it won't work anymore
|
||||
|
||||
- name: Create or update release
|
||||
run: |
|
||||
# First, check if release exists and delete it if it does
|
||||
RELEASE_ID=$(curl -s -X GET \
|
||||
-H "Authorization: token ${{ secrets.GITEATOKEN }}" \
|
||||
"https://git.nationtech.io/api/v1/repos/nationtech/harmony/releases/tags/snapshot-latest" \
|
||||
| jq -r '.id // empty')
|
||||
|
||||
if [ -n "$RELEASE_ID" ]; then
|
||||
# Delete existing release
|
||||
curl -X DELETE \
|
||||
-H "Authorization: token ${{ secrets.GITEATOKEN }}" \
|
||||
"https://git.nationtech.io/api/v1/repos/nationtech/harmony/releases/$RELEASE_ID"
|
||||
fi
|
||||
|
||||
# Create new release
|
||||
RESPONSE=$(curl -X POST \
|
||||
-H "Authorization: token ${{ secrets.GITEATOKEN }}" \
|
||||
-H "Content-Type: application/json" \
|
||||
-d '{
|
||||
"tag_name": "snapshot-latest",
|
||||
"name": "Latest Snapshot",
|
||||
"body": "Automated snapshot build from master branch",
|
||||
"draft": false,
|
||||
"prerelease": true
|
||||
}' \
|
||||
"https://git.nationtech.io/api/v1/repos/nationtech/harmony/releases")
|
||||
|
||||
echo "RELEASE_ID=$(echo $RESPONSE | jq -r '.id')" >> $GITHUB_ENV
|
||||
|
||||
- name: Upload Linux binary
|
||||
run: |
|
||||
curl -X POST \
|
||||
-H "Authorization: token ${{ secrets.GITEATOKEN }}" \
|
||||
-H "Content-Type: application/octet-stream" \
|
||||
--data-binary "@target/x86_64-unknown-linux-gnu/release/harmony_composer" \
|
||||
"https://git.nationtech.io/api/v1/repos/nationtech/harmony/releases/${{ env.RELEASE_ID }}/assets?name=harmony_composer"
|
||||
|
||||
- name: Upload Windows binary
|
||||
run: |
|
||||
curl -X POST \
|
||||
-H "Authorization: token ${{ secrets.GITEATOKEN }}" \
|
||||
-H "Content-Type: application/octet-stream" \
|
||||
--data-binary "@target/x86_64-pc-windows-gnu/release/harmony_composer.exe" \
|
||||
"https://git.nationtech.io/api/v1/repos/nationtech/harmony/releases/${{ env.RELEASE_ID }}/assets?name=harmony_composer.exe"
|
||||
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v3
|
||||
|
||||
- name: Build and push
|
||||
uses: docker/build-push-action@v6
|
||||
with:
|
||||
context: .
|
||||
push: true
|
||||
tags: hub.nationtech.io/harmony/harmony_composer:latest
|
||||
29
.gitignore
vendored
29
.gitignore
vendored
@@ -1,3 +1,26 @@
|
||||
target
|
||||
private_repos
|
||||
log/
|
||||
### General ###
|
||||
private_repos/
|
||||
|
||||
### Harmony ###
|
||||
harmony.log
|
||||
data/okd/installation_files*
|
||||
|
||||
### Helm ###
|
||||
# Chart dependencies
|
||||
**/charts/*.tgz
|
||||
|
||||
### Rust ###
|
||||
# Generated by Cargo
|
||||
# will have compiled files and executables
|
||||
debug/
|
||||
target/
|
||||
|
||||
# Remove Cargo.lock from gitignore if creating an executable, leave it for libraries
|
||||
# More information here https://doc.rust-lang.org/cargo/guide/cargo-toml-vs-cargo-lock.html
|
||||
Cargo.lock
|
||||
|
||||
# These are backup files generated by rustfmt
|
||||
**/*.rs.bk
|
||||
|
||||
# MSVC Windows builds of rustc generate these, which store debugging information
|
||||
*.pdb
|
||||
|
||||
3
.gitmodules
vendored
Normal file
3
.gitmodules
vendored
Normal file
@@ -0,0 +1,3 @@
|
||||
[submodule "examples/try_rust_webapp/tryrust.org"]
|
||||
path = examples/try_rust_webapp/tryrust.org
|
||||
url = https://github.com/rust-dd/tryrust.org.git
|
||||
20
.sqlx/query-2ea29df2326f7c84bd4100ad510a3fd4878dc2e217dc83f9bf45a402dfd62a91.json
generated
Normal file
20
.sqlx/query-2ea29df2326f7c84bd4100ad510a3fd4878dc2e217dc83f9bf45a402dfd62a91.json
generated
Normal file
@@ -0,0 +1,20 @@
|
||||
{
|
||||
"db_name": "SQLite",
|
||||
"query": "SELECT host_id FROM host_role_mapping WHERE role = ?",
|
||||
"describe": {
|
||||
"columns": [
|
||||
{
|
||||
"name": "host_id",
|
||||
"ordinal": 0,
|
||||
"type_info": "Text"
|
||||
}
|
||||
],
|
||||
"parameters": {
|
||||
"Right": 1
|
||||
},
|
||||
"nullable": [
|
||||
false
|
||||
]
|
||||
},
|
||||
"hash": "2ea29df2326f7c84bd4100ad510a3fd4878dc2e217dc83f9bf45a402dfd62a91"
|
||||
}
|
||||
32
.sqlx/query-8d247918eca10a88b784ee353db090c94a222115c543231f2140cba27bd0f067.json
generated
Normal file
32
.sqlx/query-8d247918eca10a88b784ee353db090c94a222115c543231f2140cba27bd0f067.json
generated
Normal file
@@ -0,0 +1,32 @@
|
||||
{
|
||||
"db_name": "SQLite",
|
||||
"query": "\n SELECT\n p1.id,\n p1.version_id,\n p1.data as \"data: Json<PhysicalHost>\"\n FROM\n physical_hosts p1\n INNER JOIN (\n SELECT\n id,\n MAX(version_id) AS max_version\n FROM\n physical_hosts\n GROUP BY\n id\n ) p2 ON p1.id = p2.id AND p1.version_id = p2.max_version\n ",
|
||||
"describe": {
|
||||
"columns": [
|
||||
{
|
||||
"name": "id",
|
||||
"ordinal": 0,
|
||||
"type_info": "Text"
|
||||
},
|
||||
{
|
||||
"name": "version_id",
|
||||
"ordinal": 1,
|
||||
"type_info": "Text"
|
||||
},
|
||||
{
|
||||
"name": "data: Json<PhysicalHost>",
|
||||
"ordinal": 2,
|
||||
"type_info": "Blob"
|
||||
}
|
||||
],
|
||||
"parameters": {
|
||||
"Right": 0
|
||||
},
|
||||
"nullable": [
|
||||
false,
|
||||
false,
|
||||
false
|
||||
]
|
||||
},
|
||||
"hash": "8d247918eca10a88b784ee353db090c94a222115c543231f2140cba27bd0f067"
|
||||
}
|
||||
32
.sqlx/query-934035c7ca6e064815393e4e049a7934b0a7fac04a4fe4b2a354f0443d630990.json
generated
Normal file
32
.sqlx/query-934035c7ca6e064815393e4e049a7934b0a7fac04a4fe4b2a354f0443d630990.json
generated
Normal file
@@ -0,0 +1,32 @@
|
||||
{
|
||||
"db_name": "SQLite",
|
||||
"query": "SELECT id, version_id, data as \"data: Json<PhysicalHost>\" FROM physical_hosts WHERE id = ? ORDER BY version_id DESC LIMIT 1",
|
||||
"describe": {
|
||||
"columns": [
|
||||
{
|
||||
"name": "id",
|
||||
"ordinal": 0,
|
||||
"type_info": "Text"
|
||||
},
|
||||
{
|
||||
"name": "version_id",
|
||||
"ordinal": 1,
|
||||
"type_info": "Text"
|
||||
},
|
||||
{
|
||||
"name": "data: Json<PhysicalHost>",
|
||||
"ordinal": 2,
|
||||
"type_info": "Null"
|
||||
}
|
||||
],
|
||||
"parameters": {
|
||||
"Right": 1
|
||||
},
|
||||
"nullable": [
|
||||
false,
|
||||
false,
|
||||
false
|
||||
]
|
||||
},
|
||||
"hash": "934035c7ca6e064815393e4e049a7934b0a7fac04a4fe4b2a354f0443d630990"
|
||||
}
|
||||
12
.sqlx/query-df7a7c9cfdd0972e2e0ce7ea444ba8bc9d708a4fb89d5593a0be2bbebde62aff.json
generated
Normal file
12
.sqlx/query-df7a7c9cfdd0972e2e0ce7ea444ba8bc9d708a4fb89d5593a0be2bbebde62aff.json
generated
Normal file
@@ -0,0 +1,12 @@
|
||||
{
|
||||
"db_name": "SQLite",
|
||||
"query": "\n INSERT INTO host_role_mapping (host_id, role)\n VALUES (?, ?)\n ",
|
||||
"describe": {
|
||||
"columns": [],
|
||||
"parameters": {
|
||||
"Right": 2
|
||||
},
|
||||
"nullable": []
|
||||
},
|
||||
"hash": "df7a7c9cfdd0972e2e0ce7ea444ba8bc9d708a4fb89d5593a0be2bbebde62aff"
|
||||
}
|
||||
12
.sqlx/query-f10f615ee42129ffa293e46f2f893d65a237d31d24b74a29c6a8d8420d255ab8.json
generated
Normal file
12
.sqlx/query-f10f615ee42129ffa293e46f2f893d65a237d31d24b74a29c6a8d8420d255ab8.json
generated
Normal file
@@ -0,0 +1,12 @@
|
||||
{
|
||||
"db_name": "SQLite",
|
||||
"query": "INSERT INTO physical_hosts (id, version_id, data) VALUES (?, ?, ?)",
|
||||
"describe": {
|
||||
"columns": [],
|
||||
"parameters": {
|
||||
"Right": 3
|
||||
},
|
||||
"nullable": []
|
||||
},
|
||||
"hash": "f10f615ee42129ffa293e46f2f893d65a237d31d24b74a29c6a8d8420d255ab8"
|
||||
}
|
||||
36
CONTRIBUTING.md
Normal file
36
CONTRIBUTING.md
Normal file
@@ -0,0 +1,36 @@
|
||||
# Contributing to the Harmony project
|
||||
|
||||
## Write small P-R
|
||||
|
||||
Aim for the smallest piece of work that is mergeable.
|
||||
|
||||
Mergeable means that :
|
||||
|
||||
- it does not break the build
|
||||
- it moves the codebase one step forward
|
||||
|
||||
P-Rs can be many things, they do not have to be complete features.
|
||||
|
||||
### What a P-R **should** be
|
||||
|
||||
- Introduce a new trait : This will be the place to discuss the new trait addition, its design and implementation
|
||||
- A new implementation of a trait : a new concrete implementation of the LoadBalancer trait
|
||||
- A new CI check : something that improves quality, robustness, ci performance
|
||||
- Documentation improvements
|
||||
- Refactoring
|
||||
- Bugfix
|
||||
|
||||
### What a P-R **should not** be
|
||||
|
||||
- Large. Anything over 200 lines (excluding generated lines) should have a very good reason to be this large.
|
||||
- A mix of refactoring, bug fixes and new features.
|
||||
- Introducing multiple new features or ideas at once.
|
||||
- Multiple new implementations of a trait/functionnality at once
|
||||
|
||||
The general idea is to keep P-Rs small and single purpose.
|
||||
|
||||
## Commit message formatting
|
||||
|
||||
We follow conventional commits guidelines.
|
||||
|
||||
https://www.conventionalcommits.org/en/v1.0.0/
|
||||
4612
Cargo.lock
generated
4612
Cargo.lock
generated
File diff suppressed because it is too large
Load Diff
73
Cargo.toml
73
Cargo.toml
@@ -9,6 +9,12 @@ members = [
|
||||
"harmony_tui",
|
||||
"opnsense-config",
|
||||
"opnsense-config-xml",
|
||||
"harmony_cli",
|
||||
"k3d",
|
||||
"harmony_composer",
|
||||
"harmony_inventory_agent",
|
||||
"harmony_secret_derive",
|
||||
"harmony_secret", "adr/agent_discovery/mdns",
|
||||
]
|
||||
|
||||
[workspace.package]
|
||||
@@ -17,25 +23,48 @@ readme = "README.md"
|
||||
license = "GNU AGPL v3"
|
||||
|
||||
[workspace.dependencies]
|
||||
log = "0.4.22"
|
||||
env_logger = "0.11.5"
|
||||
derive-new = "0.7.0"
|
||||
async-trait = "0.1.82"
|
||||
tokio = { version = "1.40.0", features = ["io-std", "fs"] }
|
||||
cidr = "0.2.3"
|
||||
russh = "0.45.0"
|
||||
russh-keys = "0.45.0"
|
||||
rand = "0.8.5"
|
||||
url = "2.5.4"
|
||||
kube = "0.98.0"
|
||||
k8s-openapi = { version = "0.24.0", features = [ "v1_30" ] }
|
||||
serde_yaml = "0.9.34"
|
||||
http = "1.2.0"
|
||||
|
||||
[workspace.dependencies.uuid]
|
||||
version = "1.11.0"
|
||||
features = [
|
||||
"v4", # Lets you generate random UUIDs
|
||||
"fast-rng", # Use a faster (but still sufficiently random) RNG
|
||||
"macro-diagnostics", # Enable better diagnostics for compile-time UUIDs
|
||||
]
|
||||
log = { version = "0.4", features = ["kv"] }
|
||||
env_logger = "0.11"
|
||||
derive-new = "0.7"
|
||||
async-trait = "0.1"
|
||||
tokio = { version = "1.40", features = [
|
||||
"io-std",
|
||||
"fs",
|
||||
"macros",
|
||||
"rt-multi-thread",
|
||||
] }
|
||||
cidr = { features = ["serde"], version = "0.2" }
|
||||
russh = "0.45"
|
||||
russh-keys = "0.45"
|
||||
rand = "0.9"
|
||||
url = "2.5"
|
||||
kube = { version = "1.1.0", features = [
|
||||
"config",
|
||||
"client",
|
||||
"runtime",
|
||||
"rustls-tls",
|
||||
"ws",
|
||||
"jsonpatch",
|
||||
] }
|
||||
k8s-openapi = { version = "0.25", features = ["v1_30"] }
|
||||
serde_yaml = "0.9"
|
||||
serde-value = "0.7"
|
||||
http = "1.2"
|
||||
inquire = "0.7"
|
||||
convert_case = "0.8"
|
||||
chrono = "0.4"
|
||||
similar = "2"
|
||||
uuid = { version = "1.11", features = ["v4", "fast-rng", "macro-diagnostics"] }
|
||||
pretty_assertions = "1.4.1"
|
||||
tempfile = "3.20.0"
|
||||
bollard = "0.19.1"
|
||||
base64 = "0.22.1"
|
||||
tar = "0.4.44"
|
||||
lazy_static = "1.5.0"
|
||||
directories = "6.0.0"
|
||||
thiserror = "2.0.14"
|
||||
serde = { version = "1.0.209", features = ["derive", "rc"] }
|
||||
serde_json = "1.0.127"
|
||||
askama = "0.14"
|
||||
sqlx = { version = "0.8", features = ["runtime-tokio", "sqlite" ] }
|
||||
reqwest = { version = "0.12", features = ["blocking", "stream", "rustls-tls", "http2", "json"], default-features = false }
|
||||
|
||||
26
Dockerfile
Normal file
26
Dockerfile
Normal file
@@ -0,0 +1,26 @@
|
||||
FROM docker.io/rust:1.89.0 AS build
|
||||
|
||||
WORKDIR /app
|
||||
|
||||
COPY . .
|
||||
|
||||
RUN cargo build --release --bin harmony_composer
|
||||
|
||||
FROM docker.io/rust:1.89.0
|
||||
|
||||
WORKDIR /app
|
||||
|
||||
RUN rustup target add x86_64-pc-windows-gnu
|
||||
RUN rustup target add x86_64-unknown-linux-gnu
|
||||
RUN rustup component add rustfmt
|
||||
RUN rustup component add clippy
|
||||
|
||||
RUN apt update
|
||||
|
||||
# TODO: Consider adding more supported targets
|
||||
# nodejs for checkout action, docker for building containers, mingw for cross-compiling for windows
|
||||
RUN apt install -y nodejs docker.io mingw-w64
|
||||
|
||||
COPY --from=build /app/target/release/harmony_composer .
|
||||
|
||||
ENTRYPOINT ["/app/harmony_composer"]
|
||||
151
README.md
151
README.md
@@ -1,9 +1,150 @@
|
||||
### Watch the whole repo on every change
|
||||
# Harmony : Open-source infrastructure orchestration that treats your platform like first-class code
|
||||
|
||||
Due to the current setup being a mix of separate repositories with gitignore and rust workspace, a few options are required for cargo-watch to have the desired behavior :
|
||||
_By [NationTech](https://nationtech.io)_
|
||||
|
||||
```sh
|
||||
RUST_LOG=info cargo watch --ignore-nothing -w harmony -w private_repos/ -x 'run --bin nationtech'
|
||||
[](https://git.nationtech.io/nationtech/harmony)
|
||||
[](LICENSE)
|
||||
|
||||
### Unify
|
||||
|
||||
- **Project Scaffolding**
|
||||
- **Infrastructure Provisioning**
|
||||
- **Application Deployment**
|
||||
- **Day-2 operations**
|
||||
|
||||
All in **one strongly-typed Rust codebase**.
|
||||
|
||||
### Deploy anywhere
|
||||
|
||||
From a **developer laptop** to a **global production cluster**, a single **source of truth** drives the **full software lifecycle.**
|
||||
|
||||
---
|
||||
|
||||
## 1 · The Harmony Philosophy
|
||||
|
||||
Infrastructure is essential, but it shouldn’t be your core business. Harmony is built on three guiding principles that make modern platforms reliable, repeatable, and easy to reason about.
|
||||
|
||||
| Principle | What it means for you |
|
||||
| -------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
|
||||
| **Infrastructure as Resilient Code** | Replace sprawling YAML and bash scripts with type-safe Rust. Test, refactor, and version your platform just like application code. |
|
||||
| **Prove It Works — Before You Deploy** | Harmony uses the compiler to verify that your application’s needs match the target environment’s capabilities at **compile-time**, eliminating an entire class of runtime outages. |
|
||||
| **One Unified Model** | Software and infrastructure are a single system. Harmony models them together, enabling deep automation—from bare-metal servers to Kubernetes workloads—with zero context switching. |
|
||||
|
||||
These principles surface as simple, ergonomic Rust APIs that let teams focus on their product while trusting the platform underneath.
|
||||
|
||||
---
|
||||
|
||||
## 2 · Quick Start
|
||||
|
||||
The snippet below spins up a complete **production-grade LAMP stack** with monitoring. Swap it for your own scores to deploy anything from microservices to machine-learning pipelines.
|
||||
|
||||
```rust
|
||||
use harmony::{
|
||||
data::Version,
|
||||
inventory::Inventory,
|
||||
maestro::Maestro,
|
||||
modules::{
|
||||
lamp::{LAMPConfig, LAMPScore},
|
||||
monitoring::monitoring_alerting::MonitoringAlertingStackScore,
|
||||
},
|
||||
topology::{K8sAnywhereTopology, Url},
|
||||
};
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() {
|
||||
// 1. Describe what you want
|
||||
let lamp_stack = LAMPScore {
|
||||
name: "harmony-lamp-demo".into(),
|
||||
domain: Url::Url(url::Url::parse("https://lampdemo.example.com").unwrap()),
|
||||
php_version: Version::from("8.3.0").unwrap(),
|
||||
config: LAMPConfig {
|
||||
project_root: "./php".into(),
|
||||
database_size: "4Gi".into(),
|
||||
..Default::default()
|
||||
},
|
||||
};
|
||||
|
||||
// 2. Enhance with extra scores (monitoring, CI/CD, …)
|
||||
let mut monitoring = MonitoringAlertingStackScore::new();
|
||||
monitoring.namespace = Some(lamp_stack.config.namespace.clone());
|
||||
|
||||
// 3. Run your scores on the desired topology & inventory
|
||||
harmony_cli::run(
|
||||
Inventory::autoload(), // auto-detect hardware / kube-config
|
||||
K8sAnywhereTopology::from_env(), // local k3d, CI, staging, prod…
|
||||
vec![
|
||||
Box::new(lamp_stack),
|
||||
Box::new(monitoring)
|
||||
],
|
||||
None
|
||||
).await.unwrap();
|
||||
}
|
||||
```
|
||||
|
||||
This will run the nationtech bin (likely `private_repos/nationtech/src/main.rs`) on any change in the harmony or private_repos folders.
|
||||
Run it:
|
||||
|
||||
```bash
|
||||
cargo run
|
||||
```
|
||||
|
||||
Harmony analyses the code, shows an execution plan in a TUI, and applies it once you confirm. Same code, same binary—every environment.
|
||||
|
||||
---
|
||||
|
||||
## 3 · Core Concepts
|
||||
|
||||
| Term | One-liner |
|
||||
| ---------------- | ---------------------------------------------------------------------------------------------------- |
|
||||
| **Score<T>** | Declarative description of the desired state (e.g., `LAMPScore`). |
|
||||
| **Interpret<T>** | Imperative logic that realises a `Score` on a specific environment. |
|
||||
| **Topology** | An environment (local k3d, AWS, bare-metal) exposing verified _Capabilities_ (Kubernetes, DNS, …). |
|
||||
| **Maestro** | Orchestrator that compiles Scores + Topology, ensuring all capabilities line up **at compile-time**. |
|
||||
| **Inventory** | Optional catalogue of physical assets for bare-metal and edge deployments. |
|
||||
|
||||
A visual overview is in the diagram below.
|
||||
|
||||
[Harmony Core Architecture](docs/diagrams/Harmony_Core_Architecture.drawio.svg)
|
||||
|
||||
---
|
||||
|
||||
## 4 · Install
|
||||
|
||||
Prerequisites:
|
||||
|
||||
- Rust
|
||||
- Docker (if you deploy locally)
|
||||
- `kubectl` / `helm` for Kubernetes-based topologies
|
||||
|
||||
```bash
|
||||
git clone https://git.nationtech.io/nationtech/harmony
|
||||
cd harmony
|
||||
cargo build --release # builds the CLI, TUI and libraries
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 5 · Learning More
|
||||
|
||||
- **Architectural Decision Records** – dive into the rationale
|
||||
- [ADR-001 · Why Rust](adr/001-rust.md)
|
||||
- [ADR-003 · Infrastructure Abstractions](adr/003-infrastructure-abstractions.md)
|
||||
- [ADR-006 · Secret Management](adr/006-secret-management.md)
|
||||
- [ADR-011 · Multi-Tenant Cluster](adr/011-multi-tenant-cluster.md)
|
||||
|
||||
- **Extending Harmony** – write new Scores / Interprets, add hardware like OPNsense firewalls, or embed Harmony in your own tooling (`/docs`).
|
||||
|
||||
- **Community** – discussions and roadmap live in [GitLab issues](https://git.nationtech.io/nationtech/harmony/-/issues). PRs, ideas, and feedback are welcome!
|
||||
|
||||
---
|
||||
|
||||
## 6 · License
|
||||
|
||||
Harmony is released under the **GNU AGPL v3**.
|
||||
|
||||
> We choose a strong copyleft license to ensure the project—and every improvement to it—remains open and benefits the entire community. Fork it, enhance it, even out-innovate us; just keep it open.
|
||||
|
||||
See [LICENSE](LICENSE) for the full text.
|
||||
|
||||
---
|
||||
|
||||
_Made with ❤️ & 🦀 by the NationTech and the Harmony community_
|
||||
|
||||
33
adr/000-ADR-Template.md
Normal file
33
adr/000-ADR-Template.md
Normal file
@@ -0,0 +1,33 @@
|
||||
# Architecture Decision Record: \<Title\>
|
||||
|
||||
Initial Author: \<Name\>
|
||||
|
||||
Initial Date: \<Date\>
|
||||
|
||||
Last Updated Date: \<Date\>
|
||||
|
||||
## Status
|
||||
|
||||
Proposed/Pending/Accepted/Implemented
|
||||
|
||||
## Context
|
||||
|
||||
The problem, background, the "why" behind this decision/discussion
|
||||
|
||||
## Decision
|
||||
|
||||
Proposed solution to the problem
|
||||
|
||||
## Rationale
|
||||
|
||||
Reasoning behind the decision
|
||||
|
||||
## Consequences
|
||||
|
||||
Pros/Cons of chosen solution
|
||||
|
||||
## Alternatives considered
|
||||
|
||||
Pros/Cons of various proposed solutions considered
|
||||
|
||||
## Additional Notes
|
||||
360
adr/003-abstractions/main_context_prompt.md
Normal file
360
adr/003-abstractions/main_context_prompt.md
Normal file
@@ -0,0 +1,360 @@
|
||||
|
||||
# Here is the current condenses architecture sample for Harmony's core abstractions
|
||||
|
||||
```rust
|
||||
use std::process::Command;
|
||||
|
||||
pub trait Capability {}
|
||||
|
||||
pub trait CommandCapability: Capability {
|
||||
fn execute_command(&self, command: &str, args: &[&str]) -> Result<String, String>;
|
||||
}
|
||||
|
||||
pub trait KubernetesCapability: Capability {
|
||||
fn apply_manifest(&self, manifest: &str) -> Result<(), String>;
|
||||
fn get_resource(&self, resource_type: &str, name: &str) -> Result<String, String>;
|
||||
}
|
||||
|
||||
pub trait Topology {
|
||||
fn name(&self) -> &str;
|
||||
}
|
||||
|
||||
pub trait Score<T: Topology> {
|
||||
fn compile(&self) -> Result<Box<dyn Interpret<T>>, String>;
|
||||
fn name(&self) -> &str;
|
||||
}
|
||||
|
||||
pub struct LinuxHostTopology {
|
||||
name: String,
|
||||
host: String,
|
||||
}
|
||||
|
||||
impl Capability for LinuxHostTopology {}
|
||||
|
||||
impl LinuxHostTopology {
|
||||
pub fn new(name: String, host: String) -> Self {
|
||||
Self { name, host }
|
||||
}
|
||||
}
|
||||
|
||||
impl Topology for LinuxHostTopology {
|
||||
fn name(&self) -> &str {
|
||||
&self.name
|
||||
}
|
||||
}
|
||||
|
||||
impl CommandCapability for LinuxHostTopology {
|
||||
fn execute_command(&self, command: &str, args: &[&str]) -> Result<String, String> {
|
||||
println!("Executing on {}: {} {:?}", self.host, command, args);
|
||||
// In a real implementation, this would SSH to the host and execute the command
|
||||
let output = Command::new(command)
|
||||
.args(args)
|
||||
.output()
|
||||
.map_err(|e| e.to_string())?;
|
||||
|
||||
if output.status.success() {
|
||||
Ok(String::from_utf8_lossy(&output.stdout).to_string())
|
||||
} else {
|
||||
Err(String::from_utf8_lossy(&output.stderr).to_string())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub struct K3DTopology {
|
||||
name: String,
|
||||
linux_host: LinuxHostTopology,
|
||||
cluster_name: String,
|
||||
}
|
||||
|
||||
impl Capability for K3DTopology {}
|
||||
|
||||
impl K3DTopology {
|
||||
pub fn new(name: String, linux_host: LinuxHostTopology, cluster_name: String) -> Self {
|
||||
Self {
|
||||
name,
|
||||
linux_host,
|
||||
cluster_name,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Topology for K3DTopology {
    /// Returns the display name given at construction.
    fn name(&self) -> &str {
        &self.name
    }
}

impl CommandCapability for K3DTopology {
    /// Delegates command execution to the underlying Linux host.
    fn execute_command(&self, command: &str, args: &[&str]) -> Result<String, String> {
        self.linux_host.execute_command(command, args)
    }
}
|
||||
|
||||
/// Kubernetes operations for a K3D cluster, implemented by shelling out to
/// `kubectl` on the underlying Linux host.
impl KubernetesCapability for K3DTopology {
    /// Writes `manifest` to a temp file on the host and applies it with
    /// `kubectl apply -f` against the `k3d-<cluster_name>` context.
    fn apply_manifest(&self, manifest: &str) -> Result<(), String> {
        println!("Applying manifest to K3D cluster '{}'", self.cluster_name);
        // NOTE(review): fixed, predictable path in /tmp — acceptable for a
        // sketch, but a real implementation should use mktemp.
        let temp_file = "/tmp/manifest-harmony-temp.yaml".to_string();

        // BUG FIX: the original ran `bash -c "cat > temp_file"` with no
        // stdin, so the manifest content was never written and kubectl
        // applied an empty file. Feed the content through a quoted here-doc
        // instead (quoted delimiter prevents shell expansion of the YAML).
        self.linux_host.execute_command(
            "bash",
            &[
                "-c",
                &format!("cat > {} <<'HARMONY_EOF'\n{}\nHARMONY_EOF", temp_file, manifest),
            ],
        )?;

        // Apply with kubectl against this cluster's k3d context.
        self.linux_host.execute_command("kubectl", &[
            "--context",
            &format!("k3d-{}", self.cluster_name),
            "apply",
            "-f",
            &temp_file,
        ])?;

        Ok(())
    }

    /// Fetches a single resource as YAML via `kubectl get -o yaml`.
    fn get_resource(&self, resource_type: &str, name: &str) -> Result<String, String> {
        println!(
            "Getting resource {}/{} from K3D cluster '{}'",
            resource_type, name, self.cluster_name
        );
        self.linux_host.execute_command("kubectl", &[
            "--context",
            &format!("k3d-{}", self.cluster_name),
            "get",
            resource_type,
            name,
            "-o",
            "yaml",
        ])
    }
}
|
||||
|
||||
/// A score that runs a single shell command; requires `CommandCapability`.
pub struct CommandScore {
    // Display name used in logs.
    name: String,
    // Program to run.
    command: String,
    // Arguments passed to the program.
    args: Vec<String>,
}

impl CommandScore {
    /// Creates a score that will run `command` with `args`.
    pub fn new(name: String, command: String, args: Vec<String>) -> Self {
        Self {
            name,
            command,
            args,
        }
    }
}
|
||||
|
||||
/// The executable form of a compiled score for topology `T`.
pub trait Interpret<T: Topology> {
    /// Runs against the topology, returning textual output on success.
    fn execute(&self, topology: &T) -> Result<String, String>;
}

// Stateless interpret for CommandScore.
// NOTE(review): it carries no command/args, so `execute` has no access to
// the score's data — presumably why it is still `todo!()`.
struct CommandInterpret;

impl<T> Interpret<T> for CommandInterpret
where
    T: Topology + CommandCapability,
{
    // Unimplemented sketch; would call `topology.execute_command(...)`.
    fn execute(&self, topology: &T) -> Result<String, String> {
        todo!()
    }
}
|
||||
|
||||
/// CommandScore is applicable to any topology that can execute commands —
/// the `T: CommandCapability` bound is what enforces this at compile time.
impl<T> Score<T> for CommandScore
where
    T: Topology + CommandCapability,
{
    // NOTE(review): the returned interpret is stateless, so this score's
    // `command`/`args` are currently discarded at compile time.
    fn compile(&self) -> Result<Box<dyn Interpret<T>>, String> {
        Ok(Box::new(CommandInterpret {}))
    }

    fn name(&self) -> &str {
        &self.name
    }
}
|
||||
|
||||
|
||||
/// A score that applies a raw Kubernetes manifest; requires
/// `KubernetesCapability`. `Clone` lets the compiled interpret own a copy.
#[derive(Clone)]
pub struct K8sResourceScore {
    // Display name used in logs.
    name: String,
    // Raw YAML manifest to apply.
    manifest: String,
}

impl K8sResourceScore {
    /// Creates a score that will apply `manifest`.
    pub fn new(name: String, manifest: String) -> Self {
        Self { name, manifest }
    }
}

// Interpret owning a cloned copy of the score it was compiled from.
struct K8sResourceInterpret {
    score: K8sResourceScore,
}

impl<T: Topology + KubernetesCapability> Interpret<T> for K8sResourceInterpret {
    // Unimplemented sketch; would call `topology.apply_manifest(...)`.
    fn execute(&self, topology: &T) -> Result<String, String> {
        todo!()
    }
}
|
||||
|
||||
impl<T> Score<T> for K8sResourceScore
|
||||
where
|
||||
T: Topology + KubernetesCapability,
|
||||
{
|
||||
fn compile(&self) -> Result<Box<(dyn Interpret<T> + 'static)>, String> {
|
||||
Ok(Box::new(K8sResourceInterpret {
|
||||
score: self.clone(),
|
||||
}))
|
||||
}
|
||||
|
||||
fn name(&self) -> &str {
|
||||
&self.name
|
||||
}
|
||||
}
|
||||
|
||||
pub struct Maestro<T: Topology> {
|
||||
topology: T,
|
||||
scores: Vec<Box<dyn Score<T>>>,
|
||||
}
|
||||
|
||||
|
||||
impl<T: Topology> Maestro<T> {
|
||||
pub fn new(topology: T) -> Self {
|
||||
Self {
|
||||
topology,
|
||||
scores: Vec::new(),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn register_score<S>(&mut self, score: S)
|
||||
where
|
||||
S: Score<T> + 'static,
|
||||
{
|
||||
println!(
|
||||
"Registering score '{}' for topology '{}'",
|
||||
score.name(),
|
||||
self.topology.name()
|
||||
);
|
||||
self.scores.push(Box::new(score));
|
||||
}
|
||||
|
||||
pub fn orchestrate(&self) -> Result<(), String> {
|
||||
println!("Orchestrating topology '{}'", self.topology.name());
|
||||
for score in &self.scores {
|
||||
let interpret = score.compile()?;
|
||||
interpret.execute(&self.topology)?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
/// Demonstrates compile-time capability checking: a plain Linux host accepts
/// only command scores, while a K3D topology accepts both command and
/// Kubernetes scores.
fn main() {
    let linux_host = LinuxHostTopology::new("dev-machine".to_string(), "localhost".to_string());

    let mut linux_maestro = Maestro::new(linux_host);

    linux_maestro.register_score(CommandScore::new(
        "check-disk".to_string(),
        "df".to_string(),
        vec!["-h".to_string()],
    ));
    // NOTE(review): orchestrated here AND again at the bottom of main, so
    // the check-disk score runs twice — confirm this is intentional.
    linux_maestro.orchestrate().unwrap();

    // This would fail to compile if we tried to register a K8sResourceScore
    // because LinuxHostTopology doesn't implement KubernetesCapability
    //linux_maestro.register_score(K8sResourceScore::new(
    //    "...".to_string(),
    //    "...".to_string(),
    //));

    // Create a K3D topology which has both Command and Kubernetes capabilities
    let k3d_host = LinuxHostTopology::new("k3d-host".to_string(), "localhost".to_string());

    let k3d_topology = K3DTopology::new(
        "dev-cluster".to_string(),
        k3d_host,
        "devcluster".to_string(),
    );

    // Create a maestro for the K3D topology
    let mut k3d_maestro = Maestro::new(k3d_topology);

    // We can register both command scores and kubernetes scores
    k3d_maestro.register_score(CommandScore::new(
        "check-nodes".to_string(),
        "kubectl".to_string(),
        vec!["get".to_string(), "nodes".to_string()],
    ));

    k3d_maestro.register_score(K8sResourceScore::new(
        "deploy-nginx".to_string(),
        r#"
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx
spec:
  replicas: 1
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: nginx:latest
        ports:
        - containerPort: 80
"#
        .to_string(),
    ));

    // Orchestrate both topologies
    linux_maestro.orchestrate().unwrap();
    k3d_maestro.orchestrate().unwrap();
}
|
||||
```
|
||||
|
||||
|
||||
## Technical take
|
||||
|
||||
The key insight is that we might not need a complex TypeMap or runtime capability checking. Instead, we should leverage Rust's trait system to express capability requirements directly in the type system.
|
||||
|
||||
By clarifying the problem and focusing on type-level solutions rather than runtime checks, we can likely arrive at a simpler, more robust design that leverages the strengths of Rust's type system.
|
||||
|
||||
## Philosophical Shifts
|
||||
|
||||
1. **From Runtime to Compile-Time**: Move capability checking from runtime to compile-time.
|
||||
|
||||
2. **From Objects to Functions**: Think of scores less as objects and more as functions that transform topologies.
|
||||
|
||||
3. **From Homogeneous to Heterogeneous API**: Embrace different API paths for different capability combinations rather than trying to force everything through a single interface.
|
||||
|
||||
4. **From Complex to Simple**: Focus on making common cases simple, even if it means less abstraction for uncommon cases.
|
||||
|
||||
## High level concepts
|
||||
|
||||
The high-level concepts have so far evolved toward the following definitions.
|
||||
|
||||
Topology -> Has -> Capabilities
|
||||
Score -> Defines -> Work to be done / desired state
|
||||
Interpret -> Requires -> Capabilities to execute a Score
|
||||
Maestro -> Enforces -> Compatibility (through the type system at compile time)
|
||||
|
||||
## Why Harmony
|
||||
|
||||
The compile time safety is paramount here. Harmony's main goal is to make the entire software delivery pipeline robust. Current IaC tools are very hard to work with, require complex setups to test and debug real code.
|
||||
|
||||
Leveraging Rust's compiler allows us to shift left much of the complexity and frustration that comes with tools like Ansible, which is YAML-based and quickly becomes brittle at scale — or Terraform, where a successful `terraform plan` makes you think everything is correct, only for `terraform apply` to fail horribly and leave you with tens or hundreds of resources to clean up manually.
|
||||
|
||||
Of course, this requires a significant effort to get to the point where we have actually implemented all the logic.
|
||||
|
||||
But using Rust and a Type Driven Design approach, we believe we are providing a much more robust foundation for our customers' and users' deployments anywhere.
|
||||
|
||||
Also, having the full power of a mature programming language like Rust enables organizations and the community to customize their deployment any way they want, build upon it in a reliable way that has been evolved and proven over decades of enterprise dependency management, API definitions, etc.
|
||||
|
||||
===
|
||||
|
||||
Given all this c
|
||||
10
adr/003-abstractions/topology/Cargo.toml
Normal file
10
adr/003-abstractions/topology/Cargo.toml
Normal file
@@ -0,0 +1,10 @@
|
||||
[package]
|
||||
name = "example-topology"
|
||||
edition = "2024"
|
||||
version.workspace = true
|
||||
readme.workspace = true
|
||||
license.workspace = true
|
||||
publish = false
|
||||
|
||||
[dependencies]
|
||||
rand.workspace = true
|
||||
232
adr/003-abstractions/topology/src/main.rs
Normal file
232
adr/003-abstractions/topology/src/main.rs
Normal file
@@ -0,0 +1,232 @@
|
||||
// Basic traits from your example
/// Marker trait for deployment targets.
trait Topology {}

/// A unit of work; produces an `Interpret` for a topology type `T`.
trait Score: Clone + std::fmt::Debug {
    /// Builds the interpret that executes this score against `T`.
    fn get_interpret<T: Topology>(&self) -> Box<dyn Interpret<T>>;
    /// Display name for logging.
    fn name(&self) -> String;
}

/// Executable form of a score, parameterized by topology type.
/// NOTE(review): `execute` takes no topology argument, so interprets cannot
/// actually reach the topology's capabilities in this sketch.
trait Interpret<T: Topology> {
    fn execute(&self);
}
|
||||
|
||||
/// Orchestrator owning a single topology instance.
struct Maestro<T: Topology> {
    topology: T
}

impl<T: Topology> Maestro<T> {
    /// Wraps a topology.
    pub fn new(topology: T) -> Self {
        Maestro { topology }
    }

    // NOTE(review): only logs the registration; the score is not stored
    // anywhere, so registered scores are effectively dropped.
    pub fn register_score<S: Score + 'static>(&self, score: S) {
        println!("Registering score: {}", score.name());
    }

    /// Compiles the score for this topology type and runs it immediately.
    pub fn execute_score<S: Score + 'static>(&self, score: S) {
        println!("Executing score: {}", score.name());
        score.get_interpret::<T>().execute();
    }
}
|
||||
|
||||
// Capability traits - these are used to enforce requirements
/// Ability to run shell commands on the target.
trait CommandExecution {
    fn execute_command(&self, command: &[String]) -> Result<String, String>;
}

/// Ability to read and write files on the target.
trait FileSystem {
    fn read_file(&self, path: &str) -> Result<String, String>;
    fn write_file(&self, path: &str, content: &str) -> Result<(), String>;
}
|
||||
|
||||
// A concrete topology implementation
/// A Linux host addressed by hostname; all capabilities are simulated.
#[derive(Clone, Debug)]
struct LinuxHostTopology {
    hostname: String,
}

impl Topology for LinuxHostTopology {}

// Implement the capabilities for LinuxHostTopology
impl CommandExecution for LinuxHostTopology {
    /// Simulated command execution: logs and always succeeds.
    fn execute_command(&self, command: &[String]) -> Result<String, String> {
        println!("Executing command on {}: {:?}", self.hostname, command);
        // In a real implementation, this would use std::process::Command
        Ok(format!("Command executed successfully on {}", self.hostname))
    }
}

impl FileSystem for LinuxHostTopology {
    /// Simulated read: returns a placeholder string.
    fn read_file(&self, path: &str) -> Result<String, String> {
        println!("Reading file {} on {}", path, self.hostname);
        Ok(format!("Content of {} on {}", path, self.hostname))
    }

    /// Simulated write: logs and succeeds.
    fn write_file(&self, path: &str, content: &str) -> Result<(), String> {
        println!("Writing to file {} on {}: {}", path, self.hostname, content);
        Ok(())
    }
}
|
||||
|
||||
// Another topology that doesn't support command execution
/// A bare-metal device: file-system access only, no shell — used to show a
/// score's capability requirement failing at compile time.
#[derive(Clone, Debug)]
struct BareMetalTopology {
    device_id: String,
}

impl Topology for BareMetalTopology {}

impl FileSystem for BareMetalTopology {
    /// Simulated read: returns a placeholder string.
    fn read_file(&self, path: &str) -> Result<String, String> {
        println!("Reading file {} on device {}", path, self.device_id);
        Ok(format!("Content of {} on device {}", path, self.device_id))
    }

    /// Simulated write: logs and succeeds.
    fn write_file(&self, path: &str, content: &str) -> Result<(), String> {
        println!("Writing to file {} on device {}: {}", path, self.device_id, content);
        Ok(())
    }
}
|
||||
|
||||
// CommandScore implementation
/// Score describing a shell command to run; requires `CommandExecution`.
#[derive(Clone, Debug)]
struct CommandScore {
    name: String,
    args: Vec<String>,
}

impl CommandScore {
    /// Creates a command score from a display name and argv-style args.
    pub fn new(name: String, args: Vec<String>) -> Self {
        CommandScore { name, args }
    }
}
|
||||
|
||||
impl Score for CommandScore {
    // NOTE(review): the trait declares `get_interpret<T: Topology>`, but this
    // impl adds `CommandExecution + 'static` bounds. Rust rejects impl
    // methods with stricter generic bounds than the trait declares (E0276),
    // so this file is a design sketch that does not compile as written —
    // which is the limitation this ADR exploration demonstrates.
    fn get_interpret<T: Topology + CommandExecution + 'static>(&self) -> Box<dyn Interpret<T>> {
        // This is the key part: we constrain T to implement CommandExecution
        // If T doesn't implement CommandExecution, this will fail to compile
        Box::new(CommandInterpret::<T>::new(self.clone()))
    }

    fn name(&self) -> String {
        self.name.clone()
    }
}
|
||||
|
||||
// CommandInterpret implementation
/// Interpret for `CommandScore`; PhantomData pins the topology type so the
/// `T: CommandExecution` bound is carried by the struct itself.
struct CommandInterpret<T: Topology + CommandExecution> {
    score: CommandScore,
    _marker: std::marker::PhantomData<T>,
}

impl<T: Topology + CommandExecution> CommandInterpret<T> {
    /// Wraps a score; no topology instance is captured.
    pub fn new(score: CommandScore) -> Self {
        CommandInterpret {
            score,
            _marker: std::marker::PhantomData,
        }
    }
}

impl<T: Topology + CommandExecution> Interpret<T> for CommandInterpret<T> {
    /// Logs the args; cannot actually run them because `execute` receives no
    /// topology in this sketch.
    fn execute(&self) {
        println!("Command interpret is executing: {:?}", self.score.args);
        // In a real implementation, you would call the topology's execute_command method
        // topology.execute_command(&self.score.args);
    }
}
|
||||
|
||||
// FileScore implementation - a different type of score that requires FileSystem capability
/// Score describing a file operation: read when `content` is `None`,
/// write when `content` is `Some(..)`.
#[derive(Clone, Debug)]
struct FileScore {
    name: String,
    path: String,
    content: Option<String>,
}

impl FileScore {
    /// Score that reads `path`.
    pub fn new_read(name: String, path: String) -> Self {
        FileScore { name, path, content: None }
    }

    /// Score that writes `content` to `path`.
    pub fn new_write(name: String, path: String, content: String) -> Self {
        FileScore { name, path, content: Some(content) }
    }
}
|
||||
|
||||
impl Score for FileScore {
    // NOTE(review): this signature matches the trait, but the body constructs
    // `FileInterpret::<T>`, which requires `T: FileSystem` — a bound that a
    // bare `T: Topology` does not carry, so this does not compile either.
    // Together with CommandScore above it shows the generic-method Score
    // design cannot express per-score capability bounds.
    fn get_interpret<T: Topology>(&self) -> Box<dyn Interpret<T>> {
        // This constrains T to implement FileSystem
        Box::new(FileInterpret::<T>::new(self.clone()))
    }

    fn name(&self) -> String {
        self.name.clone()
    }
}
|
||||
|
||||
// FileInterpret implementation
/// Interpret for `FileScore`; PhantomData carries the `T: FileSystem` bound.
struct FileInterpret<T: Topology + FileSystem> {
    score: FileScore,
    _marker: std::marker::PhantomData<T>,
}

impl<T: Topology + FileSystem> FileInterpret<T> {
    /// Wraps a score; no topology instance is captured.
    pub fn new(score: FileScore) -> Self {
        FileInterpret {
            score,
            _marker: std::marker::PhantomData,
        }
    }
}

impl<T: Topology + FileSystem> Interpret<T> for FileInterpret<T> {
    /// Logs the operation: `Some(content)` means write, `None` means read.
    fn execute(&self) {
        match &self.score.content {
            Some(content) => {
                println!("File interpret is writing to {}: {}", self.score.path, content);
                // In a real implementation: topology.write_file(&self.score.path, content);
            },
            None => {
                println!("File interpret is reading from {}", self.score.path);
                // In a real implementation: let content = topology.read_file(&self.score.path);
            }
        }
    }
}
|
||||
|
||||
/// Demonstrates the capability-bound design: both topologies accept file
/// scores, only the Linux host accepts command scores.
fn main() {
    // Create our topologies
    let linux = LinuxHostTopology { hostname: "server1.example.com".to_string() };
    let bare_metal = BareMetalTopology { device_id: "device001".to_string() };

    // Create our maestros
    let linux_maestro = Maestro::new(linux);
    let bare_metal_maestro = Maestro::new(bare_metal);

    // Create scores
    let command_score = CommandScore::new(
        "List Files".to_string(),
        vec!["ls".to_string(), "-la".to_string()]
    );

    let file_read_score = FileScore::new_read(
        "Read Config".to_string(),
        "/etc/config.json".to_string()
    );

    // This will work because LinuxHostTopology implements CommandExecution
    linux_maestro.execute_score(command_score.clone());

    // This will work because LinuxHostTopology implements FileSystem
    linux_maestro.execute_score(file_read_score.clone());

    // This will work because BareMetalTopology implements FileSystem
    bare_metal_maestro.execute_score(file_read_score);

    // This would NOT compile because BareMetalTopology doesn't implement CommandExecution:
    // bare_metal_maestro.execute_score(command_score);
    // The error would occur at compile time, ensuring type safety

    println!("All scores executed successfully!");
}
|
||||
314
adr/003-abstractions/topology/src/main_claude37_2.rs
Normal file
314
adr/003-abstractions/topology/src/main_claude37_2.rs
Normal file
@@ -0,0 +1,314 @@
|
||||
mod main_gemini25pro;
|
||||
use std::process::Command;
|
||||
|
||||
pub trait Capability {}
|
||||
|
||||
pub trait CommandCapability: Capability {
|
||||
fn execute_command(&self, command: &str, args: &[&str]) -> Result<String, String>;
|
||||
}
|
||||
|
||||
pub trait KubernetesCapability: Capability {
|
||||
fn apply_manifest(&self, manifest: &str) -> Result<(), String>;
|
||||
fn get_resource(&self, resource_type: &str, name: &str) -> Result<String, String>;
|
||||
}
|
||||
|
||||
pub trait Topology {
|
||||
fn name(&self) -> &str;
|
||||
}
|
||||
|
||||
pub trait Score<T: Topology> {
|
||||
fn compile(&self) -> Result<Box<dyn Interpret<T>>, String>;
|
||||
fn name(&self) -> &str;
|
||||
}
|
||||
|
||||
pub struct LinuxHostTopology {
|
||||
name: String,
|
||||
host: String,
|
||||
}
|
||||
|
||||
impl Capability for LinuxHostTopology {}
|
||||
|
||||
impl LinuxHostTopology {
|
||||
pub fn new(name: String, host: String) -> Self {
|
||||
Self { name, host }
|
||||
}
|
||||
}
|
||||
|
||||
impl Topology for LinuxHostTopology {
|
||||
fn name(&self) -> &str {
|
||||
&self.name
|
||||
}
|
||||
}
|
||||
|
||||
impl CommandCapability for LinuxHostTopology {
|
||||
fn execute_command(&self, command: &str, args: &[&str]) -> Result<String, String> {
|
||||
println!("Executing on {}: {} {:?}", self.host, command, args);
|
||||
// In a real implementation, this would SSH to the host and execute the command
|
||||
let output = Command::new(command)
|
||||
.args(args)
|
||||
.output()
|
||||
.map_err(|e| e.to_string())?;
|
||||
|
||||
if output.status.success() {
|
||||
Ok(String::from_utf8_lossy(&output.stdout).to_string())
|
||||
} else {
|
||||
Err(String::from_utf8_lossy(&output.stderr).to_string())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub struct K3DTopology {
|
||||
name: String,
|
||||
linux_host: LinuxHostTopology,
|
||||
cluster_name: String,
|
||||
}
|
||||
|
||||
impl Capability for K3DTopology {}
|
||||
|
||||
impl K3DTopology {
|
||||
pub fn new(name: String, linux_host: LinuxHostTopology, cluster_name: String) -> Self {
|
||||
Self {
|
||||
name,
|
||||
linux_host,
|
||||
cluster_name,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Topology for K3DTopology {
|
||||
fn name(&self) -> &str {
|
||||
&self.name
|
||||
}
|
||||
}
|
||||
|
||||
impl CommandCapability for K3DTopology {
|
||||
fn execute_command(&self, command: &str, args: &[&str]) -> Result<String, String> {
|
||||
self.linux_host.execute_command(command, args)
|
||||
}
|
||||
}
|
||||
|
||||
/// Kubernetes operations for a K3D cluster, implemented by shelling out to
/// `kubectl` on the underlying Linux host.
impl KubernetesCapability for K3DTopology {
    /// Writes `manifest` to a temp file on the host and applies it with
    /// `kubectl apply -f` against the `k3d-<cluster_name>` context.
    fn apply_manifest(&self, manifest: &str) -> Result<(), String> {
        println!("Applying manifest to K3D cluster '{}'", self.cluster_name);
        // NOTE(review): fixed, predictable path in /tmp — acceptable for a
        // sketch, but a real implementation should use mktemp.
        let temp_file = "/tmp/manifest-harmony-temp.yaml".to_string();

        // BUG FIX: the original ran `bash -c "cat > temp_file"` with no
        // stdin, so the manifest content was never written and kubectl
        // applied an empty file. Feed the content through a quoted here-doc
        // instead (quoted delimiter prevents shell expansion of the YAML).
        self.linux_host.execute_command(
            "bash",
            &[
                "-c",
                &format!("cat > {} <<'HARMONY_EOF'\n{}\nHARMONY_EOF", temp_file, manifest),
            ],
        )?;

        // Apply with kubectl against this cluster's k3d context.
        self.linux_host.execute_command("kubectl", &[
            "--context",
            &format!("k3d-{}", self.cluster_name),
            "apply",
            "-f",
            &temp_file,
        ])?;

        Ok(())
    }

    /// Fetches a single resource as YAML via `kubectl get -o yaml`.
    fn get_resource(&self, resource_type: &str, name: &str) -> Result<String, String> {
        println!(
            "Getting resource {}/{} from K3D cluster '{}'",
            resource_type, name, self.cluster_name
        );
        self.linux_host.execute_command("kubectl", &[
            "--context",
            &format!("k3d-{}", self.cluster_name),
            "get",
            resource_type,
            name,
            "-o",
            "yaml",
        ])
    }
}
|
||||
|
||||
pub struct CommandScore {
|
||||
name: String,
|
||||
command: String,
|
||||
args: Vec<String>,
|
||||
}
|
||||
|
||||
impl CommandScore {
|
||||
pub fn new(name: String, command: String, args: Vec<String>) -> Self {
|
||||
Self {
|
||||
name,
|
||||
command,
|
||||
args,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub trait Interpret<T: Topology> {
|
||||
fn execute(&self, topology: &T) -> Result<String, String>;
|
||||
}
|
||||
|
||||
struct CommandInterpret;
|
||||
|
||||
impl<T> Interpret<T> for CommandInterpret
|
||||
where
|
||||
T: Topology + CommandCapability,
|
||||
{
|
||||
fn execute(&self, topology: &T) -> Result<String, String> {
|
||||
todo!()
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> Score<T> for CommandScore
|
||||
where
|
||||
T: Topology + CommandCapability,
|
||||
{
|
||||
fn compile(&self) -> Result<Box<dyn Interpret<T>>, String> {
|
||||
Ok(Box::new(CommandInterpret {}))
|
||||
}
|
||||
|
||||
fn name(&self) -> &str {
|
||||
&self.name
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct K8sResourceScore {
|
||||
name: String,
|
||||
manifest: String,
|
||||
}
|
||||
|
||||
impl K8sResourceScore {
|
||||
pub fn new(name: String, manifest: String) -> Self {
|
||||
Self { name, manifest }
|
||||
}
|
||||
}
|
||||
|
||||
struct K8sResourceInterpret {
|
||||
score: K8sResourceScore,
|
||||
}
|
||||
|
||||
impl<T: Topology + KubernetesCapability> Interpret<T> for K8sResourceInterpret {
|
||||
fn execute(&self, topology: &T) -> Result<String, String> {
|
||||
todo!()
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> Score<T> for K8sResourceScore
|
||||
where
|
||||
T: Topology + KubernetesCapability,
|
||||
{
|
||||
fn compile(&self) -> Result<Box<(dyn Interpret<T> + 'static)>, String> {
|
||||
Ok(Box::new(K8sResourceInterpret {
|
||||
score: self.clone(),
|
||||
}))
|
||||
}
|
||||
|
||||
fn name(&self) -> &str {
|
||||
&self.name
|
||||
}
|
||||
}
|
||||
|
||||
pub struct Maestro<T: Topology> {
|
||||
topology: T,
|
||||
scores: Vec<Box<dyn Score<T>>>,
|
||||
}
|
||||
|
||||
|
||||
impl<T: Topology> Maestro<T> {
|
||||
pub fn new(topology: T) -> Self {
|
||||
Self {
|
||||
topology,
|
||||
scores: Vec::new(),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn register_score<S>(&mut self, score: S)
|
||||
where
|
||||
S: Score<T> + 'static,
|
||||
{
|
||||
println!(
|
||||
"Registering score '{}' for topology '{}'",
|
||||
score.name(),
|
||||
self.topology.name()
|
||||
);
|
||||
self.scores.push(Box::new(score));
|
||||
}
|
||||
|
||||
pub fn orchestrate(&self) -> Result<(), String> {
|
||||
println!("Orchestrating topology '{}'", self.topology.name());
|
||||
for score in &self.scores {
|
||||
let interpret = score.compile()?;
|
||||
interpret.execute(&self.topology)?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
fn main() {
|
||||
let linux_host = LinuxHostTopology::new("dev-machine".to_string(), "localhost".to_string());
|
||||
|
||||
let mut linux_maestro = Maestro::new(linux_host);
|
||||
|
||||
linux_maestro.register_score(CommandScore::new(
|
||||
"check-disk".to_string(),
|
||||
"df".to_string(),
|
||||
vec!["-h".to_string()],
|
||||
));
|
||||
linux_maestro.orchestrate().unwrap();
|
||||
|
||||
// This would fail to compile if we tried to register a K8sResourceScore
|
||||
// because LinuxHostTopology doesn't implement KubernetesCapability
|
||||
//linux_maestro.register_score(K8sResourceScore::new(
|
||||
// "...".to_string(),
|
||||
// "...".to_string(),
|
||||
//));
|
||||
|
||||
// Create a K3D topology which has both Command and Kubernetes capabilities
|
||||
let k3d_host = LinuxHostTopology::new("k3d-host".to_string(), "localhost".to_string());
|
||||
|
||||
let k3d_topology = K3DTopology::new(
|
||||
"dev-cluster".to_string(),
|
||||
k3d_host,
|
||||
"devcluster".to_string(),
|
||||
);
|
||||
|
||||
// Create a maestro for the K3D topology
|
||||
let mut k3d_maestro = Maestro::new(k3d_topology);
|
||||
|
||||
// We can register both command scores and kubernetes scores
|
||||
k3d_maestro.register_score(CommandScore::new(
|
||||
"check-nodes".to_string(),
|
||||
"kubectl".to_string(),
|
||||
vec!["get".to_string(), "nodes".to_string()],
|
||||
));
|
||||
|
||||
k3d_maestro.register_score(K8sResourceScore::new(
|
||||
"deploy-nginx".to_string(),
|
||||
r#"
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: nginx
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
app: nginx
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: nginx
|
||||
spec:
|
||||
containers:
|
||||
- name: nginx
|
||||
image: nginx:latest
|
||||
ports:
|
||||
- containerPort: 80
|
||||
"#
|
||||
.to_string(),
|
||||
));
|
||||
|
||||
// Orchestrate both topologies
|
||||
linux_maestro.orchestrate().unwrap();
|
||||
k3d_maestro.orchestrate().unwrap();
|
||||
}
|
||||
323
adr/003-abstractions/topology/src/main_claudev1.rs
Normal file
323
adr/003-abstractions/topology/src/main_claudev1.rs
Normal file
@@ -0,0 +1,323 @@
|
||||
use std::marker::PhantomData;
|
||||
use std::process::Command;
|
||||
|
||||
// ===== Capability Traits =====
|
||||
|
||||
/// Base trait for all capabilities
|
||||
pub trait Capability {}
|
||||
|
||||
/// Capability for executing shell commands on a host
|
||||
pub trait CommandCapability: Capability {
|
||||
fn execute_command(&self, command: &str, args: &[&str]) -> Result<String, String>;
|
||||
}
|
||||
|
||||
/// Capability for interacting with a Kubernetes cluster
|
||||
pub trait KubernetesCapability: Capability {
|
||||
fn apply_manifest(&self, manifest: &str) -> Result<(), String>;
|
||||
fn get_resource(&self, resource_type: &str, name: &str) -> Result<String, String>;
|
||||
}
|
||||
|
||||
// ===== Topology Traits =====
|
||||
|
||||
/// Base trait for all topologies
|
||||
pub trait Topology {
|
||||
// Base topology methods that don't depend on capabilities
|
||||
fn name(&self) -> &str;
|
||||
}
|
||||
|
||||
// ===== Score Traits =====
|
||||
|
||||
/// Generic Score trait with an associated Capability type
|
||||
pub trait Score<T: Topology> {
|
||||
fn apply(&self, topology: &T) -> Result<(), String>;
|
||||
fn name(&self) -> &str;
|
||||
}
|
||||
|
||||
// ===== Concrete Topologies =====
|
||||
|
||||
/// A topology representing a Linux host
|
||||
pub struct LinuxHostTopology {
|
||||
name: String,
|
||||
host: String,
|
||||
}
|
||||
|
||||
impl LinuxHostTopology {
|
||||
pub fn new(name: String, host: String) -> Self {
|
||||
Self { name, host }
|
||||
}
|
||||
}
|
||||
|
||||
impl Topology for LinuxHostTopology {
|
||||
fn name(&self) -> &str {
|
||||
&self.name
|
||||
}
|
||||
}
|
||||
|
||||
impl CommandCapability for LinuxHostTopology {
|
||||
fn execute_command(&self, command: &str, args: &[&str]) -> Result<String, String> {
|
||||
println!("Executing on {}: {} {:?}", self.host, command, args);
|
||||
// In a real implementation, this would SSH to the host and execute the command
|
||||
let output = Command::new(command)
|
||||
.args(args)
|
||||
.output()
|
||||
.map_err(|e| e.to_string())?;
|
||||
|
||||
if output.status.success() {
|
||||
Ok(String::from_utf8_lossy(&output.stdout).to_string())
|
||||
} else {
|
||||
Err(String::from_utf8_lossy(&output.stderr).to_string())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// A topology representing a K3D Kubernetes cluster
|
||||
pub struct K3DTopology {
|
||||
name: String,
|
||||
linux_host: LinuxHostTopology,
|
||||
cluster_name: String,
|
||||
}
|
||||
|
||||
impl K3DTopology {
|
||||
pub fn new(name: String, linux_host: LinuxHostTopology, cluster_name: String) -> Self {
|
||||
Self {
|
||||
name,
|
||||
linux_host,
|
||||
cluster_name,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Topology for K3DTopology {
|
||||
fn name(&self) -> &str {
|
||||
&self.name
|
||||
}
|
||||
}
|
||||
|
||||
impl CommandCapability for K3DTopology {
|
||||
fn execute_command(&self, command: &str, args: &[&str]) -> Result<String, String> {
|
||||
// Delegate to the underlying Linux host
|
||||
self.linux_host.execute_command(command, args)
|
||||
}
|
||||
}
|
||||
|
||||
impl KubernetesCapability for K3DTopology {
|
||||
fn apply_manifest(&self, manifest: &str) -> Result<(), String> {
|
||||
println!("Applying manifest to K3D cluster '{}'", self.cluster_name);
|
||||
// Write manifest to a temporary file
|
||||
let temp_file = format!("/tmp/manifest-{}.yaml", rand::random::<u32>());
|
||||
self.execute_command("bash", &["-c", &format!("cat > {}", temp_file)])?;
|
||||
|
||||
// Apply with kubectl
|
||||
self.execute_command(
|
||||
"kubectl",
|
||||
&["--context", &format!("k3d-{}", self.cluster_name), "apply", "-f", &temp_file]
|
||||
)?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn get_resource(&self, resource_type: &str, name: &str) -> Result<String, String> {
|
||||
println!("Getting resource {}/{} from K3D cluster '{}'", resource_type, name, self.cluster_name);
|
||||
self.execute_command(
|
||||
"kubectl",
|
||||
&[
|
||||
"--context",
|
||||
&format!("k3d-{}", self.cluster_name),
|
||||
"get",
|
||||
resource_type,
|
||||
name,
|
||||
"-o",
|
||||
"yaml",
|
||||
]
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
// ===== Concrete Scores =====
|
||||
|
||||
/// A score that executes commands on a topology
|
||||
pub struct CommandScore {
|
||||
name: String,
|
||||
command: String,
|
||||
args: Vec<String>,
|
||||
}
|
||||
|
||||
impl CommandScore {
|
||||
pub fn new(name: String, command: String, args: Vec<String>) -> Self {
|
||||
Self { name, command, args }
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> Score<T> for CommandScore
|
||||
where
|
||||
T: Topology + CommandCapability
|
||||
{
|
||||
fn apply(&self, topology: &T) -> Result<(), String> {
|
||||
println!("Applying CommandScore '{}' to topology '{}'", self.name, topology.name());
|
||||
let args_refs: Vec<&str> = self.args.iter().map(|s| s.as_str()).collect();
|
||||
topology.execute_command(&self.command, &args_refs)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn name(&self) -> &str {
|
||||
&self.name
|
||||
}
|
||||
}
|
||||
|
||||
/// A score that applies Kubernetes resources to a topology
|
||||
pub struct K8sResourceScore {
|
||||
name: String,
|
||||
manifest: String,
|
||||
}
|
||||
|
||||
impl K8sResourceScore {
|
||||
pub fn new(name: String, manifest: String) -> Self {
|
||||
Self { name, manifest }
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> Score<T> for K8sResourceScore
|
||||
where
|
||||
T: Topology + KubernetesCapability
|
||||
{
|
||||
fn apply(&self, topology: &T) -> Result<(), String> {
|
||||
println!("Applying K8sResourceScore '{}' to topology '{}'", self.name, topology.name());
|
||||
topology.apply_manifest(&self.manifest)
|
||||
}
|
||||
|
||||
fn name(&self) -> &str {
|
||||
&self.name
|
||||
}
|
||||
}
|
||||
|
||||
// ===== Maestro Orchestrator =====
|
||||
|
||||
/// Type-safe orchestrator that enforces capability requirements at compile time
pub struct Maestro<T: Topology> {
    // The single topology every registered score will run against.
    topology: T,
    // Type-erased scores. Each Box was created in `register_score`, whose
    // `S: Score<T>` bound is where capability compatibility is checked.
    scores: Vec<Box<dyn ScoreWrapper<T>>>,
}

/// A trait object wrapper that hides the specific Score type but preserves its
/// capability requirements
trait ScoreWrapper<T: Topology> {
    // Runs the wrapped score against the topology.
    fn apply(&self, topology: &T) -> Result<(), String>;
    // Display name of the wrapped score.
    fn name(&self) -> &str;
}

/// Implementation of ScoreWrapper for any Score that works with topology T
impl<T, S> ScoreWrapper<T> for S
where
    T: Topology,
    S: Score<T> + 'static
{
    fn apply(&self, topology: &T) -> Result<(), String> {
        // Fully-qualified syntax: disambiguates Score::apply from
        // ScoreWrapper::apply itself, which would otherwise recurse.
        <S as Score<T>>::apply(self, topology)
    }

    fn name(&self) -> &str {
        <S as Score<T>>::name(self)
    }
}
|
||||
|
||||
impl<T: Topology> Maestro<T> {
|
||||
pub fn new(topology: T) -> Self {
|
||||
Self {
|
||||
topology,
|
||||
scores: Vec::new(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Register a score that is compatible with this topology's capabilities
|
||||
pub fn register_score<S>(&mut self, score: S)
|
||||
where
|
||||
S: Score<T> + 'static
|
||||
{
|
||||
println!("Registering score '{}' for topology '{}'", score.name(), self.topology.name());
|
||||
self.scores.push(Box::new(score));
|
||||
}
|
||||
|
||||
/// Apply all registered scores to the topology
|
||||
pub fn orchestrate(&self) -> Result<(), String> {
|
||||
println!("Orchestrating topology '{}'", self.topology.name());
|
||||
for score in &self.scores {
|
||||
score.apply(&self.topology)?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
// ===== Example Usage =====
|
||||
|
||||
// Demonstrates compile-time capability checking across two topologies.
fn main() {
    // Create a Linux host topology
    let linux_host = LinuxHostTopology::new(
        "dev-machine".to_string(),
        "localhost".to_string()
    );

    // Create a maestro for the Linux host
    let mut linux_maestro = Maestro::new(linux_host);

    // Register a command score that works with any topology having CommandCapability
    linux_maestro.register_score(CommandScore::new(
        "check-disk".to_string(),
        "df".to_string(),
        vec!["-h".to_string()]
    ));

    // This would fail to compile if we tried to register a K8sResourceScore
    // because LinuxHostTopology doesn't implement KubernetesCapability
    // linux_maestro.register_score(K8sResourceScore::new(...));

    // Create a K3D topology which has both Command and Kubernetes capabilities
    let k3d_host = LinuxHostTopology::new(
        "k3d-host".to_string(),
        "localhost".to_string()
    );

    let k3d_topology = K3DTopology::new(
        "dev-cluster".to_string(),
        k3d_host,
        "devcluster".to_string()
    );

    // Create a maestro for the K3D topology
    let mut k3d_maestro = Maestro::new(k3d_topology);

    // We can register both command scores and kubernetes scores
    k3d_maestro.register_score(CommandScore::new(
        "check-nodes".to_string(),
        "kubectl".to_string(),
        vec!["get".to_string(), "nodes".to_string()]
    ));

    // NOTE(review): the indentation inside this raw-string YAML manifest
    // appears to have been lost when this file was extracted — verify the
    // nesting (metadata/name, spec/replicas, etc.) against the original
    // before relying on it; flat YAML like this is not a valid Deployment.
    k3d_maestro.register_score(K8sResourceScore::new(
        "deploy-nginx".to_string(),
        r#"
apiVersion: apps/v1
kind: Deployment
metadata:
name: nginx
spec:
replicas: 1
selector:
matchLabels:
app: nginx
template:
metadata:
labels:
app: nginx
spec:
containers:
- name: nginx
image: nginx:latest
ports:
- containerPort: 80
"#.to_string()
    ));

    // Orchestrate both topologies
    linux_maestro.orchestrate().unwrap();
    k3d_maestro.orchestrate().unwrap();
}
|
||||
369
adr/003-abstractions/topology/src/main_gemini25pro.rs
Normal file
369
adr/003-abstractions/topology/src/main_gemini25pro.rs
Normal file
@@ -0,0 +1,369 @@
|
||||
// Import necessary items (though for this example, few are needed beyond std)
|
||||
use std::fmt;
|
||||
|
||||
// --- Error Handling ---
|
||||
// A simple error type for demonstration purposes. In a real app, use `thiserror` or `anyhow`.
|
||||
#[derive(Debug)]
enum OrchestrationError {
    CommandFailed(String),
    KubeClientError(String),
    TopologySetupFailed(String),
}

impl fmt::Display for OrchestrationError {
    /// Renders the error as "<label>: <detail>" — output is byte-identical
    /// to the original per-variant `write!` calls.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let (label, detail) = match self {
            Self::CommandFailed(e) => ("Command execution failed", e),
            Self::KubeClientError(e) => ("Kubernetes client error", e),
            Self::TopologySetupFailed(e) => ("Topology setup failed", e),
        };
        write!(f, "{}: {}", label, detail)
    }
}

impl std::error::Error for OrchestrationError {}

// Define a common Result type
type Result<T> = std::result::Result<T, Box<dyn std::error::Error>>;
|
||||
|
||||
// --- 1. Capability Specification (as Traits) ---
|
||||
|
||||
/// Capability trait representing the ability to run Linux commands.
/// This follows the "Parse, Don't Validate" idea implicitly - if you have an object
/// implementing this, you know you *can* run commands, no need to check later.
trait LinuxOperations {
    /// Runs `command`, returning its (possibly simulated) output on success.
    fn run_command(&self, command: &str) -> Result<String>;
}

/// A mock Kubernetes client trait for demonstration.
trait KubeClient {
    /// Applies a full manifest document to the cluster.
    fn apply_manifest(&self, manifest: &str) -> Result<()>;
    /// Lists pod names within the given namespace.
    fn get_pods(&self, namespace: &str) -> Result<Vec<String>>;
}
|
||||
|
||||
/// Mock implementation of a KubeClient.
|
||||
struct MockKubeClient {
|
||||
cluster_name: String,
|
||||
}
|
||||
|
||||
impl KubeClient for MockKubeClient {
|
||||
fn apply_manifest(&self, manifest: &str) -> Result<()> {
|
||||
println!(
|
||||
"[{}] Applying Kubernetes manifest:\n---\n{}\n---",
|
||||
self.cluster_name, manifest
|
||||
);
|
||||
// Simulate success or failure
|
||||
if manifest.contains("invalid") {
|
||||
Err(Box::new(OrchestrationError::KubeClientError(
|
||||
"Invalid manifest content".into(),
|
||||
)))
|
||||
} else {
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
fn get_pods(&self, namespace: &str) -> Result<Vec<String>> {
|
||||
println!(
|
||||
"[{}] Getting pods in namespace '{}'",
|
||||
self.cluster_name, namespace
|
||||
);
|
||||
Ok(vec![
|
||||
format!("pod-a-12345-{}-{}", namespace, self.cluster_name),
|
||||
format!("pod-b-67890-{}-{}", namespace, self.cluster_name),
|
||||
])
|
||||
}
|
||||
}
|
||||
|
||||
/// Capability trait representing access to a Kubernetes cluster.
/// This follows Rust Embedded WG's "Zero-Cost Abstractions" - the trait itself
/// adds no runtime overhead, only compile-time structure.
trait KubernetesCluster {
    // Provides access to a Kubernetes client instance.
    // Using `impl Trait` in return position for flexibility.
    // NOTE(review): `impl Trait` in a trait method return (RPITIT) is only
    // stable since Rust 1.75 — confirm the project's MSRV supports it.
    fn get_kube_client(&self) -> Result<impl KubeClient>;
}
|
||||
|
||||
// --- 2. Topology Implementations ---
|
||||
// Topologies implement the capabilities they provide.
|
||||
|
||||
/// Represents a basic Linux host.
|
||||
#[derive(Debug, Clone)]
|
||||
struct LinuxHostTopology {
|
||||
hostname: String,
|
||||
// In a real scenario: SSH connection details, etc.
|
||||
}
|
||||
|
||||
impl LinuxHostTopology {
|
||||
fn new(hostname: &str) -> Self {
|
||||
println!("Initializing LinuxHostTopology for {}", hostname);
|
||||
Self {
|
||||
hostname: hostname.to_string(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// LinuxHostTopology provides LinuxOperations capability.
|
||||
impl LinuxOperations for LinuxHostTopology {
|
||||
fn run_command(&self, command: &str) -> Result<String> {
|
||||
println!("[{}] Running command: '{}'", self.hostname, command);
|
||||
// Simulate command execution (e.g., via SSH)
|
||||
if command.starts_with("fail") {
|
||||
Err(Box::new(OrchestrationError::CommandFailed(format!(
|
||||
"Command '{}' failed",
|
||||
command
|
||||
))))
|
||||
} else {
|
||||
Ok(format!("Output of '{}' on {}", command, self.hostname))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Represents a K3D (Kubernetes in Docker) cluster running on a host.
|
||||
#[derive(Debug, Clone)]
|
||||
struct K3DTopology {
|
||||
cluster_name: String,
|
||||
host_os: String, // Example: might implicitly run commands on the underlying host
|
||||
// In a real scenario: Kubeconfig path, Docker client, etc.
|
||||
}
|
||||
|
||||
impl K3DTopology {
|
||||
fn new(cluster_name: &str) -> Self {
|
||||
println!("Initializing K3DTopology for cluster {}", cluster_name);
|
||||
Self {
|
||||
cluster_name: cluster_name.to_string(),
|
||||
host_os: "Linux".to_string(), // Assume k3d runs on Linux for this example
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// K3DTopology provides KubernetesCluster capability.
|
||||
impl KubernetesCluster for K3DTopology {
|
||||
fn get_kube_client(&self) -> Result<impl KubeClient> {
|
||||
println!("[{}] Creating mock Kubernetes client", self.cluster_name);
|
||||
// In a real scenario, this would initialize a client using kubeconfig etc.
|
||||
Ok(MockKubeClient {
|
||||
cluster_name: self.cluster_name.clone(),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// K3DTopology *also* provides LinuxOperations (e.g., for running commands inside nodes or on the host managing k3d).
|
||||
impl LinuxOperations for K3DTopology {
|
||||
fn run_command(&self, command: &str) -> Result<String> {
|
||||
println!(
|
||||
"[{} on {} host] Running command: '{}'",
|
||||
self.cluster_name, self.host_os, command
|
||||
);
|
||||
// Simulate command execution (maybe `docker exec` or similar)
|
||||
if command.starts_with("fail") {
|
||||
Err(Box::new(OrchestrationError::CommandFailed(format!(
|
||||
"Command '{}' failed within k3d context",
|
||||
command
|
||||
))))
|
||||
} else {
|
||||
Ok(format!(
|
||||
"Output of '{}' within k3d cluster {}",
|
||||
command, self.cluster_name
|
||||
))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// --- 3. Score Implementations ---
|
||||
// Scores require capabilities via trait bounds on their execution logic.
|
||||
|
||||
/// Base trait for identifying scores. Could be empty or hold metadata.
trait Score {
    // Static display name of the score type.
    fn name(&self) -> &'static str;
    // We don't put execute here, as its signature depends on required capabilities.
}

/// A score that runs a shell command on a Linux host.
#[derive(Debug)]
struct CommandScore {
    // Command line handed verbatim to LinuxOperations::run_command.
    command: String,
}

impl Score for CommandScore {
    fn name(&self) -> &'static str {
        "CommandScore"
    }
}
|
||||
|
||||
impl CommandScore {
|
||||
fn new(command: &str) -> Self {
|
||||
Self {
|
||||
command: command.to_string(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Execute method is generic over T, but requires T implements LinuxOperations.
|
||||
/// This follows the "Scores as Polymorphic Functions" idea.
|
||||
fn execute<T: LinuxOperations + ?Sized>(&self, topology: &T) -> Result<()> {
|
||||
println!("Executing Score: {}", Score::name(self));
|
||||
let output = topology.run_command(&self.command)?;
|
||||
println!("Command Score Output: {}", output);
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
/// A score that applies a Kubernetes resource manifest.
|
||||
#[derive(Debug)]
|
||||
struct K8sResourceScore {
|
||||
manifest_path: String, // Path or content
|
||||
}
|
||||
|
||||
impl Score for K8sResourceScore {
|
||||
fn name(&self) -> &'static str {
|
||||
"K8sResourceScore"
|
||||
}
|
||||
}
|
||||
|
||||
impl K8sResourceScore {
|
||||
fn new(manifest_path: &str) -> Self {
|
||||
Self {
|
||||
manifest_path: manifest_path.to_string(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Execute method requires T implements KubernetesCluster.
|
||||
fn execute<T: KubernetesCluster + ?Sized>(&self, topology: &T) -> Result<()> {
|
||||
println!("Executing Score: {}", Score::name(self));
|
||||
let client = topology.get_kube_client()?;
|
||||
let manifest_content = format!(
|
||||
"apiVersion: v1\nkind: Pod\nmetadata:\n name: my-pod-from-{}",
|
||||
self.manifest_path
|
||||
); // Simulate reading file
|
||||
client.apply_manifest(&manifest_content)?;
|
||||
println!(
|
||||
"K8s Resource Score applied manifest: {}",
|
||||
self.manifest_path
|
||||
);
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
// --- 4. Maestro (The Orchestrator) ---
|
||||
|
||||
// This version of Maestro uses a helper trait (`ScoreRunner`) to enable
|
||||
// storing heterogeneous scores while preserving compile-time checks.
|
||||
|
||||
/// A helper trait to erase the specific capability requirements *after*
/// the compiler has verified them, allowing storage in a Vec.
/// The verification happens in the blanket impls below.
trait ScoreRunner<T> {
    // T is the concrete Topology type
    fn run(&self, topology: &T) -> Result<()>;
    // Display name, forwarded from the Score trait.
    fn name(&self) -> &'static str;
}

// Blanket implementation: A CommandScore can be run on any Topology T
// *if and only if* T implements LinuxOperations.
// The compiler checks this bound when `add_score` is called.
impl<T: LinuxOperations> ScoreRunner<T> for CommandScore {
    fn run(&self, topology: &T) -> Result<()> {
        self.execute(topology) // Call the capability-specific execute method
    }
    fn name(&self) -> &'static str {
        // Fully-qualified to pick Score::name over ScoreRunner::name.
        Score::name(self)
    }
}

// Blanket implementation: A K8sResourceScore can be run on any Topology T
// *if and only if* T implements KubernetesCluster.
impl<T: KubernetesCluster> ScoreRunner<T> for K8sResourceScore {
    fn run(&self, topology: &T) -> Result<()> {
        self.execute(topology) // Call the capability-specific execute method
    }
    fn name(&self) -> &'static str {
        Score::name(self)
    }
}
|
||||
|
||||
/// The Maestro orchestrator, strongly typed to a specific Topology `T`.
|
||||
struct Maestro<T> {
|
||||
topology: T,
|
||||
// Stores type-erased runners, but addition is type-safe.
|
||||
scores: Vec<Box<dyn ScoreRunner<T>>>,
|
||||
}
|
||||
|
||||
impl<T> Maestro<T> {
|
||||
/// Creates a new Maestro instance bound to a specific topology.
|
||||
fn new(topology: T) -> Self {
|
||||
println!("Maestro initialized.");
|
||||
Maestro {
|
||||
topology,
|
||||
scores: Vec::new(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Adds a score to the Maestro.
|
||||
/// **Compile-time check happens here!**
|
||||
/// The `S: ScoreRunner<T>` bound ensures that the score `S` provides an
|
||||
/// implementation of `ScoreRunner` *for the specific topology type `T`*.
|
||||
/// The blanket impls above ensure this is only possible if `T` has the
|
||||
/// required capabilities for `S`.
|
||||
/// This directly follows the "Theoretical Example: The Compiler as an Ally".
|
||||
fn add_score<S>(&mut self, score: S)
|
||||
where
|
||||
S: Score + ScoreRunner<T> + 'static, // S must be runnable on *this* T
|
||||
{
|
||||
println!("Registering score: {}", Score::name(&score));
|
||||
self.scores.push(Box::new(score));
|
||||
}
|
||||
|
||||
/// Runs all registered scores sequentially on the topology.
|
||||
fn run_all(&self) -> Vec<Result<()>> {
|
||||
println!("\n--- Running all scores ---");
|
||||
self.scores
|
||||
.iter()
|
||||
.map(|score_runner| {
|
||||
println!("---");
|
||||
let result = score_runner.run(&self.topology);
|
||||
match &result {
|
||||
Ok(_) => println!("Score '{}' completed successfully.", score_runner.name()),
|
||||
Err(e) => eprintln!("Score '{}' failed: {}", score_runner.name(), e),
|
||||
}
|
||||
result
|
||||
})
|
||||
.collect()
|
||||
}
|
||||
}
|
||||
|
||||
// --- 5. Example Usage ---
|
||||
|
||||
// Walks through both topologies, including deliberate runtime failures and
// a commented-out line demonstrating the compile-time capability check.
fn main() {
    println!("=== Scenario 1: Linux Host Topology ===");
    let linux_host = LinuxHostTopology::new("server1.example.com");
    let mut maestro_linux = Maestro::new(linux_host);

    // Add scores compatible with LinuxHostTopology (which has LinuxOperations)
    maestro_linux.add_score(CommandScore::new("uname -a"));
    maestro_linux.add_score(CommandScore::new("ls -l /tmp"));

    // *** Compile-time Error Example ***
    // Try adding a score that requires KubernetesCluster capability.
    // This line WILL NOT COMPILE because LinuxHostTopology does not implement KubernetesCluster,
    // therefore K8sResourceScore does not implement ScoreRunner<LinuxHostTopology>.
    // maestro_linux.add_score(K8sResourceScore::new("my-app.yaml"));
    // Uncomment the line above to see the compiler error! The error message will
    // likely point to the `ScoreRunner<LinuxHostTopology>` bound not being satisfied
    // for `K8sResourceScore`.

    let results_linux = maestro_linux.run_all();
    println!("\nLinux Host Results: {:?}", results_linux);

    println!("\n=== Scenario 2: K3D Topology ===");
    let k3d_cluster = K3DTopology::new("dev-cluster");
    let mut maestro_k3d = Maestro::new(k3d_cluster);

    // Add scores compatible with K3DTopology (which has LinuxOperations AND KubernetesCluster)
    maestro_k3d.add_score(CommandScore::new("pwd")); // Uses LinuxOperations
    maestro_k3d.add_score(K8sResourceScore::new("nginx-deployment.yaml")); // Uses KubernetesCluster
    maestro_k3d.add_score(K8sResourceScore::new("invalid-service.yaml")); // Test error case
    maestro_k3d.add_score(CommandScore::new("fail please")); // Test error case

    let results_k3d = maestro_k3d.run_all();
    println!("\nK3D Cluster Results: {:?}", results_k3d);

    println!("\n=== Compile-Time Safety Demonstrated ===");
    println!("(Check the commented-out line in the code for the compile error example)");
}
|
||||
492
adr/003-abstractions/topology/src/main_geminifail.rs
Normal file
492
adr/003-abstractions/topology/src/main_geminifail.rs
Normal file
@@ -0,0 +1,492 @@
|
||||
use std::any::Any;
|
||||
use std::fmt::Debug;
|
||||
use std::process::Command;
|
||||
/// Marker trait: the base of the capability hierarchy.
pub trait Capability {}

/// Ability to run an external command and return its stdout.
pub trait CommandCapability: Capability {
    // NOTE(review): `&Vec<String>` would be more idiomatic as `&[String]`,
    // but changing the signature would break every implementer — left as-is.
    fn execute_command(&self, command: &str, args: &Vec<String>) -> Result<String, String>;
}

/// Ability to talk to a Kubernetes cluster.
pub trait KubernetesCapability: Capability {
    /// Applies a full YAML manifest.
    fn apply_manifest(&self, manifest: &str) -> Result<(), String>;
    /// Fetches one resource, returning its textual representation.
    fn get_resource(&self, resource_type: &str, name: &str) -> Result<String, String>;
}

/// A named deployment target.
pub trait Topology {
    fn name(&self) -> &str;
}

/// A compiled score, executable against a concrete topology `T`.
pub trait Interpret<T: Topology> {
    fn execute(&self, topology: &T) -> Result<String, String>;
}
|
||||
|
||||
// --- Score Definition Structs (Concrete) ---
// CommandScore struct remains the same
#[derive(Debug, Clone)] // Added Debug/Clone for easier handling
pub struct CommandScore {
    // Human-readable identifier used in logging and registration.
    name: String,
    // Program to invoke.
    command: String,
    // Arguments passed verbatim to the program.
    args: Vec<String>,
}

impl CommandScore {
    /// Creates a command score from a name, a program and its arguments.
    pub fn new(name: String, command: String, args: Vec<String>) -> Self {
        Self { name, command, args }
    }
}

// K8sResourceScore struct remains the same
#[derive(Debug, Clone)]
pub struct K8sResourceScore {
    name: String,
    // Full manifest content (written to a temp file by apply_manifest),
    // not a file path.
    manifest: String,
}

impl K8sResourceScore {
    /// Creates a Kubernetes resource score from a manifest string.
    pub fn new(name: String, manifest: String) -> Self {
        Self { name, manifest }
    }
}
|
||||
|
||||
|
||||
// --- Metadata / Base Score Trait (Non-Generic) ---
// Trait for common info and enabling downcasting later if needed
pub trait ScoreDefinition: Debug + Send + Sync {
    /// Human-readable score identifier (logging / TUI display).
    fn name(&self) -> &str;
    // Method to allow downcasting
    fn as_any(&self) -> &dyn Any;
    // Optional: Could add methods for description, parameters etc.
    // fn description(&self) -> &str;

    // Optional but potentially useful: A way to clone the definition
    fn box_clone(&self) -> Box<dyn ScoreDefinition>;
}

// Implement Clone for Box<dyn ScoreDefinition>
// (an object-safe trait cannot require `Clone` directly, so cloning is
// routed through `box_clone`)
impl Clone for Box<dyn ScoreDefinition> {
    fn clone(&self) -> Self {
        self.box_clone()
    }
}
|
||||
|
||||
// Implement ScoreDefinition for your concrete score types
impl ScoreDefinition for CommandScore {
    fn name(&self) -> &str {
        &self.name
    }
    fn as_any(&self) -> &dyn Any {
        self
    }
    // Relies on the derived Clone for CommandScore.
    fn box_clone(&self) -> Box<dyn ScoreDefinition> {
        Box::new(self.clone())
    }
}

impl ScoreDefinition for K8sResourceScore {
    fn name(&self) -> &str {
        &self.name
    }
    fn as_any(&self) -> &dyn Any {
        self
    }
    // Relies on the derived Clone for K8sResourceScore.
    fn box_clone(&self) -> Box<dyn ScoreDefinition> {
        Box::new(self.clone())
    }
}
|
||||
|
||||
|
||||
// --- Score Compatibility Trait (Generic over T) ---
// This remains largely the same, ensuring compile-time checks
pub trait Score<T: Topology>: ScoreDefinition {
    // No need for name() here, it's in ScoreDefinition
    /// Compiles this definition into an interpreter executable on `T`.
    fn compile(&self) -> Result<Box<dyn Interpret<T>>, String>;
}
|
||||
|
||||
// --- Implementations of Score<T> (Crucial Link) ---

// CommandScore implements Score<T> for any T with CommandCapability
impl<T> Score<T> for CommandScore
where
    T: Topology + CommandCapability + 'static, // Added 'static bound often needed for Box<dyn>
    // Self: ScoreDefinition // This bound is implicit now
{
    // Compilation cannot fail here; it just copies the data into the interpreter.
    fn compile(&self) -> Result<Box<dyn Interpret<T>>, String> {
        // Pass necessary data from self to CommandInterpret
        Ok(Box::new(CommandInterpret {
            command: self.command.clone(),
            args: self.args.clone(),
        }))
    }
}

// K8sResourceScore implements Score<T> for any T with KubernetesCapability
impl<T> Score<T> for K8sResourceScore
where
    T: Topology + KubernetesCapability + 'static,
    // Self: ScoreDefinition
{
    // Compilation cannot fail here; it just copies the manifest.
    fn compile(&self) -> Result<Box<dyn Interpret<T>>, String> {
        Ok(Box::new(K8sResourceInterpret {
            manifest: self.manifest.clone(), // Pass needed data
        }))
    }
}
|
||||
|
||||
|
||||
// --- Interpret Implementations ---
|
||||
// Need to hold the actual data now
|
||||
|
||||
struct CommandInterpret {
|
||||
command: String,
|
||||
args: Vec<String>, // Or owned Strings if lifetime is tricky
|
||||
}
|
||||
|
||||
impl<'a, T> Interpret<T> for CommandInterpret
|
||||
where
|
||||
T: Topology + CommandCapability,
|
||||
{
|
||||
fn execute(&self, topology: &T) -> Result<String, String> {
|
||||
// Now uses data stored in self
|
||||
topology.execute_command(&self.command, &self.args)
|
||||
}
|
||||
}
|
||||
|
||||
/// Runtime interpreter for a K8sResourceScore: owns the manifest to apply.
struct K8sResourceInterpret {
    manifest: String,
}

impl<T: Topology + KubernetesCapability> Interpret<T> for K8sResourceInterpret {
    /// Applies the stored manifest, returning a synthetic success message.
    fn execute(&self, topology: &T) -> Result<String, String> {
        topology.apply_manifest(&self.manifest)?;
        // apply_manifest returns Result<(), String>, adapt if needed
        Ok(format!("Applied manifest for {}", topology.name())) // Example success message
    }
}
|
||||
|
||||
// --- Maestro ---
// Maestro remains almost identical, leveraging the Score<T> bound
pub struct Maestro<T: Topology> {
    topology: T,
    // Stores Score<T> trait objects, ensuring compatibility
    scores: Vec<Box<dyn Score<T>>>,
}

impl<T: Topology + 'static> Maestro<T> { // Often need T: 'static here
    /// Creates a maestro bound to the given topology, with no scores yet.
    pub fn new(topology: T) -> Self {
        Self {
            topology,
            scores: Vec::new(),
        }
    }

    // This method signature is key - it takes a concrete S
    // and the compiler checks if S implements Score<T>
    pub fn register_score<S>(&mut self, score: S) -> Result<(), String>
    where
        S: Score<T> + ScoreDefinition + Clone + 'static, // Ensure S is a Score for *this* T
        // We might need S: Clone if we want to store Box::new(score)
        // Alternatively, accept Box<dyn ScoreDefinition> and try to downcast/wrap
    {
        println!(
            "Registering score '{}' for topology '{}'",
            score.name(),
            self.topology.name()
        );
        // The compiler has already guaranteed that S implements Score<T>
        // We need to box it as dyn Score<T>
        self.scores.push(Box::new(score));
        Ok(())
    }

    // Alternative registration if you have Box<dyn ScoreDefinition>
    pub fn register_score_definition(&mut self, score_def: Box<dyn ScoreDefinition>) -> Result<(), String>
    where
        T: Topology + CommandCapability + KubernetesCapability + 'static, // Example: list all needed caps here, or use generics + downcasting
    {
        println!(
            "Attempting to register score '{}' for topology '{}'",
            score_def.name(),
            self.topology.name()
        );

        // Downcast to check concrete type and then check compatibility
        if let Some(cs) = score_def.as_any().downcast_ref::<CommandScore>() {
            // Check if T satisfies CommandScore's requirements (CommandCapability)
            // This check is somewhat manual or needs restructuring if we avoid listing all caps
            // A simpler way is to just try to create the Box<dyn Score<T>>
            let boxed_score: Box<dyn Score<T>> = Box::new(cs.clone()); // This relies on the blanket impls
            self.scores.push(boxed_score);
            Ok(())
        } else if let Some(ks) = score_def.as_any().downcast_ref::<K8sResourceScore>() {
            // Check if T satisfies K8sResourceScore's requirements (KubernetesCapability)
            let boxed_score: Box<dyn Score<T>> = Box::new(ks.clone());
            self.scores.push(boxed_score);
            Ok(())
        } else {
            Err(format!("Score '{}' is of an unknown type or incompatible", score_def.name()))
        }
        // This downcasting approach in Maestro slightly undermines the full compile-time
        // check unless designed carefully. The generic `register_score<S: Score<T>>` is safer.
    }


    /// Compiles and executes every registered score in order, aborting on
    /// the first failure.
    pub fn orchestrate(&self) -> Result<(), String> {
        println!("Orchestrating topology '{}'", self.topology.name());
        for score in &self.scores {
            println!("Compiling score '{}'", score.name()); // Use name() from ScoreDefinition
            let interpret = score.compile()?;
            println!("Executing score '{}'", score.name());
            interpret.execute(&self.topology)?;
        }
        Ok(())
    }
}
|
||||
|
||||
// --- TUI Example ---
struct ScoreItem {
    // Holds the definition/metadata, NOT the Score<T> trait object
    definition: Box<dyn ScoreDefinition>,
}

struct HarmonyTui {
    // List of available score *definitions*
    available_scores: Vec<ScoreItem>,
    // Example: Maybe maps topology names to Maestros
    // maestros: HashMap<String, Box<dyn Any>>, // Storing Maestros generically is another challenge!
}
|
||||
|
||||
impl HarmonyTui {
|
||||
fn new() -> Self {
|
||||
HarmonyTui { available_scores: vec![] }
|
||||
}
|
||||
|
||||
fn add_available_score(&mut self, score_def: Box<dyn ScoreDefinition>) {
|
||||
self.available_scores.push(ScoreItem { definition: score_def });
|
||||
}
|
||||
|
||||
fn display_scores(&self) {
|
||||
println!("Available Scores:");
|
||||
for (i, item) in self.available_scores.iter().enumerate() {
|
||||
println!("{}: {}", i, item.definition.name());
|
||||
}
|
||||
}
|
||||
|
||||
fn execute_score(&self, score: ScoreItem) {
|
||||
score.definition.
|
||||
|
||||
}
|
||||
|
||||
// Example: Function to add a selected score to a specific Maestro
|
||||
// This function would need access to the Maestros and handle the types
|
||||
fn add_selected_score_to_maestro<T>(
|
||||
&self,
|
||||
score_index: usize,
|
||||
maestro: &mut Maestro<T>
|
||||
) -> Result<(), String>
|
||||
where
|
||||
T: Topology + CommandCapability + KubernetesCapability + 'static, // Adjust bounds as needed
|
||||
{
|
||||
let score_item = self.available_scores.get(score_index)
|
||||
.ok_or("Invalid score index")?;
|
||||
|
||||
// We have Box<dyn ScoreDefinition>, need to add to Maestro<T>
|
||||
// Easiest is to downcast and call the generic register_score
|
||||
|
||||
if let Some(cs) = score_item.definition.as_any().downcast_ref::<CommandScore>() {
|
||||
// Compiler checks if CommandScore: Score<T> via register_score's bound
|
||||
maestro.register_score(cs.clone())?;
|
||||
Ok(())
|
||||
} else if let Some(ks) = score_item.definition.as_any().downcast_ref::<K8sResourceScore>() {
|
||||
// Compiler checks if K8sResourceScore: Score<T> via register_score's bound
|
||||
maestro.register_score(ks.clone())?;
|
||||
Ok(())
|
||||
} else {
|
||||
Err(format!("Cannot add score '{}': Unknown type or check Maestro compatibility", score_item.definition.name()))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub struct K3DTopology {
    // Display name reported via Topology::name().
    name: String,
    // Underlying host used to actually run bash/kubectl commands.
    linux_host: LinuxHostTopology,
    // k3d cluster name; the kubectl context is derived as "k3d-{cluster_name}".
    cluster_name: String,
}

impl Capability for K3DTopology {}

impl K3DTopology {
    /// Creates a K3D topology backed by the given Linux host.
    pub fn new(name: String, linux_host: LinuxHostTopology, cluster_name: String) -> Self {
        Self {
            name,
            linux_host,
            cluster_name,
        }
    }
}

impl Topology for K3DTopology {
    fn name(&self) -> &str {
        &self.name
    }
}
|
||||
|
||||
impl CommandCapability for K3DTopology {
    /// Commands are delegated unchanged to the underlying Linux host.
    fn execute_command(&self, command: &str, args: &Vec<String>) -> Result<String, String> {
        self.linux_host.execute_command(command, args)
    }
}
|
||||
|
||||
impl KubernetesCapability for K3DTopology {
    /// Applies a YAML manifest by writing it to a temp file on the host
    /// and then running `kubectl apply -f` against the k3d context.
    fn apply_manifest(&self, manifest: &str) -> Result<(), String> {
        println!("Applying manifest to K3D cluster '{}'", self.cluster_name);
        // Write manifest to a temporary file
        // (also dropped the original useless `format!` around a literal).
        let temp_file = "/tmp/manifest-harmony-temp.yaml".to_string();

        // BUG FIX: the original ran `bash -c "cat > {temp_file}"` without
        // ever feeding the manifest to stdin, so the temp file was never
        // populated (`cat` would just wait on inherited stdin). Embed the
        // manifest in a quoted heredoc so the content is actually written.
        // NOTE(review): this still breaks if the manifest contains the
        // delimiter line `HARMONY_EOF`; a stdin-capable execute_command
        // would be the robust fix.
        self.linux_host.execute_command(
            "bash",
            &Vec::from([
                "-c".to_string(),
                format!("cat > {} <<'HARMONY_EOF'\n{}\nHARMONY_EOF", temp_file, manifest),
            ]),
        )?;

        // Apply with kubectl
        self.linux_host.execute_command("kubectl", &Vec::from([
            "--context".to_string(),
            format!("k3d-{}", self.cluster_name),
            "apply".to_string(),
            "-f".to_string(),
            temp_file,
        ]))?;

        Ok(())
    }

    /// Fetches a single resource as YAML via `kubectl get -o yaml`.
    fn get_resource(&self, resource_type: &str, name: &str) -> Result<String, String> {
        println!(
            "Getting resource {}/{} from K3D cluster '{}'",
            resource_type, name, self.cluster_name
        );
        self.linux_host.execute_command("kubectl", &Vec::from([
            "--context".to_string(),
            format!("k3d-{}", self.cluster_name),
            "get".to_string(),
            resource_type.to_string(),
            name.to_string(),
            "-o".to_string(),
            "yaml".to_string(),
        ]))
    }
}
|
||||
|
||||
|
||||
pub struct LinuxHostTopology {
    // Display name reported via Topology::name().
    name: String,
    // Host identifier; currently only used for logging — see
    // execute_command, which runs commands locally.
    host: String,
}
impl Capability for LinuxHostTopology {}

impl LinuxHostTopology {
    /// Creates a Linux host topology with a display name and host address.
    pub fn new(name: String, host: String) -> Self {
        Self { name, host }
    }
}

impl Topology for LinuxHostTopology {
    fn name(&self) -> &str {
        &self.name
    }
}
|
||||
|
||||
impl CommandCapability for LinuxHostTopology {
    /// Runs `command` with `args`, returning stdout on success and stderr
    /// on failure (both lossily UTF-8 decoded).
    ///
    /// NOTE(review): despite the `host` field, this executes the command
    /// *locally* via std::process::Command — it does not SSH to `self.host`.
    fn execute_command(&self, command: &str, args: &Vec<String>) -> Result<String, String> {
        println!("Executing on {}: {} {:?}", self.host, command, args);
        // In a real implementation, this would SSH to the host and execute the command
        let output = Command::new(command)
            .args(args)
            .output()
            .map_err(|e| e.to_string())?;

        if output.status.success() {
            Ok(String::from_utf8_lossy(&output.stdout).to_string())
        } else {
            Err(String::from_utf8_lossy(&output.stderr).to_string())
        }
    }
}
|
||||
|
||||
|
||||
|
||||
// --- Main Function Adapated ---
|
||||
fn main() {
|
||||
// --- Linux Host ---
|
||||
let linux_host = LinuxHostTopology::new("dev-machine".to_string(), "localhost".to_string());
|
||||
let mut linux_maestro = Maestro::new(linux_host);
|
||||
|
||||
let df_score = CommandScore::new(
|
||||
"check-disk".to_string(),
|
||||
"df".to_string(),
|
||||
vec!["-h".to_string()],
|
||||
);
|
||||
|
||||
// Registration uses the generic method, compiler checks CommandScore: Score<LinuxHostTopology>
|
||||
linux_maestro.register_score(df_score.clone()).unwrap(); // clone needed if df_score used later
|
||||
|
||||
// --- K3D Host ---
|
||||
let k3d_host = LinuxHostTopology::new("k3d-host".to_string(), "localhost".to_string());
|
||||
let k3d_topology = K3DTopology::new(
|
||||
"dev-cluster".to_string(),
|
||||
k3d_host,
|
||||
"devcluster".to_string(),
|
||||
);
|
||||
let mut k3d_maestro = Maestro::new(k3d_topology);
|
||||
|
||||
let nodes_score = CommandScore::new(
|
||||
"check-nodes".to_string(),
|
||||
"kubectl".to_string(),
|
||||
vec!["get".to_string(), "nodes".to_string()],
|
||||
);
|
||||
let nginx_score = K8sResourceScore::new(
|
||||
"deploy-nginx".to_string(),
|
||||
// ... manifest string ...
|
||||
r#"..."#.to_string(),
|
||||
);
|
||||
|
||||
// Compiler checks CommandScore: Score<K3DTopology>
|
||||
k3d_maestro.register_score(nodes_score.clone()).unwrap();
|
||||
// Compiler checks K8sResourceScore: Score<K3DTopology>
|
||||
k3d_maestro.register_score(nginx_score.clone()).unwrap();
|
||||
|
||||
|
||||
// --- TUI Example Usage ---
|
||||
let mut tui = HarmonyTui::new();
|
||||
// Add score *definitions* to the TUI
|
||||
tui.add_available_score(Box::new(df_score));
|
||||
tui.add_available_score(Box::new(nodes_score));
|
||||
tui.add_available_score(Box::new(nginx_score));
|
||||
|
||||
tui.display_scores();
|
||||
|
||||
// Simulate user selecting score 0 (check-disk) and adding to linux_maestro
|
||||
match tui.add_selected_score_to_maestro(0, &mut linux_maestro) {
|
||||
Ok(_) => println!("Successfully registered check-disk to linux_maestro via TUI selection"),
|
||||
Err(e) => println!("Failed: {}", e), // Should succeed
|
||||
}
|
||||
|
||||
// Simulate user selecting score 2 (deploy-nginx) and adding to linux_maestro
|
||||
match tui.add_selected_score_to_maestro(2, &mut linux_maestro) {
|
||||
Ok(_) => println!("Successfully registered deploy-nginx to linux_maestro via TUI selection"), // Should fail!
|
||||
Err(e) => println!("Correctly failed to add deploy-nginx to linux_maestro: {}", e),
|
||||
// The failure happens inside add_selected_score_to_maestro because the
|
||||
// maestro.register_score(ks.clone()) call fails the trait bound check
|
||||
// K8sResourceScore: Score<LinuxHostTopology> is false.
|
||||
}
|
||||
|
||||
// Simulate user selecting score 2 (deploy-nginx) and adding to k3d_maestro
|
||||
match tui.add_selected_score_to_maestro(2, &mut k3d_maestro) {
|
||||
Ok(_) => println!("Successfully registered deploy-nginx to k3d_maestro via TUI selection"), // Should succeed
|
||||
Err(e) => println!("Failed: {}", e),
|
||||
}
|
||||
|
||||
// --- Orchestration ---
|
||||
println!("\n--- Orchestrating Linux Maestro ---");
|
||||
linux_maestro.orchestrate().unwrap();
|
||||
println!("\n--- Orchestrating K3D Maestro ---");
|
||||
k3d_maestro.orchestrate().unwrap();
|
||||
}
|
||||
129
adr/003-abstractions/topology/src/main_right.rs
Normal file
129
adr/003-abstractions/topology/src/main_right.rs
Normal file
@@ -0,0 +1,129 @@
|
||||
use std::marker::PhantomData;
|
||||
|
||||
// Capability Trait Hierarchy
|
||||
pub trait Capability {}
|
||||
|
||||
// Specific Capability Traits
|
||||
pub trait ShellAccess: Capability {}
|
||||
pub trait ContainerRuntime: Capability {}
|
||||
pub trait KubernetesAccess: Capability {}
|
||||
pub trait FileSystemAccess: Capability {}
|
||||
|
||||
// Topology Trait - Defines the core interface for infrastructure topologies
|
||||
pub trait Topology {
|
||||
type Capabilities: Capability;
|
||||
|
||||
fn name(&self) -> &str;
|
||||
}
|
||||
|
||||
// Score Trait - Defines the core interface for infrastructure transformation
|
||||
pub trait Score {
|
||||
type RequiredCapabilities: Capability;
|
||||
type OutputTopology: Topology;
|
||||
|
||||
fn apply<T: Topology>(&self, topology: T) -> Result<Self::OutputTopology, String>;
|
||||
}
|
||||
|
||||
// Linux Host Topology
|
||||
pub struct LinuxHostTopology;
|
||||
|
||||
impl Topology for LinuxHostTopology {
|
||||
type Capabilities = dyn ShellAccess + FileSystemAccess;
|
||||
|
||||
fn name(&self) -> &str {
|
||||
"Linux Host"
|
||||
}
|
||||
}
|
||||
|
||||
impl ShellAccess for LinuxHostTopology {}
|
||||
impl FileSystemAccess for LinuxHostTopology {}
|
||||
|
||||
// K3D Topology
|
||||
pub struct K3DTopology;
|
||||
|
||||
impl Topology for K3DTopology {
|
||||
type Capabilities = dyn ContainerRuntime + KubernetesAccess + ShellAccess;
|
||||
|
||||
fn name(&self) -> &str {
|
||||
"K3D Kubernetes Cluster"
|
||||
}
|
||||
}
|
||||
|
||||
impl ContainerRuntime for K3DTopology {}
|
||||
impl KubernetesAccess for K3DTopology {}
|
||||
impl ShellAccess for K3DTopology {}
|
||||
|
||||
// Command Score - A score that requires shell access
|
||||
pub struct CommandScore {
|
||||
command: String,
|
||||
}
|
||||
|
||||
impl Score for CommandScore {
|
||||
type RequiredCapabilities = dyn ShellAccess;
|
||||
type OutputTopology = LinuxHostTopology;
|
||||
|
||||
fn apply<T: Topology>(&self, _topology: T) -> Result<Self::OutputTopology, String>
|
||||
where
|
||||
T: ShellAccess
|
||||
{
|
||||
// Simulate command execution
|
||||
println!("Executing command: {}", self.command);
|
||||
Ok(LinuxHostTopology)
|
||||
}
|
||||
}
|
||||
|
||||
// Kubernetes Resource Score
|
||||
pub struct K8sResourceScore {
|
||||
resource_definition: String,
|
||||
}
|
||||
|
||||
impl Score for K8sResourceScore {
|
||||
type RequiredCapabilities = dyn KubernetesAccess;
|
||||
type OutputTopology = K3DTopology;
|
||||
|
||||
fn apply<T: Topology>(&self, _topology: T) -> Result<Self::OutputTopology, String>
|
||||
where
|
||||
T: dyn KubernetesAccess
|
||||
{
|
||||
// Simulate Kubernetes resource application
|
||||
println!("Applying K8s resource: {}", self.resource_definition);
|
||||
Ok(K3DTopology)
|
||||
}
|
||||
}
|
||||
|
||||
// Maestro - The orchestration coordinator
|
||||
pub struct Maestro;
|
||||
|
||||
impl Maestro {
|
||||
// Type-safe score application
|
||||
pub fn apply_score<T, S>(topology: T, score: S) -> Result<S::OutputTopology, String>
|
||||
where
|
||||
T: Topology,
|
||||
S: Score,
|
||||
T: S::RequiredCapabilities
|
||||
{
|
||||
score.apply(topology)
|
||||
}
|
||||
}
|
||||
|
||||
fn main() {
|
||||
// Example usage demonstrating type-driven design
|
||||
let linux_host = LinuxHostTopology;
|
||||
let k3d_cluster = K3DTopology;
|
||||
|
||||
// Command score on Linux host
|
||||
let command_score = CommandScore {
|
||||
command: "echo 'Hello, World!'".to_string(),
|
||||
};
|
||||
|
||||
let result = Maestro::apply_score(linux_host, command_score)
|
||||
.expect("Command score application failed");
|
||||
|
||||
// K8s resource score on K3D cluster
|
||||
let k8s_score = K8sResourceScore {
|
||||
resource_definition: "apiVersion: v1\nkind: Pod\n...".to_string(),
|
||||
};
|
||||
|
||||
let k8s_result = Maestro::apply_score(k3d_cluster, k8s_score)
|
||||
.expect("K8s resource score application failed");
|
||||
}
|
||||
155
adr/003-abstractions/topology/src/main_v1.rs
Normal file
155
adr/003-abstractions/topology/src/main_v1.rs
Normal file
@@ -0,0 +1,155 @@
|
||||
mod main_right;
|
||||
mod main_claude;
|
||||
// Capability Traits
|
||||
|
||||
trait Capability {}
|
||||
|
||||
trait LinuxOperations: Capability {
|
||||
fn execute_command(&self, command: &str) -> Result<String, String>;
|
||||
}
|
||||
|
||||
trait KubernetesOperations: Capability {
|
||||
fn create_resource(&self, resource: &str) -> Result<String, String>;
|
||||
fn delete_resource(&self, resource: &str) -> Result<String, String>;
|
||||
}
|
||||
|
||||
// Topology Implementations
|
||||
|
||||
struct LinuxHostTopology;
|
||||
|
||||
impl LinuxOperations for LinuxHostTopology {
|
||||
fn execute_command(&self, command: &str) -> Result<String, String> {
|
||||
// Implementation for executing commands on a Linux host
|
||||
Ok(format!("Executed command: {}", command))
|
||||
}
|
||||
}
|
||||
|
||||
impl Capability for LinuxHostTopology {}
|
||||
|
||||
struct K3DTopology;
|
||||
|
||||
impl KubernetesOperations for K3DTopology {
|
||||
fn create_resource(&self, resource: &str) -> Result<String, String> {
|
||||
// Implementation for creating Kubernetes resources in K3D
|
||||
Ok(format!("Created resource: {}", resource))
|
||||
}
|
||||
|
||||
fn delete_resource(&self, resource: &str) -> Result<String, String> {
|
||||
// Implementation for deleting Kubernetes resources in K3D
|
||||
Ok(format!("Deleted resource: {}", resource))
|
||||
}
|
||||
}
|
||||
|
||||
impl Capability for K3DTopology {}
|
||||
|
||||
// Score Implementations
|
||||
|
||||
struct K8sResourceScore {
|
||||
resource: String,
|
||||
}
|
||||
|
||||
impl<T> Score<T> for K8sResourceScore
|
||||
where
|
||||
T: KubernetesOperations,
|
||||
{
|
||||
fn execute(&self, topology: &T) -> Result<String, String> {
|
||||
topology.create_resource(&self.resource)
|
||||
}
|
||||
}
|
||||
|
||||
struct CommandScore {
|
||||
command: String,
|
||||
}
|
||||
|
||||
impl<T> Score<T> for CommandScore
|
||||
where
|
||||
T: LinuxOperations + 'static,
|
||||
{
|
||||
fn execute(&self, topology: &T) -> Result<String, String> {
|
||||
topology.execute_command(&self.command)
|
||||
}
|
||||
}
|
||||
|
||||
// Score Trait
|
||||
|
||||
trait Score<T>
|
||||
where
|
||||
T: Capability + 'static,
|
||||
{
|
||||
fn execute(&self, topology: &T) -> Result<String, String>;
|
||||
}
|
||||
|
||||
// Maestro Implementation
|
||||
|
||||
struct Maestro {
|
||||
scores: Vec<Box<dyn Score<Box<dyn Capability>>>>,
|
||||
}
|
||||
|
||||
impl Maestro {
|
||||
fn new() -> Self {
|
||||
Maestro { scores: Vec::new() }
|
||||
}
|
||||
|
||||
fn register_score<T>(&mut self, score: Box<T>)
|
||||
where
|
||||
T: Score<Box<dyn Capability>> + 'static,
|
||||
{
|
||||
self.scores.push(Box::new(score));
|
||||
}
|
||||
|
||||
fn execute_scores<T>(&self, topology: &T) -> Result<Vec<String>, String>
|
||||
where
|
||||
T: Capability + 'static,
|
||||
{
|
||||
let mut results = Vec::new();
|
||||
for score in &self.scores {
|
||||
if let Some(score) = score.as_any().downcast_ref::<Box<dyn Score<T>>>() {
|
||||
results.push(score.execute(topology)?);
|
||||
}
|
||||
}
|
||||
Ok(results)
|
||||
}
|
||||
}
|
||||
|
||||
// Helper trait for downcasting
|
||||
|
||||
trait AsAny {
|
||||
fn as_any(&self) -> &dyn std::any::Any;
|
||||
}
|
||||
|
||||
impl<T: 'static> AsAny for T {
|
||||
fn as_any(&self) -> &dyn std::any::Any {
|
||||
self
|
||||
}
|
||||
}
|
||||
|
||||
// Main Function
|
||||
|
||||
fn main() {
|
||||
let mut maestro = Maestro::new();
|
||||
|
||||
let k8s_score = K8sResourceScore {
|
||||
resource: "deployment.yaml".to_string(),
|
||||
};
|
||||
maestro.register_score(k8s_score);
|
||||
|
||||
let command_score = CommandScore {
|
||||
command: "ls -l".to_string(),
|
||||
};
|
||||
maestro.register_score(command_score);
|
||||
|
||||
let linux_topology = LinuxHostTopology;
|
||||
let k3d_topology = K3DTopology;
|
||||
|
||||
let linux_results = maestro.execute_scores(&linux_topology).unwrap();
|
||||
println!("Linux Topology Results:");
|
||||
for result in linux_results {
|
||||
println!("{}", result);
|
||||
}
|
||||
|
||||
let k3d_results = maestro.execute_scores(&k3d_topology).unwrap();
|
||||
println!("K3D Topology Results:");
|
||||
for result in k3d_results {
|
||||
println!("{}", result);
|
||||
}
|
||||
}
|
||||
9
adr/003-abstractions/topology2/Cargo.toml
Normal file
9
adr/003-abstractions/topology2/Cargo.toml
Normal file
@@ -0,0 +1,9 @@
|
||||
[package]
|
||||
name = "example-topology2"
|
||||
edition = "2024"
|
||||
version.workspace = true
|
||||
readme.workspace = true
|
||||
license.workspace = true
|
||||
publish = false
|
||||
|
||||
[dependencies]
|
||||
183
adr/003-abstractions/topology2/src/main.rs
Normal file
183
adr/003-abstractions/topology2/src/main.rs
Normal file
@@ -0,0 +1,183 @@
|
||||
// Clean capability-based design using type parameters
|
||||
|
||||
trait Capability {}
|
||||
|
||||
trait K8sCapability: Capability {
|
||||
fn deploy_k8s_resource(&self, resource_yaml: &str);
|
||||
fn execute_kubectl(&self, command: &str) -> String;
|
||||
}
|
||||
|
||||
trait LinuxCapability: Capability {
|
||||
fn execute_command(&self, command: &str, args: &[&str]);
|
||||
fn download_file(&self, url: &str, destination: &str) -> Result<(), String>;
|
||||
}
|
||||
|
||||
trait LoadBalancerCapability: Capability {
|
||||
fn configure_load_balancer(&self, services: &[&str], port: u16);
|
||||
fn get_load_balancer_status(&self) -> String;
|
||||
}
|
||||
|
||||
// Score trait with capability type parameter
|
||||
trait Score<C: ?Sized> {
|
||||
fn execute(&self, capability: &C) -> String;
|
||||
}
|
||||
|
||||
// Topology implementations with marker trait
|
||||
trait Topology {}
|
||||
|
||||
struct K3DTopology {}
|
||||
impl Topology for K3DTopology {}
|
||||
impl Capability for K3DTopology {}
|
||||
impl K8sCapability for K3DTopology {
|
||||
fn deploy_k8s_resource(&self, resource_yaml: &str) {
|
||||
todo!()
|
||||
}
|
||||
|
||||
fn execute_kubectl(&self, command: &str) -> String {
|
||||
todo!()
|
||||
}
|
||||
// Implementation...
|
||||
}
|
||||
|
||||
struct LinuxTopology {}
|
||||
impl Topology for LinuxTopology {}
|
||||
impl Capability for LinuxTopology {}
|
||||
impl LinuxCapability for LinuxTopology {
|
||||
fn execute_command(&self, command: &str, args: &[&str]) {
|
||||
todo!()
|
||||
}
|
||||
|
||||
fn download_file(&self, url: &str, destination: &str) -> Result<(), String> {
|
||||
todo!()
|
||||
}
|
||||
// Implementation...
|
||||
}
|
||||
|
||||
struct OKDHaClusterTopology {}
|
||||
impl Topology for OKDHaClusterTopology {}
|
||||
impl Capability for OKDHaClusterTopology {}
|
||||
impl K8sCapability for OKDHaClusterTopology {
|
||||
fn deploy_k8s_resource(&self, resource_yaml: &str) {
|
||||
todo!()
|
||||
}
|
||||
|
||||
fn execute_kubectl(&self, command: &str) -> String {
|
||||
todo!()
|
||||
}
|
||||
// Implementation...
|
||||
}
|
||||
impl LinuxCapability for OKDHaClusterTopology {
|
||||
fn execute_command(&self, command: &str, args: &[&str]) {
|
||||
todo!()
|
||||
}
|
||||
|
||||
fn download_file(&self, url: &str, destination: &str) -> Result<(), String> {
|
||||
todo!()
|
||||
}
|
||||
// Implementation...
|
||||
}
|
||||
impl LoadBalancerCapability for OKDHaClusterTopology {
|
||||
fn configure_load_balancer(&self, services: &[&str], port: u16) {
|
||||
todo!()
|
||||
}
|
||||
|
||||
fn get_load_balancer_status(&self) -> String {
|
||||
todo!()
|
||||
}
|
||||
// Implementation...
|
||||
}
|
||||
|
||||
// Score implementations
|
||||
struct LAMPScore {}
|
||||
impl Score<dyn K8sCapability> for LAMPScore {
|
||||
fn execute(&self, capability: &dyn K8sCapability) -> String {
|
||||
todo!()
|
||||
// Implementation...
|
||||
}
|
||||
}
|
||||
|
||||
struct BinaryScore {}
|
||||
impl Score<dyn LinuxCapability> for BinaryScore {
|
||||
fn execute(&self, capability: &dyn LinuxCapability) -> String {
|
||||
todo!()
|
||||
// Implementation...
|
||||
}
|
||||
}
|
||||
|
||||
struct LoadBalancerScore {}
|
||||
impl Score<dyn LoadBalancerCapability> for LoadBalancerScore {
|
||||
fn execute(&self, capability: &dyn LoadBalancerCapability) -> String {
|
||||
todo!()
|
||||
// Implementation...
|
||||
}
|
||||
}
|
||||
|
||||
// Generic Maestro
|
||||
struct Maestro<T> {
|
||||
topology: T,
|
||||
scores: Vec<Box<dyn FnMut(&T) -> String>>,
|
||||
}
|
||||
|
||||
impl<T: 'static> Maestro<T> {
|
||||
fn new(topology: T) -> Self {
|
||||
Self {
|
||||
topology,
|
||||
scores: Vec::new(),
|
||||
}
|
||||
}
|
||||
|
||||
fn interpret_all(&mut self) -> Vec<String> {
|
||||
self.scores.iter_mut()
|
||||
.map(|score| score(&self.topology))
|
||||
.collect()
|
||||
}
|
||||
}
|
||||
|
||||
// Capability-specific extensions
|
||||
impl<T: K8sCapability + 'static> Maestro<T> {
|
||||
fn register_k8s_score<S: Score<dyn K8sCapability> + 'static>(&mut self, score: S) {
|
||||
let score_box = Box::new(move |topology: &T| {
|
||||
score.execute(topology as &dyn K8sCapability)
|
||||
});
|
||||
self.scores.push(score_box);
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: LinuxCapability + 'static> Maestro<T> {
|
||||
fn register_linux_score<S: Score<dyn LinuxCapability> + 'static>(&mut self, score: S) {
|
||||
let score_box = Box::new(move |topology: &T| {
|
||||
score.execute(topology as &dyn LinuxCapability)
|
||||
});
|
||||
self.scores.push(score_box);
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: LoadBalancerCapability + 'static> Maestro<T> {
|
||||
fn register_lb_score<S: Score<dyn LoadBalancerCapability> + 'static>(&mut self, score: S) {
|
||||
let score_box = Box::new(move |topology: &T| {
|
||||
score.execute(topology as &dyn LoadBalancerCapability)
|
||||
});
|
||||
self.scores.push(score_box);
|
||||
}
|
||||
}
|
||||
|
||||
fn main() {
|
||||
// Example usage
|
||||
let k3d = K3DTopology {};
|
||||
let mut k3d_maestro = Maestro::new(k3d);
|
||||
|
||||
// These will compile because K3D implements K8sCapability
|
||||
k3d_maestro.register_k8s_score(LAMPScore {});
|
||||
|
||||
// This would not compile because K3D doesn't implement LoadBalancerCapability
|
||||
// k3d_maestro.register_lb_score(LoadBalancerScore {});
|
||||
|
||||
let linux = LinuxTopology {};
|
||||
let mut linux_maestro = Maestro::new(linux);
|
||||
|
||||
// This will compile because Linux implements LinuxCapability
|
||||
linux_maestro.register_linux_score(BinaryScore {});
|
||||
|
||||
// This would not compile because Linux doesn't implement K8sCapability
|
||||
// linux_maestro.register_k8s_score(LAMPScore {});
|
||||
}
|
||||
324
adr/003-abstractions/topology2/src/main_capabilities.rs
Normal file
324
adr/003-abstractions/topology2/src/main_capabilities.rs
Normal file
@@ -0,0 +1,324 @@
|
||||
fn main() {
|
||||
// Create various topologies
|
||||
let okd_topology = OKDHaClusterTopology::new();
|
||||
let k3d_topology = K3DTopology::new();
|
||||
let linux_topology = LinuxTopology::new();
|
||||
|
||||
// Create scores
|
||||
let lamp_score = LAMPScore::new("MySQL 8.0", "PHP 8.1", "Apache 2.4");
|
||||
let binary_score = BinaryScore::new("https://example.com/binary", vec!["--arg1", "--arg2"]);
|
||||
let load_balancer_score = LoadBalancerScore::new(vec!["service1", "service2"], 80);
|
||||
|
||||
// Example 1: Running LAMP stack on OKD
|
||||
println!("\n=== Deploying LAMP stack on OKD cluster ===");
|
||||
lamp_score.execute(&okd_topology);
|
||||
|
||||
// Example 2: Running LAMP stack on K3D
|
||||
println!("\n=== Deploying LAMP stack on K3D cluster ===");
|
||||
lamp_score.execute(&k3d_topology);
|
||||
|
||||
// Example 3: Running binary on Linux host
|
||||
println!("\n=== Running binary on Linux host ===");
|
||||
binary_score.execute(&linux_topology);
|
||||
|
||||
// Example 4: Running binary on OKD (which can also run Linux commands)
|
||||
println!("\n=== Running binary on OKD host ===");
|
||||
binary_score.execute(&okd_topology);
|
||||
|
||||
// Example 5: Load balancer configuration on OKD
|
||||
println!("\n=== Configuring load balancer on OKD ===");
|
||||
load_balancer_score.execute(&okd_topology);
|
||||
|
||||
// The following would not compile:
|
||||
// load_balancer_score.execute(&k3d_topology); // K3D doesn't implement LoadBalancerCapability
|
||||
// lamp_score.execute(&linux_topology); // Linux doesn't implement K8sCapability
|
||||
}
|
||||
|
||||
// Base Topology trait
|
||||
trait Topology {
|
||||
fn name(&self) -> &str;
|
||||
}
|
||||
|
||||
// Define capabilities
|
||||
trait K8sCapability {
|
||||
fn deploy_k8s_resource(&self, resource_yaml: &str);
|
||||
fn execute_kubectl(&self, command: &str) -> String;
|
||||
}
|
||||
|
||||
trait OKDCapability: K8sCapability {
|
||||
fn execute_oc(&self, command: &str) -> String;
|
||||
}
|
||||
|
||||
trait LinuxCapability {
|
||||
fn execute_command(&self, command: &str, args: &[&str]) -> String;
|
||||
fn download_file(&self, url: &str, destination: &str) -> Result<(), String>;
|
||||
}
|
||||
|
||||
trait LoadBalancerCapability {
|
||||
fn configure_load_balancer(&self, services: &[&str], port: u16);
|
||||
fn get_load_balancer_status(&self) -> String;
|
||||
}
|
||||
|
||||
trait FirewallCapability {
|
||||
fn open_port(&self, port: u16, protocol: &str);
|
||||
fn close_port(&self, port: u16, protocol: &str);
|
||||
}
|
||||
|
||||
trait RouterCapability {
|
||||
fn configure_route(&self, service: &str, hostname: &str);
|
||||
}
|
||||
|
||||
// Topology implementations
|
||||
struct OKDHaClusterTopology {
|
||||
cluster_name: String,
|
||||
}
|
||||
|
||||
impl OKDHaClusterTopology {
|
||||
fn new() -> Self {
|
||||
Self {
|
||||
cluster_name: "okd-ha-cluster".to_string(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Topology for OKDHaClusterTopology {
|
||||
fn name(&self) -> &str {
|
||||
&self.cluster_name
|
||||
}
|
||||
}
|
||||
|
||||
impl K8sCapability for OKDHaClusterTopology {
|
||||
fn deploy_k8s_resource(&self, resource_yaml: &str) {
|
||||
println!("Deploying K8s resource on OKD cluster: {}", resource_yaml);
|
||||
}
|
||||
|
||||
fn execute_kubectl(&self, command: &str) -> String {
|
||||
println!("Executing kubectl command on OKD cluster: {}", command);
|
||||
"kubectl command output".to_string()
|
||||
}
|
||||
}
|
||||
|
||||
impl OKDCapability for OKDHaClusterTopology {
|
||||
fn execute_oc(&self, command: &str) -> String {
|
||||
println!("Executing oc command on OKD cluster: {}", command);
|
||||
"oc command output".to_string()
|
||||
}
|
||||
}
|
||||
|
||||
impl LinuxCapability for OKDHaClusterTopology {
|
||||
fn execute_command(&self, command: &str, args: &[&str]) -> String {
|
||||
println!(
|
||||
"Executing command '{}' with args {:?} on OKD node",
|
||||
command, args
|
||||
);
|
||||
todo!()
|
||||
}
|
||||
|
||||
fn download_file(&self, url: &str, destination: &str) -> Result<(), String> {
|
||||
println!(
|
||||
"Downloading file from {} to {} on OKD node",
|
||||
url, destination
|
||||
);
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl LoadBalancerCapability for OKDHaClusterTopology {
|
||||
fn configure_load_balancer(&self, services: &[&str], port: u16) {
|
||||
println!(
|
||||
"Configuring load balancer for services {:?} on port {} in OKD",
|
||||
services, port
|
||||
);
|
||||
}
|
||||
|
||||
fn get_load_balancer_status(&self) -> String {
|
||||
"OKD Load Balancer: HEALTHY".to_string()
|
||||
}
|
||||
}
|
||||
|
||||
impl FirewallCapability for OKDHaClusterTopology {
|
||||
fn open_port(&self, port: u16, protocol: &str) {
|
||||
println!(
|
||||
"Opening port {} with protocol {} on OKD firewall",
|
||||
port, protocol
|
||||
);
|
||||
}
|
||||
|
||||
fn close_port(&self, port: u16, protocol: &str) {
|
||||
println!(
|
||||
"Closing port {} with protocol {} on OKD firewall",
|
||||
port, protocol
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
impl RouterCapability for OKDHaClusterTopology {
|
||||
fn configure_route(&self, service: &str, hostname: &str) {
|
||||
println!(
|
||||
"Configuring route for service {} with hostname {} on OKD",
|
||||
service, hostname
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
struct K3DTopology {
|
||||
cluster_name: String,
|
||||
}
|
||||
|
||||
impl K3DTopology {
|
||||
fn new() -> Self {
|
||||
Self {
|
||||
cluster_name: "k3d-local".to_string(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Topology for K3DTopology {
|
||||
fn name(&self) -> &str {
|
||||
&self.cluster_name
|
||||
}
|
||||
}
|
||||
|
||||
impl K8sCapability for K3DTopology {
|
||||
fn deploy_k8s_resource(&self, resource_yaml: &str) {
|
||||
println!("Deploying K8s resource on K3D cluster: {}", resource_yaml);
|
||||
}
|
||||
|
||||
fn execute_kubectl(&self, command: &str) -> String {
|
||||
println!("Executing kubectl command on K3D cluster: {}", command);
|
||||
"kubectl command output from K3D".to_string()
|
||||
}
|
||||
}
|
||||
|
||||
struct LinuxTopology {
|
||||
hostname: String,
|
||||
}
|
||||
|
||||
impl LinuxTopology {
|
||||
fn new() -> Self {
|
||||
Self {
|
||||
hostname: "linux-host".to_string(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Topology for LinuxTopology {
|
||||
fn name(&self) -> &str {
|
||||
&self.hostname
|
||||
}
|
||||
}
|
||||
|
||||
impl LinuxCapability for LinuxTopology {
|
||||
fn execute_command(&self, command: &str, args: &[&str]) -> String {
|
||||
println!(
|
||||
"Executing command '{}' with args {:?} on Linux host",
|
||||
command, args
|
||||
);
|
||||
todo!()
|
||||
}
|
||||
|
||||
fn download_file(&self, url: &str, destination: &str) -> Result<(), String> {
|
||||
println!(
|
||||
"Downloading file from {} to {} on Linux host",
|
||||
url, destination
|
||||
);
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
// Score implementations
|
||||
struct LAMPScore {
|
||||
mysql_version: String,
|
||||
php_version: String,
|
||||
apache_version: String,
|
||||
}
|
||||
|
||||
impl LAMPScore {
|
||||
fn new(mysql_version: &str, php_version: &str, apache_version: &str) -> Self {
|
||||
Self {
|
||||
mysql_version: mysql_version.to_string(),
|
||||
php_version: php_version.to_string(),
|
||||
apache_version: apache_version.to_string(),
|
||||
}
|
||||
}
|
||||
|
||||
fn execute<T: K8sCapability>(&self, topology: &T) {
|
||||
// Deploy MySQL
|
||||
topology.deploy_k8s_resource("mysql-deployment.yaml");
|
||||
|
||||
// Deploy PHP
|
||||
topology.deploy_k8s_resource("php-deployment.yaml");
|
||||
|
||||
// Deploy Apache
|
||||
topology.deploy_k8s_resource("apache-deployment.yaml");
|
||||
|
||||
// Create service
|
||||
topology.deploy_k8s_resource("lamp-service.yaml");
|
||||
|
||||
// Check deployment
|
||||
let status = topology.execute_kubectl("get pods -l app=lamp");
|
||||
println!("LAMP deployment status: {}", status);
|
||||
}
|
||||
}
|
||||
|
||||
struct BinaryScore {
|
||||
url: String,
|
||||
args: Vec<String>,
|
||||
}
|
||||
|
||||
impl BinaryScore {
|
||||
fn new(url: &str, args: Vec<&str>) -> Self {
|
||||
Self {
|
||||
url: url.to_string(),
|
||||
args: args.iter().map(|s| s.to_string()).collect(),
|
||||
}
|
||||
}
|
||||
|
||||
fn execute<T: LinuxCapability>(&self, topology: &T) {
|
||||
let destination = "/tmp/binary";
|
||||
|
||||
match topology.download_file(&self.url, destination) {
|
||||
Ok(_) => {
|
||||
println!("Binary downloaded successfully");
|
||||
|
||||
// Convert args to slice of &str
|
||||
let args: Vec<&str> = self.args.iter().map(|s| s.as_str()).collect();
|
||||
|
||||
// Execute the binary
|
||||
topology.execute_command(destination, &args);
|
||||
println!("Binary execution completed");
|
||||
}
|
||||
Err(e) => {
|
||||
println!("Failed to download binary: {}", e);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
struct LoadBalancerScore {
|
||||
services: Vec<String>,
|
||||
port: u16,
|
||||
}
|
||||
|
||||
impl LoadBalancerScore {
|
||||
fn new(services: Vec<&str>, port: u16) -> Self {
|
||||
Self {
|
||||
services: services.iter().map(|s| s.to_string()).collect(),
|
||||
port,
|
||||
}
|
||||
}
|
||||
|
||||
fn execute<T: LoadBalancerCapability>(&self, topology: &T) {
|
||||
println!("Configuring load balancer for services");
|
||||
|
||||
// Convert services to slice of &str
|
||||
let services: Vec<&str> = self.services.iter().map(|s| s.as_str()).collect();
|
||||
|
||||
// Configure load balancer
|
||||
topology.configure_load_balancer(&services, self.port);
|
||||
|
||||
// Check status
|
||||
let status = topology.get_load_balancer_status();
|
||||
println!("Load balancer status: {}", status);
|
||||
}
|
||||
}
|
||||
34
adr/003-abstractions/topology2/src/main_v1.rs
Normal file
34
adr/003-abstractions/topology2/src/main_v1.rs
Normal file
@@ -0,0 +1,34 @@
|
||||
fn main() {}
|
||||
|
||||
trait Topology {}
|
||||
|
||||
struct DummyTopology {}
|
||||
|
||||
impl Topology for DummyTopology {}
|
||||
|
||||
impl Topology for LampTopology {}
|
||||
|
||||
struct LampTopology {}
|
||||
|
||||
struct Maestro {
|
||||
topology: Box<dyn Topology>,
|
||||
}
|
||||
|
||||
trait Score {
|
||||
type Topology: Topology;
|
||||
fn execute(&self, topology: &Self::Topology);
|
||||
}
|
||||
|
||||
struct K8sScore {}
|
||||
impl Score for K8sScore {
|
||||
type Topology = LampTopology;
|
||||
fn execute(&self, topology: &Box<dyn Self::Topology>) {
|
||||
todo!()
|
||||
}
|
||||
}
|
||||
|
||||
impl Maestro {
|
||||
pub fn execute<T: Topology>(&self, score: Box<dyn Score<Topology = T>>) {
|
||||
score.execute(&self.topology);
|
||||
}
|
||||
}
|
||||
76
adr/003-abstractions/topology2/src/main_v2.rs
Normal file
76
adr/003-abstractions/topology2/src/main_v2.rs
Normal file
@@ -0,0 +1,76 @@
|
||||
fn main() {
|
||||
// Example usage
|
||||
let lamp_topology = LampTopology {};
|
||||
let k8s_score = K8sScore {};
|
||||
let docker_topology = DockerTopology{};
|
||||
|
||||
// Type-safe execution
|
||||
let maestro = Maestro::new(Box::new(docker_topology));
|
||||
maestro.execute(&k8s_score); // This will work
|
||||
|
||||
// This would fail at compile time if we tried:
|
||||
// let dummy_topology = DummyTopology {};
|
||||
// let maestro = Maestro::new(Box::new(dummy_topology));
|
||||
// maestro.execute(&k8s_score); // Error: expected LampTopology, found DummyTopology
|
||||
}
|
||||
|
||||
// Base trait for all topologies
|
||||
trait Topology {
|
||||
// Common topology methods could go here
|
||||
fn topology_type(&self) -> &str;
|
||||
}
|
||||
|
||||
struct DummyTopology {}
|
||||
impl Topology for DummyTopology {
|
||||
fn topology_type(&self) -> &str { "Dummy" }
|
||||
}
|
||||
|
||||
struct LampTopology {}
|
||||
impl Topology for LampTopology {
|
||||
fn topology_type(&self) -> &str { "LAMP" }
|
||||
}
|
||||
|
||||
struct DockerTopology {}
|
||||
|
||||
impl Topology for DockerTopology {
|
||||
fn topology_type(&self) -> &str {
|
||||
todo!("DockerTopology")
|
||||
}
|
||||
}
|
||||
|
||||
// The Score trait with an associated type for the required topology
|
||||
trait Score {
|
||||
type RequiredTopology: Topology + ?Sized;
|
||||
fn execute(&self, topology: &Self::RequiredTopology);
|
||||
fn score_type(&self) -> &str;
|
||||
}
|
||||
|
||||
// A score that requires LampTopology
|
||||
struct K8sScore {}
|
||||
impl Score for K8sScore {
|
||||
type RequiredTopology = DockerTopology;
|
||||
|
||||
fn execute(&self, topology: &Self::RequiredTopology) {
|
||||
println!("Executing K8sScore on {} topology", topology.topology_type());
|
||||
// Implementation details...
|
||||
}
|
||||
|
||||
fn score_type(&self) -> &str { "K8s" }
|
||||
}
|
||||
|
||||
// A generic maestro that can work with any topology type
|
||||
struct Maestro<T: Topology + ?Sized> {
|
||||
topology: Box<T>,
|
||||
}
|
||||
|
||||
impl<T: Topology + ?Sized> Maestro<T> {
|
||||
pub fn new(topology: Box<T>) -> Self {
|
||||
Maestro { topology }
|
||||
}
|
||||
|
||||
// Execute a score that requires this specific topology type
|
||||
pub fn execute<S: Score<RequiredTopology = T>>(&self, score: &S) {
|
||||
println!("Maestro executing {} score", score.score_type());
|
||||
score.execute(&*self.topology);
|
||||
}
|
||||
}
|
||||
360
adr/003-abstractions/topology2/src/main_v4.rs
Normal file
360
adr/003-abstractions/topology2/src/main_v4.rs
Normal file
@@ -0,0 +1,360 @@
|
||||
fn main() {
|
||||
// Create topologies
|
||||
let okd_topology = OKDHaClusterTopology::new();
|
||||
let k3d_topology = K3DTopology::new();
|
||||
let linux_topology = LinuxTopology::new();
|
||||
|
||||
// Create scores - boxing them as trait objects for dynamic dispatch
|
||||
let scores: Vec<Box<dyn Score>> = vec![
|
||||
Box::new(LAMPScore::new("MySQL 8.0", "PHP 8.1", "Apache 2.4")),
|
||||
Box::new(BinaryScore::new("https://example.com/binary", vec!["--arg1", "--arg2"])),
|
||||
Box::new(LoadBalancerScore::new(vec!["service1", "service2"], 80)),
|
||||
];
|
||||
|
||||
// Running scores on OKD topology (which has all capabilities)
|
||||
println!("\n=== Running all scores on OKD HA Cluster ===");
|
||||
for score in &scores {
|
||||
match score.execute(&okd_topology) {
|
||||
Ok(result) => println!("Score executed successfully: {}", result),
|
||||
Err(e) => println!("Failed to execute score: {}", e),
|
||||
}
|
||||
}
|
||||
|
||||
// Running scores on K3D topology (only has K8s capability)
|
||||
println!("\n=== Running scores on K3D Cluster ===");
|
||||
for score in &scores {
|
||||
match score.execute(&k3d_topology) {
|
||||
Ok(result) => println!("Score executed successfully: {}", result),
|
||||
Err(e) => println!("Failed to execute score: {}", e),
|
||||
}
|
||||
}
|
||||
|
||||
// Running scores on Linux topology (only has Linux capability)
|
||||
println!("\n=== Running scores on Linux Host ===");
|
||||
for score in &scores {
|
||||
match score.execute(&linux_topology) {
|
||||
Ok(result) => println!("Score executed successfully: {}", result),
|
||||
Err(e) => println!("Failed to execute score: {}", e),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Base Topology trait.
// The `Any` supertrait enables runtime downcasting to concrete topology
// types, which the Score implementations use for capability checks.
trait Topology: Any {
    /// Human-readable name of this topology instance.
    fn name(&self) -> &str;

    // This method allows us to get type information at runtime
    fn as_any(&self) -> &dyn Any;
}
|
||||
|
||||
// Use Any trait for runtime type checking
|
||||
use std::any::Any;
|
||||
|
||||
// Define capabilities.
// Each trait describes one facility a topology may offer; scores probe for
// them at runtime by downcasting the topology.
trait K8sCapability {
    /// Applies the given resource manifest to the cluster.
    fn deploy_k8s_resource(&self, resource_yaml: &str);
    /// Runs a kubectl command and returns its output.
    fn execute_kubectl(&self, command: &str) -> String;
}

// OKD extends plain Kubernetes with the `oc` CLI.
trait OKDCapability: K8sCapability {
    fn execute_oc(&self, command: &str) -> String;
}

trait LinuxCapability {
    /// Runs `command` with `args` on the host.
    fn execute_command(&self, command: &str, args: &[&str]);
    /// Downloads `url` to `destination`; Err carries a failure message.
    fn download_file(&self, url: &str, destination: &str) -> Result<(), String>;
}

trait LoadBalancerCapability {
    /// Balances the named services on the given port.
    fn configure_load_balancer(&self, services: &[&str], port: u16);
    /// Returns a human-readable status string.
    fn get_load_balancer_status(&self) -> String;
}
|
||||
|
||||
// Base Score trait with dynamic dispatch
trait Score {
    // Generic execute method that takes any topology.
    /// Returns Ok(summary) on success, or Err(message) when the topology
    /// lacks the capability this score requires.
    fn execute(&self, topology: &dyn Topology) -> Result<String, String>;

    // Optional method to get score type for better error messages
    fn score_type(&self) -> &str;
}
|
||||
|
||||
// Topology implementations

/// Highly-available OKD cluster; provides every capability in this example.
struct OKDHaClusterTopology {
    cluster_name: String,
}

impl OKDHaClusterTopology {
    /// Creates the default OKD HA cluster topology.
    fn new() -> Self {
        let cluster_name = String::from("okd-ha-cluster");
        Self { cluster_name }
    }
}
|
||||
|
||||
impl Topology for OKDHaClusterTopology {
    /// Reports the cluster name as this topology's display name.
    fn name(&self) -> &str {
        &self.cluster_name
    }

    // Exposes the concrete type for runtime downcasting by scores.
    fn as_any(&self) -> &dyn Any {
        self
    }
}
|
||||
|
||||
// OKD provides all four capabilities, so every score in main() can run on it.
impl K8sCapability for OKDHaClusterTopology {
    fn deploy_k8s_resource(&self, resource_yaml: &str) {
        // Stub: a real implementation would apply the manifest to the cluster.
        println!("Deploying K8s resource on OKD cluster: {}", resource_yaml);
    }

    fn execute_kubectl(&self, command: &str) -> String {
        // Stub: returns a fixed placeholder instead of real kubectl output.
        println!("Executing kubectl command on OKD cluster: {}", command);
        "kubectl command output".to_string()
    }
}

impl OKDCapability for OKDHaClusterTopology {
    fn execute_oc(&self, command: &str) -> String {
        println!("Executing oc command on OKD cluster: {}", command);
        "oc command output".to_string()
    }
}

impl LinuxCapability for OKDHaClusterTopology {
    fn execute_command(&self, command: &str, args: &[&str]) {
        println!("Executing command '{}' with args {:?} on OKD node", command, args);
    }

    fn download_file(&self, url: &str, destination: &str) -> Result<(), String> {
        // Stub always succeeds; a real implementation would surface errors.
        println!("Downloading file from {} to {} on OKD node", url, destination);
        Ok(())
    }
}

impl LoadBalancerCapability for OKDHaClusterTopology {
    fn configure_load_balancer(&self, services: &[&str], port: u16) {
        println!("Configuring load balancer for services {:?} on port {} in OKD", services, port);
    }

    fn get_load_balancer_status(&self) -> String {
        "OKD Load Balancer: HEALTHY".to_string()
    }
}
|
||||
|
||||
/// Local k3d cluster; provides only the K8s capability.
struct K3DTopology {
    cluster_name: String,
}

impl K3DTopology {
    /// Creates the default local k3d topology.
    fn new() -> Self {
        let cluster_name = String::from("k3d-local");
        Self { cluster_name }
    }
}
|
||||
|
||||
impl Topology for K3DTopology {
    /// Reports the cluster name as this topology's display name.
    fn name(&self) -> &str {
        &self.cluster_name
    }

    // Exposes the concrete type for runtime downcasting by scores.
    fn as_any(&self) -> &dyn Any {
        self
    }
}
|
||||
|
||||
impl K8sCapability for K3DTopology {
    fn deploy_k8s_resource(&self, resource_yaml: &str) {
        // Stub: a real implementation would apply the manifest to the cluster.
        println!("Deploying K8s resource on K3D cluster: {}", resource_yaml);
    }

    fn execute_kubectl(&self, command: &str) -> String {
        // Stub: returns a fixed placeholder instead of real kubectl output.
        println!("Executing kubectl command on K3D cluster: {}", command);
        "kubectl command output from K3D".to_string()
    }
}
|
||||
|
||||
/// Plain Linux host; provides only the Linux capability.
struct LinuxTopology {
    hostname: String,
}

impl LinuxTopology {
    /// Creates the default Linux host topology.
    fn new() -> Self {
        let hostname = String::from("linux-host");
        Self { hostname }
    }
}
|
||||
|
||||
impl Topology for LinuxTopology {
    /// Reports the hostname as this topology's display name.
    fn name(&self) -> &str {
        &self.hostname
    }

    // Exposes the concrete type for runtime downcasting by scores.
    fn as_any(&self) -> &dyn Any {
        self
    }
}
|
||||
|
||||
impl LinuxCapability for LinuxTopology {
    fn execute_command(&self, command: &str, args: &[&str]) {
        println!("Executing command '{}' with args {:?} on Linux host", command, args);
    }

    fn download_file(&self, url: &str, destination: &str) -> Result<(), String> {
        // Stub always succeeds; a real implementation would surface errors.
        println!("Downloading file from {} to {} on Linux host", url, destination);
        Ok(())
    }
}
|
||||
|
||||
// Score implementations using dynamic capability checks
struct LAMPScore {
    // Component versions to deploy (display strings, e.g. "MySQL 8.0").
    mysql_version: String,
    php_version: String,
    apache_version: String,
}

impl LAMPScore {
    /// Builds a LAMP score from the desired component versions.
    fn new(mysql_version: &str, php_version: &str, apache_version: &str) -> Self {
        Self {
            mysql_version: mysql_version.to_string(),
            php_version: php_version.to_string(),
            apache_version: apache_version.to_string(),
        }
    }

    // Helper method for typesafe execution.
    /// Deploys the LAMP stack through a topology's K8s capability and
    /// returns a deployment-status summary string.
    fn execute_with_k8s(&self, topology: &dyn K8sCapability) -> String {
        println!("Deploying LAMP stack with MySQL {}, PHP {}, Apache {}",
            self.mysql_version, self.php_version, self.apache_version);

        // Deploy MySQL
        topology.deploy_k8s_resource("mysql-deployment.yaml");

        // Deploy PHP
        topology.deploy_k8s_resource("php-deployment.yaml");

        // Deploy Apache
        topology.deploy_k8s_resource("apache-deployment.yaml");

        // Create service
        topology.deploy_k8s_resource("lamp-service.yaml");

        // Check deployment
        let status = topology.execute_kubectl("get pods -l app=lamp");
        format!("LAMP deployment status: {}", status)
    }
}
|
||||
|
||||
impl Score for LAMPScore {
    fn execute(&self, topology: &dyn Topology) -> Result<String, String> {
        // Try to downcast to K8sCapability.
        // NOTE(review): the downcasts target concrete topology types, not the
        // capability trait itself — every new K8s-capable topology must be
        // added to this chain by hand.
        if let Some(k8s_topology) = topology.as_any().downcast_ref::<OKDHaClusterTopology>() {
            Ok(self.execute_with_k8s(k8s_topology))
        } else if let Some(k8s_topology) = topology.as_any().downcast_ref::<K3DTopology>() {
            Ok(self.execute_with_k8s(k8s_topology))
        } else {
            Err(format!("LAMPScore requires K8sCapability but topology {} doesn't provide it",
                topology.name()))
        }
    }

    fn score_type(&self) -> &str {
        "LAMP"
    }
}
|
||||
|
||||
struct BinaryScore {
    // Location to fetch the binary from.
    url: String,
    // Command-line arguments passed to the binary when executed.
    args: Vec<String>,
}

impl BinaryScore {
    /// Builds a score that downloads and runs a binary with the given args.
    fn new(url: &str, args: Vec<&str>) -> Self {
        Self {
            url: url.to_string(),
            args: args.iter().map(|s| s.to_string()).collect(),
        }
    }

    // Helper method for typesafe execution.
    /// Downloads the binary to /tmp/binary via the topology's Linux
    /// capability, then executes it with the configured arguments.
    fn execute_with_linux(&self, topology: &dyn LinuxCapability) -> Result<String, String> {
        let destination = "/tmp/binary";

        // Download the binary
        println!("Preparing to run binary from {}", self.url);

        match topology.download_file(&self.url, destination) {
            Ok(_) => {
                println!("Binary downloaded successfully");

                // Convert args to slice of &str
                let args: Vec<&str> = self.args.iter().map(|s| s.as_str()).collect();

                // Execute the binary
                topology.execute_command(destination, &args);
                Ok("Binary execution completed successfully".to_string())
            },
            Err(e) => {
                Err(format!("Failed to download binary: {}", e))
            }
        }
    }
}
|
||||
|
||||
impl Score for BinaryScore {
    fn execute(&self, topology: &dyn Topology) -> Result<String, String> {
        // Try to downcast to LinuxCapability.
        // NOTE(review): downcasts target concrete types; new Linux-capable
        // topologies must be added to this chain by hand.
        if let Some(linux_topology) = topology.as_any().downcast_ref::<OKDHaClusterTopology>() {
            self.execute_with_linux(linux_topology)
        } else if let Some(linux_topology) = topology.as_any().downcast_ref::<LinuxTopology>() {
            self.execute_with_linux(linux_topology)
        } else {
            Err(format!("BinaryScore requires LinuxCapability but topology {} doesn't provide it",
                topology.name()))
        }
    }

    fn score_type(&self) -> &str {
        "Binary"
    }
}
|
||||
|
||||
struct LoadBalancerScore {
    // Names of the services to balance across.
    services: Vec<String>,
    // Port the load balancer listens on.
    port: u16,
}

impl LoadBalancerScore {
    /// Builds a score that balances `services` on `port`.
    fn new(services: Vec<&str>, port: u16) -> Self {
        Self {
            services: services.iter().map(|s| s.to_string()).collect(),
            port,
        }
    }

    // Helper method for typesafe execution.
    /// Configures the load balancer and returns its reported status.
    fn execute_with_lb(&self, topology: &dyn LoadBalancerCapability) -> String {
        println!("Configuring load balancer for services");

        // Convert services to slice of &str
        let services: Vec<&str> = self.services.iter().map(|s| s.as_str()).collect();

        // Configure load balancer
        topology.configure_load_balancer(&services, self.port);

        // Check status
        let status = topology.get_load_balancer_status();
        format!("Load balancer configured successfully. Status: {}", status)
    }
}
|
||||
|
||||
impl Score for LoadBalancerScore {
    fn execute(&self, topology: &dyn Topology) -> Result<String, String> {
        // Only OKDHaClusterTopology implements LoadBalancerCapability,
        // so it is the only downcast target here.
        if let Some(lb_topology) = topology.as_any().downcast_ref::<OKDHaClusterTopology>() {
            Ok(self.execute_with_lb(lb_topology))
        } else {
            Err(format!("LoadBalancerScore requires LoadBalancerCapability but topology {} doesn't provide it",
                topology.name()))
        }
    }

    fn score_type(&self) -> &str {
        "LoadBalancer"
    }
}
|
||||
@@ -1,12 +1,18 @@
|
||||
**Architecture Decision Record: Harmony Infrastructure Abstractions**
|
||||
## Architecture Decision Record: Core Harmony Infrastructure Abstractions
|
||||
|
||||
**Status**: Proposed
|
||||
## Status
|
||||
|
||||
**Context**: Harmony is an infrastructure orchestrator written in pure Rust, aiming to provide real portability of automation across different cloud providers and infrastructure setups. To achieve this, we need to define infrastructure abstractions that are provider-agnostic and flexible enough to accommodate various use cases.
|
||||
Proposed
|
||||
|
||||
**Decision**: We will define our infrastructure abstractions using a domain-driven approach, focusing on the core logic of Harmony. These abstractions will only include the absolutely required elements for a specific resource, without referencing specific providers or implementations.
|
||||
## Context
|
||||
|
||||
**Example: Database Abstraction**
|
||||
Harmony is an infrastructure orchestrator written in pure Rust, aiming to provide real portability of automation across different cloud providers and infrastructure setups. To achieve this, we need to define infrastructure abstractions that are provider-agnostic and flexible enough to accommodate various use cases.
|
||||
|
||||
## Decision
|
||||
|
||||
We will define our infrastructure abstractions using a domain-driven approach, focusing on the core logic of Harmony. These abstractions will only include the absolutely required elements for a specific resource, without referencing specific providers or implementations.
|
||||
|
||||
### Example: Database Abstraction
|
||||
|
||||
To deploy a database to any cloud provider, we define an abstraction that includes essential elements such as:
|
||||
```rust
|
||||
|
||||
80
adr/005-interactive-project.md
Normal file
80
adr/005-interactive-project.md
Normal file
@@ -0,0 +1,80 @@
|
||||
# Architecture Decision Record: Interactive project setup for automated delivery pipeline of various codebases
|
||||
|
||||
## Status
|
||||
|
||||
Proposal
|
||||
|
||||
## Context
|
||||
|
||||
Many categories of developers, of which we will focus on LAMP (Linux Apache, MySQL, PHP) developers at first, are underserved by modern delivery tools.
|
||||
|
||||
Most of these projects are developed with a small team, small budget, but still are mission critical to their users.
|
||||
|
||||
We believe that Harmony, with its end-to-end infrastructure orchestration approach, enables relatively easy integration for this category of projects in a modern delivery pipeline that is opinionated enough that the development team is not overwhelmed by choices, but also flexible enough to allow them to deploy their application according to their habits. This includes local development, managed dedicated servers, virtualized environments, manual dashboards like CPanel, cloud providers, etc.
|
||||
|
||||
To enable this, we need to provide an easy way for developers to step on to the harmony pipeline without disrupting their workflow.
|
||||
|
||||
This ADR will outline the approach taken to go from a LAMP project to be standalone, to a LAMP project using harmony that can benefit from all the enterprise grade features of our opinionated delivery pipeline including :
|
||||
|
||||
- Automated environment provisioning (local, staging, uat, prod)
|
||||
- Infrastructure optimized for the delivery stage
|
||||
- Production with automated backups
|
||||
- Automated domain names for early stages, configured domain name for production
|
||||
- SSL certificates
|
||||
- Secret management
|
||||
- SSO integration
|
||||
- IDP, IDS security
|
||||
- Monitoring, logging
|
||||
- Artifact registry
|
||||
- Automated deployment and rollback
|
||||
- Dependency management (databases, configuration, scripts)
|
||||
|
||||
## Decision
|
||||
|
||||
|
||||
# Custom Rust DSL
|
||||
|
||||
We decided to develop a rust based DSL. Even though this means people might be "afraid of Rust", we believe the numerous advantages are worth the risk.
|
||||
|
||||
The main selection criteria are:
|
||||
|
||||
- Robustness : the application/infrastructure definition should not be fragile to typos or versioning. Rust's robust dependency management (Cargo) and type safety are best in class for robustness
|
||||
- Flexibility : Writing the definition in a standard programming language empowers users to easily leverage the internals of harmony to adapt the code to their needs.
|
||||
- Extensibility : Once again, a standard programming language enables easily importing a configuration, or multiple configurations, create reusable bits, and build upon the different components to really take control over a complex multi-project deployment without going crazy because of a typo in a yaml definition that changed 4 years ago
|
||||
|
||||
## Consequences
|
||||
|
||||
### Positive
|
||||
|
||||
- Complete control over the syntax and semantics of the DSL, tailored specifically to our needs.
|
||||
- Potential for better performance optimizations as we can implement exactly what is required without additional abstractions.
|
||||
|
||||
### Negative
|
||||
|
||||
- Higher initial development cost due to building a new language from scratch.
|
||||
- Steeper learning curve for developers who need to use the DSL.
|
||||
- Lack of an existing community and ecosystem, which could slow down adoption.
|
||||
- Increased maintenance overhead as the DSL needs to be updated and supported internally.
|
||||
|
||||
## Alternatives considered
|
||||
|
||||
### Score spec
|
||||
|
||||
We considered integrating with the score-spec project : https://github.com/score-spec/spec
|
||||
|
||||
The idea was to benefit from an existing community and ecosystem. The motivations to consider score were the following :
|
||||
|
||||
- It is a CNCF project, which helps a lot with adoption and community building
|
||||
- It already supports important targets for us including docker-compose and k8s
|
||||
- It provides a way to define the application's infrastructure at the correct level of abstraction for us to deploy it anywhere -- that is the goal of the score-spec project
|
||||
- Once we evolve, we can simply have a score compatible provider that allows any project with a score spec to be deployed on the harmony stack
|
||||
- Score was built with enterprise use-cases in mind : Humanitec platform engineering customers
|
||||
|
||||
|
||||
Positive Consequences
|
||||
|
||||
- Score Community is growing, using harmony will be very easy for them
|
||||
|
||||
Negative Consequences
|
||||
|
||||
- Score is not that big yet, and mostly used by Humanitec's clients (I guess), which is a hard to penetrate environment
|
||||
@@ -5,6 +5,7 @@
|
||||
Proposed
|
||||
|
||||
### TODO [#3](https://git.nationtech.io/NationTech/harmony/issues/3):
|
||||
|
||||
Before accepting this proposal we need to run a POC to validate this potential issue :
|
||||
|
||||
**Keycloak Misuse**: Using Keycloak primarily as a secrets manager is inappropriate, as it's designed for identity and access management (IAM), not secrets management. This creates scalability and functionality limitations.
|
||||
|
||||
65
adr/007-default-runtime.md
Normal file
65
adr/007-default-runtime.md
Normal file
@@ -0,0 +1,65 @@
|
||||
## Architecture Decision Record: Default Runtime for Managed Workloads
|
||||
|
||||
### Status
|
||||
|
||||
Proposed
|
||||
|
||||
### Context
|
||||
|
||||
Our infrastructure orchestrator manages workloads requiring a Kubernetes-compatible runtime environment.
|
||||
|
||||
**Requirements**
|
||||
|
||||
- Cross-platform (Linux, Windows, macOS)
|
||||
- Kubernetes compatibility
|
||||
- Lightweight, easy setup with minimal dependencies
|
||||
- Clean host teardown and minimal residue
|
||||
- Well-maintained and actively supported
|
||||
|
||||
### Decision
|
||||
|
||||
We select **k3d (k3s in Docker)** as our default runtime environment across all supported platforms (Linux, Windows, macOS).
|
||||
|
||||
### Rationale
|
||||
|
||||
- **Consistency Across Platforms:**
|
||||
One solution for all platforms simplifies development, supports documentation, and reduces complexity.
|
||||
|
||||
- **Simplified Setup and Teardown:**
|
||||
k3d runs Kubernetes clusters in Docker containers, allowing quick setup, teardown, and minimal host residue.
|
||||
|
||||
- **Leveraging Existing Container Ecosystem:**
|
||||
Docker/container runtimes are widely adopted, making their presence and familiarity common among users.
|
||||
|
||||
- **Kubernetes Compatibility:**
|
||||
k3s (within k3d) is fully Kubernetes-certified, ensuring compatibility with standard Kubernetes tools and manifests.
|
||||
|
||||
- **Active Maintenance and Community:**
|
||||
k3d and k3s both have active communities and are well-maintained.
|
||||
|
||||
### Consequences
|
||||
|
||||
#### Positive
|
||||
|
||||
- **Uniform User Experience:** Users have a consistent setup experience across all platforms.
|
||||
- **Reduced Support Overhead:** Standardizing runtime simplifies support, documentation, and troubleshooting.
|
||||
- **Clean Isolation:** Containerization allows developers to easily clean up clusters without affecting host systems.
|
||||
- **Facilitates Multi-Cluster Development:** Easy creation and management of multiple clusters concurrently.
|
||||
|
||||
#### Negative
|
||||
|
||||
- **Docker Dependency:** Requires Docker (or compatible runtime) on all platforms.
|
||||
- **Potential Overhead:** Slight performance/resource overhead compared to native k3s.
|
||||
- **Docker Licensing Considerations:** Enterprise licensing of Docker Desktop could introduce additional considerations.
|
||||
|
||||
### Alternatives Considered
|
||||
|
||||
- **Native k3s (Linux) / k3d (Windows/macOS):** Original proposal. Rejected for greater simplicity and consistency.
|
||||
- **Minikube, MicroK8s, Kind:** Rejected due to complexity, resource usage, or narrower use-case focus.
|
||||
- **Docker Compose, Podman Desktop:** Rejected due to lack of orchestration or current limited k3d compatibility.
|
||||
|
||||
### Future Work
|
||||
|
||||
- Evaluate Podman Desktop or other container runtimes to avoid Docker dependency.
|
||||
- Continuously monitor k3d maturity and stability.
|
||||
- Investigate WebAssembly (WASM) runtimes as emerging alternatives for containerized workloads.
|
||||
62
adr/008-score-display-formatting.md
Normal file
62
adr/008-score-display-formatting.md
Normal file
@@ -0,0 +1,62 @@
|
||||
## Architecture Decision Record: Data Representation and UI Rendering for Score Types
|
||||
|
||||
**Status:** Proposed
|
||||
|
||||
**TL;DR:** `Score` types will be serialized (using `serde`) for presentation in UIs. This decouples data definition from presentation, improving scalability and reducing complexity for developers defining `Score` types. New UI types only need to handle existing field types, and new `Score` types don’t require UI changes as long as they use existing field types. Adding a new field type *does* require updates to all UIs.
|
||||
|
||||
**Key benefits:** Scalability, reduced complexity for `Score` authors, decoupling of data and presentation.
|
||||
|
||||
**Key trade-off:** Adding new field types requires updating all UIs.
|
||||
|
||||
---
|
||||
|
||||
**Context:**
|
||||
|
||||
Harmony is a pure Rust infrastructure orchestrator focused on compile-time safety and providing a developer-friendly, Ansible-module-like experience for defining infrastructure configurations via "Scores". These Scores (e.g., `LAMPScore`) are Rust structs composed of specific, strongly-typed fields (e.g., `VersionField`, `UrlField`, `PathField`) which are validated at compile-time using macros (`Version!`, `Url!`, etc.).
|
||||
|
||||
A key requirement is displaying the configuration defined in these Scores across various user interfaces (Web UI, TUI, potentially Mobile UI, etc.) in a consistent and type-safe manner. As the number of Score types is expected to grow significantly (hundreds or thousands), we need a scalable approach for rendering their data that avoids tightly coupling Score definitions to specific UI implementations.
|
||||
|
||||
The primary challenge is preventing the need for every `Score` struct author to implement multiple display traits (e.g., `Display`, `WebDisplay`, `TuiDisplay`) for every potential UI target. This would create an N x M complexity problem (N Scores * M UI types) and place an unreasonable burden on Score developers, hindering scalability and maintainability.
|
||||
|
||||
**Decision:**
|
||||
|
||||
1. **Mandatory Serialization:** All `Score` structs *must* implement `serde::Serialize` and `serde::Deserialize`. They *will not* be required to implement `std::fmt::Display` or any custom UI-specific display traits (e.g., `WebDisplay`, `TuiDisplay`).
|
||||
2. **Field-Level Rendering:** Responsibility for rendering data will reside within the UI components. Each UI (Web, TUI, etc.) will implement logic to display *individual field types* (e.g., `UrlField`, `VersionField`, `IpAddressField`, `SecretField`).
|
||||
3. **Data Access via Serialization:** UIs will primarily interact with `Score` data through its serialized representation (e.g., JSON obtained via `serde_json`). This provides a standardized interface for UIs to consume the data structure agnostic of the specific `Score` type. Alternatively, UIs *could* potentially use reflection or specific visitor patterns on the `Score` struct itself, but serialization is the preferred decoupling mechanism.
|
||||
|
||||
**Rationale:**
|
||||
|
||||
1. **Decoupling Data from Presentation:** This decision cleanly separates the data definition (`Score` structs and their fields) from the presentation logic (UI rendering). `Score` authors can focus solely on defining the data and its structure, while UI developers focus on how to best present known data *types*.
|
||||
2. **Scalability:** This approach scales significantly better than requiring display trait implementations on Scores:
|
||||
* Adding a *new Score type* requires *no changes* to existing UI code, provided it uses existing field types.
|
||||
* Adding a *new UI type* requires implementing rendering logic only for the defined set of *field types*, not for every individual `Score` type. This reduces the N x M complexity to N + M complexity (approximately).
|
||||
3. **Simplicity for Score Authors:** Requiring only `serde::Serialize + Deserialize` (which can often be derived automatically with `#[derive(Serialize, Deserialize)]`) is a much lower burden than implementing custom rendering logic for multiple, potentially unknown, UI targets.
|
||||
4. **Leverages Rust Ecosystem Standards:** `serde` is the de facto standard for serialization and deserialization in Rust. Relying on it aligns with common Rust practices and benefits from its robustness, performance, and extensive tooling.
|
||||
5. **Consistency for UIs:** Serialization provides a consistent, structured format (like JSON) for UIs to consume data, regardless of the underlying `Score` struct's complexity or composition.
|
||||
6. **Flexibility for UI Implementation:** UIs can choose the best way to render each field type based on their capabilities (e.g., a `UrlField` might be a clickable link in a Web UI, plain text in a TUI; a `SecretField` might be masked).
|
||||
|
||||
**Consequences:**
|
||||
|
||||
**Positive:**
|
||||
|
||||
* Greatly improved scalability for adding new Score types and UI targets.
|
||||
* Strong separation of concerns between data definition and presentation.
|
||||
* Reduced implementation burden and complexity for Score authors.
|
||||
* Consistent mechanism for UIs to access and interpret Score data.
|
||||
* Aligns well with the Hexagonal Architecture (ADR-002) by treating UIs as adapters interacting with the application core via a defined port (the serialized data contract).
|
||||
|
||||
**Negative:**
|
||||
|
||||
* Adding a *new field type* (e.g., `EmailField`) requires updates to *all* existing UI implementations to support rendering it.
|
||||
* UI components become dependent on the set of defined field types and need comprehensive logic to handle each one appropriately.
|
||||
* Potential minor overhead of serialization/deserialization compared to direct function calls (though likely negligible for UI purposes).
|
||||
* Requires careful design and management of the standard library of field types.
|
||||
|
||||
**Alternatives Considered:**
|
||||
|
||||
1. **`Score` Implements `std::fmt::Display`:**
|
||||
* _Rejected:_ Too simplistic. Only suitable for basic text rendering, doesn't cater to structured UIs (Web, etc.), and doesn't allow type-specific rendering logic (e.g., masking secrets). Doesn't scale to multiple UI formats.
|
||||
2. **`Score` Implements Multiple Custom Display Traits (`WebDisplay`, `TuiDisplay`, etc.):**
|
||||
* _Rejected:_ Leads directly to the N x M complexity problem. Tightly couples Score definitions to specific UI implementations. Places an excessive burden on Score authors, hindering adoption and scalability.
|
||||
3. **Generic Display Trait with Context (`Score` implements `DisplayWithContext<UIContext>`):**
|
||||
* _Rejected:_ More flexible than multiple traits, but still requires Score authors to implement potentially complex rendering logic within the `Score` definition itself. The `Score` would still need awareness of different UI contexts, leading to undesirable coupling. Managing context types adds complexity.
|
||||
61
adr/009-helm-and-kustomize-handling.md
Normal file
61
adr/009-helm-and-kustomize-handling.md
Normal file
@@ -0,0 +1,61 @@
|
||||
# Architecture Decision Record: Helm and Kustomize Handling
|
||||
|
||||
Initial Author: Taha Hawa
|
||||
|
||||
Initial Date: 2025-04-15
|
||||
|
||||
Last Updated Date: 2025-04-15
|
||||
|
||||
## Status
|
||||
|
||||
Proposed
|
||||
|
||||
## Context
|
||||
|
||||
We need to find a way to handle Helm charts and deploy them to a Kubernetes cluster. Helm has a lot of extra functionality that we may or may not need. Kustomize handles Helm charts by inflating them and applying them as vanilla Kubernetes yaml. How should Harmony handle it?
|
||||
|
||||
## Decision
|
||||
|
||||
In order to move quickly and efficiently, Harmony should handle Helm charts similarly to how Kustomize does: invoke Helm to inflate/render the charts with the needed inputs, and deploy the rendered artifacts to Kubernetes as if it were vanilla manifests.
|
||||
|
||||
## Rationale
|
||||
|
||||
A lot of Helm's features aren't strictly necessary and would add unneeded overhead. This is likely the fastest way to go from zero to deployed. Other tools (e.g. Kustomize) already do this. Kustomize has tooling for patching and modifying k8s manifests before deploying, and Harmony should have that power too, even if it's not what Helm typically intends.
|
||||
|
||||
Perhaps in future also have a Kustomize resource in Harmony? Which could handle Helm charts for Harmony as well/instead.
|
||||
|
||||
## Consequences
|
||||
|
||||
**Pros**:
|
||||
|
||||
- Much easier (and faster) than implementing all of Helm's featureset
|
||||
- Can potentially re-use code from K8sResource already present in Harmony
|
||||
- Harmony retains more control over how the deployment goes after rendering (i.e. can act like Kustomize, or leverage Kustomize itself to modify deployments after rendering/inflation)
|
||||
- Reduce (unstable) surface of dealing with Helm binary
|
||||
|
||||
**Cons**:
|
||||
|
||||
- Lose some Helm functionality
|
||||
- Potentially lose some compatibility with Helm
|
||||
|
||||
## Alternatives considered
|
||||
|
||||
- ### Implement Helm resource/client fully in Harmony
|
||||
- **Pros**:
|
||||
- Retain full compatibility with Helm as a tool
|
||||
- Retain full functionality of Helm
|
||||
- **Cons**:
|
||||
- Longer dev time
|
||||
- More complex integration
|
||||
- Dealing with larger (unstable) surface of Helm as a binary
|
||||
- ### Leverage Kustomize to deal with Helm charts
|
||||
- **Pros**:
|
||||
- Already has a good, minimal inflation solution built
|
||||
- Powerful post-processing/patching
|
||||
- Can integrate with `kubectl`
|
||||
- **Cons**:
|
||||
- Unstable binary tool/surface to deal with
|
||||
- Still requires Helm to be installed as well as Kustomize
|
||||
- Not all Helm features supported
|
||||
|
||||
## Additional Notes
|
||||
73
adr/010-monitoring-alerting/architecture.rs
Normal file
73
adr/010-monitoring-alerting/architecture.rs
Normal file
@@ -0,0 +1,73 @@
|
||||
pub trait MonitoringSystem {}
|
||||
|
||||
// 1. Modified AlertReceiver trait:
|
||||
// - Removed the problematic `clone` method.
|
||||
// - Added `box_clone` which returns a Box<dyn AlertReceiver>.
|
||||
pub trait AlertReceiver {
|
||||
type M: MonitoringSystem;
|
||||
fn install(&self, sender: &Self::M) -> Result<(), String>;
|
||||
// This method allows concrete types to clone themselves into a Box<dyn AlertReceiver>
|
||||
fn box_clone(&self) -> Box<dyn AlertReceiver<M = Self::M>>;
|
||||
}
|
||||
#[derive(Clone)]
|
||||
struct Prometheus{}
|
||||
impl MonitoringSystem for Prometheus {}
|
||||
|
||||
#[derive(Clone)] // Keep derive(Clone) for DiscordWebhook itself
|
||||
struct DiscordWebhook{}
|
||||
|
||||
impl AlertReceiver for DiscordWebhook {
|
||||
type M = Prometheus;
|
||||
fn install(&self, sender: &Self::M) -> Result<(), String> {
|
||||
// Placeholder for actual installation logic
|
||||
println!("DiscordWebhook installed for Prometheus monitoring.");
|
||||
Ok(())
|
||||
}
|
||||
// 2. Implement `box_clone` for DiscordWebhook:
|
||||
// This uses the derived `Clone` for DiscordWebhook to create a new boxed instance.
|
||||
fn box_clone(&self) -> Box<dyn AlertReceiver<M = Self::M>> {
|
||||
Box::new(self.clone())
|
||||
}
|
||||
}
|
||||
|
||||
// 3. Implement `std::clone::Clone` for `Box<dyn AlertReceiver<M= M>>`:
|
||||
// This allows `Box<dyn AlertReceiver>` to be cloned.
|
||||
// The `+ 'static` lifetime bound is often necessary for trait objects stored in collections,
|
||||
// ensuring they live long enough.
|
||||
impl<M: MonitoringSystem + 'static> Clone for Box<dyn AlertReceiver<M= M>> {
|
||||
fn clone(&self) -> Self {
|
||||
self.box_clone() // Call the custom `box_clone` method
|
||||
}
|
||||
}
|
||||
|
||||
// MonitoringConfig can now derive Clone because its `receivers` field
|
||||
// (Vec<Box<dyn AlertReceiver<M = M>>>) is now cloneable.
|
||||
#[derive(Clone)]
|
||||
struct MonitoringConfig <M: MonitoringSystem + 'static>{
|
||||
receivers: Vec<Box<dyn AlertReceiver<M = M>>>
|
||||
}
|
||||
|
||||
// Example usage to demonstrate compilation and functionality
|
||||
fn main() {
|
||||
let prometheus_instance = Prometheus{};
|
||||
let discord_webhook_instance = DiscordWebhook{};
|
||||
|
||||
let mut config = MonitoringConfig {
|
||||
receivers: Vec::new()
|
||||
};
|
||||
|
||||
// Create a boxed alert receiver
|
||||
let boxed_receiver: Box<dyn AlertReceiver<M = Prometheus>> = Box::new(discord_webhook_instance);
|
||||
config.receivers.push(boxed_receiver);
|
||||
|
||||
// Clone the config, which will now correctly clone the boxed receiver
|
||||
let cloned_config = config.clone();
|
||||
|
||||
println!("Original config has {} receivers.", config.receivers.len());
|
||||
println!("Cloned config has {} receivers.", cloned_config.receivers.len());
|
||||
|
||||
// Example of using the installed receiver
|
||||
if let Some(receiver) = config.receivers.get(0) {
|
||||
let _ = receiver.install(&prometheus_instance);
|
||||
}
|
||||
}
|
||||
68
adr/010-monitoring-and-alerting.md
Normal file
68
adr/010-monitoring-and-alerting.md
Normal file
@@ -0,0 +1,68 @@
|
||||
# Architecture Decision Record: Monitoring and Alerting
|
||||
|
||||
Initial Author : Willem Rolleman
|
||||
Date : April 28 2025
|
||||
|
||||
## Status
|
||||
|
||||
Proposed
|
||||
|
||||
## Context
|
||||
|
||||
A Harmony user should be able to initialize a monitoring stack easily, either at the first run of Harmony or in a way that integrates with existing projects and infra, without creating multiple instances of the monitoring stack or overwriting existing alerts/configurations. The user also needs a simple way to configure the stack so that it watches the projects. There should be reasonable defaults configured that are easily customizable for each project
|
||||
|
||||
## Decision
|
||||
|
||||
Create a MonitoringStack score that creates a maestro to launch the monitoring stack, or skips launching it if the stack is already present.
|
||||
The MonitoringStack score can be passed to the maestro in the vec! scores list
|
||||
|
||||
## Rationale
|
||||
|
||||
Having the score launch a maestro will allow the user to easily create a new monitoring stack and keeps components grouped together. The MonitoringScore can handle all the logic for adding alerts, ensuring that the stack is running, etc.
|
||||
|
||||
## Alternatives considered
|
||||
|
||||
- ### Implement alerting and monitoring stack using existing HelmScore for each project
|
||||
- **Pros**:
|
||||
- Each project can choose to use the monitoring and alerting stack that they choose
|
||||
- Less overhead in terms of core harmony code
|
||||
- can add Box::new(grafana::grafanascore(namespace))
|
||||
- **Cons**:
|
||||
- No default solution implemented
|
||||
- Dev needs to choose what they use
|
||||
- Increases complexity of score projects
|
||||
- Each project will create a new monitoring and alerting instance rather than joining the existing one
|
||||
|
||||
|
||||
- ### Use OKD grafana and prometheus
|
||||
- **Pros**:
|
||||
- Minimal config to do in Harmony
|
||||
- **Cons**:
|
||||
- relies on OKD so will not work for local testing via k3d
|
||||
|
||||
- ### Create a monitoring and alerting crate similar to harmony tui
|
||||
- **Pros**:
|
||||
- Creates a default solution that can be implemented once by harmony
|
||||
- can create a join function that will allow a project to connect to the existing solution
|
||||
- eliminates risk of creating multiple instances of grafana or prometheus
|
||||
- **Cons**:
|
||||
- more complex than using a helm score
|
||||
- management of values files for individual functions becomes more complicated, i.e. how do you create alerts for one project via helm install that doesn't overwrite the other alerts
|
||||
|
||||
- ### Add monitoring to Maestro struct so whether the monitoring stack is used must be defined
|
||||
- **Pros**:
|
||||
- less for the user to define
|
||||
- may be easier to set defaults
|
||||
- **Cons**:
|
||||
- feels counterintuitive
|
||||
- would need to modify the structure of the maestro and how it operates which seems like a bad idea
|
||||
- unclear how to allow user to pass custom values/configs to the monitoring stack for subsequent projects
|
||||
|
||||
- ### Create MonitoringStack score to add to scores vec! which loads a maestro to install stack if not ready or add custom endpoints/alerts to existing stack
|
||||
- **Pros**:
|
||||
- Maestro already accepts a list of scores to initialize
|
||||
- leaving out the monitoring score simply means the user does not want monitoring
|
||||
- if the monitoring stack is already created, the MonitoringStack score doesn't necessarily need to be added to each project
|
||||
- components of the monitoring stack are bundled together and can be expanded or modified from the same place
|
||||
- **Cons**:
|
||||
- maybe need to create
|
||||
161
adr/011-multi-tenant-cluster.md
Normal file
161
adr/011-multi-tenant-cluster.md
Normal file
@@ -0,0 +1,161 @@
|
||||
# Architecture Decision Record: Multi-Tenancy Strategy for Harmony Managed Clusters
|
||||
|
||||
Initial Author: Jean-Gabriel Gill-Couture
|
||||
|
||||
Initial Date: 2025-05-26
|
||||
|
||||
## Status
|
||||
|
||||
Proposed
|
||||
|
||||
## Context
|
||||
|
||||
Harmony manages production OKD/Kubernetes clusters that serve multiple clients with varying trust levels and operational requirements. We need a multi-tenancy strategy that provides:
|
||||
|
||||
1. **Strong isolation** between client workloads while maintaining operational simplicity
|
||||
2. **Controlled API access** allowing clients self-service capabilities within defined boundaries
|
||||
3. **Security-first approach** protecting both the cluster infrastructure and tenant data
|
||||
4. **Harmony-native implementation** using our Score/Interpret pattern for automated tenant provisioning
|
||||
5. **Scalable management** supporting both small trusted clients and larger enterprise customers
|
||||
|
||||
The official Kubernetes multi-tenancy documentation identifies two primary models: namespace-based isolation and virtual control planes per tenant. Given Harmony's focus on operational simplicity, provider-agnostic abstractions (ADR-003), and hexagonal architecture (ADR-002), we must choose an approach that balances security, usability, and maintainability.
|
||||
|
||||
Our clients represent a hybrid tenancy model:
|
||||
- **Customer multi-tenancy**: Each client operates independently with no cross-tenant trust
|
||||
- **Team multi-tenancy**: Individual clients may have multiple team members requiring coordinated access
|
||||
- **API access requirement**: Unlike pure SaaS scenarios, clients need controlled Kubernetes API access for self-service operations
|
||||
|
||||
The official Kubernetes documentation on multi-tenancy heavily inspired this ADR: https://kubernetes.io/docs/concepts/security/multi-tenancy/
|
||||
|
||||
## Decision
|
||||
|
||||
Implement **namespace-based multi-tenancy** with the following architecture:
|
||||
|
||||
### 1. Network Security Model
|
||||
- **Private cluster access**: Kubernetes API and OpenShift console accessible only via WireGuard VPN
|
||||
- **No public exposure**: Control plane endpoints remain internal to prevent unauthorized access attempts
|
||||
- **VPN-based authentication**: Initial access control through WireGuard client certificates
|
||||
|
||||
### 2. Tenant Isolation Strategy
|
||||
- **Dedicated namespace per tenant**: Each client receives an isolated namespace with access limited only to the required resources and operations
|
||||
- **Complete network isolation**: NetworkPolicies prevent cross-namespace communication while allowing full egress to public internet
|
||||
- **Resource governance**: ResourceQuotas and LimitRanges enforce CPU, memory, and storage consumption limits
|
||||
- **Storage access control**: Clients can create PersistentVolumeClaims but cannot directly manipulate PersistentVolumes or access other tenants' storage
|
||||
|
||||
### 3. Access Control Framework
|
||||
- **Principle of Least Privilege**: RBAC grants only necessary permissions within tenant namespace scope
|
||||
- **Namespace-scoped**: Clients can create/modify/delete resources within their namespace
|
||||
- **Cluster-level restrictions**: No access to cluster-wide resources, other namespaces, or sensitive cluster operations
|
||||
- **Whitelisted operations**: Controlled self-service capabilities for ingress, secrets, configmaps, and workload management
|
||||
|
||||
### 4. Identity Management Evolution
|
||||
- **Phase 1**: Manual provisioning of VPN access and Kubernetes ServiceAccounts/Users
|
||||
- **Phase 2**: Migration to Keycloak-based identity management (aligning with ADR-006) for centralized authentication and lifecycle management
|
||||
|
||||
### 5. Harmony Integration
|
||||
- **TenantScore implementation**: Declarative tenant provisioning using Harmony's Score/Interpret pattern
|
||||
- **Topology abstraction**: Tenant configuration abstracted from underlying Kubernetes implementation details
|
||||
- **Automated deployment**: Complete tenant setup automated through Harmony's orchestration capabilities
|
||||
|
||||
## Rationale
|
||||
|
||||
### Network Security Through VPN Access
|
||||
- **Defense in depth**: VPN requirement adds critical security layer preventing unauthorized cluster access
|
||||
- **Simplified firewall rules**: No need for complex public endpoint protections or rate limiting
|
||||
- **Audit capability**: VPN access provides clear audit trail of cluster connections
|
||||
- **Aligns with enterprise practices**: Most enterprise customers already use VPN infrastructure
|
||||
|
||||
### Namespace Isolation vs Virtual Control Planes
|
||||
Following Kubernetes official guidance, namespace isolation provides:
|
||||
- **Lower resource overhead**: Virtual control planes require dedicated etcd, API server, and controller manager per tenant
|
||||
- **Operational simplicity**: Single control plane to maintain, upgrade, and monitor
|
||||
- **Cross-tenant service integration**: Enables future controlled cross-tenant communication if required
|
||||
- **Proven stability**: Namespace-based isolation is well-tested and widely deployed
|
||||
- **Cost efficiency**: Significantly lower infrastructure costs compared to dedicated control planes
|
||||
|
||||
### Hybrid Tenancy Model Suitability
|
||||
Our approach addresses both customer and team multi-tenancy requirements:
|
||||
- **Customer isolation**: Strong network and RBAC boundaries prevent cross-tenant interference
|
||||
- **Team collaboration**: Multiple team members can share namespace access through group-based RBAC
|
||||
- **Self-service balance**: Controlled API access enables client autonomy without compromising security
|
||||
|
||||
### Harmony Architecture Alignment
|
||||
- **Provider agnostic**: TenantScore abstracts multi-tenancy concepts, enabling future support for other Kubernetes distributions
|
||||
- **Hexagonal architecture**: Tenant management becomes an infrastructure capability accessed through well-defined ports
|
||||
- **Declarative automation**: Tenant lifecycle fully managed through Harmony's Score execution model
|
||||
|
||||
## Consequences
|
||||
|
||||
### Positive Consequences
|
||||
- **Strong security posture**: VPN + namespace isolation provides robust tenant separation
|
||||
- **Operational efficiency**: Single cluster management with automated tenant provisioning
|
||||
- **Client autonomy**: Self-service capabilities reduce operational support burden
|
||||
- **Scalable architecture**: Can support hundreds of tenants per cluster without architectural changes
|
||||
- **Future flexibility**: Foundation supports evolution to more sophisticated multi-tenancy models
|
||||
- **Cost optimization**: Shared infrastructure maximizes resource utilization
|
||||
|
||||
### Negative Consequences
|
||||
- **VPN operational overhead**: Requires VPN infrastructure management
|
||||
- **Manual provisioning complexity**: Phase 1 manual user management creates administrative burden
|
||||
- **Network policy dependency**: Requires CNI with NetworkPolicy support (OVN-Kubernetes provides this and is the OKD/Openshift default)
|
||||
- **Cluster-wide resource limitations**: Some advanced Kubernetes features require cluster-wide access
|
||||
- **Single point of failure**: Cluster outage affects all tenants simultaneously
|
||||
|
||||
### Migration Challenges
|
||||
- **Legacy client integration**: Existing clients may need VPN client setup and credential migration
|
||||
- **Monitoring complexity**: Per-tenant observability requires careful metric and log segmentation
|
||||
- **Backup considerations**: Tenant data backup must respect isolation boundaries
|
||||
|
||||
## Alternatives Considered
|
||||
|
||||
### Alternative 1: Virtual Control Plane Per Tenant
|
||||
**Pros**: Complete control plane isolation, full Kubernetes API access per tenant
|
||||
**Cons**: 3-5x higher resource usage, complex cross-tenant networking, operational complexity scales linearly with tenants
|
||||
|
||||
**Rejected**: Resource overhead incompatible with cost-effective multi-tenancy goals
|
||||
|
||||
### Alternative 2: Dedicated Clusters Per Tenant
|
||||
**Pros**: Maximum isolation, independent upgrade cycles, simplified security model
|
||||
**Cons**: Exponential operational complexity, prohibitive costs, resource waste
|
||||
|
||||
**Rejected**: Operational overhead makes this approach unsustainable for multiple clients
|
||||
|
||||
### Alternative 3: Public API with Advanced Authentication
|
||||
**Pros**: No VPN requirement, potentially simpler client access
|
||||
**Cons**: Larger attack surface, complex rate limiting and DDoS protection, increased security monitoring requirements
|
||||
|
||||
**Rejected**: Risk/benefit analysis favors VPN-based access control
|
||||
|
||||
### Alternative 4: Service Mesh Based Isolation
|
||||
**Pros**: Fine-grained traffic control, encryption, advanced observability
|
||||
**Cons**: Significant operational complexity, performance overhead, steep learning curve
|
||||
|
||||
**Rejected**: Complexity overhead outweighs benefits for current requirements; remains option for future enhancement
|
||||
|
||||
## Additional Notes
|
||||
|
||||
### Implementation Roadmap
|
||||
1. **Phase 1**: Implement VPN access and manual tenant provisioning
|
||||
2. **Phase 2**: Deploy TenantScore automation for namespace, RBAC, and NetworkPolicy management
|
||||
3. **Phase 3**: Work on privilege escalation from pods, audit for weaknesses, enforce security policies on pod runtimes
|
||||
4. **Phase 4**: Integrate Keycloak for centralized identity management
|
||||
5. **Phase 5**: Add advanced monitoring and per-tenant observability
|
||||
|
||||
### TenantScore Structure Preview
|
||||
```rust
|
||||
pub struct TenantScore {
|
||||
pub tenant_config: TenantConfig,
|
||||
pub resource_quotas: ResourceQuotaConfig,
|
||||
pub network_isolation: NetworkIsolationPolicy,
|
||||
pub storage_access: StorageAccessConfig,
|
||||
pub rbac_config: RBACConfig,
|
||||
}
|
||||
```
|
||||
|
||||
### Future Enhancements
|
||||
- **Cross-tenant service mesh**: For approved inter-tenant communication
|
||||
- **Advanced monitoring**: Per-tenant Prometheus/Grafana instances
|
||||
- **Backup automation**: Tenant-scoped backup policies
|
||||
- **Cost allocation**: Detailed per-tenant resource usage tracking
|
||||
|
||||
This ADR establishes the foundation for secure, scalable multi-tenancy in Harmony-managed clusters while maintaining operational simplicity and cost effectiveness. A follow-up ADR will detail the Tenant abstraction and user management mechanisms within the Harmony framework.
|
||||
41
adr/011-tenant/NetworkPolicy.yaml
Normal file
41
adr/011-tenant/NetworkPolicy.yaml
Normal file
@@ -0,0 +1,41 @@
|
||||
apiVersion: networking.k8s.io/v1
|
||||
kind: NetworkPolicy
|
||||
metadata:
|
||||
name: tenant-isolation-policy
|
||||
namespace: testtenant
|
||||
spec:
|
||||
podSelector: {} # Selects all pods in the namespace
|
||||
policyTypes:
|
||||
- Ingress
|
||||
- Egress
|
||||
ingress:
|
||||
- from:
|
||||
- podSelector: {} # Allow from all pods in the same namespace
|
||||
egress:
|
||||
- to:
|
||||
- podSelector: {} # Allow to all pods in the same namespace
|
||||
- to:
|
||||
- podSelector: {}
|
||||
namespaceSelector:
|
||||
matchLabels:
|
||||
kubernetes.io/metadata.name: openshift-dns # Target the openshift-dns namespace
|
||||
# Note, only opening port 53 is not enough, will have to dig deeper into this one eventually
|
||||
# ports:
|
||||
# - protocol: UDP
|
||||
# port: 53
|
||||
# - protocol: TCP
|
||||
# port: 53
|
||||
# Allow egress to public internet only
|
||||
- to:
|
||||
- ipBlock:
|
||||
cidr: 0.0.0.0/0
|
||||
except:
|
||||
- 10.0.0.0/8 # RFC1918
|
||||
- 172.16.0.0/12 # RFC1918
|
||||
- 192.168.0.0/16 # RFC1918
|
||||
- 169.254.0.0/16 # Link-local
|
||||
- 127.0.0.0/8 # Loopback
|
||||
- 224.0.0.0/4 # Multicast
|
||||
- 240.0.0.0/4 # Reserved
|
||||
- 100.64.0.0/10 # Carrier-grade NAT
|
||||
- 0.0.0.0/8 # Reserved
|
||||
95
adr/011-tenant/TestDeployment.yaml
Normal file
95
adr/011-tenant/TestDeployment.yaml
Normal file
@@ -0,0 +1,95 @@
|
||||
apiVersion: v1
|
||||
kind: Namespace
|
||||
metadata:
|
||||
name: testtenant
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Namespace
|
||||
metadata:
|
||||
name: testtenant2
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: test-web
|
||||
namespace: testtenant
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
app: test-web
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: test-web
|
||||
spec:
|
||||
containers:
|
||||
- name: nginx
|
||||
image: nginxinc/nginx-unprivileged
|
||||
ports:
|
||||
- containerPort: 80
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: test-web
|
||||
namespace: testtenant
|
||||
spec:
|
||||
selector:
|
||||
app: test-web
|
||||
ports:
|
||||
- port: 80
|
||||
targetPort: 8080
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: test-client
|
||||
namespace: testtenant
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
app: test-client
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: test-client
|
||||
spec:
|
||||
containers:
|
||||
- name: curl
|
||||
image: curlimages/curl:latest
|
||||
command: ["/bin/sh", "-c", "sleep 3600"]
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: test-web
|
||||
namespace: testtenant2
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
app: test-web
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: test-web
|
||||
spec:
|
||||
containers:
|
||||
- name: nginx
|
||||
image: nginxinc/nginx-unprivileged
|
||||
ports:
|
||||
- containerPort: 80
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: test-web
|
||||
namespace: testtenant2
|
||||
spec:
|
||||
selector:
|
||||
app: test-web
|
||||
ports:
|
||||
- port: 80
|
||||
targetPort: 8080
|
||||
63
adr/012-project-delivery-automation.md
Normal file
63
adr/012-project-delivery-automation.md
Normal file
@@ -0,0 +1,63 @@
|
||||
# Architecture Decision Record: Project Delivery Automation
|
||||
|
||||
Initial Author: Jean-Gabriel Gill-Couture
|
||||
|
||||
Initial Date: 2025-06-04
|
||||
|
||||
Last Updated Date: 2025-06-04
|
||||
|
||||
## Status
|
||||
|
||||
Proposed
|
||||
|
||||
## Context
|
||||
|
||||
As Harmony's goal is to make software delivery easier, we must provide an easy way for developers to express their app's semantics and dependencies with great abstractions, in a similar fashion to what the score.dev project is doing.
|
||||
|
||||
Thus, we started working on ways to package common types of applications such as LAMP, which we started working on with `LAMPScore`.
|
||||
|
||||
Now it is time for the next step: we want to pave the way towards complete lifecycle automation. To do this, we will start with a way to execute Harmony's modules easily from anywhere, starting locally and in CI environments.
|
||||
|
||||
## Decision
|
||||
|
||||
To achieve easy, portable execution of Harmony, we will follow this architecture :
|
||||
|
||||
- Host a basic harmony release that is compiled with the CLI by our gitea/github server
|
||||
- This binary will do the following : check if there is a `harmony` folder in the current path
|
||||
- If yes
|
||||
- Check if cargo is available locally and compile the harmony binary, or compile the harmony binary using a rust docker container, if neither cargo or a container runtime is available, output a message explaining the situation
|
||||
- Run the newly compiled binary. (Ideally using pid handoff like exec does but some research around this should be done. I think handing off the process is to help with OS interaction such as terminal apps, signals, exit codes, process handling, etc but there might be some side effects)
|
||||
- If not
|
||||
- Suggest initializing a project by auto detecting what the project looks like
|
||||
- When the project type cannot be auto detected, provide links to Harmony's documentation on how to set up a project, a link to the examples folder, and ask the user if they want to initialize an empty Harmony project in the current folder
|
||||
- harmony/Cargo.toml with dependencies set
|
||||
- harmony/src/main.rs with an example LAMPScore setup and ready to run
|
||||
- This same binary can be used in a CI environment to run the target project's Harmony module. By default, we provide these opinionated steps :
|
||||
1. **An empty check step.** The purpose of this step is to run all tests and checks against the codebase. For complex projects this could involve a very complex pipeline of test environments setup and execution but this is out of scope for now. This is not handled by harmony. For projects with automatic setup, we can fill this step with something like `cargo fmt --check; cargo test; cargo build` but Harmony is not directly involved in the execution of this step.
|
||||
2. **Package and publish.** Once all checks have passed, the production ready container is built and pushed to a registry. This is done by Harmony.
|
||||
3. **Deploy to staging automatically.**
|
||||
4. **Run a sanity check on staging.** As Harmony is responsible for deploying, Harmony should have all the knowledge of how to perform a sanity check on the staging environment. This will, most of the time, be a simple verification of the kubernetes health of all deployed components, and a poke on the public endpoint when there is one.
|
||||
5. **Deploy to production automatically.** Many projects will require manual approval here; this can easily be set up in the CI afterwards, but our opinion is that fully automatic deployment is the right default to start from.
|
||||
6. **Run a sanity check on production.** Same check as staging, but on production.
|
||||
|
||||
*Note on providing a base pipeline:* Having a complete pipeline set up automatically will encourage development teams to build upon these by adding tests where they belong. The goal here is to provide an opinionated solution that works for most small and large projects. Of course, many organizations will need to add steps such as deploying to sandbox environments, requiring more advanced approvals, and more complex publication and coordination with other projects. But this encompasses the basics required to build and deploy software reliably at any scale.
|
||||
|
||||
### Environment setup
|
||||
|
||||
TBD : For now, environments (tenants) will be set up and configured manually. Harmony will rely on the kubeconfig provided in the environment where it is running to deploy in the namespace.
|
||||
|
||||
For the CD tool such as Argo or Flux they will be activated by default by Harmony when using application level Scores such as LAMPScore in a similar way that the container is automatically built. Then, CI deployment steps will be notifying the CD tool using its API of the new release to deploy.
|
||||
|
||||
## Rationale
|
||||
|
||||
Reasoning behind the decision
|
||||
|
||||
## Consequences
|
||||
|
||||
Pros/Cons of chosen solution
|
||||
|
||||
## Alternatives considered
|
||||
|
||||
Pros/Cons of various proposed solutions considered
|
||||
|
||||
## Additional Notes
|
||||
78
adr/013-monitoring-notifications.md
Normal file
78
adr/013-monitoring-notifications.md
Normal file
@@ -0,0 +1,78 @@
|
||||
# Architecture Decision Record: Monitoring Notifications
|
||||
|
||||
Initial Author: Taha Hawa
|
||||
|
||||
Initial Date: 2025-06-26
|
||||
|
||||
Last Updated Date: 2025-06-26
|
||||
|
||||
## Status
|
||||
|
||||
Proposed
|
||||
|
||||
## Context
|
||||
|
||||
We need to send notifications (typically from AlertManager/Prometheus) and we need to receive said notifications on mobile devices for sure in some way, whether it's push messages, SMS, phone call, email, etc or all of the above.
|
||||
|
||||
## Decision
|
||||
|
||||
We should go with https://ntfy.sh except host it ourselves.
|
||||
|
||||
`ntfy` is an open source solution written in Go that has the features we need.
|
||||
|
||||
## Rationale
|
||||
|
||||
`ntfy` has pretty much everything we need (push notifications, email forwarding, receives via webhook), and nothing/not much we don't. Good fit, lightweight.
|
||||
|
||||
## Consequences
|
||||
|
||||
Pros:
|
||||
|
||||
- topics, with ACLs
|
||||
- lightweight
|
||||
- reliable
|
||||
- easy to configure
|
||||
- mobile app
|
||||
- the mobile app can listen via websocket, poll, or receive via Firebase/GCM on Android, or similar on iOS.
|
||||
- Forward to email
|
||||
- Text-to-Speech phone call messages using Twilio integration
|
||||
- Operates based on simple HTTP requests/Webhooks, easily usable via AlertManager
|
||||
|
||||
Cons:
|
||||
|
||||
- No SMS pushes
|
||||
- SQLite DB, makes it harder to HA/scale
|
||||
|
||||
## Alternatives considered
|
||||
|
||||
[AWS SNS](https://aws.amazon.com/sns/):
|
||||
Pros:
|
||||
|
||||
- highly reliable
|
||||
- no hosting needed
|
||||
|
||||
Cons:
|
||||
|
||||
- no control, not self hosted
|
||||
- costs (per usage)
|
||||
|
||||
[Apprise](https://github.com/caronc/apprise):
|
||||
Pros:
|
||||
|
||||
- Way more ways of sending notifications
|
||||
- Can use ntfy as one of the backends/ways of sending
|
||||
|
||||
Cons:
|
||||
|
||||
- Way too overkill for what we need in terms of features
|
||||
|
||||
[Gotify](https://github.com/gotify/server):
|
||||
Pros:
|
||||
|
||||
- simple, lightweight, golang, etc
|
||||
|
||||
Cons:
|
||||
|
||||
- Pushes topics are per-user
|
||||
|
||||
## Additional Notes
|
||||
17
adr/agent_discovery/mdns/Cargo.toml
Normal file
17
adr/agent_discovery/mdns/Cargo.toml
Normal file
@@ -0,0 +1,17 @@
|
||||
[package]
|
||||
name = "mdns"
|
||||
edition = "2024"
|
||||
version.workspace = true
|
||||
readme.workspace = true
|
||||
license.workspace = true
|
||||
|
||||
[dependencies]
|
||||
mdns-sd = "0.14"
|
||||
tokio = { version = "1", features = ["full"] }
|
||||
futures = "0.3"
|
||||
dmidecode = "0.2" # For getting the motherboard ID on the agent
|
||||
log.workspace=true
|
||||
env_logger.workspace=true
|
||||
clap = { version = "4.5.46", features = ["derive"] }
|
||||
get_if_addrs = "0.5.3"
|
||||
local-ip-address = "0.6.5"
|
||||
60
adr/agent_discovery/mdns/src/advertise.rs
Normal file
60
adr/agent_discovery/mdns/src/advertise.rs
Normal file
@@ -0,0 +1,60 @@
|
||||
// harmony-agent/src/main.rs
|
||||
|
||||
use log::info;
|
||||
use mdns_sd::{ServiceDaemon, ServiceInfo};
|
||||
use std::collections::HashMap;
|
||||
|
||||
use crate::SERVICE_TYPE;
|
||||
|
||||
// The service we are advertising.
|
||||
const SERVICE_PORT: u16 = 43210; // A port for the service. It needs one, even if unused.
|
||||
|
||||
pub async fn advertise() {
|
||||
info!("Starting Harmony Agent...");
|
||||
|
||||
// Get a unique ID for this machine.
|
||||
let motherboard_id = "some motherboard id";
|
||||
let instance_name = format!("harmony-agent-{}", motherboard_id);
|
||||
info!("This agent's instance name: {}", instance_name);
|
||||
info!("Advertising with ID: {}", motherboard_id);
|
||||
|
||||
// Create a new mDNS daemon.
|
||||
let mdns = ServiceDaemon::new().expect("Failed to create mDNS daemon");
|
||||
|
||||
// Create a TXT record HashMap to hold our metadata.
|
||||
let mut properties = HashMap::new();
|
||||
properties.insert("id".to_string(), motherboard_id.to_string());
|
||||
properties.insert("version".to_string(), "1.0".to_string());
|
||||
|
||||
// Create the service information.
|
||||
// The instance name should be unique on the network.
|
||||
let local_ip = local_ip_address::local_ip().unwrap();
|
||||
let service_info = ServiceInfo::new(
|
||||
SERVICE_TYPE,
|
||||
&instance_name,
|
||||
"harmony-host.local.", // A hostname for the service
|
||||
local_ip,
|
||||
// "0.0.0.0",
|
||||
SERVICE_PORT,
|
||||
Some(properties),
|
||||
)
|
||||
.expect("Failed to create service info");
|
||||
|
||||
// Register our service with the daemon.
|
||||
mdns.register(service_info)
|
||||
.expect("Failed to register service");
|
||||
|
||||
info!(
|
||||
"Service '{}' registered and now being advertised.",
|
||||
instance_name
|
||||
);
|
||||
info!("Agent is running. Press Ctrl+C to exit.");
|
||||
|
||||
for iface in get_if_addrs::get_if_addrs().unwrap() {
|
||||
println!("{:#?}", iface);
|
||||
}
|
||||
|
||||
// Keep the agent running indefinitely.
|
||||
tokio::signal::ctrl_c().await.unwrap();
|
||||
info!("Shutting down agent.");
|
||||
}
|
||||
109
adr/agent_discovery/mdns/src/discover.rs
Normal file
109
adr/agent_discovery/mdns/src/discover.rs
Normal file
@@ -0,0 +1,109 @@
|
||||
use mdns_sd::{ServiceDaemon, ServiceEvent};
|
||||
|
||||
use crate::SERVICE_TYPE;
|
||||
|
||||
pub async fn discover() {
|
||||
println!("Starting Harmony Master and browsing for agents...");
|
||||
|
||||
// Create a new mDNS daemon.
|
||||
let mdns = ServiceDaemon::new().expect("Failed to create mDNS daemon");
|
||||
|
||||
// Start browsing for the service type.
|
||||
// The receiver will be a stream of events.
|
||||
let receiver = mdns.browse(SERVICE_TYPE).expect("Failed to browse");
|
||||
|
||||
println!(
|
||||
"Listening for mDNS events for '{}'. Press Ctrl+C to exit.",
|
||||
SERVICE_TYPE
|
||||
);
|
||||
|
||||
std::thread::spawn(move || {
|
||||
while let Ok(event) = receiver.recv() {
|
||||
match event {
|
||||
ServiceEvent::ServiceData(resolved) => {
|
||||
println!("Resolved a new service: {}", resolved.fullname);
|
||||
}
|
||||
other_event => {
|
||||
println!("Received other event: {:?}", &other_event);
|
||||
}
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
// Gracefully shutdown the daemon.
|
||||
std::thread::sleep(std::time::Duration::from_secs(1000000));
|
||||
mdns.shutdown().unwrap();
|
||||
|
||||
// Process events as they come in.
|
||||
// while let Ok(event) = receiver.recv_async().await {
|
||||
// debug!("Received event {event:?}");
|
||||
// // match event {
|
||||
// // ServiceEvent::ServiceFound(svc_type, fullname) => {
|
||||
// // println!("\n--- Agent Discovered ---");
|
||||
// // println!(" Service Name: {}", fullname());
|
||||
// // // You can now resolve this service to get its IP, port, and TXT records
|
||||
// // // The resolve operation is a separate network call.
|
||||
// // let receiver = mdns.browse(info.get_fullname()).unwrap();
|
||||
// // if let Ok(resolve_event) = receiver.recv_timeout(Duration::from_secs(2)) {
|
||||
// // if let ServiceEvent::ServiceResolved(info) = resolve_event {
|
||||
// // let ip = info.get_addresses().iter().next().unwrap();
|
||||
// // let port = info.get_port();
|
||||
// // let motherboard_id = info.get_property("id").map_or("N/A", |v| v.val_str());
|
||||
// //
|
||||
// // println!(" IP: {}:{}", ip, port);
|
||||
// // println!(" Motherboard ID: {}", motherboard_id);
|
||||
// // println!("------------------------");
|
||||
// //
|
||||
// // // TODO: Add this agent to your central list of discovered hosts.
|
||||
// // }
|
||||
// // } else {
|
||||
// // println!("Could not resolve service '{}' in time.", info.get_fullname());
|
||||
// // }
|
||||
// // }
|
||||
// // ServiceEvent::ServiceRemoved(info) => {
|
||||
// // println!("\n--- Agent Removed ---");
|
||||
// // println!(" Service Name: {}", info.get_fullname());
|
||||
// // println!("---------------------");
|
||||
// // // TODO: Remove this agent from your list.
|
||||
// // }
|
||||
// // _ => {
|
||||
// // // We don't care about other event types for this example
|
||||
// // }
|
||||
// // }
|
||||
// }
|
||||
}
|
||||
|
||||
async fn _discover_example() {
|
||||
use mdns_sd::{ServiceDaemon, ServiceEvent};
|
||||
|
||||
// Create a daemon
|
||||
let mdns = ServiceDaemon::new().expect("Failed to create daemon");
|
||||
|
||||
// Use recently added `ServiceEvent::ServiceData`.
|
||||
mdns.use_service_data(true)
|
||||
.expect("Failed to use ServiceData");
|
||||
|
||||
// Browse for a service type.
|
||||
let service_type = "_mdns-sd-my-test._udp.local.";
|
||||
let receiver = mdns.browse(service_type).expect("Failed to browse");
|
||||
|
||||
// Receive the browse events in sync or async. Here is
|
||||
// an example of using a thread. Users can call `receiver.recv_async().await`
|
||||
// if running in async environment.
|
||||
std::thread::spawn(move || {
|
||||
while let Ok(event) = receiver.recv() {
|
||||
match event {
|
||||
ServiceEvent::ServiceData(resolved) => {
|
||||
println!("Resolved a new service: {}", resolved.fullname);
|
||||
}
|
||||
other_event => {
|
||||
println!("Received other event: {:?}", &other_event);
|
||||
}
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
// Gracefully shutdown the daemon.
|
||||
std::thread::sleep(std::time::Duration::from_secs(1));
|
||||
mdns.shutdown().unwrap();
|
||||
}
|
||||
31
adr/agent_discovery/mdns/src/main.rs
Normal file
31
adr/agent_discovery/mdns/src/main.rs
Normal file
@@ -0,0 +1,31 @@
|
||||
use clap::{Parser, ValueEnum};
|
||||
|
||||
mod advertise;
|
||||
mod discover;
|
||||
|
||||
/// Command-line arguments for the mDNS discovery demo binary.
#[derive(Parser, Debug)]
#[command(version, about, long_about = None)]
struct Args {
    // Role this process runs as (positional, parsed via clap's ValueEnum).
    #[arg(value_enum)]
    profile: Profiles,
}

/// Runtime roles selectable on the command line.
#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, ValueEnum)]
enum Profiles {
    // Advertise this machine as a Harmony agent over mDNS.
    Advertise,
    // Browse the network for advertised Harmony agents.
    Discover,
}
|
||||
|
||||
// The service type we are looking for: advertised by agents, browsed by the master.
const SERVICE_TYPE: &str = "_harmony._tcp.local.";
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() {
|
||||
env_logger::init();
|
||||
let args = Args::parse();
|
||||
|
||||
match args.profile {
|
||||
Profiles::Advertise => advertise::advertise().await,
|
||||
Profiles::Discover => discover::discover().await,
|
||||
}
|
||||
}
|
||||
8
check.sh
Executable file
8
check.sh
Executable file
@@ -0,0 +1,8 @@
|
||||
#!/bin/sh
# Local CI gate: compile-check, format-check, lint, and test the workspace.
# -e: abort on the first failing command; -u: error on unset variables.
set -eu

# Record the toolchain version so logs show what the checks ran against.
rustc --version

# Type-check every target and feature combination; --keep-going surfaces
# all errors in one pass instead of stopping at the first crate.
cargo check --all-targets --all-features --keep-going
# Verify formatting without rewriting files.
cargo fmt --check
cargo clippy
cargo test
|
||||
BIN
data/okd/bin/kubectl
(Stored with Git LFS)
Executable file
BIN
data/okd/bin/kubectl
(Stored with Git LFS)
Executable file
Binary file not shown.
BIN
data/okd/bin/oc
(Stored with Git LFS)
Executable file
BIN
data/okd/bin/oc
(Stored with Git LFS)
Executable file
Binary file not shown.
BIN
data/okd/bin/oc_README.md
(Stored with Git LFS)
Normal file
BIN
data/okd/bin/oc_README.md
(Stored with Git LFS)
Normal file
Binary file not shown.
BIN
data/okd/bin/openshift-install
(Stored with Git LFS)
Executable file
BIN
data/okd/bin/openshift-install
(Stored with Git LFS)
Executable file
Binary file not shown.
BIN
data/okd/bin/openshift-install_README.md
(Stored with Git LFS)
Normal file
BIN
data/okd/bin/openshift-install_README.md
(Stored with Git LFS)
Normal file
Binary file not shown.
BIN
data/okd/installer_image/scos-9.0.20250510-0-live-initramfs.x86_64.img
(Stored with Git LFS)
Normal file
BIN
data/okd/installer_image/scos-9.0.20250510-0-live-initramfs.x86_64.img
(Stored with Git LFS)
Normal file
Binary file not shown.
BIN
data/okd/installer_image/scos-9.0.20250510-0-live-kernel.x86_64
(Stored with Git LFS)
Normal file
BIN
data/okd/installer_image/scos-9.0.20250510-0-live-kernel.x86_64
(Stored with Git LFS)
Normal file
Binary file not shown.
BIN
data/okd/installer_image/scos-9.0.20250510-0-live-rootfs.x86_64.img
(Stored with Git LFS)
Normal file
BIN
data/okd/installer_image/scos-9.0.20250510-0-live-rootfs.x86_64.img
(Stored with Git LFS)
Normal file
Binary file not shown.
1
data/okd/installer_image/scos-live-initramfs.x86_64.img
Symbolic link
1
data/okd/installer_image/scos-live-initramfs.x86_64.img
Symbolic link
@@ -0,0 +1 @@
|
||||
scos-9.0.20250510-0-live-initramfs.x86_64.img
|
||||
1
data/okd/installer_image/scos-live-kernel.x86_64
Symbolic link
1
data/okd/installer_image/scos-live-kernel.x86_64
Symbolic link
@@ -0,0 +1 @@
|
||||
scos-9.0.20250510-0-live-kernel.x86_64
|
||||
1
data/okd/installer_image/scos-live-rootfs.x86_64.img
Symbolic link
1
data/okd/installer_image/scos-live-rootfs.x86_64.img
Symbolic link
@@ -0,0 +1 @@
|
||||
scos-9.0.20250510-0-live-rootfs.x86_64.img
|
||||
8
data/pxe/okd/README.md
Normal file
8
data/pxe/okd/README.md
Normal file
@@ -0,0 +1,8 @@
|
||||
Here lies all the data files required for an OKD cluster PXE boot setup.
|
||||
|
||||
This includes ISO files, binary boot files, iPXE, etc.
|
||||
|
||||
TODO as of August 2025:
|
||||
|
||||
- `harmony_inventory_agent` should be downloaded from official releases, this embedded version is practical for now though
|
||||
- The cluster ssh key should be generated and handled by harmony with the private key saved in a secret store
|
||||
9
data/pxe/okd/http_files/.gitattributes
vendored
Normal file
9
data/pxe/okd/http_files/.gitattributes
vendored
Normal file
@@ -0,0 +1,9 @@
|
||||
harmony_inventory_agent filter=lfs diff=lfs merge=lfs -text
|
||||
os filter=lfs diff=lfs merge=lfs -text
|
||||
os/centos-stream-9 filter=lfs diff=lfs merge=lfs -text
|
||||
os/centos-stream-9/images filter=lfs diff=lfs merge=lfs -text
|
||||
os/centos-stream-9/initrd.img filter=lfs diff=lfs merge=lfs -text
|
||||
os/centos-stream-9/vmlinuz filter=lfs diff=lfs merge=lfs -text
|
||||
os/centos-stream-9/images/efiboot.img filter=lfs diff=lfs merge=lfs -text
|
||||
os/centos-stream-9/images/install.img filter=lfs diff=lfs merge=lfs -text
|
||||
os/centos-stream-9/images/pxeboot filter=lfs diff=lfs merge=lfs -text
|
||||
1
data/pxe/okd/http_files/cluster_ssh_key.pub
Normal file
1
data/pxe/okd/http_files/cluster_ssh_key.pub
Normal file
@@ -0,0 +1 @@
|
||||
ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIBx6bDylvC68cVpjKfEFtLQJ/dOFi6PVS2vsIOqPDJIc jeangab@liliane2
|
||||
BIN
data/pxe/okd/http_files/harmony_inventory_agent
(Stored with Git LFS)
Executable file
BIN
data/pxe/okd/http_files/harmony_inventory_agent
(Stored with Git LFS)
Executable file
Binary file not shown.
BIN
data/pxe/okd/http_files/os/centos-stream-9/images/efiboot.img
(Stored with Git LFS)
Normal file
BIN
data/pxe/okd/http_files/os/centos-stream-9/images/efiboot.img
(Stored with Git LFS)
Normal file
Binary file not shown.
BIN
data/pxe/okd/http_files/os/centos-stream-9/images/install.img
(Stored with Git LFS)
Normal file
BIN
data/pxe/okd/http_files/os/centos-stream-9/images/install.img
(Stored with Git LFS)
Normal file
Binary file not shown.
Binary file not shown.
BIN
data/pxe/okd/http_files/os/centos-stream-9/images/pxeboot/vmlinuz
Executable file
BIN
data/pxe/okd/http_files/os/centos-stream-9/images/pxeboot/vmlinuz
Executable file
Binary file not shown.
BIN
data/pxe/okd/http_files/os/centos-stream-9/initrd.img
(Stored with Git LFS)
Normal file
BIN
data/pxe/okd/http_files/os/centos-stream-9/initrd.img
(Stored with Git LFS)
Normal file
Binary file not shown.
BIN
data/pxe/okd/http_files/os/centos-stream-9/vmlinuz
(Stored with Git LFS)
Executable file
BIN
data/pxe/okd/http_files/os/centos-stream-9/vmlinuz
(Stored with Git LFS)
Executable file
Binary file not shown.
BIN
data/pxe/okd/tftpboot/ipxe.efi
Normal file
BIN
data/pxe/okd/tftpboot/ipxe.efi
Normal file
Binary file not shown.
BIN
data/pxe/okd/tftpboot/undionly.kpxe
Normal file
BIN
data/pxe/okd/tftpboot/undionly.kpxe
Normal file
Binary file not shown.
1
data/watchguard/pxe-http-files/.gitattributes
vendored
Normal file
1
data/watchguard/pxe-http-files/.gitattributes
vendored
Normal file
@@ -0,0 +1 @@
|
||||
slitaz/* filter=lfs diff=lfs merge=lfs -text
|
||||
6
data/watchguard/pxe-http-files/boot.ipxe
Normal file
6
data/watchguard/pxe-http-files/boot.ipxe
Normal file
@@ -0,0 +1,6 @@
|
||||
#!ipxe
# Chainload a per-host boot script selected by MAC address, falling back to
# the shared default menu when no host-specific file exists on the server.

set base-url http://192.168.33.1:8080
# byMAC/01-<mac-with-hyphens>.ipxe, e.g. byMAC/01-aa-bb-cc-dd-ee-ff.ipxe
set hostfile ${base-url}/byMAC/01-${mac:hexhyp}.ipxe

chain ${hostfile} || chain ${base-url}/default.ipxe
||||
@@ -0,0 +1,35 @@
|
||||
#!ipxe
# Per-host PXE menu: install OKD as a control-plane (master) node, or boot a
# minimal Slitaz image for hardware debugging.
menu PXE Boot Menu - [${mac}]
item okdinstallation Install OKD
item slitaz Boot to Slitaz - old linux for debugging
choose selected

goto ${selected}

:local
exit

#################################
# okdinstallation
#################################
:okdinstallation
set base-url http://192.168.33.1:8080
set kernel-image fcos/fedora-coreos-39.20231101.3.0-live-kernel-x86_64
set live-rootfs fcos/fedora-coreos-39.20231101.3.0-live-rootfs.x86_64.img
set live-initramfs fcos/fedora-coreos-39.20231101.3.0-live-initramfs.x86_64.img
# Master node: install to the NVMe disk with the master ignition config.
set install-disk /dev/nvme0n1
set ignition-file ncd0/master.ign

kernel ${base-url}/${kernel-image} initrd=main coreos.live.rootfs_url=${base-url}/${live-rootfs} coreos.inst.install_dev=${install-disk} coreos.inst.ignition_url=${base-url}/${ignition-file} ip=enp1s0:dhcp
initrd --name main ${base-url}/${live-initramfs}
boot

#################################
# slitaz
#################################
:slitaz
set server_ip 192.168.33.1:8080
set base_url http://${server_ip}/slitaz
kernel ${base_url}/vmlinuz-2.6.37-slitaz rw root=/dev/null vga=788 initrd=rootfs.gz
initrd ${base_url}/rootfs.gz
boot
||||
@@ -0,0 +1,35 @@
|
||||
#!ipxe
# Per-host PXE menu: install OKD as a control-plane (master) node, or boot a
# minimal Slitaz image for hardware debugging.
menu PXE Boot Menu - [${mac}]
item okdinstallation Install OKD
item slitaz Boot to Slitaz - old linux for debugging
choose selected

goto ${selected}

:local
exit

#################################
# okdinstallation
#################################
:okdinstallation
set base-url http://192.168.33.1:8080
set kernel-image fcos/fedora-coreos-39.20231101.3.0-live-kernel-x86_64
set live-rootfs fcos/fedora-coreos-39.20231101.3.0-live-rootfs.x86_64.img
set live-initramfs fcos/fedora-coreos-39.20231101.3.0-live-initramfs.x86_64.img
# Master node: install to the NVMe disk with the master ignition config.
set install-disk /dev/nvme0n1
set ignition-file ncd0/master.ign

kernel ${base-url}/${kernel-image} initrd=main coreos.live.rootfs_url=${base-url}/${live-rootfs} coreos.inst.install_dev=${install-disk} coreos.inst.ignition_url=${base-url}/${ignition-file} ip=enp1s0:dhcp
initrd --name main ${base-url}/${live-initramfs}
boot

#################################
# slitaz
#################################
:slitaz
set server_ip 192.168.33.1:8080
set base_url http://${server_ip}/slitaz
kernel ${base_url}/vmlinuz-2.6.37-slitaz rw root=/dev/null vga=788 initrd=rootfs.gz
initrd ${base_url}/rootfs.gz
boot
||||
@@ -0,0 +1,35 @@
|
||||
#!ipxe
# Per-host PXE menu: install OKD as a worker node, or boot a minimal Slitaz
# image for hardware debugging.
menu PXE Boot Menu - [${mac}]
item okdinstallation Install OKD
item slitaz Slitaz - an old linux image for debugging
choose selected

goto ${selected}

:local
exit

#################################
# okdinstallation
#################################
:okdinstallation
set base-url http://192.168.33.1:8080
set kernel-image fcos/fedora-coreos-39.20231101.3.0-live-kernel-x86_64
set live-rootfs fcos/fedora-coreos-39.20231101.3.0-live-rootfs.x86_64.img
set live-initramfs fcos/fedora-coreos-39.20231101.3.0-live-initramfs.x86_64.img
# Worker node: install to the SATA disk with the worker ignition config.
set install-disk /dev/sda
set ignition-file ncd0/worker.ign

kernel ${base-url}/${kernel-image} initrd=main coreos.live.rootfs_url=${base-url}/${live-rootfs} coreos.inst.install_dev=${install-disk} coreos.inst.ignition_url=${base-url}/${ignition-file} ip=enp1s0:dhcp
initrd --name main ${base-url}/${live-initramfs}
boot

#################################
# slitaz
#################################
:slitaz
set server_ip 192.168.33.1:8080
set base_url http://${server_ip}/slitaz
kernel ${base_url}/vmlinuz-2.6.37-slitaz rw root=/dev/null vga=788 initrd=rootfs.gz
initrd ${base_url}/rootfs.gz
boot
|
||||
@@ -0,0 +1,35 @@
|
||||
#!ipxe
# Per-host PXE menu: install OKD as a control-plane (master) node, or boot a
# minimal Slitaz image for hardware debugging.
menu PXE Boot Menu - [${mac}]
item okdinstallation Install OKD
item slitaz Boot to Slitaz - old linux for debugging
choose selected

goto ${selected}

:local
exit

#################################
# okdinstallation
#################################
:okdinstallation
set base-url http://192.168.33.1:8080
set kernel-image fcos/fedora-coreos-39.20231101.3.0-live-kernel-x86_64
set live-rootfs fcos/fedora-coreos-39.20231101.3.0-live-rootfs.x86_64.img
set live-initramfs fcos/fedora-coreos-39.20231101.3.0-live-initramfs.x86_64.img
# Master node: install to the NVMe disk with the master ignition config.
set install-disk /dev/nvme0n1
set ignition-file ncd0/master.ign

kernel ${base-url}/${kernel-image} initrd=main coreos.live.rootfs_url=${base-url}/${live-rootfs} coreos.inst.install_dev=${install-disk} coreos.inst.ignition_url=${base-url}/${ignition-file} ip=enp1s0:dhcp
initrd --name main ${base-url}/${live-initramfs}
boot

#################################
# slitaz
#################################
:slitaz
set server_ip 192.168.33.1:8080
set base_url http://${server_ip}/slitaz
kernel ${base_url}/vmlinuz-2.6.37-slitaz rw root=/dev/null vga=788 initrd=rootfs.gz
initrd ${base_url}/rootfs.gz
boot
||||
@@ -0,0 +1,35 @@
|
||||
#!ipxe
# Per-host PXE menu: install OKD as a worker node, or boot a minimal Slitaz
# image for hardware debugging.
menu PXE Boot Menu - [${mac}]
item okdinstallation Install OKD
item slitaz Slitaz - an old linux image for debugging
choose selected

goto ${selected}

:local
exit

#################################
# okdinstallation
#################################
:okdinstallation
set base-url http://192.168.33.1:8080
set kernel-image fcos/fedora-coreos-39.20231101.3.0-live-kernel-x86_64
set live-rootfs fcos/fedora-coreos-39.20231101.3.0-live-rootfs.x86_64.img
set live-initramfs fcos/fedora-coreos-39.20231101.3.0-live-initramfs.x86_64.img
# Worker node: install to the SATA disk with the worker ignition config.
set install-disk /dev/sda
set ignition-file ncd0/worker.ign

kernel ${base-url}/${kernel-image} initrd=main coreos.live.rootfs_url=${base-url}/${live-rootfs} coreos.inst.install_dev=${install-disk} coreos.inst.ignition_url=${base-url}/${ignition-file} ip=enp1s0:dhcp
initrd --name main ${base-url}/${live-initramfs}
boot

#################################
# slitaz
#################################
:slitaz
set server_ip 192.168.33.1:8080
set base_url http://${server_ip}/slitaz
kernel ${base_url}/vmlinuz-2.6.37-slitaz rw root=/dev/null vga=788 initrd=rootfs.gz
initrd ${base_url}/rootfs.gz
boot
|
||||
@@ -0,0 +1,37 @@
|
||||
#!ipxe
# Per-host PXE menu for the bootstrap machine: install OKD (worker ignition),
# or boot a minimal Slitaz image for hardware debugging.
menu PXE Boot Menu - [${mac}]
item okdinstallation Install OKD
item slitaz Slitaz - an old linux image for debugging
choose selected

goto ${selected}

:local
exit
# This is the bootstrap node
# it will become wk2

#################################
# okdinstallation
#################################
:okdinstallation
set base-url http://192.168.33.1:8080
set kernel-image fcos/fedora-coreos-39.20231101.3.0-live-kernel-x86_64
set live-rootfs fcos/fedora-coreos-39.20231101.3.0-live-rootfs.x86_64.img
set live-initramfs fcos/fedora-coreos-39.20231101.3.0-live-initramfs.x86_64.img
# Bootstrap host, later repurposed as worker wk2: SATA disk, worker ignition.
set install-disk /dev/sda
set ignition-file ncd0/worker.ign

kernel ${base-url}/${kernel-image} initrd=main coreos.live.rootfs_url=${base-url}/${live-rootfs} coreos.inst.install_dev=${install-disk} coreos.inst.ignition_url=${base-url}/${ignition-file} ip=enp1s0:dhcp
initrd --name main ${base-url}/${live-initramfs}
boot

#################################
# slitaz
#################################
:slitaz
set server_ip 192.168.33.1:8080
set base_url http://${server_ip}/slitaz
kernel ${base_url}/vmlinuz-2.6.37-slitaz rw root=/dev/null vga=788 initrd=rootfs.gz
initrd ${base_url}/rootfs.gz
boot
|
||||
71
data/watchguard/pxe-http-files/default.ipxe
Normal file
71
data/watchguard/pxe-http-files/default.ipxe
Normal file
@@ -0,0 +1,71 @@
|
||||
#!ipxe
# Default PXE menu served when no per-MAC script matches (see boot.ipxe).
# Offers local-disk boot, a Slitaz live environment, and memtest; the Ubuntu
# and System Rescue entries are currently disabled in the menu.
menu PXE Boot Menu - [${mac}]
item local Boot from Hard Disk
item slitaz Boot slitaz live environment [tux|root:root]
#item ubuntu-server Ubuntu 24.04.1 live server
#item ubuntu-desktop Ubuntu 24.04.1 desktop
#item systemrescue System Rescue 11.03
item memtest memtest
#choose --default local --timeout 5000 selected
choose selected

goto ${selected}

:local
exit

#################################
# slitaz
#################################
:slitaz
set server_ip 192.168.33.1:8080
set base_url http://${server_ip}/slitaz
kernel ${base_url}/vmlinuz-2.6.37-slitaz rw root=/dev/null vga=788 initrd=rootfs.gz
initrd ${base_url}/rootfs.gz
boot

#################################
# Ubuntu Server
#################################
:ubuntu-server
set server_ip 192.168.33.1:8080
set base_url http://${server_ip}/ubuntu/live-server-24.04.1

kernel ${base_url}/vmlinuz ip=dhcp url=${base_url}/ubuntu-24.04.1-live-server-amd64.iso autoinstall ds=nocloud
initrd ${base_url}/initrd
boot

#################################
# Ubuntu Desktop
#################################
:ubuntu-desktop
set server_ip 192.168.33.1:8080
set base_url http://${server_ip}/ubuntu/desktop-24.04.1

kernel ${base_url}/vmlinuz ip=dhcp url=${base_url}/ubuntu-24.04.1-desktop-amd64.iso autoinstall ds=nocloud
initrd ${base_url}/initrd
boot

#################################
# System Rescue
#################################
:systemrescue
set base-url http://192.168.33.1:8080/systemrescue

kernel ${base-url}/vmlinuz initrd=sysresccd.img boot=systemrescue docache
initrd ${base-url}/sysresccd.img
boot

#################################
# MemTest86 (BIOS/UEFI)
#################################
# Pick the EFI or BIOS binary based on the firmware platform reported by iPXE.
:memtest
iseq ${platform} efi && goto memtest_efi || goto memtest_bios

# NOTE(review): the repo stores these binaries under memtest86/, but the URLs
# below use a 'memtest' path — confirm the HTTP server's directory layout.
:memtest_efi
kernel http://192.168.33.1:8080/memtest/memtest64.efi
boot

:memtest_bios
kernel http://192.168.33.1:8080/memtest/memtest64.bin
boot
|
||||
BIN
data/watchguard/pxe-http-files/memtest86/memtest32.bin
Normal file
BIN
data/watchguard/pxe-http-files/memtest86/memtest32.bin
Normal file
Binary file not shown.
BIN
data/watchguard/pxe-http-files/memtest86/memtest32.efi
Normal file
BIN
data/watchguard/pxe-http-files/memtest86/memtest32.efi
Normal file
Binary file not shown.
BIN
data/watchguard/pxe-http-files/memtest86/memtest64.bin
Normal file
BIN
data/watchguard/pxe-http-files/memtest86/memtest64.bin
Normal file
Binary file not shown.
BIN
data/watchguard/pxe-http-files/memtest86/memtest64.efi
Normal file
BIN
data/watchguard/pxe-http-files/memtest86/memtest64.efi
Normal file
Binary file not shown.
BIN
data/watchguard/pxe-http-files/memtest86/memtestla64.efi
Normal file
BIN
data/watchguard/pxe-http-files/memtest86/memtestla64.efi
Normal file
Binary file not shown.
@@ -1 +0,0 @@
|
||||
hey i am paul
|
||||
BIN
data/watchguard/pxe-http-files/slitaz/rootfs.gz
(Stored with Git LFS)
Normal file
BIN
data/watchguard/pxe-http-files/slitaz/rootfs.gz
(Stored with Git LFS)
Normal file
Binary file not shown.
BIN
data/watchguard/pxe-http-files/slitaz/vmlinuz-2.6.37-slitaz
(Stored with Git LFS)
Normal file
BIN
data/watchguard/pxe-http-files/slitaz/vmlinuz-2.6.37-slitaz
(Stored with Git LFS)
Normal file
Binary file not shown.
BIN
data/watchguard/tftpboot/ipxe.efi
Normal file
BIN
data/watchguard/tftpboot/ipxe.efi
Normal file
Binary file not shown.
BIN
data/watchguard/tftpboot/undionly.kpxe
Normal file
BIN
data/watchguard/tftpboot/undionly.kpxe
Normal file
Binary file not shown.
132
demos/cncf-k8s-quebec-meetup-september-2025/storyline.md
Normal file
132
demos/cncf-k8s-quebec-meetup-september-2025/storyline.md
Normal file
@@ -0,0 +1,132 @@
|
||||
# Harmony, Orchestrateur d'infrastructure open-source
|
||||
|
||||
**Target Duration:** 25 minutes\
|
||||
**Tone:** Friendly, expert-to-expert, inspiring.
|
||||
|
||||
---
|
||||
|
||||
#### **Slide 1: Title Slide**
|
||||
|
||||
- **Visual:** Clean and simple. Your company logo (NationTech) and the Harmony logo.
|
||||
|
||||
---
|
||||
|
||||
#### **Slide 2: The YAML Labyrinth**
|
||||
|
||||
**Goal:** Get every head in the room nodding in agreement. Start with their world, not yours.
|
||||
|
||||
- **Visual:**
|
||||
- Option A: "The Pull Request from Hell". A screenshot of a GitHub pull request for a seemingly minor change that touches dozens of YAML files across multiple directories. A sea of red and green diffs that is visually overwhelming.
|
||||
- Option B: A complex flowchart connecting dozens of logos: Terraform, Ansible, K8s, Helm, etc.
|
||||
- **Narration:**\
|
||||
[...ADD SOMETHING FOR INTRODUCTION...]\
|
||||
"We love the power that tools like Kubernetes and the CNCF landscape have given us. But let's be honest... when did our infrastructure code start looking like _this_?"\
|
||||
"We have GitOps, which is great. But it often means we're managing this fragile cathedral of YAML, Helm charts, and brittle scripts. We spend more time debugging indentation and tracing variables than we do building truly resilient systems."
|
||||
|
||||
---
|
||||
|
||||
#### **Slide 3: The Real Cost of Infrastructure**
|
||||
|
||||
- **Visual:** "The Jenga Tower of Tools". A tall, precarious Jenga tower where each block is the logo of a different tool (Terraform, K8s, Helm, Ansible, Prometheus, ArgoCD, etc.). One block near the bottom is being nervously pulled out.
|
||||
- **Narration:**
|
||||
"The real cost isn't just complexity; it's the constant need to choose, learn, integrate, and operate a dozen different tools, each with its own syntax and failure modes. It's the nagging fear that a tiny typo in a config file could bring everything down. Click-ops isn't the answer, but the current state of IaC feels like we've traded one problem for another."
|
||||
|
||||
---
|
||||
|
||||
#### **Slide 4: The Broken Promise of "Code"**
|
||||
|
||||
**Goal:** Introduce the core idea before introducing the product. This makes the solution feel inevitable.
|
||||
|
||||
- **(Initial Visual):** A two-panel slide.
|
||||
- **Left Panel Title: "The Plan"** - A terminal showing a green, successful `terraform plan` output.
|
||||
- **Right Panel Title: "The Reality"** - The _next_ screen in the terminal, showing the `terraform apply` failing with a cascade of red error text.
|
||||
- **Narration:**
|
||||
"We call our discipline **Infrastructure as Code**. And we've all been here. Our 'compiler' is a `terraform plan` that says everything looks perfect. We get the green light."
|
||||
(Pause for a beat)
|
||||
"And then we `apply`, and reality hits. It fails halfway through, at runtime, when it's most expensive and painful to fix."
|
||||
|
||||
**(Click to transition the slide)**
|
||||
|
||||
- **(New Visual):** The entire slide is replaced by a clean screenshot of a code editor (like nvim 😉) showing Harmony's Rust DSL. A red squiggly line is under a config line. The error message is clear in the "Problems" panel: `error: Incompatible deployment. Production target 'gcp-prod-cluster' requires a StorageClass with 'snapshots' capability, but 'standard-sc' does not provide it.`
|
||||
- **Narration (continued):**
|
||||
"In software development, we solved these problems years ago. We don't accept 'it compiled, but crashed on startup'. We have real tools, type systems, compilers, test frameworks, and IDEs that catch our mistakes before they ever reach production. **So, what if we could treat our entire infrastructure... like a modern, compiled application?**"
|
||||
"What if your infrastructure code could get compile-time checks, straight into the editor... instead of runtime panics and failures at 3 AM in production?"
|
||||
|
||||
---
|
||||
|
||||
#### **Slide 5: Introducing Harmony**
|
||||
|
||||
**Goal:** Introduce Harmony as the answer to the "What If?" question.
|
||||
|
||||
- **Visual:** The Harmony logo, large and centered.
|
||||
- **Tagline:** `Infrastructure in type-safe Rust. No YAML required.`
|
||||
- **Narration:**
|
||||
"This is Harmony. It's an open-source orchestrator that lets you define your entire stack — from a dev laptop to a multi-site bare-metal cluster — in a single, type-safe Rust codebase."
|
||||
|
||||
---
|
||||
|
||||
#### **Slide 6: Before & After**
|
||||
|
||||
- **Visual:** A side-by-side comparison. Left side: A screen full of complex, nested YAML. Right side: 10-15 lines of clean, readable Harmony Rust DSL that accomplishes the same thing.
|
||||
- **Narration:**
|
||||
"This is the difference. On the left, the fragile world of strings and templates. On the right, a portable, verifiable program that describes your apps, your infra, and your operations. We unify scaffolding, provisioning, and Day-2 ops, all verified by the Rust compiler. But enough slides... let's see it in action."
|
||||
|
||||
---
|
||||
|
||||
#### **Slide 7: Live Demo: Zero to Monitored App**
|
||||
|
||||
**Goal:** Show, don't just tell. Make it look effortless. This is where you build the "dream."
|
||||
|
||||
- **Visual:** Your terminal/IDE, ready to go.
|
||||
- **Narration Guide:**
|
||||
"Okay, for this demo, we're going to take a standard web app from GitHub. Nothing special about it."
|
||||
_(Show the repo)_
|
||||
"Now, let's bring it into Harmony. This is the entire definition we need to describe the application and its needs."
|
||||
_(Show the Rust DSL)_
|
||||
"First, let's run it locally on k3d. The exact same definition for dev as for prod."
|
||||
_(Deploy locally, show it works)_
|
||||
"Cool. But a real app needs monitoring. In Harmony, that's just adding a feature to our code."
|
||||
_(Uncomment one line: `.with_feature(Monitoring)` and redeploy)_
|
||||
"And just like that, we have a fully configured Prometheus and Grafana stack, scraping our app. No YAML, no extra config."
|
||||
"Finally, let's push this to our production staging cluster. We just change the target and specify our multi-site Ceph storage."
|
||||
_(Deploy to the remote cluster)_
|
||||
"And there it is. We've gone from a simple web app to a monitored, enterprise-grade service in minutes."
|
||||
|
||||
---
|
||||
|
||||
#### **Slide 8: Live Demo: Embracing Chaos**
|
||||
|
||||
**Goal:** Prove the "predictable" and "resilient" claims in the most dramatic way possible.
|
||||
|
||||
- **Visual:** A slide showing a map or diagram of your distributed infrastructure (the different data centers). Then switch back to your terminal.
|
||||
- **Narration Guide:**
|
||||
"This is great when things are sunny. But production is chaos. So... let's break things. On purpose."
|
||||
"First, a network failure." _(Kill a switch/link, show app is still up)_
|
||||
"Now, let's power off a storage server." _(Force off a server, show Ceph healing and the app is unaffected)_
|
||||
"How about a control plane node?" _(Force off a k8s control plane, show the cluster is still running)_
|
||||
"Okay, for the grand finale. What if we have a cascading failure? I'm going to kill _another_ storage server. This should cause a total failure in this data center."
|
||||
_(Force off the second server, narrate what's happening)_
|
||||
"And there it is... Ceph has lost quorum in this site... and Harmony has automatically failed everything over to our other datacenter. The app is still running."
|
||||
|
||||
---
|
||||
|
||||
#### **Slide 9: The New Reality**
|
||||
|
||||
**Goal:** Summarize the dream and tell the audience what you want them to do.
|
||||
|
||||
- **Visual:** The clean, simple Harmony Rust DSL code from Slide 6. A summary of what was just accomplished is listed next to it: `✓ GitHub to Prod in minutes`, `✓ Type-Safe Validation`, `✓ Built-in Monitoring`, `✓ Automated Multi-Site Failover`.
|
||||
- **Narration:**
|
||||
"So, in just a few minutes, we went from a simple web app to a multi-site, monitored, and chaos-proof production deployment. We did it with a small amount of code that is easy to read, easy to verify, and completely portable. This is our vision: to offload the complexity, and make infrastructure simple, predictable, and even fun again."
|
||||
|
||||
---
|
||||
|
||||
#### **Slide 10: Join Us**
|
||||
|
||||
- **Visual:** A clean, final slide with QR codes and links.
|
||||
- GitHub Repo (`github.com/nation-tech/harmony`)
|
||||
- Website (`harmony.sh` or similar)
|
||||
- Your contact info (`jg@nation.tech` / LinkedIn / Twitter)
|
||||
- **Narration:**
|
||||
"Harmony is open-source, AGPLv3. We believe this is the future, but we're just getting started. We know this crowd has great infrastructure minds out there, and we need your feedback. Please, check out the project on GitHub. Star it if you like what you see. Tell us what's missing. Let's build this future together. Thank you."
|
||||
|
||||
**(Open for Q&A)**
|
||||
8
docs/OKD_Host_preparation.md
Normal file
8
docs/OKD_Host_preparation.md
Normal file
@@ -0,0 +1,8 @@
|
||||
## Bios settings
|
||||
|
||||
1. CSM: Disabled (the Compatibility Support Module enables legacy BIOS boot; disable it so GPT-formatted drives boot via UEFI)
|
||||
2. Secure boot : disabled
|
||||
3. Boot order :
|
||||
1. Local Hard drive
|
||||
2. PXE IPv4
|
||||
4. System clock: make sure it is set correctly, otherwise you will get invalid-certificate errors
|
||||
1
docs/README.md
Normal file
1
docs/README.md
Normal file
@@ -0,0 +1 @@
|
||||
Not much here yet, see the `adr` folder for now. More to come in time!
|
||||
13
docs/cyborg-metaphor.md
Normal file
13
docs/cyborg-metaphor.md
Normal file
@@ -0,0 +1,13 @@
|
||||
## Conceptual metaphor : The Cyborg and the Central Nervous System
|
||||
|
||||
At the heart of Harmony lies a core belief: in modern, decentralized systems, **software and infrastructure are not separate entities.** They are a single, symbiotic organism—a cyborg.
|
||||
|
||||
The software is the electronics, the "mind"; the infrastructure is the biological host, the "body". They live or die, thrive or sink together.
|
||||
|
||||
Traditional approaches attempt to manage this complex organism with fragmented tools: static YAML for configuration, brittle scripts for automation, and separate Infrastructure as Code (IaC) for provisioning. This creates a disjointed system that struggles to scale or heal itself, making it inadequate for the demands of fully automated, enterprise-grade clusters.
|
||||
|
||||
Harmony's goal is to provide the **central nervous system for this cyborg**. We aim to achieve the full automation of complex, decentralized clouds by managing this integrated entity holistically.
|
||||
|
||||
To achieve this, a tool must be both robust and powerful. It must manage the entire lifecycle—deployment, upgrades, failure recovery, and decommissioning—with precision. This requires full control over application packaging and a deep, intrinsic integration between the software and the infrastructure it inhabits.
|
||||
|
||||
This is why Harmony uses a powerful, living language like Rust. It replaces static, lifeless configuration files with a dynamic, breathing codebase. It allows us to express the complex relationships and behaviors of a modern distributed system, enabling the creation of truly automated, resilient, and powerful platforms that can thrive.
|
||||
4
docs/diagrams/Harmony_Core_Architecture.drawio.svg
Normal file
4
docs/diagrams/Harmony_Core_Architecture.drawio.svg
Normal file
File diff suppressed because one or more lines are too long
|
After Width: | Height: | Size: 1.1 MiB |
108
docs/pxe_test/README.md
Normal file
108
docs/pxe_test/README.md
Normal file
@@ -0,0 +1,108 @@
|
||||
# OPNsense PXE Lab Environment
|
||||
|
||||
This project contains a script to automatically set up a virtual lab environment for testing PXE boot services managed by an OPNsense firewall.
|
||||
|
||||
## Overview
|
||||
|
||||
The `pxe_vm_lab_setup.sh` script will create the following resources using libvirt/KVM:
|
||||
|
||||
1. **A Virtual Network**: An isolated network named `harmonylan` (`virbr1`) for the lab.
|
||||
2. **Two Virtual Machines**:
|
||||
* `opnsense-pxe`: A firewall VM that will act as the gateway and PXE server.
|
||||
* `pxe-node-1`: A client VM configured to boot from the network.
|
||||
|
||||
## Prerequisites
|
||||
|
||||
Ensure you have the following software installed on your Arch Linux host:
|
||||
|
||||
* `libvirt`
|
||||
* `qemu`
|
||||
* `virt-install` (from the `virt-install` package)
|
||||
* `curl`
|
||||
* `bzip2`
|
||||
|
||||
## Usage
|
||||
|
||||
### 1. Create the Environment
|
||||
|
||||
Run the `up` command to download the necessary images and create the network and VMs.
|
||||
|
||||
```bash
|
||||
sudo ./pxe_vm_lab_setup.sh up
|
||||
```
|
||||
|
||||
### 2. Install and Configure OPNsense
|
||||
|
||||
The OPNsense VM is created but the OS needs to be installed manually via the console.
|
||||
|
||||
1. **Connect to the VM console**:
|
||||
```bash
|
||||
sudo virsh console opnsense-pxe
|
||||
```
|
||||
|
||||
2. **Log in as the installer**:
|
||||
* Username: `installer`
|
||||
* Password: `opnsense`
|
||||
|
||||
3. **Follow the on-screen installation wizard**. When prompted to assign network interfaces (`WAN` and `LAN`):
|
||||
* Find the MAC address for the `harmonylan` interface by running this command in another terminal:
|
||||
```bash
|
||||
virsh domiflist opnsense-pxe
|
||||
# Example output:
|
||||
# Interface Type Source Model MAC
|
||||
# ---------------------------------------------------------
|
||||
# vnet18 network default virtio 52:54:00:b5:c4:6d
|
||||
# vnet19 network harmonylan virtio 52:54:00:21:f9:ba
|
||||
```
|
||||
* Assign the interface connected to `harmonylan` (e.g., `vtnet1` with MAC `52:54:00:21:f9:ba`) as your **LAN**.
|
||||
* Assign the other interface as your **WAN**.
|
||||
|
||||
4. After the installation is complete, **shut down** the VM from the console menu.
|
||||
|
||||
5. **Detach the installation media** by editing the VM's configuration:
|
||||
```bash
|
||||
sudo virsh edit opnsense-pxe
|
||||
```
|
||||
Find and **delete** the entire `<disk>` block corresponding to the `.img` file (the one with `<target ... bus='usb'/>`).
|
||||
|
||||
6. **Start the VM** to boot into the newly installed system:
|
||||
```bash
|
||||
sudo virsh start opnsense-pxe
|
||||
```
|
||||
|
||||
### 3. Connect to OPNsense from Your Host
|
||||
|
||||
To configure OPNsense, you need to connect your host to the `harmonylan` network.
|
||||
|
||||
1. By default, OPNsense configures its LAN interface with the IP `192.168.1.1`.
|
||||
2. Assign a compatible IP address to your host's `virbr1` bridge interface:
|
||||
```bash
|
||||
sudo ip addr add 192.168.1.5/24 dev virbr1
|
||||
```
|
||||
3. You can now access the OPNsense VM from your host:
|
||||
* **SSH**: `ssh root@192.168.1.1` (password: `opnsense`)
|
||||
* **Web UI**: `https://192.168.1.1`
|
||||
|
||||
### 4. Configure PXE Services with Harmony
|
||||
|
||||
With connectivity established, you can now use Harmony to configure the OPNsense firewall for PXE booting. Point your Harmony OPNsense scores to the firewall using these details:
|
||||
|
||||
* **Hostname/IP**: `192.168.1.1`
|
||||
* **Credentials**: `root` / `opnsense`
|
||||
|
||||
### 5. Boot the PXE Client
|
||||
|
||||
Once your Harmony configuration has been applied and OPNsense is serving DHCP/TFTP, start the client VM. It will automatically attempt to boot from the network.
|
||||
|
||||
```bash
|
||||
sudo virsh start pxe-node-1
|
||||
sudo virsh console pxe-node-1
|
||||
```
|
||||
|
||||
## Cleanup
|
||||
|
||||
To destroy all VMs and networks created by the script, run the `clean` command:
|
||||
|
||||
```bash
|
||||
sudo ./pxe_vm_lab_setup.sh clean
|
||||
```
|
||||
191
docs/pxe_test/pxe_vm_lab_setup.sh
Executable file
191
docs/pxe_test/pxe_vm_lab_setup.sh
Executable file
@@ -0,0 +1,191 @@
|
||||
#!/usr/bin/env bash
# pxe_vm_lab_setup.sh — create/destroy a libvirt/KVM lab for testing PXE boot
# services behind an OPNsense firewall (see docs/pxe_test/README.md).
# Usage: sudo ./pxe_vm_lab_setup.sh {up|clean}
set -euo pipefail

# --- Configuration ---
# All lab artifacts (downloaded images, VM disks, generated XML) live here.
LAB_DIR="/var/lib/harmony_pxe_test"
IMG_DIR="${LAB_DIR}/images"   # installer images and VM qcow2 disks
STATE_DIR="${LAB_DIR}/state"  # generated libvirt network definitions
VM_OPN="opnsense-pxe"         # firewall / PXE server VM name
VM_PXE="pxe-node-1"           # PXE client VM name
NET_HARMONYLAN="harmonylan"   # isolated LAN the PXE client netboots on

# Network settings for the isolated LAN
# NOTE(review): these three values are not referenced anywhere in this
# script — presumably consumed by the Harmony-side OPNsense config; confirm.
VLAN_CIDR="192.168.150.0/24"
VLAN_GW="192.168.150.1"
VLAN_MASK="255.255.255.0"

# VM Specifications (RAM in MiB, as passed to virt-install --ram)
RAM_OPN="2048"
VCPUS_OPN="2"
DISK_OPN_GB="10"
OS_VARIANT_OPN="freebsd14.0" # Updated to a more recent FreeBSD variant (OPNsense is FreeBSD-based)

RAM_PXE="4096"
VCPUS_PXE="2"
DISK_PXE_GB="40"
OS_VARIANT_LINUX="centos-stream9"

# OPNsense ships as a bz2-compressed serial-console disk image;
# download_if_missing decompresses it to OPN_IMG_PATH.
OPN_IMG_URL="https://mirror.ams1.nl.leaseweb.net/opnsense/releases/25.7/OPNsense-25.7-serial-amd64.img.bz2"
OPN_IMG_PATH="${IMG_DIR}/OPNsense-25.7-serial-amd64.img"
CENTOS_ISO_URL="https://mirror.stream.centos.org/9-stream/BaseOS/x86_64/os/images/boot.iso"
CENTOS_ISO_PATH="${IMG_DIR}/CentOS-Stream-9-latest-boot.iso"

# Use the system (root) libvirt daemon rather than a per-user session.
CONNECT_URI="qemu:///system"
|
||||
|
||||
#######################################
# Download a file to a destination path unless it already exists.
# Transparently decompresses .bz2 downloads.
# Arguments:
#   $1 - source URL
#   $2 - destination file path
# Outputs: status messages to stdout
# Returns: 0 on success or when already present, non-zero on failure
#######################################
download_if_missing() {
  local url="$1"
  local dest="$2"
  if [[ -f "$dest" ]]; then
    echo "Already present: $dest"
    return 0
  fi
  echo "Downloading $url to $dest"
  mkdir -p "$(dirname "$dest")"
  local tmp
  tmp="$(mktemp)"
  # -f: fail on HTTP errors instead of saving an error page as the "image".
  # Remove the partial temp file on failure so a rerun starts clean.
  if ! curl -fL --progress-bar "$url" -o "$tmp"; then
    rm -f -- "$tmp"
    return 1
  fi
  case "$url" in
    *.bz2)
      # Decompress into place; discard both files if decompression fails,
      # otherwise a truncated $dest would be treated as "already present".
      if ! bunzip2 -c "$tmp" > "$dest"; then
        rm -f -- "$tmp" "$dest"
        return 1
      fi
      rm -f -- "$tmp"
      ;;
    *)
      mv -- "$tmp" "$dest"
      ;;
  esac
}
|
||||
|
||||
#######################################
# Ensures a libvirt network is defined and active.
# Arguments:
#   $1 - network name
#   $2 - path to the network XML definition (used only when not yet defined)
# Globals: CONNECT_URI (read)
# Outputs: status messages to stdout
#######################################
ensure_network() {
  local net_name="$1"
  local net_xml_path="$2"
  if virsh --connect "${CONNECT_URI}" net-info "${net_name}" >/dev/null 2>&1; then
    echo "Network ${net_name} already exists."
  else
    echo "Defining network ${net_name} from ${net_xml_path}"
    virsh --connect "${CONNECT_URI}" net-define "${net_xml_path}"
  fi

  # -q: only the exit status matters here; without it the matched
  # "Active: yes" line leaked into the script's output.
  if ! virsh --connect "${CONNECT_URI}" net-info "${net_name}" | grep -q "Active: *yes"; then
    echo "Starting network ${net_name}..."
    virsh --connect "${CONNECT_URI}" net-start "${net_name}"
    virsh --connect "${CONNECT_URI}" net-autostart "${net_name}"
  fi
}
|
||||
|
||||
# Stop and remove a VM definition (including its NVRAM) if it exists.
# Arguments:
#   $1 - libvirt domain name
destroy_vm() {
  local name="$1"
  # Nothing to do when the domain is not defined.
  virsh --connect "${CONNECT_URI}" dominfo "$name" >/dev/null 2>&1 || return 0
  echo "Destroying and undefining VM: ${name}"
  # Force-off may fail when the VM is already stopped; that is fine.
  virsh --connect "${CONNECT_URI}" destroy "$name" || true
  virsh --connect "${CONNECT_URI}" undefine "$name" --nvram
}
|
||||
|
||||
# Stop and remove a libvirt network definition if it exists.
# Arguments:
#   $1 - libvirt network name
destroy_network() {
  local net="$1"
  # Nothing to do when the network is not defined.
  virsh --connect "${CONNECT_URI}" net-info "$net" >/dev/null 2>&1 || return 0
  echo "Destroying and undefining network: ${net}"
  # net-destroy fails when the network is already inactive; ignore that.
  virsh --connect "${CONNECT_URI}" net-destroy "$net" || true
  virsh --connect "${CONNECT_URI}" net-undefine "$net"
}
|
||||
|
||||
# --- Main Logic ---

# Write libvirt network XML for the NAT'd "default" network and the
# isolated harmonylan bridge into STATE_DIR.
write_network_definitions() {
  cat > "${STATE_DIR}/default.xml" <<EOF
<network>
  <name>default</name>
  <forward mode='nat'/>
  <bridge name='virbr0' stp='on' delay='0'/>
  <ip address='192.168.122.1' netmask='255.255.255.0'>
    <dhcp>
      <range start='192.168.122.100' end='192.168.122.200'/>
    </dhcp>
  </ip>
</network>
EOF

  # harmonylan is deliberately isolated: no <forward>, no <ip>/<dhcp> —
  # OPNsense itself will serve DHCP/TFTP on this bridge.
  cat > "${STATE_DIR}/${NET_HARMONYLAN}.xml" <<EOF
<network>
  <name>${NET_HARMONYLAN}</name>
  <bridge name='virbr1' stp='on' delay='0'/>
</network>
EOF
}

# Create the OPNsense firewall VM, booting from the serial installer image.
# Idempotent on the disk image: an existing qcow2 is reused.
create_opnsense_vm() {
  local disk_opn="${IMG_DIR}/${VM_OPN}.qcow2"
  if [[ ! -f "$disk_opn" ]]; then
    qemu-img create -f qcow2 "$disk_opn" "${DISK_OPN_GB}G"
  fi

  echo "Creating OPNsense VM using serial image..."
  # boot.order=1 is the (empty) target disk, boot.order=2 the read-only
  # installer image attached as a USB disk, so UEFI falls through to the
  # installer on first boot and to the installed system afterwards.
  virt-install \
    --connect "${CONNECT_URI}" \
    --name "${VM_OPN}" \
    --ram "${RAM_OPN}" \
    --vcpus "${VCPUS_OPN}" \
    --cpu host-passthrough \
    --os-variant "${OS_VARIANT_OPN}" \
    --graphics none \
    --noautoconsole \
    --disk path="${disk_opn}",device=disk,bus=virtio,boot.order=1 \
    --disk path="${OPN_IMG_PATH}",device=disk,bus=usb,readonly=on,boot.order=2 \
    --network network=default,model=virtio \
    --network network="${NET_HARMONYLAN}",model=virtio \
    --boot uefi,menu=on

  echo "OPNsense VM created. Connect with: sudo virsh console ${VM_OPN}"
  echo "The VM will boot from the serial installation image."
  echo "Login with user 'installer' and password 'opnsense' to start the installation."
  echo "Install onto the VirtIO disk (vtbd0)."
  echo "After installation, shutdown the VM, then run 'sudo virsh edit ${VM_OPN}' and remove the USB disk block to boot from the installed system."
}

# Create the PXE client VM that netboots on the harmonylan network.
# Idempotent on the disk image: an existing qcow2 is reused.
create_pxe_client_vm() {
  local disk_pxe="${IMG_DIR}/${VM_PXE}.qcow2"
  if [[ ! -f "$disk_pxe" ]]; then
    qemu-img create -f qcow2 "$disk_pxe" "${DISK_PXE_GB}G"
  fi

  echo "Creating PXE client VM..."
  virt-install \
    --connect "${CONNECT_URI}" \
    --name "${VM_PXE}" \
    --ram "${RAM_PXE}" \
    --vcpus "${VCPUS_PXE}" \
    --cpu host-passthrough \
    --os-variant "${OS_VARIANT_LINUX}" \
    --graphics none \
    --noautoconsole \
    --disk path="${disk_pxe}",format=qcow2,bus=virtio \
    --network network="${NET_HARMONYLAN}",model=virtio \
    --pxe \
    --boot uefi,menu=on

  echo "PXE VM created. It will attempt to netboot on ${NET_HARMONYLAN}."
}

#######################################
# Build the full lab: network definitions, OPNsense VM, PXE client VM.
# Globals (read): STATE_DIR, NET_HARMONYLAN plus everything the helpers use.
# Outputs: progress messages to stdout
#######################################
create_lab_environment() {
  write_network_definitions

  # Ensure both networks exist and are active
  ensure_network "default" "${STATE_DIR}/default.xml"
  ensure_network "${NET_HARMONYLAN}" "${STATE_DIR}/${NET_HARMONYLAN}.xml"

  create_opnsense_vm
  create_pxe_client_vm
}
|
||||
|
||||
# --- Script Entrypoint ---
# Dispatch on the first argument: "up" builds the lab, "clean" tears it down.
cmd="${1:-}"
if [[ "$cmd" == "up" ]]; then
  mkdir -p "${IMG_DIR}" "${STATE_DIR}"
  download_if_missing "$OPN_IMG_URL" "$OPN_IMG_PATH"
  download_if_missing "$CENTOS_ISO_URL" "$CENTOS_ISO_PATH"
  create_lab_environment
  echo "Lab setup complete. Use 'sudo virsh list --all' to see VMs."
elif [[ "$cmd" == "clean" ]]; then
  destroy_vm "${VM_PXE}"
  destroy_vm "${VM_OPN}"
  destroy_network "${NET_HARMONYLAN}"
  # Optionally destroy the default network if you want a full reset
  # destroy_network "default"
  echo "Cleanup complete."
else
  echo "Usage: sudo $0 {up|clean}"
  exit 1
fi
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user