Compare commits

...

113 Commits

Author SHA1 Message Date
f96572848a feat: added an example of adding an msteams channel 2025-05-28 12:43:42 -04:00
78aadadd22 refactor: Prometheus alert channel configuration for extensibility to respect Open/Closed; this makes adding new alert channel types easier, requiring only a new trait implementation without modifying core logic 2025-05-28 12:43:00 -04:00
b5c6e1c99d refactor: used AlertEndPoint trait and impl, built custom types for prometheus alert manager 2025-05-26 11:36:03 -04:00
f94c899bf7 wip: using types to serialize to json and traits/impl to build alert channel routes for prometheus alert manager 2025-05-23 16:17:17 -04:00
77eb1228be feat: microsoft teams integration 2025-05-22 16:10:48 -04:00
e1a8ee1c15 feat: send alerts to multiple alert channels 2025-05-22 14:16:41 -04:00
44b2b092a8 feat: added Slack notifications support 2025-05-21 15:29:14 -04:00
2b6d2e8606 fix: merge conflict 2025-05-20 16:05:38 -04:00
7fc2b1ebfe feat: added monitoring stack example to lamp demo 2025-05-20 15:59:01 -04:00
e80752ea3f feat: install discord alert manager helm chart when Discord is the chosen alerting channel 2025-05-20 15:51:03 -04:00
bae7222d64 Our own Helm Command/Resource/Executor (WIP) (#13)
Co-authored-by: tahahawa <tahahawa@gmail.com>
Reviewed-on: #13
Co-authored-by: Taha Hawa <taha@taha.dev>
Co-committed-by: Taha Hawa <taha@taha.dev>
2025-05-20 14:01:10 +00:00
f7d3da3ac9 fix merge conflict 2025-05-15 15:31:26 -04:00
eb8a8a2e04 chore: modified build config to be able to pass namespace to the config 2025-05-15 15:19:40 -04:00
b4c6848433 feat: added default monitoringStackScore implementation 2025-05-15 14:52:04 -04:00
0d94c537a0 feat: add ingress score (#32)
Co-authored-by: tahahawa <tahahawa@gmail.com>
Reviewed-on: #32
Reviewed-by: wjro <wrolleman@nationtech.io>
2025-05-15 16:11:40 +00:00
861f266c4e Merge pull request 'feat: LAMP stack and Monitoring stack now work on OKD, we just have to manually set a few serviceaccounts to privileged scc until we find a better solution' (#36) from feat/lampOKD into master
Reviewed-on: #36
2025-05-14 15:48:56 +00:00
51724d0e55 feat: LAMP stack and Monitoring stack now work on OKD, we just have to manually set a few serviceaccounts to privileged scc until we find a better solution 2025-05-14 11:47:39 -04:00
c2d1cb9b76 Merge pull request 'upgrade stack size from default 1MB on windows (k3d stack overflow otherwise)' (#34) from windows-stack-size-increase into master
Reviewed-on: #34
Reviewed-by: johnride <jg@nationtech.io>
2025-05-14 14:29:51 +00:00
tahahawa c84a02c8ec upgrade stack size from default 1MB on windows (k3d stack overflow otherwise) 2025-05-11 22:39:23 -04:00
8d3d167848 fix: Remove todo statements for lamp score and k8s related features that are now complete! 2025-05-06 14:46:57 -04:00
94f6cc6942 fix: kube_prometheus missing new field repo in HelmChartScore 2025-05-06 13:57:58 -04:00
4a9b95acad Merge pull request 'monitoring-alerting' (#30) from monitoring-alerting into master
Reviewed-on: #30
2025-05-06 17:50:56 +00:00
ef9c1cce77 fix: yaml structure 2025-05-06 13:42:59 -04:00
df65ac3439 formatting: Fix format of load_balancer.rs 2025-05-06 13:38:21 -04:00
e5ddd296db Merge pull request 'feat: add cert-manager module and helm repo support' (#31) from feat/awsOKD into master
Reviewed-on: #31
2025-05-06 16:39:19 +00:00
4be008556e feat: add cert-manager module and helm repo support
- Implemented a new `cert-manager` module for deploying cert-manager.
- Added support for specifying a Helm repository in module configurations.
- Introduced `cert_manager` module in `modules/mod.rs`.
- Created `src/modules/cert_manager` directory and its associated code.
- Implemented `add_repo` function in `src/modules/helm.rs` for adding Helm repositories.
- Updated `LAMPInterpret` and `lamp.rs` to integrate the new module.
- Added logging for Helm command execution.
- Updated k8s deployment file to remove unused DeepMerge dependency.
2025-05-06 16:38:57 +00:00
78e9893341 Merge pull request 'feat: started to prepare inventory / topology for NCD' (#1) from feat/settingUpNDC into master
Reviewed-on: #1
2025-05-06 16:38:40 +00:00
d9921b857b fix: installs helm chart 2025-05-06 12:23:03 -04:00
e62ef001ed fix: Fix opnsense test, Host.tll now optional and run cargo fmt 2025-05-06 12:00:56 -04:00
1fb7132c64 Merge branch 'master' into feat/settingUpNDC 2025-05-06 11:58:12 -04:00
2d74c66fc6 wip: trying to get the kube-prometheus score to install 2025-05-06 11:54:10 -04:00
8a199b64f5 feat: Upgrade opnsense-config crates to be compatible with opnsense 25.1_5 2025-05-06 11:45:19 -04:00
b7fe62fcbb feat: ncd0 example complete. Missing files for authentication, ignition etc. are accessible on demand. This is yet another great step towards full UPI automated provisioning 2025-05-06 11:44:40 -04:00
cd8542258c Merge remote-tracking branch 'origin/master' into monitoring-alerting 2025-05-06 10:03:27 -04:00
472a3c1051 fix: correctly pass namespace and monitoring stack to topology so it can be used to init the maestro and exec the score 2025-05-06 10:02:21 -04:00
88270ece61 fix: refactor so that the topology installs the MonitoringAlertingStack depending on if it is already present in the cluster 2025-05-05 16:37:15 -04:00
e7cfbf914a feat: added basic alert for pvc 95% full to kube-prometheus score 2025-05-05 15:38:37 -04:00
fbd466a85c added file 2025-05-05 13:40:32 -04:00
2f8e150f41 feat: added Score and topology to create kube prometheus monitoring and alerting stack 2025-05-05 12:49:28 -04:00
764fd6d451 Merge pull request 'chore: added default mariadb size and pass env variables to php app' (#28) from lamp-env-vars into master
Reviewed-on: #28
Reviewed-by: johnride <jg@nationtech.io>
2025-05-03 00:20:47 +00:00
78fffcd725 fix: specified 2Gi db size from LAMPconfig 2025-05-02 15:07:39 -04:00
e1133ea114 use default database_size None in LampConfig to default to value from helm chart 2025-05-02 15:02:50 -04:00
d8e8a49745 Merge pull request 'feat: php program to fill pvc and report database usage' (#29) from pvc-filler into master
Reviewed-on: #29
2025-05-02 16:06:46 +00:00
a7ba9be486 feat: php program to fill pvc and report database usage 2025-05-02 12:03:18 -04:00
1c3669cb47 chore: added default mariadb size and pass env variables to php app 2025-05-02 11:56:27 -04:00
90b80b24bc Merge pull request 'feat: push docker image to registry and deploy with full tag' (#27) from feat/lampDatabase into master
Reviewed-on: #27
Reviewed-by: wjro <wrolleman@nationtech.io>
2025-05-01 17:39:23 +00:00
c879ca143f feat: Add comments explaining a bit of what harmony does in the lamp demo 2025-04-30 23:36:12 -04:00
bc2bd2f2f4 feat: push docker image to registry and deploy with full tag
- Added functionality to tag and push the built Docker image to a specified registry.
- Modified deployment score to use the full image tag (including registry and project).
- Included error handling and logging for the `docker tag` and `docker push` commands.
- Updated the `K8sDeploymentScore` struct to include a namespace field and environment variables for database credentials.
- Added kebab-case conversion for deployment name and namespace.
- Implemented a check_output function for better error reporting.
2025-04-30 22:33:31 -04:00
28978299c9 Merge pull request 'feat: add mariadb helm deployment to lamp interpreter' (#26) from feat/lampDatabase into master
Reviewed-on: #26
Reviewed-by: wjro <wrolleman@nationtech.io>
2025-04-30 20:03:13 +00:00
87f6afc249 feat: add mariadb helm deployment to lamp interpreter
- Adds a `deploy_database` function to the `LAMPInterpret` struct to deploy a MariaDB database using Helm.
- Integrates `HelmCommand` trait requirement to the `LAMPInterpret` struct.
- Introduces `HelmChartScore` to manage MariaDB deployment.
- Adds namespace configuration for helm deployments.
- Updates trait bounds for `LAMPInterpret` to include `HelmCommand`.
- Implements `get_namespace` function to retrieve the namespace.
2025-04-30 15:40:26 -04:00
254f392cb5 feat(HelmScore): Add values yaml option to helm chart score (#23)
Co-authored-by: tahahawa <tahahawa@gmail.com>
Reviewed-on: #23
2025-04-29 16:09:04 +00:00
a6bcaade46 wip: alerting 2025-04-29 11:28:32 -04:00
6c145f1100 wip: initial layout 2025-04-28 16:31:22 -04:00
40cd765019 WIP: initial layout for MonitoringStackScore 2025-04-28 16:18:44 -04:00
db9c8d83e6 update adr 2025-04-28 15:09:11 -04:00
20551b4a80 adr for monitoring and alerting 2025-04-28 14:11:44 -04:00
5c026ae6dd chore: improved error message for helm unavailable 2025-04-28 10:11:57 -04:00
76c0cacc1b Merge pull request 'feat: LampScore implement dockerfile generation and image building' (#22) from feat/lampDocker into master
Reviewed-on: #22
Reviewed-by: wjro <wrolleman@nationtech.io>
2025-04-27 19:56:29 +00:00
f17948397f feat: escape PHP_ERROR_REPORTING value in Dockerfile
Escapes the value of the PHP_ERROR_REPORTING environment variable in the Dockerfile to prevent potential issues with shell interpretation. Uses EnvBuilder for a more structured approach.
2025-04-27 15:55:12 -04:00
16a665241e feat: LampScore implement dockerfile generation and image building
- Added `build_dockerfile` function to generate a Dockerfile based on the LAMP stack for the given project.
- Implemented `build_docker_image` to execute the docker build command and create the image.
- Configured user and permissions for apache.
- Included necessary apache configuration for security.
- Added error handling for docker build failures.
- Exposed port 80 for external access.
- Added basic serialization to Config struct.
2025-04-25 14:34:57 -04:00
065e3904b8 Merge pull request 'fix(k8s_anywhere): Ensure k3d cluster is started before use' (#21) from feat/k3d into master
Reviewed-on: #21
Reviewed-by: wjro <wrolleman@nationtech.io>
Reviewed-by: taha <taha@noreply.git.nationtech.io>
2025-04-25 16:46:28 +00:00
22752960f9 fix(k8s_anywhere): Ensure k3d cluster is started before use
- Refactor k3d cluster management to explicitly start the cluster.
- Introduce `start_cluster` function to ensure cluster is running before operations.
- Improve error handling and logging during cluster startup.
- Update `create_cluster` and other related functions to utilize the new startup mechanism.
- Enhance reliability and prevent potential issues caused by an uninitialized cluster.
- Add `run_k3d_command` to handle k3d commands with logging and error handling.
2025-04-25 12:45:02 -04:00
23971ecd7c Merge pull request 'feat: implement k3d cluster management' (#20) from feat/k3d into master
Reviewed-on: #20
Reviewed-by: wjro <wrolleman@nationtech.io>
Reviewed-by: taha <taha@noreply.git.nationtech.io>
2025-04-25 15:33:13 +00:00
fbcd3e4f7f feat: implement k3d cluster management
- Adds functionality to download, install, and manage k3d clusters.
- Includes methods for downloading the latest release, creating clusters, and verifying cluster existence.
- Implements `ensure_k3d_installed`, `get_latest_release_tag`, `download_latest_release`, `is_k3d_installed`, `verify_cluster_exists`, `create_cluster` and `create_kubernetes_client`.
- Provides a `get_client` method to access the Kubernetes client.
- Includes unit tests for download and installation.
- Adds handling for different operating systems.
- Improves error handling and logging.
- Introduces a `K3d` struct to encapsulate k3d cluster management logic.
- Adds the ability to specify the cluster name during K3d initialization.
2025-04-24 17:36:01 -04:00
d307893f15 fix: small-fixes (#19)
Reviewed-on: #19
Reviewed-by: johnride <jg@nationtech.io>
Co-authored-by: Taha Hawa <taha@taha.dev>
Co-committed-by: Taha Hawa <taha@taha.dev>
2025-04-24 18:47:47 +00:00
00c0566533 Merge pull request 'feat: introduce Maestro::initialize function that creates the maestro instance and ensure_ready the topology as well. Also refactor all relevant examples to use this new initialize function' (#18) from feat/maestroinitialize into master
Reviewed-on: #18
Reviewed-by: taha <taha@noreply.git.nationtech.io>
2025-04-24 17:43:31 +00:00
f5e3f1aaea feat: Add check.sh helper script to make sure code looks OK before pushing 2025-04-24 13:16:20 -04:00
508b97ca7c chore: Fix more warnings 2025-04-24 13:14:35 -04:00
80bdd0ee8a feat: introduce Maestro::initialize function that creates the maestro instance and ensure_ready the topology as well. Also refactor all relevant examples to use this new initialize function 2025-04-24 12:58:41 -04:00
6c06a4ae07 feat: update ensure_ready to check helm is available (#17)
I want to make sure the changes I'm working on in the ensure_ready don't break anything

Reviewed-on: #17
Reviewed-by: taha <taha@noreply.git.nationtech.io>
Co-authored-by: Willem <wrolleman@nationtech.io>
Co-committed-by: Willem <wrolleman@nationtech.io>
2025-04-24 15:51:28 +00:00
ad1aa897b1 Merge pull request 'chore: Fix all warnings in the project, ignore unused variables mostly' (#16) from chore/warnings into master
Reviewed-on: #16
Reviewed-by: wjro <wrolleman@nationtech.io>
2025-04-24 14:28:14 +00:00
dccc9c04f5 chore: Fix all warnings in the project, ignore unused variables mostly 2025-04-24 10:22:53 -04:00
9345e63a32 fix: couple of changes to get a test working 2025-04-23 15:31:02 -04:00
ff830486af Merge pull request 'fix(cli): remove need for debug in harmony-cli' (#15) from harmony-cli-remove-debug into master
Reviewed-on: #15
Reviewed-by: johnride <jg@nationtech.io>
2025-04-23 18:55:06 +00:00
da83019d85 remove need for debug in harmony-cli 2025-04-23 14:53:36 -04:00
53aa47f91e feat: Initial helm score using helm-wrapper-rs (#14)
Reviewed-on: #14
Co-authored-by: Taha Hawa <taha@taha.dev>
Co-committed-by: Taha Hawa <taha@taha.dev>
2025-04-23 18:22:27 +00:00
8f470278a7 Merge pull request 'feat: introduce topology readiness and initialization' (#10) from feat/topologyDependencies into master
Reviewed-on: #10
Reviewed-by: taha <taha@noreply.git.nationtech.io>
2025-04-23 15:58:31 +00:00
213fb25686 feat: Use inquire::Confirm instead of raw std::io::Read for K8sAnywhere installation confirmation prompt 2025-04-23 11:56:55 -04:00
45668638e1 feat: TUI does not require Topology to implement Debug anymore 2025-04-23 11:17:03 -04:00
0857aba039 Switch HAClusterTopology for K8sAnywhereTopology in lamp example 2025-04-23 11:08:36 -04:00
452ebc2614 feat: add k3d installation interpret
Adds a new interpret for k3d installation. This includes defining the `K3dInstallationInterpret` struct, implementing the `Interpret` trait for it, and adding the `K3dInstallation` variant to the `InterpretName` enum. The implementation currently contains `todo!()` placeholders for the actual logic.
2025-04-23 10:54:54 -04:00
9e456bb4f5 chore: Refactor DownloadableAsset tests to use httptest instead of a local TcpListener 2025-04-23 10:54:54 -04:00
83ba0e1044 fix: Initialize K3DInstallationScore correctly 2025-04-23 10:54:54 -04:00
2229e9d7af chore: Cargo fmt 2025-04-23 10:54:54 -04:00
15785dd219 feat: download and install k3d latest release
- Implemented functionality to fetch the latest k3d release tag from GitHub.
- Added logic to determine the appropriate binary URL based on the current platform.
- Implemented downloading and saving the binary to a specified directory.
- Included unit tests to verify the download and installation process.
- Added a `K3D_BIN_FILE_NAME` constant for clarity.
- Added logging for better debugging.
2025-04-23 10:54:54 -04:00
847d84b46f wip: Started work on k3d crate 2025-04-23 10:54:54 -04:00
3f6f1fa0d4 wip: Implement basic K8sAnywhere setup with K3d support
- Added initial K8sAnywhere topology and related modules.
- Implemented a basic K3d installation score for cluster bootstrapping.
- Introduced LocalhostTopology for local development and testing.
- Added necessary module structure and dependencies.
- Implemented user prompt for K3d installation confirmation.
- Added basic error handling and logging.
- Refactored existing code to improve modularity and maintainability.
- Included necessary tests to ensure functionality.
2025-04-23 10:54:54 -04:00
6812d05849 feat: Introduce K8sAnywhereTopology and refactor Kubernetes interactions
This commit introduces a new topology, `K8sAnywhereTopology`, designed to handle Kubernetes deployments more flexibly.

Key changes include:

- Introduced `K8sAnywhereTopology` to encapsulate Kubernetes client management and configuration.
- Refactored existing Kubernetes-related code to utilize the new topology.
- Updated `OcK8sclient` to `K8sclient` across modules (k8s, lamp, deployment, resource) for consistency.
- Ensured all relevant modules now interface with Kubernetes through the `K8sclient` trait.

This change promotes a more modular and maintainable codebase for Kubernetes integrations within Harmony.
2025-04-23 10:54:54 -04:00
027114c48c feat: introduce topology readiness and initialization
Adds an `ensure_ready` method to the `Topology` trait to ensure the infrastructure is prepared before score execution.

- Introduces a new `Outcome` status to indicate the result of the readiness check.
- Implements a `topology_preparation_result` field in `Maestro` to track initialization status.
- Adds a check in `interpret` to warn if the topology isn't fully initialized.
- Provides detailed documentation for the `Topology` trait and `ensure_ready` method, including recommended patterns for complex setups.
- Adds `async_trait` dependency.
2025-04-23 10:54:54 -04:00
eeafa086f3 feat: Improve output of tui. From p-r tui-score-info (#11)
WIP: formatted score debug print into a table with a name header and the score information below
Co-authored-by: Jean-Gabriel Gill-Couture <jg@nationtech.io>
Reviewed-on: #11
Reviewed-by: johnride <jg@nationtech.io>
Co-authored-by: Willem <wrolleman@nationtech.io>
Co-committed-by: Willem <wrolleman@nationtech.io>
2025-04-23 14:54:32 +00:00
abd20b96a2 feat: harmony-cli v0.1 #8 (#9)
Co-authored-by: tahahawa <tahahawa@gmail.com>
Reviewed-on: #9
Reviewed-by: johnride <jg@nationtech.io>
Co-authored-by: Taha Hawa <taha@taha.dev>
Co-committed-by: Taha Hawa <taha@taha.dev>
2025-04-19 01:13:40 +00:00
0ba7f2536c docs: ADR for Helm Resource implementation style (#12)
Co-authored-by: tahahawa <tahahawa@gmail.com>
Reviewed-on: #12
2025-04-16 17:39:17 +00:00
3097e6af67 Merge branch 'adr/005-interactive-project' 2025-04-11 11:03:16 -04:00
606ea43b51 fix: improve tests and remove unused code
- Corrected XML test data to remove unnecessary `<descr>` tags, resolving failing tests.
- Removed the unused `ratatui_utils` module and its associated code.
- Simplified example in `harmony_tui/src/lib.rs` to use `tokio::main` and register scores directly with `Maestro`. This aligns with the project's evolving structure.
2025-04-11 11:01:05 -04:00
31ae8365a6 docs: add quick demo and core architecture overview
Adds a quick demo command using `cargo run -p example-tui` to launch a minimalist TUI with demo scores.

Also includes a core architecture diagram and overview in the README for better understanding of the project structure.
2025-04-09 16:09:54 -04:00
1cbf4de2a1 adr: proposal serde for score data representation and UI rendering
Decouples score definitions from UI implementations by mandating `serde::Serialize` and `serde::Deserialize` for all `Score` structs. UIs will interact with scores via their serialized representation, enabling scalability and reducing complexity for score authors.

This approach:

- Scales better with new score types and UI targets.
- Simplifies score authoring by removing the need for UI-specific display traits.
- Leverages the `serde` ecosystem for robust data handling.

Adding new field types requires updates to all UIs, a trade-off acknowledged in the ADR.
2025-04-05 14:38:58 -04:00
b4cc5cff4f feat: add serde derive to Score types
This commit adds `serde` dependency and derives `Serialize` trait for `Score` types. This is necessary for serialization and deserialization of these types, which is required to display Scores to various user interfaces

- Added `serde` dependency to `harmony_types/Cargo.toml`.
- Added `serde::Serialize` derive macro to `MacAddress` in `harmony_types/src/lib.rs`.
- Added `serde::Serialize` derive macro to `Config` in `opnsense-config/src/config/config.rs`.
- Added `serde::Serialize` derive macro to `Score` in `harmony_types/src/lib.rs`.
- Added `serde::Serialize` derive macro to `Config` and `Score` in relevant modules.
- Added placeholder `todo!()` implementations for `serialize` methods. These will be implemented in future commits.
2025-04-05 14:36:08 -04:00
ab9b7476a4 feat: add load balancer score and frontend integration
- Implemented `OKDLoadBalancerScore` and integrated it as a `FrontendScore`.
- Added `FrontendScore` trait for TUI displayable scores.
- Implemented `Display` for `OKDLoadBalancerScore`.
- Updated `ScoreListWidget` to handle `FrontendScore` types.
- Included load balancer score in the TUI.
2025-04-03 13:41:29 -04:00
e6384da57e Working on various ADR, cleaning up some stuff 2025-04-03 13:40:46 -04:00
2950235d23 Update adr/006-secret-management.md 2025-03-10 04:31:06 +00:00
c8547e38f2 feat(ipxe): create empty score shell for ipxe 2025-03-02 12:14:04 -05:00
bfc79abfb6 feat(ipxe): added slitaz image (super small image with gui and tools) 2025-03-02 10:05:50 -05:00
7697a170bd feat(ipxe): setup to have MAC specific bootfiles and fallback to a default if not found 2025-03-02 09:37:11 -05:00
941c9bc0b0 fix: missing protocol for ipxe boot file 2025-03-02 07:59:30 -05:00
51aeea1ec9 feat: support new configurable field in dhcp config: filenameipxe 2025-03-01 10:51:01 -05:00
8118df85ee feat: support new configurable field in dhcp config: filename64 2025-03-01 10:41:41 -05:00
7af83910ef doc: fix 2025-03-01 10:29:22 -05:00
1475f4af0c doc: fix 2025-03-01 10:25:24 -05:00
a3a61c734f doc: update README.md with instructions on how to add a field in opnsense config.xml 2025-03-01 10:17:51 -05:00
3f77bc7aef feat: support new configurable field in dhcp config: filename 2025-03-01 08:56:41 -05:00
d5125dd811 feat(iPXE): adding files for iPXE and memtest86 image 2025-02-23 16:39:57 -05:00
1ca316c085 wip: added new xml fields for Caddy + legacy pxe filename 2025-02-22 14:24:51 -05:00
e390f1edb3 feat: started to prepare inventory / topology for NCD 2025-02-22 11:12:28 -05:00
146 changed files with 14979 additions and 879 deletions

5
.cargo/config.toml Normal file

@@ -0,0 +1,5 @@
[target.x86_64-pc-windows-msvc]
rustflags = ["-C", "link-arg=/STACK:8000000"]
[target.x86_64-pc-windows-gnu]
rustflags = ["-C", "link-arg=-Wl,--stack,8000000"]

2008
Cargo.lock generated

File diff suppressed because it is too large

Cargo.toml

@@ -9,6 +9,8 @@ members = [
"harmony_tui",
"opnsense-config",
"opnsense-config-xml",
+"harmony_cli",
+"k3d",
]
[workspace.package]
@@ -21,21 +23,24 @@ log = "0.4.22"
env_logger = "0.11.5"
derive-new = "0.7.0"
async-trait = "0.1.82"
-tokio = { version = "1.40.0", features = ["io-std", "fs"] }
+tokio = { version = "1.40.0", features = ["io-std", "fs", "macros", "rt-multi-thread"] }
cidr = "0.2.3"
russh = "0.45.0"
russh-keys = "0.45.0"
rand = "0.8.5"
url = "2.5.4"
kube = "0.98.0"
-k8s-openapi = { version = "0.24.0", features = [ "v1_30" ] }
+k8s-openapi = { version = "0.24.0", features = ["v1_30"] }
serde_yaml = "0.9.34"
serde-value = "0.7.0"
http = "1.2.0"
inquire = "0.7.5"
convert_case = "0.8.0"
[workspace.dependencies.uuid]
version = "1.11.0"
features = [
-"v4", # Lets you generate random UUIDs
-"fast-rng", # Use a faster (but still sufficiently random) RNG
-"macro-diagnostics", # Enable better diagnostics for compile-time UUIDs
+"v4", # Lets you generate random UUIDs
+"fast-rng", # Use a faster (but still sufficiently random) RNG
+"macro-diagnostics", # Enable better diagnostics for compile-time UUIDs
]

172
README.md

@@ -1,9 +1,171 @@
-### Watch the whole repo on every change
-
-Due to the current setup being a mix of separate repositories with gitignore and rust workspace, a few options are required for cargo-watch to have the desired behavior :
-
-```sh
-RUST_LOG=info cargo watch --ignore-nothing -w harmony -w private_repos/ -x 'run --bin nationtech'
-```
-
-This will run the nationtech bin (likely `private_repos/nationtech/src/main.rs`) on any change in the harmony or private_repos folders.
# Harmony : Open Infrastructure Orchestration
## Quick demo
`cargo run -p example-tui`
This will launch Harmony's minimalist terminal ui which embeds a few demo scores.
Usage instructions will be displayed at the bottom of the TUI.
`cargo run --bin example-cli -- --help`
This is the harmony CLI, a minimal implementation.
The current help text:
````
Usage: example-cli [OPTIONS]
Options:
-y, --yes Run score(s) or not
-f, --filter <FILTER> Filter query
-i, --interactive Run interactive TUI or not
-a, --all Run all or nth, defaults to all
-n, --number <NUMBER> Run nth matching, zero indexed [default: 0]
-l, --list list scores, will also be affected by run filter
-h, --help Print help
-V, --version Print version
````
## Core architecture
![Harmony Core Architecture](docs/diagrams/Harmony_Core_Architecture.drawio.svg)
## Supporting a new field in OPNSense `config.xml`
Two steps:
- Supporting the field in `opnsense-config-xml`
- Enabling Harmony to control the field
We'll use the `filename` field in the `dhcpd` section of the file as an example.
### Supporting the field
As type checking is enforced, every field from `config.xml` must be known by the code. Each subsection of `config.xml` has its own `.rs` file. For the `dhcpd` section, we'll modify `opnsense-config-xml/src/data/dhcpd.rs`.
When a new field appears in the xml file, an error like this will be thrown and Harmony will panic :
```
Running `/home/stremblay/nt/dir/harmony/target/debug/example-nanodc`
Found unauthorized element filename
thread 'main' panicked at opnsense-config-xml/src/data/opnsense.rs:54:14:
OPNSense received invalid string, should be full XML: ()
```
Define the missing field (`filename`) in the `DhcpInterface` struct of `opnsense-config-xml/src/data/dhcpd.rs`:
```
pub struct DhcpInterface {
...
pub filename: Option<String>,
```
Harmony should now build and run again.
### Controlling the field
Define the `xml field setter` in `opnsense-config/src/modules/dhcpd.rs`.
```
impl<'a> DhcpConfig<'a> {
...
pub fn set_filename(&mut self, filename: &str) {
self.enable_netboot();
self.get_lan_dhcpd().filename = Some(filename.to_string());
}
...
```
Define the `value setter` in the `DhcpServer` trait in `domain/topology/network.rs`
```
#[async_trait]
pub trait DhcpServer: Send + Sync {
...
async fn set_filename(&self, filename: &str) -> Result<(), ExecutorError>;
...
```
Implement the `value setter` in each `DhcpServer` implementation.
`infra/opnsense/dhcp.rs`:
```
#[async_trait]
impl DhcpServer for OPNSenseFirewall {
...
async fn set_filename(&self, filename: &str) -> Result<(), ExecutorError> {
{
let mut writable_opnsense = self.opnsense_config.write().await;
writable_opnsense.dhcp().set_filename(filename);
debug!("OPNsense dhcp server set filename {filename}");
}
Ok(())
}
...
```
`domain/topology/ha_cluster.rs`
```
#[async_trait]
impl DhcpServer for DummyInfra {
...
async fn set_filename(&self, _filename: &str) -> Result<(), ExecutorError> {
unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA)
}
...
```
Add the new field to the DhcpScore in `modules/dhcp.rs`
```
pub struct DhcpScore {
...
pub filename: Option<String>,
```
Define it in its implementation in `modules/okd/dhcp.rs`
```
impl OKDDhcpScore {
...
Self {
dhcp_score: DhcpScore {
...
filename: Some("undionly.kpxe".to_string()),
```
Define it in its implementation in `modules/okd/bootstrap_dhcp.rs`
```
impl OKDDhcpScore {
...
Self {
dhcp_score: DhcpScore::new(
...
Some("undionly.kpxe".to_string()),
```
Update the interpret function (called by the `execute` fn of the interpret) so it now updates the `filename` field value in `modules/dhcp.rs`
```
impl DhcpInterpret {
...
let filename_outcome = match &self.score.filename {
Some(filename) => {
let dhcp_server = Arc::new(topology.dhcp_server.clone());
dhcp_server.set_filename(&filename).await?;
Outcome::new(
InterpretStatus::SUCCESS,
format!("Dhcp Interpret Set filename to {filename}"),
)
}
None => Outcome::noop(),
};
if next_server_outcome.status == InterpretStatus::NOOP
&& boot_filename_outcome.status == InterpretStatus::NOOP
&& filename_outcome.status == InterpretStatus::NOOP
...
Ok(Outcome::new(
InterpretStatus::SUCCESS,
format!(
"Dhcp Interpret Set next boot to [{:?}], boot_filename to [{:?}], filename to [{:?}]",
self.score.boot_filename, self.score.boot_filename, self.score.filename
)
...
```

33
adr/000-ADR-Template.md Normal file

@@ -0,0 +1,33 @@
# Architecture Decision Record: \<Title\>
Name: \<Name\>
Initial Date: \<Date\>
Last Updated Date: \<Date\>
## Status
Proposed/Pending/Accepted/Implemented
## Context
The problem, background, the "why" behind this decision/discussion
## Decision
Proposed solution to the problem
## Rationale
Reasoning behind the decision
## Consequences
Pros/Cons of chosen solution
## Alternatives considered
Pros/Cons of various proposed solutions considered
## Additional Notes


@@ -1,12 +1,18 @@
-**Architecture Decision Record: Harmony Infrastructure Abstractions**
-**Status**: Proposed
-**Context**: Harmony is an infrastructure orchestrator written in pure Rust, aiming to provide real portability of automation across different cloud providers and infrastructure setups. To achieve this, we need to define infrastructure abstractions that are provider-agnostic and flexible enough to accommodate various use cases.
-**Decision**: We will define our infrastructure abstractions using a domain-driven approach, focusing on the core logic of Harmony. These abstractions will only include the absolutely required elements for a specific resource, without referencing specific providers or implementations.
-**Example: Database Abstraction**
+## Architecture Decision Record: Core Harmony Infrastructure Abstractions
+## Status
+Proposed
+## Context
+Harmony is an infrastructure orchestrator written in pure Rust, aiming to provide real portability of automation across different cloud providers and infrastructure setups. To achieve this, we need to define infrastructure abstractions that are provider-agnostic and flexible enough to accommodate various use cases.
+## Decision
+We will define our infrastructure abstractions using a domain-driven approach, focusing on the core logic of Harmony. These abstractions will only include the absolutely required elements for a specific resource, without referencing specific providers or implementations.
+### Example: Database Abstraction
To deploy a database to any cloud provider, we define an abstraction that includes essential elements such as:
```rust


@@ -2,7 +2,7 @@
## Status
-Draft
+Proposal
## Context
@@ -31,30 +31,10 @@ This ADR will outline the approach taken to go from a LAMP project to be standal
## Decision
-# Option 1 : Score spec
-To simplify onboarding of existing projects, we decided to integrate with Score Spec for the following reasons :
-- It is a CNCF project, which helps a lot with adoption and community building
-- It already supports important targets for us including docker-compose and k8s
-- It provides a way to define the application's infrastructure at the correct level of abstraction for us to deploy it anywhere -- that is the goal of the score-spec project
-- Once we evolve, we can simply have a score compatible provider that allows any project with a score spec to be deployed on the harmony stack
-- Score was built with enterprise use-cases in mind : Humanitec platform engineering customers
-## Consequences
-### Positive
-- Score Community is growing, using harmony will be very easy for them
-### Negative
-- Score is not that big yet, and mostly used by Humanitec's clients (I guess), which is a hard to penetrate environment
-# Option 2 : Custom Rust DSL
-We decided to develop a rust based DSL. Even though this means people will be afraid of "Rust", we believe the numerous advantages are worth the risk.
+# Custom Rust DSL
+We decided to develop a rust based DSL. Even though this means people might be "afraid of Rust", we believe the numerous advantages are worth the risk.
The main selection criterias are :
@@ -76,3 +56,25 @@ The main selection criterias are :
- Lack of an existing community and ecosystem, which could slow down adoption.
- Increased maintenance overhead as the DSL needs to be updated and supported internally.
+## Alternatives considered
+### Score spec
+We considered integrating with the score-spec project : https://github.com/score-spec/spec
+The idea was to benefit from an existing community and ecosystem. The motivations to consider score were the following :
+- It is a CNCF project, which helps a lot with adoption and community building
+- It already supports important targets for us including docker-compose and k8s
+- It provides a way to define the application's infrastructure at the correct level of abstraction for us to deploy it anywhere -- that is the goal of the score-spec project
+- Once we evolve, we can simply have a score compatible provider that allows any project with a score spec to be deployed on the harmony stack
+- Score was built with enterprise use-cases in mind : Humanitec platform engineering customers
+Positive Consequences
+- Score Community is growing, using harmony will be very easy for them
+Negative Consequences
+- Score is not that big yet, and mostly used by Humanitec's clients (I guess), which is a hard to penetrate environment


@@ -4,7 +4,7 @@
Proposed
-### TODO :
+### TODO [#3](https://git.nationtech.io/NationTech/harmony/issues/3):
Before accepting this proposal we need to run a POC to validate this potential issue :


@@ -0,0 +1,62 @@
## Architecture Decision Record: Data Representation and UI Rendering for Score Types
**Status:** Proposed
**TL;DR:** `Score` types will be serialized (using `serde`) for presentation in UIs. This decouples data definition from presentation, improving scalability and reducing complexity for developers defining `Score` types. New UI types only need to handle existing field types, and new `Score` types don't require UI changes as long as they use existing field types. Adding a new field type *does* require updates to all UIs.
**Key benefits:** Scalability, reduced complexity for `Score` authors, decoupling of data and presentation.
**Key trade-off:** Adding new field types requires updating all UIs.
---
**Context:**
Harmony is a pure Rust infrastructure orchestrator focused on compile-time safety and providing a developer-friendly, Ansible-module-like experience for defining infrastructure configurations via "Scores". These Scores (e.g., `LAMPScore`) are Rust structs composed of specific, strongly-typed fields (e.g., `VersionField`, `UrlField`, `PathField`) which are validated at compile-time using macros (`Version!`, `Url!`, etc.).
A key requirement is displaying the configuration defined in these Scores across various user interfaces (Web UI, TUI, potentially Mobile UI, etc.) in a consistent and type-safe manner. As the number of Score types is expected to grow significantly (hundreds or thousands), we need a scalable approach for rendering their data that avoids tightly coupling Score definitions to specific UI implementations.
The primary challenge is preventing the need for every `Score` struct author to implement multiple display traits (e.g., `Display`, `WebDisplay`, `TuiDisplay`) for every potential UI target. This would create an N x M complexity problem (N Scores * M UI types) and place an unreasonable burden on Score developers, hindering scalability and maintainability.
**Decision:**
1. **Mandatory Serialization:** All `Score` structs *must* implement `serde::Serialize` and `serde::Deserialize`. They *will not* be required to implement `std::fmt::Display` or any custom UI-specific display traits (e.g., `WebDisplay`, `TuiDisplay`).
2. **Field-Level Rendering:** Responsibility for rendering data will reside within the UI components. Each UI (Web, TUI, etc.) will implement logic to display *individual field types* (e.g., `UrlField`, `VersionField`, `IpAddressField`, `SecretField`).
3. **Data Access via Serialization:** UIs will primarily interact with `Score` data through its serialized representation (e.g., JSON obtained via `serde_json`). This provides a standardized interface for UIs to consume the data structure agnostic of the specific `Score` type. Alternatively, UIs *could* potentially use reflection or specific visitor patterns on the `Score` struct itself, but serialization is the preferred decoupling mechanism.
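To make the decision concrete, here is a minimal sketch; the struct and field names are invented for this example (real Scores compose typed fields like `VersionField` and `UrlField`):
```rust
use serde::{Deserialize, Serialize};

// Invented example Score: authors only derive Serialize/Deserialize.
#[derive(Serialize, Deserialize)]
struct ExampleLampScore {
    php_version: String,
    site_url: String,
}

fn main() {
    let score = ExampleLampScore {
        php_version: "8.1".to_string(),
        site_url: "https://example.com".to_string(),
    };
    // A UI consumes the serialized representation, never the concrete type.
    let value = serde_json::to_value(&score).expect("Scores are serializable");
    for (field, v) in value.as_object().expect("a Score serializes to a map") {
        // A real UI would dispatch on the field *type* here.
        println!("{field} = {v}");
    }
}
```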
**Rationale:**
1. **Decoupling Data from Presentation:** This decision cleanly separates the data definition (`Score` structs and their fields) from the presentation logic (UI rendering). `Score` authors can focus solely on defining the data and its structure, while UI developers focus on how to best present known data *types*.
2. **Scalability:** This approach scales significantly better than requiring display trait implementations on Scores:
* Adding a *new Score type* requires *no changes* to existing UI code, provided it uses existing field types.
* Adding a *new UI type* requires implementing rendering logic only for the defined set of *field types*, not for every individual `Score` type. This reduces the N x M complexity to N + M complexity (approximately).
3. **Simplicity for Score Authors:** Requiring only `serde::Serialize + Deserialize` (which can often be derived automatically with `#[derive(Serialize, Deserialize)]`) is a much lower burden than implementing custom rendering logic for multiple, potentially unknown, UI targets.
4. **Leverages Rust Ecosystem Standards:** `serde` is the de facto standard for serialization and deserialization in Rust. Relying on it aligns with common Rust practices and benefits from its robustness, performance, and extensive tooling.
5. **Consistency for UIs:** Serialization provides a consistent, structured format (like JSON) for UIs to consume data, regardless of the underlying `Score` struct's complexity or composition.
6. **Flexibility for UI Implementation:** UIs can choose the best way to render each field type based on their capabilities (e.g., a `UrlField` might be a clickable link in a Web UI, plain text in a TUI; a `SecretField` might be masked).
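As a rough sketch of what field-level rendering could look like on the UI side (the field kinds below are invented stand-ins for the standard field library):
```rust
// Invented field kinds standing in for Harmony's typed field library.
enum FieldKind {
    Url(String),
    Version(String),
    Secret(String),
}

// A TUI implements rendering once per field *type*, not per Score type.
fn render_tui_field(field: &FieldKind) -> String {
    match field {
        FieldKind::Url(url) => url.clone(),             // plain text in a TUI
        FieldKind::Version(v) => format!("version {v}"),
        FieldKind::Secret(_) => "********".to_string(), // always masked
    }
}
```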
**Consequences:**
**Positive:**
* Greatly improved scalability for adding new Score types and UI targets.
* Strong separation of concerns between data definition and presentation.
* Reduced implementation burden and complexity for Score authors.
* Consistent mechanism for UIs to access and interpret Score data.
* Aligns well with the Hexagonal Architecture (ADR-002) by treating UIs as adapters interacting with the application core via a defined port (the serialized data contract).
**Negative:**
* Adding a *new field type* (e.g., `EmailField`) requires updates to *all* existing UI implementations to support rendering it.
* UI components become dependent on the set of defined field types and need comprehensive logic to handle each one appropriately.
* Potential minor overhead of serialization/deserialization compared to direct function calls (though likely negligible for UI purposes).
* Requires careful design and management of the standard library of field types.
**Alternatives Considered:**
1. **`Score` Implements `std::fmt::Display`:**
* _Rejected:_ Too simplistic. Only suitable for basic text rendering, doesn't cater to structured UIs (Web, etc.), and doesn't allow type-specific rendering logic (e.g., masking secrets). Doesn't scale to multiple UI formats.
2. **`Score` Implements Multiple Custom Display Traits (`WebDisplay`, `TuiDisplay`, etc.):**
* _Rejected:_ Leads directly to the N x M complexity problem. Tightly couples Score definitions to specific UI implementations. Places an excessive burden on Score authors, hindering adoption and scalability.
3. **Generic Display Trait with Context (`Score` implements `DisplayWithContext<UIContext>`):**
* _Rejected:_ More flexible than multiple traits, but still requires Score authors to implement potentially complex rendering logic within the `Score` definition itself. The `Score` would still need awareness of different UI contexts, leading to undesirable coupling. Managing context types adds complexity.


@@ -0,0 +1,61 @@
# Architecture Decision Record: Helm and Kustomize Handling
Name: Taha Hawa
Initial Date: 2025-04-15
Last Updated Date: 2025-04-15
## Status
Proposed
## Context
We need to find a way to handle Helm charts and deploy them to a Kubernetes cluster. Helm has a lot of extra functionality that we may or may not need. Kustomize handles Helm charts by inflating them and applying them as vanilla Kubernetes yaml. How should Harmony handle it?
## Decision
In order to move quickly and efficiently, Harmony should handle Helm charts similarly to how Kustomize does: invoke Helm to inflate/render the charts with the needed inputs, and deploy the rendered artifacts to Kubernetes as if it were vanilla manifests.
## Rationale
A lot of Helm's features aren't strictly necessary and would add unneeded overhead. This is likely the fastest way to go from zero to deployed. Other tools (e.g. Kustomize) already do this. Kustomize has tooling for patching and modifying k8s manifests before deploying, and Harmony should have that power too, even if it's not what Helm typically intends.
Perhaps in the future Harmony could also have a Kustomize resource, which could handle Helm charts for Harmony as well or instead.
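A minimal sketch of the decided flow, assuming only that `helm template` and `kubectl apply -f -` are available on the host (illustrative wiring, not Harmony's actual Helm executor):
```rust
use std::io::Write;
use std::process::{Command, Stdio};

// Render a chart to vanilla manifests with `helm template`, then apply the
// rendered YAML with `kubectl apply -f -`, Kustomize-style.
fn render_and_apply(release: &str, chart: &str) -> Result<(), String> {
    let rendered = Command::new("helm")
        .args(["template", release, chart])
        .output()
        .map_err(|e| e.to_string())?;
    if !rendered.status.success() {
        return Err(String::from_utf8_lossy(&rendered.stderr).to_string());
    }
    let mut kubectl = Command::new("kubectl")
        .args(["apply", "-f", "-"])
        .stdin(Stdio::piped())
        .spawn()
        .map_err(|e| e.to_string())?;
    kubectl
        .stdin
        .take()
        .ok_or_else(|| "kubectl stdin unavailable".to_string())?
        .write_all(&rendered.stdout)
        .map_err(|e| e.to_string())?;
    let status = kubectl.wait().map_err(|e| e.to_string())?;
    if status.success() {
        Ok(())
    } else {
        Err("kubectl apply failed".to_string())
    }
}
```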
## Consequences
**Pros**:
- Much easier (and faster) than implementing all of Helm's featureset
- Can potentially re-use code from K8sResource already present in Harmony
- Harmony retains more control over how the deployment goes after rendering (i.e. can act like Kustomize, or leverage Kustomize itself to modify deployments after rendering/inflation)
- Reduce (unstable) surface of dealing with Helm binary
**Cons**:
- Lose some Helm functionality
- Potentially lose some compatibility with Helm
## Alternatives considered
- ### Implement Helm resource/client fully in Harmony
- **Pros**:
- Retain full compatibility with Helm as a tool
- Retain full functionality of Helm
- **Cons**:
- Longer dev time
- More complex integration
- Dealing with larger (unstable) surface of Helm as a binary
- ### Leverage Kustomize to deal with Helm charts
- **Pros**:
- Already has a good, minimal inflation solution built
- Powerful post-processing/patching
- Can integrate with `kubectl`
- **Cons**:
- Unstable binary tool/surface to deal with
- Still requires Helm to be installed as well as Kustomize
- Not all Helm features supported
## Additional Notes


@@ -0,0 +1,68 @@
# Architecture Decision Record: Monitoring and Alerting
Proposed by: Willem Rolleman
Date: April 28 2025
## Status
Proposed
## Context
A harmony user should be able to initialize a monitoring stack easily, either at the first run of Harmony, or in a way that integrates with existing projects and infra without creating multiple instances of the monitoring stack or overwriting existing alerts/configurations. The user also needs a simple way to configure the stack so that it watches the projects. There should be reasonable defaults configured that are easily customizable for each project.
## Decision
Create a MonitoringStack score that spins up a maestro to launch the monitoring stack, or skips installation if the stack is already present.
The MonitoringStack score can be passed to the maestro in the vec! scores list.
## Rationale
Having the score launch a maestro will allow the user to easily create a new monitoring stack and keeps components grouped together. The MonitoringScore can handle all the logic for adding alerts, ensuring that the stack is running, etc.
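A self-contained toy of the intended usage shape (all names are illustrative, not Harmony's real API): the monitoring stack is just another score in the list handed to the maestro, so leaving it out opts the project out of monitoring.
```rust
// Illustrative stand-ins for Harmony's Score/Maestro machinery.
trait Score {
    fn name(&self) -> &str;
    fn ensure(&self); // install the stack if absent, otherwise join the existing one
}

struct MonitoringStackScore;
impl Score for MonitoringStackScore {
    fn name(&self) -> &str { "monitoring-stack" }
    fn ensure(&self) { /* e.g. install kube-prometheus only if not already present */ }
}

struct LampScore;
impl Score for LampScore {
    fn name(&self) -> &str { "lamp" }
    fn ensure(&self) { /* deploy the project itself */ }
}

fn main() {
    // Including MonitoringStackScore in the scores vec is the opt-in.
    let scores: Vec<Box<dyn Score>> =
        vec![Box::new(MonitoringStackScore), Box::new(LampScore)];
    for score in &scores {
        println!("executing {}", score.name());
        score.ensure();
    }
}
```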
## Alternatives considered
- ### Implement alerting and monitoring stack using existing HelmScore for each project
- **Pros**:
- Each project can choose to use the monitoring and alerting stack that they choose
- Less overhead in terms of core harmony code
- can add Box::new(grafana::grafanascore(namespace))
- **Cons**:
- No default solution implemented
- Dev needs to choose what they use
- Increases complexity of score projects
- Each project will create a new monitoring and alerting instance rather than joining the existing one
- ### Use OKD grafana and prometheus
- **Pros**:
- Minimal config to do in Harmony
- **Cons**:
- relies on OKD so will not work for local testing via k3d
- ### Create a monitoring and alerting crate similar to harmony tui
- **Pros**:
- Creates a default solution that can be implemented once by harmony
- can create a join function that will allow a project to connect to the existing solution
- eliminates risk of creating multiple instances of grafana or prometheus
- **Cons**:
- more complex than using a helm score
- management of values files for individual functions becomes more complicated, i.e. how do you create alerts for one project via helm install that doesn't overwrite the other alerts
- ### Add monitoring to Maestro struct so whether the monitoring stack is used must be defined
- **Pros**:
- less for the user to define
- may be easier to set defaults
- **Cons**:
- feels counterintuitive
- would need to modify the structure of the maestro and how it operates which seems like a bad idea
- unclear how to allow user to pass custom values/configs to the monitoring stack for subsequent projects
- ### Create MonitoringStack score to add to scores vec! which loads a maestro to install stack if not ready or add custom endpoints/alerts to existing stack
- **Pros**:
- Maestro already accepts a list of scores to initialize
- leaving out the monitoring score simply means the user does not want monitoring
- if the monitoring stack is already created, the MonitoringStack score doesn't necessarily need to be added to each project
- components of the monitoring stack are bundled together and can be expanded or modified from the same place
- **Cons**:
- maybe need to create


@@ -0,0 +1,360 @@
# Here is the current condensed architecture sample for Harmony's core abstractions
```rust
use std::process::Command;
pub trait Capability {}
pub trait CommandCapability: Capability {
fn execute_command(&self, command: &str, args: &[&str]) -> Result<String, String>;
}
pub trait KubernetesCapability: Capability {
fn apply_manifest(&self, manifest: &str) -> Result<(), String>;
fn get_resource(&self, resource_type: &str, name: &str) -> Result<String, String>;
}
pub trait Topology {
fn name(&self) -> &str;
}
pub trait Score<T: Topology> {
fn compile(&self) -> Result<Box<dyn Interpret<T>>, String>;
fn name(&self) -> &str;
}
pub struct LinuxHostTopology {
name: String,
host: String,
}
impl Capability for LinuxHostTopology {}
impl LinuxHostTopology {
pub fn new(name: String, host: String) -> Self {
Self { name, host }
}
}
impl Topology for LinuxHostTopology {
fn name(&self) -> &str {
&self.name
}
}
impl CommandCapability for LinuxHostTopology {
fn execute_command(&self, command: &str, args: &[&str]) -> Result<String, String> {
println!("Executing on {}: {} {:?}", self.host, command, args);
// In a real implementation, this would SSH to the host and execute the command
let output = Command::new(command)
.args(args)
.output()
.map_err(|e| e.to_string())?;
if output.status.success() {
Ok(String::from_utf8_lossy(&output.stdout).to_string())
} else {
Err(String::from_utf8_lossy(&output.stderr).to_string())
}
}
}
pub struct K3DTopology {
name: String,
linux_host: LinuxHostTopology,
cluster_name: String,
}
impl Capability for K3DTopology {}
impl K3DTopology {
pub fn new(name: String, linux_host: LinuxHostTopology, cluster_name: String) -> Self {
Self {
name,
linux_host,
cluster_name,
}
}
}
impl Topology for K3DTopology {
fn name(&self) -> &str {
&self.name
}
}
impl CommandCapability for K3DTopology {
fn execute_command(&self, command: &str, args: &[&str]) -> Result<String, String> {
self.linux_host.execute_command(command, args)
}
}
impl KubernetesCapability for K3DTopology {
fn apply_manifest(&self, manifest: &str) -> Result<(), String> {
println!("Applying manifest to K3D cluster '{}'", self.cluster_name);
// Write the manifest to a temporary file via a heredoc so its content
// actually reaches the file (the previous command created an empty file).
let temp_file = "/tmp/manifest-harmony-temp.yaml".to_string();
// Use the linux_host directly to avoid capability trait bounds
self.linux_host.execute_command(
"bash",
&["-c", &format!("cat > {temp_file} <<'EOF'\n{manifest}\nEOF")],
)?;
// Apply with kubectl
self.linux_host.execute_command("kubectl", &[
"--context",
&format!("k3d-{}", self.cluster_name),
"apply",
"-f",
&temp_file,
])?;
Ok(())
}
fn get_resource(&self, resource_type: &str, name: &str) -> Result<String, String> {
println!(
"Getting resource {}/{} from K3D cluster '{}'",
resource_type, name, self.cluster_name
);
self.linux_host.execute_command("kubectl", &[
"--context",
&format!("k3d-{}", self.cluster_name),
"get",
resource_type,
name,
"-o",
"yaml",
])
}
}
pub struct CommandScore {
name: String,
command: String,
args: Vec<String>,
}
impl CommandScore {
pub fn new(name: String, command: String, args: Vec<String>) -> Self {
Self {
name,
command,
args,
}
}
}
pub trait Interpret<T: Topology> {
fn execute(&self, topology: &T) -> Result<String, String>;
}
struct CommandInterpret;
impl<T> Interpret<T> for CommandInterpret
where
T: Topology + CommandCapability,
{
fn execute(&self, topology: &T) -> Result<String, String> {
todo!()
}
}
impl<T> Score<T> for CommandScore
where
T: Topology + CommandCapability,
{
fn compile(&self) -> Result<Box<dyn Interpret<T>>, String> {
Ok(Box::new(CommandInterpret {}))
}
fn name(&self) -> &str {
&self.name
}
}
#[derive(Clone)]
pub struct K8sResourceScore {
name: String,
manifest: String,
}
impl K8sResourceScore {
pub fn new(name: String, manifest: String) -> Self {
Self { name, manifest }
}
}
struct K8sResourceInterpret {
score: K8sResourceScore,
}
impl<T: Topology + KubernetesCapability> Interpret<T> for K8sResourceInterpret {
fn execute(&self, topology: &T) -> Result<String, String> {
todo!()
}
}
impl<T> Score<T> for K8sResourceScore
where
T: Topology + KubernetesCapability,
{
fn compile(&self) -> Result<Box<(dyn Interpret<T> + 'static)>, String> {
Ok(Box::new(K8sResourceInterpret {
score: self.clone(),
}))
}
fn name(&self) -> &str {
&self.name
}
}
pub struct Maestro<T: Topology> {
topology: T,
scores: Vec<Box<dyn Score<T>>>,
}
impl<T: Topology> Maestro<T> {
pub fn new(topology: T) -> Self {
Self {
topology,
scores: Vec::new(),
}
}
pub fn register_score<S>(&mut self, score: S)
where
S: Score<T> + 'static,
{
println!(
"Registering score '{}' for topology '{}'",
score.name(),
self.topology.name()
);
self.scores.push(Box::new(score));
}
pub fn orchestrate(&self) -> Result<(), String> {
println!("Orchestrating topology '{}'", self.topology.name());
for score in &self.scores {
let interpret = score.compile()?;
interpret.execute(&self.topology)?;
}
Ok(())
}
}
fn main() {
let linux_host = LinuxHostTopology::new("dev-machine".to_string(), "localhost".to_string());
let mut linux_maestro = Maestro::new(linux_host);
linux_maestro.register_score(CommandScore::new(
"check-disk".to_string(),
"df".to_string(),
vec!["-h".to_string()],
));
linux_maestro.orchestrate().unwrap();
// This would fail to compile if we tried to register a K8sResourceScore
// because LinuxHostTopology doesn't implement KubernetesCapability
//linux_maestro.register_score(K8sResourceScore::new(
// "...".to_string(),
// "...".to_string(),
//));
// Create a K3D topology which has both Command and Kubernetes capabilities
let k3d_host = LinuxHostTopology::new("k3d-host".to_string(), "localhost".to_string());
let k3d_topology = K3DTopology::new(
"dev-cluster".to_string(),
k3d_host,
"devcluster".to_string(),
);
// Create a maestro for the K3D topology
let mut k3d_maestro = Maestro::new(k3d_topology);
// We can register both command scores and kubernetes scores
k3d_maestro.register_score(CommandScore::new(
"check-nodes".to_string(),
"kubectl".to_string(),
vec!["get".to_string(), "nodes".to_string()],
));
k3d_maestro.register_score(K8sResourceScore::new(
"deploy-nginx".to_string(),
r#"
apiVersion: apps/v1
kind: Deployment
metadata:
name: nginx
spec:
replicas: 1
selector:
matchLabels:
app: nginx
template:
metadata:
labels:
app: nginx
spec:
containers:
- name: nginx
image: nginx:latest
ports:
- containerPort: 80
"#
.to_string(),
));
// Orchestrate both topologies
linux_maestro.orchestrate().unwrap();
k3d_maestro.orchestrate().unwrap();
}
```
## Technical take
The key insight is that we might not need a complex TypeMap or runtime capability checking. Instead, we should leverage Rust's trait system to express capability requirements directly in the type system.
By clarifying the problem and focusing on type-level solutions rather than runtime checks, we can likely arrive at a simpler, more robust design that leverages the strengths of Rust's type system.
## Philosophical Shifts
1. **From Runtime to Compile-Time**: Move capability checking from runtime to compile-time.
2. **From Objects to Functions**: Think of scores less as objects and more as functions that transform topologies.
3. **From Homogeneous to Heterogeneous API**: Embrace different API paths for different capability combinations rather than trying to force everything through a single interface.
4. **From Complex to Simple**: Focus on making common cases simple, even if it means less abstraction for uncommon cases.
## High level concepts
The high level concepts so far have evolved towards this definition.
Topology -> Has -> Capabilities
Score -> Defines -> Work to be done / desired state
Interpret -> Requires -> Capabilities to execute a Score
Maestro -> Enforces -> Compatibility (through the type system at compile time)
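A distilled sketch of these four relationships, compressing the larger sample above (names illustrative):
```rust
// Topology -> Has -> Capabilities
trait Capability {}
trait K8sCapability: Capability {}

struct K3d;
impl Capability for K3d {}
impl K8sCapability for K3d {}

// Score -> Defines -> desired state; Interpret -> Requires -> capabilities.
struct DeployInterpret;
impl DeployInterpret {
    // The trait bound is the compile-time compatibility check the Maestro enforces.
    fn execute<T: K8sCapability>(&self, _topology: &T) {}
}

fn main() {
    DeployInterpret.execute(&K3d); // compiles only for K8s-capable topologies
}
```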
## Why Harmony
The compile time safety is paramount here. Harmony's main goal is to make the entire software delivery pipeline robust. Current IaC tools are very hard to work with and require complex setups to test and debug real code.
Leveraging Rust's compiler allows us to shift left a lot of the complexities and frustration that come with using tools like Ansible, which is YAML-based and quickly becomes brittle at scale. Or Terraform, where running `terraform plan` makes you think everything is correct, only to fail horribly when you confidently launch `terraform apply`, leaving you with tens or hundreds of resources to clean up manually.
Of course, this requires a significant effort to get to the point where we have actually implemented all the logic.
But using Rust and a Type Driven Design approach, we believe we are providing a much more robust foundation for our customer's and user's deployments anywhere.
Also, having the full power of a mature programming language like Rust enables organizations and the community to customize their deployment any way they want, build upon it in a reliable way that has been evolved and proven over decades of enterprise dependency management, API definitions, etc.
===
Given all this c


@@ -0,0 +1,9 @@
[package]
name = "example-topology2"
edition = "2024"
version.workspace = true
readme.workspace = true
license.workspace = true
publish = false
[dependencies]


@@ -0,0 +1,183 @@
// Clean capability-based design using type parameters
trait Capability {}
trait K8sCapability: Capability {
fn deploy_k8s_resource(&self, resource_yaml: &str);
fn execute_kubectl(&self, command: &str) -> String;
}
trait LinuxCapability: Capability {
fn execute_command(&self, command: &str, args: &[&str]);
fn download_file(&self, url: &str, destination: &str) -> Result<(), String>;
}
trait LoadBalancerCapability: Capability {
fn configure_load_balancer(&self, services: &[&str], port: u16);
fn get_load_balancer_status(&self) -> String;
}
// Score trait with capability type parameter
trait Score<C: ?Sized> {
fn execute(&self, capability: &C) -> String;
}
// Topology implementations with marker trait
trait Topology {}
struct K3DTopology {}
impl Topology for K3DTopology {}
impl Capability for K3DTopology {}
impl K8sCapability for K3DTopology {
fn deploy_k8s_resource(&self, resource_yaml: &str) {
todo!()
}
fn execute_kubectl(&self, command: &str) -> String {
todo!()
}
// Implementation...
}
struct LinuxTopology {}
impl Topology for LinuxTopology {}
impl Capability for LinuxTopology {}
impl LinuxCapability for LinuxTopology {
fn execute_command(&self, command: &str, args: &[&str]) {
todo!()
}
fn download_file(&self, url: &str, destination: &str) -> Result<(), String> {
todo!()
}
// Implementation...
}
struct OKDHaClusterTopology {}
impl Topology for OKDHaClusterTopology {}
impl Capability for OKDHaClusterTopology {}
impl K8sCapability for OKDHaClusterTopology {
fn deploy_k8s_resource(&self, resource_yaml: &str) {
todo!()
}
fn execute_kubectl(&self, command: &str) -> String {
todo!()
}
// Implementation...
}
impl LinuxCapability for OKDHaClusterTopology {
fn execute_command(&self, command: &str, args: &[&str]) {
todo!()
}
fn download_file(&self, url: &str, destination: &str) -> Result<(), String> {
todo!()
}
// Implementation...
}
impl LoadBalancerCapability for OKDHaClusterTopology {
fn configure_load_balancer(&self, services: &[&str], port: u16) {
todo!()
}
fn get_load_balancer_status(&self) -> String {
todo!()
}
// Implementation...
}
// Score implementations
struct LAMPScore {}
impl Score<dyn K8sCapability> for LAMPScore {
fn execute(&self, capability: &dyn K8sCapability) -> String {
todo!()
// Implementation...
}
}
struct BinaryScore {}
impl Score<dyn LinuxCapability> for BinaryScore {
fn execute(&self, capability: &dyn LinuxCapability) -> String {
todo!()
// Implementation...
}
}
struct LoadBalancerScore {}
impl Score<dyn LoadBalancerCapability> for LoadBalancerScore {
fn execute(&self, capability: &dyn LoadBalancerCapability) -> String {
todo!()
// Implementation...
}
}
// Generic Maestro
struct Maestro<T> {
topology: T,
scores: Vec<Box<dyn FnMut(&T) -> String>>,
}
impl<T: 'static> Maestro<T> {
fn new(topology: T) -> Self {
Self {
topology,
scores: Vec::new(),
}
}
fn interpret_all(&mut self) -> Vec<String> {
self.scores.iter_mut()
.map(|score| score(&self.topology))
.collect()
}
}
// Capability-specific extensions
impl<T: K8sCapability + 'static> Maestro<T> {
fn register_k8s_score<S: Score<dyn K8sCapability> + 'static>(&mut self, score: S) {
let score_box = Box::new(move |topology: &T| {
score.execute(topology as &dyn K8sCapability)
});
self.scores.push(score_box);
}
}
impl<T: LinuxCapability + 'static> Maestro<T> {
fn register_linux_score<S: Score<dyn LinuxCapability> + 'static>(&mut self, score: S) {
let score_box = Box::new(move |topology: &T| {
score.execute(topology as &dyn LinuxCapability)
});
self.scores.push(score_box);
}
}
impl<T: LoadBalancerCapability + 'static> Maestro<T> {
fn register_lb_score<S: Score<dyn LoadBalancerCapability> + 'static>(&mut self, score: S) {
let score_box = Box::new(move |topology: &T| {
score.execute(topology as &dyn LoadBalancerCapability)
});
self.scores.push(score_box);
}
}
fn main() {
// Example usage
let k3d = K3DTopology {};
let mut k3d_maestro = Maestro::new(k3d);
// These will compile because K3D implements K8sCapability
k3d_maestro.register_k8s_score(LAMPScore {});
// This would not compile because K3D doesn't implement LoadBalancerCapability
// k3d_maestro.register_lb_score(LoadBalancerScore {});
let linux = LinuxTopology {};
let mut linux_maestro = Maestro::new(linux);
// This will compile because Linux implements LinuxCapability
linux_maestro.register_linux_score(BinaryScore {});
// This would not compile because Linux doesn't implement K8sCapability
// linux_maestro.register_k8s_score(LAMPScore {});
}

View File

@@ -0,0 +1,324 @@
fn main() {
// Create various topologies
let okd_topology = OKDHaClusterTopology::new();
let k3d_topology = K3DTopology::new();
let linux_topology = LinuxTopology::new();
// Create scores
let lamp_score = LAMPScore::new("MySQL 8.0", "PHP 8.1", "Apache 2.4");
let binary_score = BinaryScore::new("https://example.com/binary", vec!["--arg1", "--arg2"]);
let load_balancer_score = LoadBalancerScore::new(vec!["service1", "service2"], 80);
// Example 1: Running LAMP stack on OKD
println!("\n=== Deploying LAMP stack on OKD cluster ===");
lamp_score.execute(&okd_topology);
// Example 2: Running LAMP stack on K3D
println!("\n=== Deploying LAMP stack on K3D cluster ===");
lamp_score.execute(&k3d_topology);
// Example 3: Running binary on Linux host
println!("\n=== Running binary on Linux host ===");
binary_score.execute(&linux_topology);
// Example 4: Running binary on OKD (which can also run Linux commands)
println!("\n=== Running binary on OKD host ===");
binary_score.execute(&okd_topology);
// Example 5: Load balancer configuration on OKD
println!("\n=== Configuring load balancer on OKD ===");
load_balancer_score.execute(&okd_topology);
// The following would not compile:
// load_balancer_score.execute(&k3d_topology); // K3D doesn't implement LoadBalancerCapability
// lamp_score.execute(&linux_topology); // Linux doesn't implement K8sCapability
}
// Base Topology trait
trait Topology {
fn name(&self) -> &str;
}
// Define capabilities
trait K8sCapability {
fn deploy_k8s_resource(&self, resource_yaml: &str);
fn execute_kubectl(&self, command: &str) -> String;
}
trait OKDCapability: K8sCapability {
fn execute_oc(&self, command: &str) -> String;
}
trait LinuxCapability {
fn execute_command(&self, command: &str, args: &[&str]) -> String;
fn download_file(&self, url: &str, destination: &str) -> Result<(), String>;
}
trait LoadBalancerCapability {
fn configure_load_balancer(&self, services: &[&str], port: u16);
fn get_load_balancer_status(&self) -> String;
}
trait FirewallCapability {
fn open_port(&self, port: u16, protocol: &str);
fn close_port(&self, port: u16, protocol: &str);
}
trait RouterCapability {
fn configure_route(&self, service: &str, hostname: &str);
}
// Topology implementations
struct OKDHaClusterTopology {
cluster_name: String,
}
impl OKDHaClusterTopology {
fn new() -> Self {
Self {
cluster_name: "okd-ha-cluster".to_string(),
}
}
}
impl Topology for OKDHaClusterTopology {
fn name(&self) -> &str {
&self.cluster_name
}
}
impl K8sCapability for OKDHaClusterTopology {
fn deploy_k8s_resource(&self, resource_yaml: &str) {
println!("Deploying K8s resource on OKD cluster: {}", resource_yaml);
}
fn execute_kubectl(&self, command: &str) -> String {
println!("Executing kubectl command on OKD cluster: {}", command);
"kubectl command output".to_string()
}
}
impl OKDCapability for OKDHaClusterTopology {
fn execute_oc(&self, command: &str) -> String {
println!("Executing oc command on OKD cluster: {}", command);
"oc command output".to_string()
}
}
impl LinuxCapability for OKDHaClusterTopology {
fn execute_command(&self, command: &str, args: &[&str]) -> String {
println!(
"Executing command '{}' with args {:?} on OKD node",
command, args
);
"command output from OKD node".to_string()
}
fn download_file(&self, url: &str, destination: &str) -> Result<(), String> {
println!(
"Downloading file from {} to {} on OKD node",
url, destination
);
Ok(())
}
}
impl LoadBalancerCapability for OKDHaClusterTopology {
fn configure_load_balancer(&self, services: &[&str], port: u16) {
println!(
"Configuring load balancer for services {:?} on port {} in OKD",
services, port
);
}
fn get_load_balancer_status(&self) -> String {
"OKD Load Balancer: HEALTHY".to_string()
}
}
impl FirewallCapability for OKDHaClusterTopology {
fn open_port(&self, port: u16, protocol: &str) {
println!(
"Opening port {} with protocol {} on OKD firewall",
port, protocol
);
}
fn close_port(&self, port: u16, protocol: &str) {
println!(
"Closing port {} with protocol {} on OKD firewall",
port, protocol
);
}
}
impl RouterCapability for OKDHaClusterTopology {
fn configure_route(&self, service: &str, hostname: &str) {
println!(
"Configuring route for service {} with hostname {} on OKD",
service, hostname
);
}
}
struct K3DTopology {
cluster_name: String,
}
impl K3DTopology {
fn new() -> Self {
Self {
cluster_name: "k3d-local".to_string(),
}
}
}
impl Topology for K3DTopology {
fn name(&self) -> &str {
&self.cluster_name
}
}
impl K8sCapability for K3DTopology {
fn deploy_k8s_resource(&self, resource_yaml: &str) {
println!("Deploying K8s resource on K3D cluster: {}", resource_yaml);
}
fn execute_kubectl(&self, command: &str) -> String {
println!("Executing kubectl command on K3D cluster: {}", command);
"kubectl command output from K3D".to_string()
}
}
struct LinuxTopology {
hostname: String,
}
impl LinuxTopology {
fn new() -> Self {
Self {
hostname: "linux-host".to_string(),
}
}
}
impl Topology for LinuxTopology {
fn name(&self) -> &str {
&self.hostname
}
}
impl LinuxCapability for LinuxTopology {
fn execute_command(&self, command: &str, args: &[&str]) -> String {
println!(
"Executing command '{}' with args {:?} on Linux host",
command, args
);
"command output from Linux host".to_string()
}
fn download_file(&self, url: &str, destination: &str) -> Result<(), String> {
println!(
"Downloading file from {} to {} on Linux host",
url, destination
);
Ok(())
}
}
// Score implementations
struct LAMPScore {
mysql_version: String,
php_version: String,
apache_version: String,
}
impl LAMPScore {
fn new(mysql_version: &str, php_version: &str, apache_version: &str) -> Self {
Self {
mysql_version: mysql_version.to_string(),
php_version: php_version.to_string(),
apache_version: apache_version.to_string(),
}
}
fn execute<T: K8sCapability>(&self, topology: &T) {
println!(
"Deploying LAMP stack with MySQL {}, PHP {}, Apache {}",
self.mysql_version, self.php_version, self.apache_version
);
// Deploy MySQL
topology.deploy_k8s_resource("mysql-deployment.yaml");
// Deploy PHP
topology.deploy_k8s_resource("php-deployment.yaml");
// Deploy Apache
topology.deploy_k8s_resource("apache-deployment.yaml");
// Create service
topology.deploy_k8s_resource("lamp-service.yaml");
// Check deployment
let status = topology.execute_kubectl("get pods -l app=lamp");
println!("LAMP deployment status: {}", status);
}
}
struct BinaryScore {
url: String,
args: Vec<String>,
}
impl BinaryScore {
fn new(url: &str, args: Vec<&str>) -> Self {
Self {
url: url.to_string(),
args: args.iter().map(|s| s.to_string()).collect(),
}
}
fn execute<T: LinuxCapability>(&self, topology: &T) {
let destination = "/tmp/binary";
match topology.download_file(&self.url, destination) {
Ok(_) => {
println!("Binary downloaded successfully");
// Convert args to slice of &str
let args: Vec<&str> = self.args.iter().map(|s| s.as_str()).collect();
// Execute the binary
topology.execute_command(destination, &args);
println!("Binary execution completed");
}
Err(e) => {
println!("Failed to download binary: {}", e);
}
}
}
}
struct LoadBalancerScore {
services: Vec<String>,
port: u16,
}
impl LoadBalancerScore {
fn new(services: Vec<&str>, port: u16) -> Self {
Self {
services: services.iter().map(|s| s.to_string()).collect(),
port,
}
}
fn execute<T: LoadBalancerCapability>(&self, topology: &T) {
println!("Configuring load balancer for services");
// Convert services to slice of &str
let services: Vec<&str> = self.services.iter().map(|s| s.as_str()).collect();
// Configure load balancer
topology.configure_load_balancer(&services, self.port);
// Check status
let status = topology.get_load_balancer_status();
println!("Load balancer status: {}", status);
}
}

View File

@@ -0,0 +1,34 @@
fn main() {}
trait Topology {}
struct DummyTopology {}
impl Topology for DummyTopology {}
struct LampTopology {}
impl Topology for LampTopology {}
// Maestro is generic over the concrete topology type so that scores can
// declare, through an associated type, exactly which topology they require.
struct Maestro<T: Topology> {
topology: T,
}
trait Score {
type Topology: Topology;
fn execute(&self, topology: &Self::Topology);
}
struct K8sScore {}
impl Score for K8sScore {
type Topology = LampTopology;
fn execute(&self, _topology: &Self::Topology) {
todo!()
}
}
impl<T: Topology> Maestro<T> {
pub fn execute(&self, score: &dyn Score<Topology = T>) {
score.execute(&self.topology);
}
}

View File

@@ -0,0 +1,76 @@
fn main() {
// Example usage
let _lamp_topology = LampTopology {};
let k8s_score = K8sScore {};
let docker_topology = DockerTopology{};
// Type-safe execution
let maestro = Maestro::new(Box::new(docker_topology));
maestro.execute(&k8s_score); // This will work
// This would fail at compile time if we tried:
// let dummy_topology = DummyTopology {};
// let maestro = Maestro::new(Box::new(dummy_topology));
// maestro.execute(&k8s_score); // Error: expected LampTopology, found DummyTopology
}
// Base trait for all topologies
trait Topology {
// Common topology methods could go here
fn topology_type(&self) -> &str;
}
struct DummyTopology {}
impl Topology for DummyTopology {
fn topology_type(&self) -> &str { "Dummy" }
}
struct LampTopology {}
impl Topology for LampTopology {
fn topology_type(&self) -> &str { "LAMP" }
}
struct DockerTopology {}
impl Topology for DockerTopology {
fn topology_type(&self) -> &str {
"Docker"
}
}
// The Score trait with an associated type for the required topology
trait Score {
type RequiredTopology: Topology + ?Sized;
fn execute(&self, topology: &Self::RequiredTopology);
fn score_type(&self) -> &str;
}
// A score that requires DockerTopology
struct K8sScore {}
impl Score for K8sScore {
type RequiredTopology = DockerTopology;
fn execute(&self, topology: &Self::RequiredTopology) {
println!("Executing K8sScore on {} topology", topology.topology_type());
// Implementation details...
}
fn score_type(&self) -> &str { "K8s" }
}
// A generic maestro that can work with any topology type
struct Maestro<T: Topology + ?Sized> {
topology: Box<T>,
}
impl<T: Topology + ?Sized> Maestro<T> {
pub fn new(topology: Box<T>) -> Self {
Maestro { topology }
}
// Execute a score that requires this specific topology type
pub fn execute<S: Score<RequiredTopology = T>>(&self, score: &S) {
println!("Maestro executing {} score", score.score_type());
score.execute(&*self.topology);
}
}

View File

@@ -0,0 +1,360 @@
fn main() {
// Create topologies
let okd_topology = OKDHaClusterTopology::new();
let k3d_topology = K3DTopology::new();
let linux_topology = LinuxTopology::new();
// Create scores - boxing them as trait objects for dynamic dispatch
let scores: Vec<Box<dyn Score>> = vec![
Box::new(LAMPScore::new("MySQL 8.0", "PHP 8.1", "Apache 2.4")),
Box::new(BinaryScore::new("https://example.com/binary", vec!["--arg1", "--arg2"])),
Box::new(LoadBalancerScore::new(vec!["service1", "service2"], 80)),
];
// Running scores on OKD topology (which has all capabilities)
println!("\n=== Running all scores on OKD HA Cluster ===");
for score in &scores {
match score.execute(&okd_topology) {
Ok(result) => println!("Score executed successfully: {}", result),
Err(e) => println!("Failed to execute score: {}", e),
}
}
// Running scores on K3D topology (only has K8s capability)
println!("\n=== Running scores on K3D Cluster ===");
for score in &scores {
match score.execute(&k3d_topology) {
Ok(result) => println!("Score executed successfully: {}", result),
Err(e) => println!("Failed to execute score: {}", e),
}
}
// Running scores on Linux topology (only has Linux capability)
println!("\n=== Running scores on Linux Host ===");
for score in &scores {
match score.execute(&linux_topology) {
Ok(result) => println!("Score executed successfully: {}", result),
Err(e) => println!("Failed to execute score: {}", e),
}
}
}
// Use the Any trait for runtime type checking
use std::any::Any;
// Base Topology trait
trait Topology: Any {
fn name(&self) -> &str;
// This method allows us to get type information at runtime
fn as_any(&self) -> &dyn Any;
}
// Define capabilities
trait K8sCapability {
fn deploy_k8s_resource(&self, resource_yaml: &str);
fn execute_kubectl(&self, command: &str) -> String;
}
trait OKDCapability: K8sCapability {
fn execute_oc(&self, command: &str) -> String;
}
trait LinuxCapability {
fn execute_command(&self, command: &str, args: &[&str]);
fn download_file(&self, url: &str, destination: &str) -> Result<(), String>;
}
trait LoadBalancerCapability {
fn configure_load_balancer(&self, services: &[&str], port: u16);
fn get_load_balancer_status(&self) -> String;
}
// Base Score trait with dynamic dispatch
trait Score {
// Generic execute method that takes any topology
fn execute(&self, topology: &dyn Topology) -> Result<String, String>;
// Optional method to get score type for better error messages
fn score_type(&self) -> &str;
}
// Topology implementations
struct OKDHaClusterTopology {
cluster_name: String,
}
impl OKDHaClusterTopology {
fn new() -> Self {
Self { cluster_name: "okd-ha-cluster".to_string() }
}
}
impl Topology for OKDHaClusterTopology {
fn name(&self) -> &str {
&self.cluster_name
}
fn as_any(&self) -> &dyn Any {
self
}
}
impl K8sCapability for OKDHaClusterTopology {
fn deploy_k8s_resource(&self, resource_yaml: &str) {
println!("Deploying K8s resource on OKD cluster: {}", resource_yaml);
}
fn execute_kubectl(&self, command: &str) -> String {
println!("Executing kubectl command on OKD cluster: {}", command);
"kubectl command output".to_string()
}
}
impl OKDCapability for OKDHaClusterTopology {
fn execute_oc(&self, command: &str) -> String {
println!("Executing oc command on OKD cluster: {}", command);
"oc command output".to_string()
}
}
impl LinuxCapability for OKDHaClusterTopology {
fn execute_command(&self, command: &str, args: &[&str]) {
println!("Executing command '{}' with args {:?} on OKD node", command, args);
}
fn download_file(&self, url: &str, destination: &str) -> Result<(), String> {
println!("Downloading file from {} to {} on OKD node", url, destination);
Ok(())
}
}
impl LoadBalancerCapability for OKDHaClusterTopology {
fn configure_load_balancer(&self, services: &[&str], port: u16) {
println!("Configuring load balancer for services {:?} on port {} in OKD", services, port);
}
fn get_load_balancer_status(&self) -> String {
"OKD Load Balancer: HEALTHY".to_string()
}
}
struct K3DTopology {
cluster_name: String,
}
impl K3DTopology {
fn new() -> Self {
Self { cluster_name: "k3d-local".to_string() }
}
}
impl Topology for K3DTopology {
fn name(&self) -> &str {
&self.cluster_name
}
fn as_any(&self) -> &dyn Any {
self
}
}
impl K8sCapability for K3DTopology {
fn deploy_k8s_resource(&self, resource_yaml: &str) {
println!("Deploying K8s resource on K3D cluster: {}", resource_yaml);
}
fn execute_kubectl(&self, command: &str) -> String {
println!("Executing kubectl command on K3D cluster: {}", command);
"kubectl command output from K3D".to_string()
}
}
struct LinuxTopology {
hostname: String,
}
impl LinuxTopology {
fn new() -> Self {
Self { hostname: "linux-host".to_string() }
}
}
impl Topology for LinuxTopology {
fn name(&self) -> &str {
&self.hostname
}
fn as_any(&self) -> &dyn Any {
self
}
}
impl LinuxCapability for LinuxTopology {
fn execute_command(&self, command: &str, args: &[&str]) {
println!("Executing command '{}' with args {:?} on Linux host", command, args);
}
fn download_file(&self, url: &str, destination: &str) -> Result<(), String> {
println!("Downloading file from {} to {} on Linux host", url, destination);
Ok(())
}
}
// Score implementations using dynamic capability checks
struct LAMPScore {
mysql_version: String,
php_version: String,
apache_version: String,
}
impl LAMPScore {
fn new(mysql_version: &str, php_version: &str, apache_version: &str) -> Self {
Self {
mysql_version: mysql_version.to_string(),
php_version: php_version.to_string(),
apache_version: apache_version.to_string(),
}
}
// Helper method for typesafe execution
fn execute_with_k8s(&self, topology: &dyn K8sCapability) -> String {
println!("Deploying LAMP stack with MySQL {}, PHP {}, Apache {}",
self.mysql_version, self.php_version, self.apache_version);
// Deploy MySQL
topology.deploy_k8s_resource("mysql-deployment.yaml");
// Deploy PHP
topology.deploy_k8s_resource("php-deployment.yaml");
// Deploy Apache
topology.deploy_k8s_resource("apache-deployment.yaml");
// Create service
topology.deploy_k8s_resource("lamp-service.yaml");
// Check deployment
let status = topology.execute_kubectl("get pods -l app=lamp");
format!("LAMP deployment status: {}", status)
}
}
impl Score for LAMPScore {
fn execute(&self, topology: &dyn Topology) -> Result<String, String> {
// Try to downcast to K8sCapability
if let Some(k8s_topology) = topology.as_any().downcast_ref::<OKDHaClusterTopology>() {
Ok(self.execute_with_k8s(k8s_topology))
} else if let Some(k8s_topology) = topology.as_any().downcast_ref::<K3DTopology>() {
Ok(self.execute_with_k8s(k8s_topology))
} else {
Err(format!("LAMPScore requires K8sCapability but topology {} doesn't provide it",
topology.name()))
}
}
fn score_type(&self) -> &str {
"LAMP"
}
}
struct BinaryScore {
url: String,
args: Vec<String>,
}
impl BinaryScore {
fn new(url: &str, args: Vec<&str>) -> Self {
Self {
url: url.to_string(),
args: args.iter().map(|s| s.to_string()).collect(),
}
}
// Helper method for typesafe execution
fn execute_with_linux(&self, topology: &dyn LinuxCapability) -> Result<String, String> {
let destination = "/tmp/binary";
// Download the binary
println!("Preparing to run binary from {}", self.url);
match topology.download_file(&self.url, destination) {
Ok(_) => {
println!("Binary downloaded successfully");
// Convert args to slice of &str
let args: Vec<&str> = self.args.iter().map(|s| s.as_str()).collect();
// Execute the binary
topology.execute_command(destination, &args);
Ok("Binary execution completed successfully".to_string())
},
Err(e) => {
Err(format!("Failed to download binary: {}", e))
}
}
}
}
impl Score for BinaryScore {
fn execute(&self, topology: &dyn Topology) -> Result<String, String> {
// Try to downcast to LinuxCapability
if let Some(linux_topology) = topology.as_any().downcast_ref::<OKDHaClusterTopology>() {
self.execute_with_linux(linux_topology)
} else if let Some(linux_topology) = topology.as_any().downcast_ref::<LinuxTopology>() {
self.execute_with_linux(linux_topology)
} else {
Err(format!("BinaryScore requires LinuxCapability but topology {} doesn't provide it",
topology.name()))
}
}
fn score_type(&self) -> &str {
"Binary"
}
}
struct LoadBalancerScore {
services: Vec<String>,
port: u16,
}
impl LoadBalancerScore {
fn new(services: Vec<&str>, port: u16) -> Self {
Self {
services: services.iter().map(|s| s.to_string()).collect(),
port,
}
}
// Helper method for typesafe execution
fn execute_with_lb(&self, topology: &dyn LoadBalancerCapability) -> String {
println!("Configuring load balancer for services");
// Convert services to slice of &str
let services: Vec<&str> = self.services.iter().map(|s| s.as_str()).collect();
// Configure load balancer
topology.configure_load_balancer(&services, self.port);
// Check status
let status = topology.get_load_balancer_status();
format!("Load balancer configured successfully. Status: {}", status)
}
}
impl Score for LoadBalancerScore {
fn execute(&self, topology: &dyn Topology) -> Result<String, String> {
// Only OKDHaClusterTopology implements LoadBalancerCapability
if let Some(lb_topology) = topology.as_any().downcast_ref::<OKDHaClusterTopology>() {
Ok(self.execute_with_lb(lb_topology))
} else {
Err(format!("LoadBalancerScore requires LoadBalancerCapability but topology {} doesn't provide it",
topology.name()))
}
}
fn score_type(&self) -> &str {
"LoadBalancer"
}
}

5
check.sh Executable file
View File

@@ -0,0 +1,5 @@
#!/bin/sh
set -e
cargo check --all-targets --all-features --keep-going
cargo fmt --check
cargo test

View File

@@ -0,0 +1 @@
slitaz/* filter=lfs diff=lfs merge=lfs -text

View File

@@ -0,0 +1,6 @@
#!ipxe
set base-url http://192.168.33.1:8080
set hostfile ${base-url}/byMAC/01-${mac:hexhyp}.ipxe
chain ${hostfile} || chain ${base-url}/default.ipxe

View File

@@ -0,0 +1,35 @@
#!ipxe
menu PXE Boot Menu - [${mac}]
item okdinstallation Install OKD
item slitaz Boot to Slitaz - old linux for debugging
choose selected
goto ${selected}
:local
exit
#################################
# okdinstallation
#################################
:okdinstallation
set base-url http://192.168.33.1:8080
set kernel-image fcos/fedora-coreos-39.20231101.3.0-live-kernel-x86_64
set live-rootfs fcos/fedora-coreos-39.20231101.3.0-live-rootfs.x86_64.img
set live-initramfs fcos/fedora-coreos-39.20231101.3.0-live-initramfs.x86_64.img
set install-disk /dev/nvme0n1
set ignition-file ncd0/master.ign
kernel ${base-url}/${kernel-image} initrd=main coreos.live.rootfs_url=${base-url}/${live-rootfs} coreos.inst.install_dev=${install-disk} coreos.inst.ignition_url=${base-url}/${ignition-file} ip=enp1s0:dhcp
initrd --name main ${base-url}/${live-initramfs}
boot
#################################
# slitaz
#################################
:slitaz
set server_ip 192.168.33.1:8080
set base_url http://${server_ip}/slitaz
kernel ${base_url}/vmlinuz-2.6.37-slitaz rw root=/dev/null vga=788 initrd=rootfs.gz
initrd ${base_url}/rootfs.gz
boot

View File

@@ -0,0 +1,35 @@
#!ipxe
menu PXE Boot Menu - [${mac}]
item okdinstallation Install OKD
item slitaz Boot to Slitaz - old linux for debugging
choose selected
goto ${selected}
:local
exit
#################################
# okdinstallation
#################################
:okdinstallation
set base-url http://192.168.33.1:8080
set kernel-image fcos/fedora-coreos-39.20231101.3.0-live-kernel-x86_64
set live-rootfs fcos/fedora-coreos-39.20231101.3.0-live-rootfs.x86_64.img
set live-initramfs fcos/fedora-coreos-39.20231101.3.0-live-initramfs.x86_64.img
set install-disk /dev/nvme0n1
set ignition-file ncd0/master.ign
kernel ${base-url}/${kernel-image} initrd=main coreos.live.rootfs_url=${base-url}/${live-rootfs} coreos.inst.install_dev=${install-disk} coreos.inst.ignition_url=${base-url}/${ignition-file} ip=enp1s0:dhcp
initrd --name main ${base-url}/${live-initramfs}
boot
#################################
# slitaz
#################################
:slitaz
set server_ip 192.168.33.1:8080
set base_url http://${server_ip}/slitaz
kernel ${base_url}/vmlinuz-2.6.37-slitaz rw root=/dev/null vga=788 initrd=rootfs.gz
initrd ${base_url}/rootfs.gz
boot

View File

@@ -0,0 +1,35 @@
#!ipxe
menu PXE Boot Menu - [${mac}]
item okdinstallation Install OKD
item slitaz Slitaz - an old linux image for debugging
choose selected
goto ${selected}
:local
exit
#################################
# okdinstallation
#################################
:okdinstallation
set base-url http://192.168.33.1:8080
set kernel-image fcos/fedora-coreos-39.20231101.3.0-live-kernel-x86_64
set live-rootfs fcos/fedora-coreos-39.20231101.3.0-live-rootfs.x86_64.img
set live-initramfs fcos/fedora-coreos-39.20231101.3.0-live-initramfs.x86_64.img
set install-disk /dev/sda
set ignition-file ncd0/worker.ign
kernel ${base-url}/${kernel-image} initrd=main coreos.live.rootfs_url=${base-url}/${live-rootfs} coreos.inst.install_dev=${install-disk} coreos.inst.ignition_url=${base-url}/${ignition-file} ip=enp1s0:dhcp
initrd --name main ${base-url}/${live-initramfs}
boot
#################################
# slitaz
#################################
:slitaz
set server_ip 192.168.33.1:8080
set base_url http://${server_ip}/slitaz
kernel ${base_url}/vmlinuz-2.6.37-slitaz rw root=/dev/null vga=788 initrd=rootfs.gz
initrd ${base_url}/rootfs.gz
boot

View File

@@ -0,0 +1,35 @@
#!ipxe
menu PXE Boot Menu - [${mac}]
item okdinstallation Install OKD
item slitaz Boot to Slitaz - old linux for debugging
choose selected
goto ${selected}
:local
exit
#################################
# okdinstallation
#################################
:okdinstallation
set base-url http://192.168.33.1:8080
set kernel-image fcos/fedora-coreos-39.20231101.3.0-live-kernel-x86_64
set live-rootfs fcos/fedora-coreos-39.20231101.3.0-live-rootfs.x86_64.img
set live-initramfs fcos/fedora-coreos-39.20231101.3.0-live-initramfs.x86_64.img
set install-disk /dev/nvme0n1
set ignition-file ncd0/master.ign
kernel ${base-url}/${kernel-image} initrd=main coreos.live.rootfs_url=${base-url}/${live-rootfs} coreos.inst.install_dev=${install-disk} coreos.inst.ignition_url=${base-url}/${ignition-file} ip=enp1s0:dhcp
initrd --name main ${base-url}/${live-initramfs}
boot
#################################
# slitaz
#################################
:slitaz
set server_ip 192.168.33.1:8080
set base_url http://${server_ip}/slitaz
kernel ${base_url}/vmlinuz-2.6.37-slitaz rw root=/dev/null vga=788 initrd=rootfs.gz
initrd ${base_url}/rootfs.gz
boot

View File

@@ -0,0 +1,35 @@
#!ipxe
menu PXE Boot Menu - [${mac}]
item okdinstallation Install OKD
item slitaz Slitaz - an old linux image for debugging
choose selected
goto ${selected}
:local
exit
#################################
# okdinstallation
#################################
:okdinstallation
set base-url http://192.168.33.1:8080
set kernel-image fcos/fedora-coreos-39.20231101.3.0-live-kernel-x86_64
set live-rootfs fcos/fedora-coreos-39.20231101.3.0-live-rootfs.x86_64.img
set live-initramfs fcos/fedora-coreos-39.20231101.3.0-live-initramfs.x86_64.img
set install-disk /dev/sda
set ignition-file ncd0/worker.ign
kernel ${base-url}/${kernel-image} initrd=main coreos.live.rootfs_url=${base-url}/${live-rootfs} coreos.inst.install_dev=${install-disk} coreos.inst.ignition_url=${base-url}/${ignition-file} ip=enp1s0:dhcp
initrd --name main ${base-url}/${live-initramfs}
boot
#################################
# slitaz
#################################
:slitaz
set server_ip 192.168.33.1:8080
set base_url http://${server_ip}/slitaz
kernel ${base_url}/vmlinuz-2.6.37-slitaz rw root=/dev/null vga=788 initrd=rootfs.gz
initrd ${base_url}/rootfs.gz
boot

View File

@@ -0,0 +1,37 @@
#!ipxe
menu PXE Boot Menu - [${mac}]
item okdinstallation Install OKD
item slitaz Slitaz - an old linux image for debugging
choose selected
goto ${selected}
:local
exit
# This is the bootstrap node
# it will become wk2
#################################
# okdinstallation
#################################
:okdinstallation
set base-url http://192.168.33.1:8080
set kernel-image fcos/fedora-coreos-39.20231101.3.0-live-kernel-x86_64
set live-rootfs fcos/fedora-coreos-39.20231101.3.0-live-rootfs.x86_64.img
set live-initramfs fcos/fedora-coreos-39.20231101.3.0-live-initramfs.x86_64.img
set install-disk /dev/sda
set ignition-file ncd0/worker.ign
kernel ${base-url}/${kernel-image} initrd=main coreos.live.rootfs_url=${base-url}/${live-rootfs} coreos.inst.install_dev=${install-disk} coreos.inst.ignition_url=${base-url}/${ignition-file} ip=enp1s0:dhcp
initrd --name main ${base-url}/${live-initramfs}
boot
#################################
# slitaz
#################################
:slitaz
set server_ip 192.168.33.1:8080
set base_url http://${server_ip}/slitaz
kernel ${base_url}/vmlinuz-2.6.37-slitaz rw root=/dev/null vga=788 initrd=rootfs.gz
initrd ${base_url}/rootfs.gz
boot

View File

@@ -0,0 +1,71 @@
#!ipxe
menu PXE Boot Menu - [${mac}]
item local Boot from Hard Disk
item slitaz Boot slitaz live environment [tux|root:root]
#item ubuntu-server Ubuntu 24.04.1 live server
#item ubuntu-desktop Ubuntu 24.04.1 desktop
#item systemrescue System Rescue 11.03
item memtest memtest
#choose --default local --timeout 5000 selected
choose selected
goto ${selected}
:local
exit
#################################
# slitaz
#################################
:slitaz
set server_ip 192.168.33.1:8080
set base_url http://${server_ip}/slitaz
kernel ${base_url}/vmlinuz-2.6.37-slitaz rw root=/dev/null vga=788 initrd=rootfs.gz
initrd ${base_url}/rootfs.gz
boot
#################################
# Ubuntu Server
#################################
:ubuntu-server
set server_ip 192.168.33.1:8080
set base_url http://${server_ip}/ubuntu/live-server-24.04.1
kernel ${base_url}/vmlinuz ip=dhcp url=${base_url}/ubuntu-24.04.1-live-server-amd64.iso autoinstall ds=nocloud
initrd ${base_url}/initrd
boot
#################################
# Ubuntu Desktop
#################################
:ubuntu-desktop
set server_ip 192.168.33.1:8080
set base_url http://${server_ip}/ubuntu/desktop-24.04.1
kernel ${base_url}/vmlinuz ip=dhcp url=${base_url}/ubuntu-24.04.1-desktop-amd64.iso autoinstall ds=nocloud
initrd ${base_url}/initrd
boot
#################################
# System Rescue
#################################
:systemrescue
set base-url http://192.168.33.1:8080/systemrescue
kernel ${base-url}/vmlinuz initrd=sysresccd.img boot=systemrescue docache
initrd ${base-url}/sysresccd.img
boot
#################################
# MemTest86 (BIOS/UEFI)
#################################
:memtest
iseq ${platform} efi && goto memtest_efi || goto memtest_bios
:memtest_efi
kernel http://192.168.33.1:8080/memtest/memtest64.efi
boot
:memtest_bios
kernel http://192.168.33.1:8080/memtest/memtest64.bin
boot

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

View File

@@ -1 +0,0 @@
hey i am paul

BIN
data/watchguard/pxe-http-files/slitaz/rootfs.gz (Stored with Git LFS) Normal file

Binary file not shown.

BIN
data/watchguard/pxe-http-files/slitaz/vmlinuz-2.6.37-slitaz (Stored with Git LFS) Normal file

Binary file not shown.

Binary file not shown.

Binary file not shown.

File diff suppressed because one or more lines are too long


19
examples/cli/Cargo.toml Normal file
View File

@@ -0,0 +1,19 @@
[package]
name = "example-cli"
edition = "2024"
version.workspace = true
readme.workspace = true
license.workspace = true
publish = false
[dependencies]
harmony = { path = "../../harmony" }
harmony_cli = { path = "../../harmony_cli" }
harmony_types = { path = "../../harmony_types" }
cidr = { workspace = true }
tokio = { workspace = true }
harmony_macros = { path = "../../harmony_macros" }
log = { workspace = true }
env_logger = { workspace = true }
url = { workspace = true }
assert_cmd = "2.0.16"

20
examples/cli/src/main.rs Normal file
View File

@@ -0,0 +1,20 @@
use harmony::{
inventory::Inventory,
maestro::Maestro,
modules::dummy::{ErrorScore, PanicScore, SuccessScore},
topology::LocalhostTopology,
};
#[tokio::main]
async fn main() {
let inventory = Inventory::autoload();
let topology = LocalhostTopology::new();
let mut maestro = Maestro::initialize(inventory, topology).await.unwrap();
maestro.register_all(vec![
Box::new(SuccessScore {}),
Box::new(ErrorScore {}),
Box::new(PanicScore {}),
]);
harmony_cli::init(maestro, None).await.unwrap();
}

View File

@@ -18,3 +18,4 @@ kube = "0.98.0"
k8s-openapi = { version = "0.24.0", features = [ "v1_30" ] }
http = "1.2.0"
serde_yaml = "0.9.34"
inquire.workspace = true

View File

@@ -1,20 +1,32 @@
use std::collections::BTreeMap;
use harmony_macros::yaml;
use inquire::Confirm;
use k8s_openapi::{
api::{
apps::v1::{Deployment, DeploymentSpec},
core::v1::{Container, Node, Pod, PodSpec, PodTemplateSpec},
core::v1::{Container, PodSpec, PodTemplateSpec},
},
apimachinery::pkg::apis::meta::v1::LabelSelector,
};
use kube::{
Api, Client, Config, ResourceExt,
api::{ListParams, ObjectMeta, PostParams},
Api, Client, ResourceExt,
api::{ObjectMeta, PostParams},
};
#[tokio::main]
async fn main() {
let confirmation = Confirm::new(
"This will install various ressources to your default kubernetes cluster. Are you sure?",
)
.with_default(false)
.prompt()
.expect("Unexpected prompt error");
if !confirmation {
return;
}
let client = Client::try_default()
.await
.expect("Should instanciate client from defaults");
@@ -42,8 +54,7 @@ async fn main() {
// println!("found node {} status {:?}", n.name_any(), n.status.unwrap())
// }
let nginxdeployment = nginx_deployment_2();
let nginxdeployment = nginx_deployment_serde();
assert_eq!(nginx_deployment(), nginx_macro());
assert_eq!(nginx_deployment_2(), nginx_macro());
assert_eq!(nginx_deployment_serde(), nginx_macro());
let nginxdeployment = nginx_macro();
@@ -149,6 +160,7 @@ fn nginx_deployment_2() -> Deployment {
deployment
}
fn nginx_deployment() -> Deployment {
let deployment = Deployment {
metadata: ObjectMeta {

View File

@@ -8,7 +8,7 @@ publish = false
[dependencies]
harmony = { path = "../../harmony" }
#harmony_tui = { path = "../../harmony_tui" }
harmony_cli = { path = "../../harmony_cli" }
harmony_types = { path = "../../harmony_types" }
cidr = { workspace = true }
tokio = { workspace = true }
@@ -16,3 +16,5 @@ harmony_macros = { path = "../../harmony_macros" }
log = { workspace = true }
env_logger = { workspace = true }
url = { workspace = true }
typetag = "0.2.20"
serde = "1.0.219"

View File

@@ -1,3 +1,85 @@
<?php
print_r("Hello this is from PHP")
ini_set('display_errors', 1);
error_reporting(E_ALL);
$host = getenv('MYSQL_HOST') ?: '';
$user = getenv('MYSQL_USER') ?: 'root';
$pass = getenv('MYSQL_PASSWORD') ?: '';
$db = 'testfill';
$charset = 'utf8mb4';
$dsn = "mysql:host=$host;charset=$charset";
$options = [
PDO::ATTR_ERRMODE => PDO::ERRMODE_EXCEPTION,
PDO::ATTR_DEFAULT_FETCH_MODE => PDO::FETCH_ASSOC,
];
try {
$pdo = new PDO($dsn, $user, $pass, $options);
$pdo->exec("CREATE DATABASE IF NOT EXISTS `$db`");
$pdo->exec("USE `$db`");
$pdo->exec("
CREATE TABLE IF NOT EXISTS filler (
id INT AUTO_INCREMENT PRIMARY KEY,
data LONGBLOB
)
");
} catch (\PDOException $e) {
die("❌ DB connection failed: " . $e->getMessage());
}
function getDbStats($pdo, $db) {
$stmt = $pdo->query("
SELECT
ROUND(SUM(data_length + index_length) / 1024 / 1024 / 1024, 2) AS total_size_gb,
SUM(table_rows) AS total_rows
FROM information_schema.tables
WHERE table_schema = '$db'
");
$result = $stmt->fetch();
$sizeGb = $result['total_size_gb'] ?? '0';
$rows = $result['total_rows'] ?? '0';
$avgMb = ($rows > 0) ? round(($sizeGb * 1024) / $rows, 2) : 0;
return [$sizeGb, $rows, $avgMb];
}
list($dbSize, $rowCount, $avgRowMb) = getDbStats($pdo, $db);
$message = '';
if ($_SERVER['REQUEST_METHOD'] === 'POST' && isset($_POST['fill'])) {
$iterations = 1024;
$data = str_repeat(random_bytes(1024), 1024); // 1MB
$stmt = $pdo->prepare("INSERT INTO filler (data) VALUES (:data)");
for ($i = 0; $i < $iterations; $i++) {
$stmt->execute([':data' => $data]);
}
list($dbSize, $rowCount, $avgRowMb) = getDbStats($pdo, $db);
$message = "<p style='color: green;'>✅ 1GB inserted into MariaDB successfully.</p>";
}
?>
<!DOCTYPE html>
<html>
<head>
<title>MariaDB Filler</title>
</head>
<body>
<h1>MariaDB Storage Filler</h1>
<?= $message ?>
<ul>
<li><strong>📦 MariaDB Used Size:</strong> <?= $dbSize ?> GB</li>
<li><strong>📊 Total Rows:</strong> <?= $rowCount ?></li>
<li><strong>📐 Average Row Size:</strong> <?= $avgRowMb ?> MB</li>
</ul>
<form method="post">
<button name="fill" value="1" type="submit">Insert 1GB into DB</button>
</form>
</body>
</html>

View File

@@ -1,29 +1,74 @@
use harmony::{
data::Version,
inventory::Inventory,
maestro::Maestro,
modules::lamp::{LAMPConfig, LAMPScore},
score::Score,
topology::{HAClusterTopology, Topology, Url},
modules::{
lamp::{LAMPConfig, LAMPScore},
monitoring::{kube_prometheus::prometheus_alert_channel::{DiscordChannel, SlackChannel}, monitoring_alerting::MonitoringAlertingScore},
},
topology::{K8sAnywhereTopology, Url},
};
#[tokio::main]
async fn main() {
// This here is the whole configuration to
// - setup a local K3D cluster
// - Build a docker image with the PHP project builtin and production grade settings
// - Deploy a mariadb database using a production grade helm chart
// - Deploy the new container using a kubernetes deployment
// - Configure networking between the PHP container and the database
// - Provision a public route and an SSL certificate automatically on production environments
//
// Enjoy :)
let lamp_stack = LAMPScore {
name: "harmony-lamp-demo".to_string(),
domain: Url::Url(url::Url::parse("https://lampdemo.harmony.nationtech.io").unwrap()),
php_version: Version::from("8.4.4").unwrap(),
// This config can be extended as needed for more complicated configurations
config: LAMPConfig {
project_root: "./php".into(),
database_size: format!("4Gi").into(),
..Default::default()
},
};
Maestro::<HAClusterTopology>::load_from_env()
.interpret(Box::new(lamp_stack))
.await
.unwrap();
}
fn clone_score<T: Topology, S: Score<T> + Clone + 'static>(score: S) -> Box<S> {
Box::new(score.clone())
// You can choose the type of Topology you want, we suggest starting with the
// K8sAnywhereTopology as it is the most automatic one that enables you to easily deploy
// locally, to development environment from a CI, to staging, and to production with settings
// that automatically adapt to each environment grade.
let mut maestro = Maestro::<K8sAnywhereTopology>::initialize(
Inventory::autoload(),
K8sAnywhereTopology::new(),
)
.await
.unwrap();
let url = url::Url::parse(
"https://hooks.slack.com/services/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",
)
.expect("invalid URL");
let mut monitoring_stack_score = MonitoringAlertingScore::new();
monitoring_stack_score.namespace = Some(lamp_stack.config.namespace.clone());
monitoring_stack_score.alert_channels = vec![
Box::new(SlackChannel {
name: "alert-test".to_string(),
webhook_url: url,
}),
Box::new(DiscordChannel {
name: "discord".to_string(),
webhook_url: url::Url::parse("https://discord.com/api/webhooks/1372994201746276462/YRn4TA9pj8ve3lfmyj1j0Yx97i92gv4U_uavt4CV4_SSIVArYUqfDzMOmzSTic2d8XSL")
.expect("invalid URL"),
}),
];
// TODO: still in the process of being tested
// Incoming webhooks are deprecated in MS Teams as of August 2025
//(AlertChannel::MSTeams {
//    connector: "alert-test".to_string(),
//    webhook_url: url::Url::parse("").expect("invalid URL"),
//}),
maestro.register_all(vec![Box::new(monitoring_stack_score)]);
// Here we bootstrap the CLI, this gives some nice features if you need them
harmony_cli::init(maestro, None).await.unwrap();
}
// That's it, end of the infra as code.

View File

@@ -0,0 +1,14 @@
[package]
name = "ms_teams_alert_channel"
edition = "2024"
version.workspace = true
readme.workspace = true
license.workspace = true
[dependencies]
harmony = { version = "0.1.0", path = "../../harmony" }
harmony_cli = { version = "0.1.0", path = "../../harmony_cli" }
serde = "1.0.219"
tokio.workspace = true
typetag = "0.2.20"
url.workspace = true

View File

@@ -0,0 +1,65 @@
mod prometheus_msteams;
use harmony::{
interpret::InterpretError,
inventory::Inventory,
maestro::Maestro,
modules::{
helm::chart::HelmChartScore,
monitoring::{
kube_prometheus::{
prometheus_alert_channel::PrometheusAlertChannel,
types::{AlertChannelConfig, AlertChannelReceiver, AlertChannelRoute, WebhookConfig},
},
monitoring_alerting::MonitoringAlertingScore,
},
},
topology::K8sAnywhereTopology,
};
use prometheus_msteams::prometheus_msteams_score;
use url::Url;
use serde::{Serialize, Deserialize};
#[tokio::main]
async fn main() {
let alert_channels: Vec<Box<dyn PrometheusAlertChannel>> = vec![Box::new(MSTeamsChannel {
connector: "teams-test".to_string(),
webhook_url: url::Url::parse(
"https://msteams.com/services/dummy/dummy/dummy",
)
.expect("invalid URL"),
})];
let monitoring_score = MonitoringAlertingScore {
alert_channels,
namespace: None,
};
let mut maestro = Maestro::<K8sAnywhereTopology>::initialize(
Inventory::autoload(),
K8sAnywhereTopology::new(),
)
.await
.unwrap();
maestro.register_all(vec![Box::new(monitoring_score)]);
harmony_cli::init(maestro, None).await.unwrap();
}
#[derive(Debug, Clone, Serialize, Deserialize)]
struct MSTeamsChannel {
connector: String,
webhook_url: Url,
}
#[typetag::serde]
impl PrometheusAlertChannel for MSTeamsChannel {
fn get_alert_manager_config_contribution(&self) -> Result<AlertChannelConfig, InterpretError> {
Ok(AlertChannelConfig {
receiver: AlertChannelReceiver {
name: format!("MSTeams-{}", self.connector),
slack_configs: None,
webhook_configs: Some(vec![WebhookConfig {
url: url::Url::parse("http://prometheus-msteams-prometheus-msteams.monitoring.svc.cluster.local:2000/alertmanager")
.expect("invalid url"),
send_resolved: true,
}]),
},
route: AlertChannelRoute {
receiver: format!("MSTeams-{}", self.connector),
matchers: vec!["alertname!=Watchdog".to_string()],
r#continue: true,
},
global_config: None,
})
}
fn get_dependency_score(&self, ns: String) -> Option<HelmChartScore> {
Some(prometheus_msteams_score(self.connector.clone(), self.webhook_url.clone(), ns))
}
}

View File

@@ -0,0 +1,30 @@
use std::str::FromStr;
use harmony::modules::helm::chart::{HelmChartScore, NonBlankString};
use url::Url;
pub fn prometheus_msteams_score(
name: String,
webhook_url: Url,
namespace: String,
) -> HelmChartScore {
let values = format!(
r#"
connectors:
- default: "{webhook_url}"
"#,
);
HelmChartScore {
namespace: Some(NonBlankString::from_str(&namespace).unwrap()),
release_name: NonBlankString::from_str(&name).unwrap(),
chart_name: NonBlankString::from_str("oci://hub.nationtech.io/library/prometheus-msteams")
.unwrap(),
chart_version: None,
values_overrides: None,
values_yaml: Some(values),
create_namespace: true,
install_only: true,
repository: None,
}
}

View File

@@ -0,0 +1,4 @@
#!/bin/bash
helm install --create-namespace --namespace rook-ceph rook-ceph-cluster \
--set operatorNamespace=rook-ceph rook-release/rook-ceph-cluster -f values.yaml

View File

@@ -0,0 +1,721 @@
# Default values for a single rook-ceph cluster
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
# -- Namespace of the main rook operator
operatorNamespace: rook-ceph
# -- The metadata.name of the CephCluster CR
# @default -- The same as the namespace
clusterName:
# -- Optional override of the target kubernetes version
kubeVersion:
# -- Cluster ceph.conf override
configOverride:
# configOverride: |
# [global]
# mon_allow_pool_delete = true
# osd_pool_default_size = 3
# osd_pool_default_min_size = 2
# Installs a debugging toolbox deployment
toolbox:
# -- Enable Ceph debugging pod deployment. See [toolbox](../Troubleshooting/ceph-toolbox.md)
enabled: true
# -- Toolbox image, defaults to the image used by the Ceph cluster
image: #quay.io/ceph/ceph:v19.2.2
# -- Toolbox tolerations
tolerations: []
# -- Toolbox affinity
affinity: {}
# -- Toolbox container security context
containerSecurityContext:
runAsNonRoot: true
runAsUser: 2016
runAsGroup: 2016
capabilities:
drop: ["ALL"]
# -- Toolbox resources
resources:
limits:
memory: "1Gi"
requests:
cpu: "100m"
memory: "128Mi"
# -- Set the priority class for the toolbox if desired
priorityClassName:
monitoring:
# -- Enable Prometheus integration, will also create necessary RBAC rules to allow Operator to create ServiceMonitors.
# Monitoring requires Prometheus to be pre-installed
enabled: false
# -- Whether to disable the metrics reported by Ceph. If false, the prometheus mgr module and Ceph exporter are enabled
metricsDisabled: false
# -- Whether to create the Prometheus rules for Ceph alerts
createPrometheusRules: false
# -- The namespace in which to create the prometheus rules, if different from the rook cluster namespace.
# If you have multiple rook-ceph clusters in the same k8s cluster, choose the same namespace (ideally, namespace with prometheus
# deployed) to set rulesNamespaceOverride for all the clusters. Otherwise, you will get duplicate alerts with multiple alert definitions.
rulesNamespaceOverride:
# Monitoring settings for external clusters:
# externalMgrEndpoints: <list of endpoints>
# externalMgrPrometheusPort: <port>
# Scrape interval for prometheus
# interval: 10s
# allow adding custom labels and annotations to the prometheus rule
prometheusRule:
# -- Labels applied to PrometheusRule
labels: {}
# -- Annotations applied to PrometheusRule
annotations: {}
# -- Create & use PSP resources. Set this to the same value as the rook-ceph chart.
pspEnable: false
# imagePullSecrets option allow to pull docker images from private docker registry. Option will be passed to all service accounts.
# imagePullSecrets:
# - name: my-registry-secret
# All values below are taken from the CephCluster CRD
# -- Cluster configuration.
# @default -- See [below](#ceph-cluster-spec)
cephClusterSpec:
# This cluster spec example is for a converged cluster where all the Ceph daemons are running locally,
# as in the host-based example (cluster.yaml). For a different configuration such as a
# PVC-based cluster (cluster-on-pvc.yaml), external cluster (cluster-external.yaml),
# or stretch cluster (cluster-stretched.yaml), replace this entire `cephClusterSpec`
# with the specs from those examples.
# For more details, check https://rook.io/docs/rook/v1.10/CRDs/Cluster/ceph-cluster-crd/
cephVersion:
# The container image used to launch the Ceph daemon pods (mon, mgr, osd, mds, rgw).
# v18 is Reef, v19 is Squid
# RECOMMENDATION: In production, use a specific version tag instead of the general v18 flag, which pulls the latest release and could result in different
# versions running within the cluster. See tags available at https://hub.docker.com/r/ceph/ceph/tags/.
# If you want to be more precise, you can always use a timestamp tag such as quay.io/ceph/ceph:v19.2.2-20250409
# This tag might not contain a new Ceph version, just security fixes from the underlying operating system, which will reduce vulnerabilities
image: quay.io/ceph/ceph:v19.2.2
# Whether to allow unsupported versions of Ceph. Currently Reef and Squid are supported.
# Future versions such as Tentacle (v20) would require this to be set to `true`.
# Do not set to true in production.
allowUnsupported: false
# The path on the host where configuration files will be persisted. Must be specified. If there are multiple clusters, the directory must be unique for each cluster.
# Important: if you reinstall the cluster, make sure you delete this directory from each host or else the mons will fail to start on the new cluster.
# In Minikube, the '/data' directory is configured to persist across reboots. Use "/data/rook" in Minikube environment.
dataDirHostPath: /var/lib/rook
# Whether or not upgrade should continue even if a check fails
# This means Ceph's status could be degraded and we don't recommend upgrading but you might decide otherwise
# Use at your OWN risk
# To understand Rook's upgrade process of Ceph, read https://rook.io/docs/rook/v1.10/Upgrade/ceph-upgrade/
skipUpgradeChecks: false
# Whether or not continue if PGs are not clean during an upgrade
continueUpgradeAfterChecksEvenIfNotHealthy: false
# WaitTimeoutForHealthyOSDInMinutes defines the time (in minutes) the operator would wait before an OSD can be stopped for upgrade or restart.
# If the timeout is exceeded and the OSD is not ok to stop, the operator will skip the upgrade for the current OSD and proceed with the next one
# if `continueUpgradeAfterChecksEvenIfNotHealthy` is `false`. If `continueUpgradeAfterChecksEvenIfNotHealthy` is `true`, the operator will
# continue with the upgrade of an OSD even if it's not ok to stop after the timeout. This timeout won't be applied if `skipUpgradeChecks` is `true`.
# The default wait timeout is 10 minutes.
waitTimeoutForHealthyOSDInMinutes: 10
# Whether or not requires PGs are clean before an OSD upgrade. If set to `true` OSD upgrade process won't start until PGs are healthy.
# This configuration will be ignored if `skipUpgradeChecks` is `true`.
# Default is false.
upgradeOSDRequiresHealthyPGs: false
mon:
# Set the number of mons to be started. Generally recommended to be 3.
# For highest availability, an odd number of mons should be specified.
count: 3
# The mons should be on unique nodes. For production, at least 3 nodes are recommended for this reason.
# Mons should only be allowed on the same node for test environments where data loss is acceptable.
allowMultiplePerNode: false
mgr:
# When higher availability of the mgr is needed, increase the count to 2.
# In that case, one mgr will be active and one in standby. When Ceph updates which
# mgr is active, Rook will update the mgr services to match the active mgr.
count: 2
allowMultiplePerNode: false
modules:
# List of modules to optionally enable or disable.
# Note the "dashboard" and "monitoring" modules are already configured by other settings in the cluster CR.
# - name: rook
# enabled: true
# enable the ceph dashboard for viewing cluster status
dashboard:
enabled: true
# serve the dashboard under a subpath (useful when you are accessing the dashboard via a reverse proxy)
# urlPrefix: /ceph-dashboard
# serve the dashboard at the given port.
# port: 8443
# Serve the dashboard using SSL (if using ingress to expose the dashboard and `ssl: true` you need to set
# the corresponding "backend protocol" annotation(s) for your ingress controller of choice)
ssl: true
# Network configuration, see: https://github.com/rook/rook/blob/master/Documentation/CRDs/Cluster/ceph-cluster-crd.md#network-configuration-settings
network:
connections:
# Whether to encrypt the data in transit across the wire to prevent eavesdropping the data on the network.
# The default is false. When encryption is enabled, all communication between clients and Ceph daemons, or between Ceph daemons will be encrypted.
# When encryption is not enabled, clients still establish a strong initial authentication and data integrity is still validated with a crc check.
# IMPORTANT: Encryption requires the 5.11 kernel for the latest nbd and cephfs drivers. Alternatively for testing only,
# you can set the "mounter: rbd-nbd" in the rbd storage class, or "mounter: fuse" in the cephfs storage class.
# The nbd and fuse drivers are *not* recommended in production since restarting the csi driver pod will disconnect the volumes.
encryption:
enabled: false
# Whether to compress the data in transit across the wire. The default is false.
# The kernel requirements above for encryption also apply to compression.
compression:
enabled: false
# Whether to require communication over msgr2. If true, the msgr v1 port (6789) will be disabled
# and clients will be required to connect to the Ceph cluster with the v2 port (3300).
# Requires a kernel that supports msgr v2 (kernel 5.11 or CentOS 8.4 or newer).
requireMsgr2: false
# # enable host networking
# provider: host
# # EXPERIMENTAL: enable the Multus network provider
# provider: multus
# selectors:
# # The selector keys are required to be `public` and `cluster`.
# # Based on the configuration, the operator will do the following:
# # 1. if only the `public` selector key is specified both public_network and cluster_network Ceph settings will listen on that interface
# # 2. if both `public` and `cluster` selector keys are specified the first one will point to 'public_network' flag and the second one to 'cluster_network'
# #
# # In order to work, each selector value must match a NetworkAttachmentDefinition object in Multus
# #
# # public: public-conf --> NetworkAttachmentDefinition object name in Multus
# # cluster: cluster-conf --> NetworkAttachmentDefinition object name in Multus
# # Provide internet protocol version. IPv6, IPv4 or empty string are valid options. Empty string would mean IPv4
# ipFamily: "IPv6"
# # Ceph daemons to listen on both IPv4 and Ipv6 networks
# dualStack: false
# enable the crash collector for ceph daemon crash collection
crashCollector:
disable: false
# Uncomment daysToRetain to prune ceph crash entries older than the
# specified number of days.
# daysToRetain: 30
# enable log collector, daemons will log on files and rotate
logCollector:
enabled: true
periodicity: daily # one of: hourly, daily, weekly, monthly
maxLogSize: 500M # SUFFIX may be 'M' or 'G'. Must be at least 1M.
# automate [data cleanup process](https://github.com/rook/rook/blob/master/Documentation/Storage-Configuration/ceph-teardown.md#delete-the-data-on-hosts) in cluster destruction.
cleanupPolicy:
# Since cluster cleanup is destructive to data, confirmation is required.
# To destroy all Rook data on hosts during uninstall, confirmation must be set to "yes-really-destroy-data".
# This value should only be set when the cluster is about to be deleted. After the confirmation is set,
# Rook will immediately stop configuring the cluster and only wait for the delete command.
# If the empty string is set, Rook will not destroy any data on hosts during uninstall.
confirmation: ""
# sanitizeDisks represents settings for sanitizing OSD disks on cluster deletion
sanitizeDisks:
# method indicates if the entire disk should be sanitized or simply ceph's metadata
# in both cases, re-install is possible
# possible choices are 'complete' or 'quick' (default)
method: quick
# dataSource indicate where to get random bytes from to write on the disk
# possible choices are 'zero' (default) or 'random'
# using random sources will consume entropy from the system and will take much more time than the zero source
dataSource: zero
# iteration overwrite N times instead of the default (1)
# takes an integer value
iteration: 1
# allowUninstallWithVolumes defines how the uninstall should be performed
# If set to true, cephCluster deletion does not wait for the PVs to be deleted.
allowUninstallWithVolumes: false
# To control where various services will be scheduled by kubernetes, use the placement configuration sections below.
# The example under 'all' would have all services scheduled on kubernetes nodes labeled with 'role=storage-node' and
# tolerate taints with a key of 'storage-node'.
# placement:
# all:
# nodeAffinity:
# requiredDuringSchedulingIgnoredDuringExecution:
# nodeSelectorTerms:
# - matchExpressions:
# - key: role
# operator: In
# values:
# - storage-node
# podAffinity:
# podAntiAffinity:
# topologySpreadConstraints:
# tolerations:
# - key: storage-node
# operator: Exists
# # The above placement information can also be specified for mon, osd, and mgr components
# mon:
# # Monitor deployments may contain an anti-affinity rule for avoiding monitor
# # collocation on the same node. This is a required rule when host network is used
# # or when AllowMultiplePerNode is false. Otherwise this anti-affinity rule is a
# # preferred rule with weight: 50.
# osd:
# mgr:
# cleanup:
# annotations:
# all:
# mon:
# osd:
# cleanup:
# prepareosd:
# # If no mgr annotations are set, prometheus scrape annotations will be set by default.
# mgr:
# dashboard:
# labels:
# all:
# mon:
# osd:
# cleanup:
# mgr:
# prepareosd:
# # monitoring is a list of key-value pairs. It is injected into all the monitoring resources created by operator.
# # These labels can be passed as LabelSelector to Prometheus
# monitoring:
# dashboard:
resources:
mgr:
limits:
memory: "1Gi"
requests:
cpu: "500m"
memory: "512Mi"
mon:
limits:
memory: "2Gi"
requests:
cpu: "1000m"
memory: "1Gi"
osd:
limits:
memory: "4Gi"
requests:
cpu: "1000m"
memory: "4Gi"
prepareosd:
# limits: It is not recommended to set limits on the OSD prepare job
# since it's a one-time burst for memory that must be allowed to
# complete without an OOM kill. Note however that if a k8s
# limitRange guardrail is defined external to Rook, the lack of
# a limit here may result in a sync failure, in which case a
# limit should be added. 1200Mi may suffice for up to 15Ti
# OSDs; for larger devices 2Gi may be required.
# cf. https://github.com/rook/rook/pull/11103
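# A hedged sketch for the guardrail case described above (sizing taken from that note):
# limits:
#   memory: "1200Mi"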
requests:
cpu: "500m"
memory: "50Mi"
mgr-sidecar:
limits:
memory: "100Mi"
requests:
cpu: "100m"
memory: "40Mi"
crashcollector:
limits:
memory: "60Mi"
requests:
cpu: "100m"
memory: "60Mi"
logcollector:
limits:
memory: "1Gi"
requests:
cpu: "100m"
memory: "100Mi"
cleanup:
limits:
memory: "1Gi"
requests:
cpu: "500m"
memory: "100Mi"
exporter:
limits:
memory: "128Mi"
requests:
cpu: "50m"
memory: "50Mi"
# The option to automatically remove OSDs that are out and are safe to destroy.
removeOSDsIfOutAndSafeToRemove: false
# priority classes to apply to ceph resources
priorityClassNames:
mon: system-node-critical
osd: system-node-critical
mgr: system-cluster-critical
storage: # cluster level storage configuration and selection
useAllNodes: true
useAllDevices: true
# deviceFilter:
# config:
# crushRoot: "custom-root" # specify a non-default root label for the CRUSH map
# metadataDevice: "md0" # specify non-rotational storage so ceph-volume will use it as the block db device of bluestore.
# databaseSizeMB: "1024" # uncomment if the disks are smaller than 100 GB
# osdsPerDevice: "1" # this value can be overridden at the node or device level
# encryptedDevice: "true" # the default value for this option is "false"
# # Individual nodes and their config can be specified as well, but 'useAllNodes' above must be set to false. Then, only the named
# # nodes below will be used as storage resources. Each node's 'name' field should match their 'kubernetes.io/hostname' label.
# nodes:
# - name: "172.17.4.201"
# devices: # specific devices to use for storage can be specified for each node
# - name: "sdb"
# - name: "nvme01" # multiple osds can be created on high performance devices
# config:
# osdsPerDevice: "5"
# - name: "/dev/disk/by-id/ata-ST4000DM004-XXXX" # devices can be specified using full udev paths
# config: # configuration can be specified at the node level which overrides the cluster level config
# - name: "172.17.4.301"
# deviceFilter: "^sd."
# The section for configuring management of daemon disruptions during upgrade or fencing.
disruptionManagement:
# If true, the operator will create and manage PodDisruptionBudgets for OSD, Mon, RGW, and MDS daemons. OSD PDBs are managed dynamically
# via the strategy outlined in the [design](https://github.com/rook/rook/blob/master/design/ceph/ceph-managed-disruptionbudgets.md). The operator will
# block eviction of OSDs by default and unblock them safely when drains are detected.
managePodBudgets: true
# A duration in minutes that determines how long an entire failureDomain like `region/zone/host` will be held in `noout` (in addition to the
# default DOWN/OUT interval) when it is draining. This is only relevant when `managePodBudgets` is `true`. The default value is `30` minutes.
osdMaintenanceTimeout: 30
# Configure the healthcheck and liveness probes for ceph pods.
# Valid values for daemons are 'mon', 'osd', 'status'
healthCheck:
daemonHealth:
mon:
disabled: false
interval: 45s
osd:
disabled: false
interval: 60s
status:
disabled: false
interval: 60s
# Change pod liveness probe; it works for all mon, mgr, and osd pods.
livenessProbe:
mon:
disabled: false
mgr:
disabled: false
osd:
disabled: false
ingress:
# -- Enable an ingress for the ceph-dashboard
dashboard:
# {}
# labels:
# external-dns/private: "true"
annotations:
"route.openshift.io/termination": "passthrough"
# external-dns.alpha.kubernetes.io/hostname: dashboard.example.com
# nginx.ingress.kubernetes.io/rewrite-target: /ceph-dashboard/$2
# If the dashboard has ssl: true the following will make sure the NGINX Ingress controller can expose the dashboard correctly
# nginx.ingress.kubernetes.io/backend-protocol: "HTTPS"
# nginx.ingress.kubernetes.io/server-snippet: |
# proxy_ssl_verify off;
host:
name: ceph.apps.ncd0.harmony.mcd
path: null # TODO: the chart does not allow removing the path, which causes OpenShift to fail to create the route, since a path is not supported with termination mode passthrough
pathType: ImplementationSpecific
tls:
- {}
# secretName: testsecret-tls
# Note: Only one of ingress class annotation or the `ingressClassName:` can be used at a time
# to set the ingress class
# ingressClassName: openshift-default
# labels:
# external-dns/private: "true"
# annotations:
# external-dns.alpha.kubernetes.io/hostname: dashboard.example.com
# nginx.ingress.kubernetes.io/rewrite-target: /ceph-dashboard/$2
# If the dashboard has ssl: true the following will make sure the NGINX Ingress controller can expose the dashboard correctly
# nginx.ingress.kubernetes.io/backend-protocol: "HTTPS"
# nginx.ingress.kubernetes.io/server-snippet: |
# proxy_ssl_verify off;
# host:
# name: dashboard.example.com
# path: "/ceph-dashboard(/|$)(.*)"
# pathType: Prefix
# tls:
# - hosts:
# - dashboard.example.com
# secretName: testsecret-tls
## Note: Only one of ingress class annotation or the `ingressClassName:` can be used at a time
## to set the ingress class
# ingressClassName: nginx
# -- A list of CephBlockPool configurations to deploy
# @default -- See [below](#ceph-block-pools)
cephBlockPools:
- name: ceph-blockpool
# see https://github.com/rook/rook/blob/master/Documentation/CRDs/Block-Storage/ceph-block-pool-crd.md#spec for available configuration
spec:
failureDomain: host
replicated:
size: 3
# Enables collecting RBD per-image IO statistics by enabling dynamic OSD performance counters. Defaults to false.
# For reference: https://docs.ceph.com/docs/latest/mgr/prometheus/#rbd-io-statistics
# enableRBDStats: true
storageClass:
enabled: true
name: ceph-block
annotations: {}
labels: {}
isDefault: true
reclaimPolicy: Delete
allowVolumeExpansion: true
volumeBindingMode: "Immediate"
mountOptions: []
# see https://kubernetes.io/docs/concepts/storage/storage-classes/#allowed-topologies
allowedTopologies: []
# - matchLabelExpressions:
# - key: rook-ceph-role
# values:
# - storage-node
# see https://github.com/rook/rook/blob/master/Documentation/Storage-Configuration/Block-Storage-RBD/block-storage.md#provision-storage for available configuration
parameters:
# (optional) mapOptions is a comma-separated list of map options.
# For krbd options refer
# https://docs.ceph.com/docs/latest/man/8/rbd/#kernel-rbd-krbd-options
# For nbd options refer
# https://docs.ceph.com/docs/latest/man/8/rbd-nbd/#options
# mapOptions: lock_on_read,queue_depth=1024
# (optional) unmapOptions is a comma-separated list of unmap options.
# For krbd options refer
# https://docs.ceph.com/docs/latest/man/8/rbd/#kernel-rbd-krbd-options
# For nbd options refer
# https://docs.ceph.com/docs/latest/man/8/rbd-nbd/#options
# unmapOptions: force
# RBD image format. Defaults to "2".
imageFormat: "2"
# RBD image features, equivalent to OR'd bitfield value: 63
# Available for imageFormat: "2". Older releases of CSI RBD
# support only the `layering` feature. The Linux kernel (KRBD) supports the
# full feature complement as of 5.4
imageFeatures: layering
# These secrets contain Ceph admin credentials.
csi.storage.k8s.io/provisioner-secret-name: rook-csi-rbd-provisioner
csi.storage.k8s.io/provisioner-secret-namespace: "{{ .Release.Namespace }}"
csi.storage.k8s.io/controller-expand-secret-name: rook-csi-rbd-provisioner
csi.storage.k8s.io/controller-expand-secret-namespace: "{{ .Release.Namespace }}"
csi.storage.k8s.io/node-stage-secret-name: rook-csi-rbd-node
csi.storage.k8s.io/node-stage-secret-namespace: "{{ .Release.Namespace }}"
# Specify the filesystem type of the volume. If not specified, csi-provisioner
# will set default as `ext4`. Note that `xfs` is not recommended due to potential deadlock
# in hyperconverged settings where the volume is mounted on the same node as the osds.
csi.storage.k8s.io/fstype: ext4
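# An illustrative consumer sketch (not part of the chart values; the PVC name is
# hypothetical): a PVC that binds to the "ceph-block" StorageClass defined above.
# apiVersion: v1
# kind: PersistentVolumeClaim
# metadata:
#   name: rbd-pvc
# spec:
#   accessModes: ["ReadWriteOnce"]
#   resources:
#     requests:
#       storage: 1Gi
#   storageClassName: ceph-block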
# -- A list of CephFileSystem configurations to deploy
# @default -- See [below](#ceph-file-systems)
cephFileSystems:
- name: ceph-filesystem
# see https://github.com/rook/rook/blob/master/Documentation/CRDs/Shared-Filesystem/ceph-filesystem-crd.md#filesystem-settings for available configuration
spec:
metadataPool:
replicated:
size: 3
dataPools:
- failureDomain: host
replicated:
size: 3
# Optional and highly recommended, 'data0' by default, see https://github.com/rook/rook/blob/master/Documentation/CRDs/Shared-Filesystem/ceph-filesystem-crd.md#pools
name: data0
metadataServer:
activeCount: 1
activeStandby: true
resources:
limits:
memory: "4Gi"
requests:
cpu: "1000m"
memory: "4Gi"
priorityClassName: system-cluster-critical
storageClass:
enabled: true
isDefault: false
name: ceph-filesystem
# (Optional) specify a data pool to use, must be the name of one of the data pools above, 'data0' by default
pool: data0
reclaimPolicy: Delete
allowVolumeExpansion: true
volumeBindingMode: "Immediate"
annotations: {}
labels: {}
mountOptions: []
# see https://github.com/rook/rook/blob/master/Documentation/Storage-Configuration/Shared-Filesystem-CephFS/filesystem-storage.md#provision-storage for available configuration
parameters:
# The secrets contain Ceph admin credentials.
csi.storage.k8s.io/provisioner-secret-name: rook-csi-cephfs-provisioner
csi.storage.k8s.io/provisioner-secret-namespace: "{{ .Release.Namespace }}"
csi.storage.k8s.io/controller-expand-secret-name: rook-csi-cephfs-provisioner
csi.storage.k8s.io/controller-expand-secret-namespace: "{{ .Release.Namespace }}"
csi.storage.k8s.io/node-stage-secret-name: rook-csi-cephfs-node
csi.storage.k8s.io/node-stage-secret-namespace: "{{ .Release.Namespace }}"
# Specify the filesystem type of the volume. If not specified, csi-provisioner
# will set default as `ext4`. Note that `xfs` is not recommended due to potential deadlock
# in hyperconverged settings where the volume is mounted on the same node as the osds.
csi.storage.k8s.io/fstype: ext4
# -- Settings for the filesystem snapshot class
# @default -- See [CephFS Snapshots](../Storage-Configuration/Ceph-CSI/ceph-csi-snapshot.md#cephfs-snapshots)
cephFileSystemVolumeSnapshotClass:
enabled: false
name: ceph-filesystem
isDefault: true
deletionPolicy: Delete
annotations: {}
labels: {}
# see https://rook.io/docs/rook/v1.10/Storage-Configuration/Ceph-CSI/ceph-csi-snapshot/#cephfs-snapshots for available configuration
parameters: {}
# -- Settings for the block pool snapshot class
# @default -- See [RBD Snapshots](../Storage-Configuration/Ceph-CSI/ceph-csi-snapshot.md#rbd-snapshots)
cephBlockPoolsVolumeSnapshotClass:
enabled: false
name: ceph-block
isDefault: false
deletionPolicy: Delete
annotations: {}
labels: {}
# see https://rook.io/docs/rook/v1.10/Storage-Configuration/Ceph-CSI/ceph-csi-snapshot/#rbd-snapshots for available configuration
parameters: {}
# -- A list of CephObjectStore configurations to deploy
# @default -- See [below](#ceph-object-stores)
cephObjectStores:
- name: ceph-objectstore
# see https://github.com/rook/rook/blob/master/Documentation/CRDs/Object-Storage/ceph-object-store-crd.md#object-store-settings for available configuration
spec:
metadataPool:
failureDomain: host
replicated:
size: 3
dataPool:
failureDomain: host
erasureCoded:
dataChunks: 2
codingChunks: 1
parameters:
bulk: "true"
preservePoolsOnDelete: true
gateway:
port: 80
resources:
limits:
memory: "2Gi"
requests:
cpu: "1000m"
memory: "1Gi"
# securePort: 443
# sslCertificateRef:
instances: 1
priorityClassName: system-cluster-critical
# opsLogSidecar:
# resources:
# limits:
# memory: "100Mi"
# requests:
# cpu: "100m"
# memory: "40Mi"
storageClass:
enabled: true
name: ceph-bucket
reclaimPolicy: Delete
volumeBindingMode: "Immediate"
annotations: {}
labels: {}
# see https://github.com/rook/rook/blob/master/Documentation/Storage-Configuration/Object-Storage-RGW/ceph-object-bucket-claim.md#storageclass for available configuration
parameters:
# note: objectStoreNamespace and objectStoreName are configured by the chart
region: us-east-1
ingress:
# Enable an ingress for the ceph-objectstore
enabled: true
# The ingress port by default will be the object store's "securePort" (if set), or the gateway "port".
# To override those defaults, set this ingress port to the desired port.
# port: 80
# annotations: {}
host:
name: objectstore.apps.ncd0.harmony.mcd
path: /
pathType: Prefix
# tls:
# - hosts:
# - objectstore.example.com
# secretName: ceph-objectstore-tls
# ingressClassName: nginx
## cephECBlockPools are disabled by default, please remove the comments and set desired values to enable it
## For erasure coding, a replicated metadata pool is required.
## https://rook.io/docs/rook/latest/CRDs/Shared-Filesystem/ceph-filesystem-crd/#erasure-coded
#cephECBlockPools:
# - name: ec-pool
# spec:
# metadataPool:
# replicated:
# size: 2
# dataPool:
# failureDomain: osd
# erasureCoded:
# dataChunks: 2
# codingChunks: 1
# deviceClass: hdd
#
# parameters:
# # clusterID is the namespace where the rook cluster is running
# # If you change this namespace, also change the namespace below where the secret namespaces are defined
# clusterID: rook-ceph # namespace:cluster
# # (optional) mapOptions is a comma-separated list of map options.
# # For krbd options refer
# # https://docs.ceph.com/docs/latest/man/8/rbd/#kernel-rbd-krbd-options
# # For nbd options refer
# # https://docs.ceph.com/docs/latest/man/8/rbd-nbd/#options
# # mapOptions: lock_on_read,queue_depth=1024
#
# # (optional) unmapOptions is a comma-separated list of unmap options.
# # For krbd options refer
# # https://docs.ceph.com/docs/latest/man/8/rbd/#kernel-rbd-krbd-options
# # For nbd options refer
# # https://docs.ceph.com/docs/latest/man/8/rbd-nbd/#options
# # unmapOptions: force
#
# # RBD image format. Defaults to "2".
# imageFormat: "2"
#
# # RBD image features, equivalent to OR'd bitfield value: 63
# # Available for imageFormat: "2". Older releases of CSI RBD
# # support only the `layering` feature. The Linux kernel (KRBD) supports the
# # full feature complement as of 5.4
# # imageFeatures: layering,fast-diff,object-map,deep-flatten,exclusive-lock
# imageFeatures: layering
#
# storageClass:
# provisioner: rook-ceph.rbd.csi.ceph.com # csi-provisioner-name
# enabled: true
# name: rook-ceph-block
# isDefault: false
# annotations: { }
# labels: { }
# allowVolumeExpansion: true
# reclaimPolicy: Delete
# -- CSI driver name prefix for cephfs, rbd and nfs.
# @default -- `namespace name where rook-ceph operator is deployed`
csiDriverNamePrefix:

View File

@@ -0,0 +1,3 @@
#!/bin/bash
helm repo add rook-release https://charts.rook.io/release
helm install --create-namespace --namespace rook-ceph rook-ceph rook-release/rook-ceph -f values.yaml
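# Hedged follow-up checks (not part of the original script); these assume
# kubectl is configured against the same cluster:
kubectl -n rook-ceph get pods
kubectl -n rook-ceph get cephcluster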

View File

@@ -0,0 +1,674 @@
# Default values for rook-ceph-operator
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
image:
# -- Image
repository: docker.io/rook/ceph
# -- Image tag
# @default -- `master`
tag: v1.17.1
# -- Image pull policy
pullPolicy: IfNotPresent
crds:
# -- Whether the helm chart should create and update the CRDs. If false, the CRDs must be
# managed independently with deploy/examples/crds.yaml.
# **WARNING** Only set during first deployment. If later disabled the cluster may be DESTROYED.
# If the CRDs are deleted in this case, see
# [the disaster recovery guide](https://rook.io/docs/rook/latest/Troubleshooting/disaster-recovery/#restoring-crds-after-deletion)
# to restore them.
enabled: true
# -- Pod resource requests & limits
resources:
limits:
memory: 512Mi
requests:
cpu: 200m
memory: 128Mi
# -- Kubernetes [`nodeSelector`](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector) to add to the Deployment.
nodeSelector: {}
# Constrain rook-ceph-operator Deployment to nodes with label `disktype: ssd`.
# For more info, see https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
# disktype: ssd
# -- List of Kubernetes [`tolerations`](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/) to add to the Deployment.
tolerations: []
# -- Delay to use for the `node.kubernetes.io/unreachable` pod failure toleration to override
# the Kubernetes default of 5 minutes
unreachableNodeTolerationSeconds: 5
# -- Whether the operator should watch cluster CRD in its own namespace or not
currentNamespaceOnly: false
# -- Custom pod labels for the operator
operatorPodLabels: {}
# -- Pod annotations
annotations: {}
# -- Global log level for the operator.
# Options: `ERROR`, `WARNING`, `INFO`, `DEBUG`
logLevel: INFO
# -- If true, create & use RBAC resources
rbacEnable: true
rbacAggregate:
# -- If true, create a ClusterRole aggregated to [user facing roles](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#user-facing-roles) for objectbucketclaims
enableOBCs: false
# -- If true, create & use PSP resources
pspEnable: false
# -- Set the priority class for the rook operator deployment if desired
priorityClassName:
# -- Set the container security context for the operator
containerSecurityContext:
runAsNonRoot: true
runAsUser: 2016
runAsGroup: 2016
capabilities:
drop: ["ALL"]
# -- If true, loop devices are allowed to be used for osds in test clusters
allowLoopDevices: false
# Settings for whether to disable the drivers or other daemons if they are not
# needed
csi:
# -- Enable Ceph CSI RBD driver
enableRbdDriver: true
# -- Enable Ceph CSI CephFS driver
enableCephfsDriver: true
# -- Disable the CSI driver.
disableCsiDriver: "false"
# -- Enable host networking for CSI CephFS and RBD nodeplugins. This may be necessary
# in some network configurations where the SDN does not provide access to an external cluster or
# there is a significant drop in read/write performance
enableCSIHostNetwork: true
# -- Enable Snapshotter in CephFS provisioner pod
enableCephfsSnapshotter: true
# -- Enable Snapshotter in NFS provisioner pod
enableNFSSnapshotter: true
# -- Enable Snapshotter in RBD provisioner pod
enableRBDSnapshotter: true
# -- Enable Host mount for `/etc/selinux` directory for Ceph CSI nodeplugins
enablePluginSelinuxHostMount: false
# -- Enable Ceph CSI PVC encryption support
enableCSIEncryption: false
# -- Enable volume group snapshot feature. This feature is
# enabled by default as long as the necessary CRDs are available in the cluster.
enableVolumeGroupSnapshot: true
# -- PriorityClassName to be set on csi driver plugin pods
pluginPriorityClassName: system-node-critical
# -- PriorityClassName to be set on csi driver provisioner pods
provisionerPriorityClassName: system-cluster-critical
# -- Policy for modifying a volume's ownership or permissions when the RBD PVC is being mounted.
# supported values are documented at https://kubernetes-csi.github.io/docs/support-fsgroup.html
rbdFSGroupPolicy: "File"
# -- Policy for modifying a volume's ownership or permissions when the CephFS PVC is being mounted.
# supported values are documented at https://kubernetes-csi.github.io/docs/support-fsgroup.html
cephFSFSGroupPolicy: "File"
# -- Policy for modifying a volume's ownership or permissions when the NFS PVC is being mounted.
# supported values are documented at https://kubernetes-csi.github.io/docs/support-fsgroup.html
nfsFSGroupPolicy: "File"
# -- OMAP generator generates the omap mapping between the PV name and the RBD image
# which helps CSI to identify the rbd images for CSI operations.
# `CSI_ENABLE_OMAP_GENERATOR` needs to be enabled when using the rbd mirroring feature.
# By default the OMAP generator is disabled; when enabled, it is deployed as a
# sidecar with the CSI provisioner pod. To enable it, set this to true.
enableOMAPGenerator: false
# -- Set CephFS Kernel mount options to use https://docs.ceph.com/en/latest/man/8/mount.ceph/#options.
# Set to "ms_mode=secure" when connections.encrypted is enabled in CephCluster CR
cephFSKernelMountOptions:
# -- Enable adding volume metadata on the CephFS subvolumes and RBD images.
# Not all users might be interested in getting volume/snapshot details as metadata on CephFS subvolume and RBD images.
# Hence enable metadata is false by default
enableMetadata: false
# -- Set replicas for csi provisioner deployment
provisionerReplicas: 2
# -- Cluster name identifier to set as metadata on the CephFS subvolume and RBD images. This is useful
# when, for example, two container orchestrator clusters (Kubernetes/OCP) are using a single ceph cluster
clusterName:
# -- Set logging level for the containers maintained by cephCSI.
# Supported values from 0 to 5. 0 for general useful logs, 5 for trace level verbosity.
logLevel: 0
# -- Set logging level for Kubernetes-csi sidecar containers.
# Supported values from 0 to 5. 0 for general useful logs (the default), 5 for trace level verbosity.
# @default -- `0`
sidecarLogLevel:
# -- CSI driver name prefix for cephfs, rbd and nfs.
# @default -- `namespace name where rook-ceph operator is deployed`
csiDriverNamePrefix:
# -- CSI RBD plugin daemonset update strategy, supported values are OnDelete and RollingUpdate
# @default -- `RollingUpdate`
rbdPluginUpdateStrategy:
# -- A maxUnavailable parameter of CSI RBD plugin daemonset update strategy.
# @default -- `1`
rbdPluginUpdateStrategyMaxUnavailable:
# -- CSI CephFS plugin daemonset update strategy, supported values are OnDelete and RollingUpdate
# @default -- `RollingUpdate`
cephFSPluginUpdateStrategy:
# -- A maxUnavailable parameter of CSI cephFS plugin daemonset update strategy.
# @default -- `1`
cephFSPluginUpdateStrategyMaxUnavailable:
# -- CSI NFS plugin daemonset update strategy, supported values are OnDelete and RollingUpdate
# @default -- `RollingUpdate`
nfsPluginUpdateStrategy:
# -- Set GRPC timeout for csi containers (in seconds). It should be >= 120. If this value is not set or is invalid, it defaults to 150
grpcTimeoutInSeconds: 150
# -- Burst to use while communicating with the kubernetes apiserver.
kubeApiBurst:
# -- QPS to use while communicating with the kubernetes apiserver.
kubeApiQPS:
# -- The volume of the CephCSI RBD plugin DaemonSet
csiRBDPluginVolume:
# - name: lib-modules
# hostPath:
# path: /run/booted-system/kernel-modules/lib/modules/
# - name: host-nix
# hostPath:
# path: /nix
# -- The volume mounts of the CephCSI RBD plugin DaemonSet
csiRBDPluginVolumeMount:
# - name: host-nix
# mountPath: /nix
# readOnly: true
# -- The volume of the CephCSI CephFS plugin DaemonSet
csiCephFSPluginVolume:
# - name: lib-modules
# hostPath:
# path: /run/booted-system/kernel-modules/lib/modules/
# - name: host-nix
# hostPath:
# path: /nix
# -- The volume mounts of the CephCSI CephFS plugin DaemonSet
csiCephFSPluginVolumeMount:
# - name: host-nix
# mountPath: /nix
# readOnly: true
# -- CEPH CSI RBD provisioner resource requirement list
# csi-omap-generator resources will be applied only if `enableOMAPGenerator` is set to `true`
# @default -- see values.yaml
csiRBDProvisionerResource: |
- name : csi-provisioner
resource:
requests:
memory: 128Mi
cpu: 100m
limits:
memory: 256Mi
- name : csi-resizer
resource:
requests:
memory: 128Mi
cpu: 100m
limits:
memory: 256Mi
- name : csi-attacher
resource:
requests:
memory: 128Mi
cpu: 100m
limits:
memory: 256Mi
- name : csi-snapshotter
resource:
requests:
memory: 128Mi
cpu: 100m
limits:
memory: 256Mi
- name : csi-rbdplugin
resource:
requests:
memory: 512Mi
limits:
memory: 1Gi
- name : csi-omap-generator
resource:
requests:
memory: 512Mi
cpu: 250m
limits:
memory: 1Gi
- name : liveness-prometheus
resource:
requests:
memory: 128Mi
cpu: 50m
limits:
memory: 256Mi
# -- CEPH CSI RBD plugin resource requirement list
# @default -- see values.yaml
csiRBDPluginResource: |
- name : driver-registrar
resource:
requests:
memory: 128Mi
cpu: 50m
limits:
memory: 256Mi
- name : csi-rbdplugin
resource:
requests:
memory: 512Mi
cpu: 250m
limits:
memory: 1Gi
- name : liveness-prometheus
resource:
requests:
memory: 128Mi
cpu: 50m
limits:
memory: 256Mi
# -- CEPH CSI CephFS provisioner resource requirement list
# @default -- see values.yaml
csiCephFSProvisionerResource: |
- name : csi-provisioner
resource:
requests:
memory: 128Mi
cpu: 100m
limits:
memory: 256Mi
- name : csi-resizer
resource:
requests:
memory: 128Mi
cpu: 100m
limits:
memory: 256Mi
- name : csi-attacher
resource:
requests:
memory: 128Mi
cpu: 100m
limits:
memory: 256Mi
- name : csi-snapshotter
resource:
requests:
memory: 128Mi
cpu: 100m
limits:
memory: 256Mi
- name : csi-cephfsplugin
resource:
requests:
memory: 512Mi
cpu: 250m
limits:
memory: 1Gi
- name : liveness-prometheus
resource:
requests:
memory: 128Mi
cpu: 50m
limits:
memory: 256Mi
# -- CEPH CSI CephFS plugin resource requirement list
# @default -- see values.yaml
csiCephFSPluginResource: |
- name : driver-registrar
resource:
requests:
memory: 128Mi
cpu: 50m
limits:
memory: 256Mi
- name : csi-cephfsplugin
resource:
requests:
memory: 512Mi
cpu: 250m
limits:
memory: 1Gi
- name : liveness-prometheus
resource:
requests:
memory: 128Mi
cpu: 50m
limits:
memory: 256Mi
# -- CEPH CSI NFS provisioner resource requirement list
# @default -- see values.yaml
csiNFSProvisionerResource: |
- name : csi-provisioner
resource:
requests:
memory: 128Mi
cpu: 100m
limits:
memory: 256Mi
- name : csi-nfsplugin
resource:
requests:
memory: 512Mi
cpu: 250m
limits:
memory: 1Gi
- name : csi-attacher
resource:
requests:
memory: 512Mi
cpu: 250m
limits:
memory: 1Gi
# -- CEPH CSI NFS plugin resource requirement list
# @default -- see values.yaml
csiNFSPluginResource: |
- name : driver-registrar
resource:
requests:
memory: 128Mi
cpu: 50m
limits:
memory: 256Mi
- name : csi-nfsplugin
resource:
requests:
memory: 512Mi
cpu: 250m
limits:
memory: 1Gi
# Set provisionerTolerations and provisionerNodeAffinity for provisioner pod.
# The CSI provisioner is best started on the same nodes as other ceph daemons.
# -- Array of tolerations in YAML format which will be added to CSI provisioner deployment
provisionerTolerations:
# - key: key
# operator: Exists
# effect: NoSchedule
# -- The node labels for affinity of the CSI provisioner deployment [^1]
provisionerNodeAffinity: #key1=value1,value2; key2=value3
# Set pluginTolerations and pluginNodeAffinity for plugin daemonset pods.
# The CSI plugins need to be started on all the nodes where the clients need to mount the storage.
# -- Array of tolerations in YAML format which will be added to CephCSI plugin DaemonSet
pluginTolerations:
# - key: key
# operator: Exists
# effect: NoSchedule
# -- The node labels for affinity of the CephCSI RBD plugin DaemonSet [^1]
pluginNodeAffinity: # key1=value1,value2; key2=value3
# -- Enable Ceph CSI Liveness sidecar deployment
enableLiveness: false
# -- CSI CephFS driver metrics port
# @default -- `9081`
cephfsLivenessMetricsPort:
# -- CSI Addons server port
# @default -- `9070`
csiAddonsPort:
# -- CSI Addons server port for the RBD provisioner
# @default -- `9070`
csiAddonsRBDProvisionerPort:
# -- CSI Addons server port for the Ceph FS provisioner
# @default -- `9070`
csiAddonsCephFSProvisionerPort:
# -- Enable Ceph Kernel clients on kernel < 4.17. If your kernel does not support quotas for CephFS
# you may want to disable this setting. However, this will cause an issue during upgrades
# with the FUSE client. See the [upgrade guide](https://rook.io/docs/rook/v1.2/ceph-upgrade.html)
forceCephFSKernelClient: true
# -- Ceph CSI RBD driver metrics port
# @default -- `8080`
rbdLivenessMetricsPort:
serviceMonitor:
# -- Enable ServiceMonitor for Ceph CSI drivers
enabled: false
# -- Service monitor scrape interval
interval: 10s
# -- ServiceMonitor additional labels
labels: {}
# -- Use a different namespace for the ServiceMonitor
namespace:
# -- Kubelet root directory path (if the Kubelet uses a different path for the `--root-dir` flag)
# @default -- `/var/lib/kubelet`
kubeletDirPath:
# -- Duration in seconds that non-leader candidates will wait to force acquire leadership.
# @default -- `137s`
csiLeaderElectionLeaseDuration:
# -- Deadline in seconds that the acting leader will retry refreshing leadership before giving up.
# @default -- `107s`
csiLeaderElectionRenewDeadline:
# -- Retry period in seconds the LeaderElector clients should wait between tries of actions.
# @default -- `26s`
csiLeaderElectionRetryPeriod:
cephcsi:
# -- Ceph CSI image repository
repository: quay.io/cephcsi/cephcsi
# -- Ceph CSI image tag
tag: v3.14.0
registrar:
# -- Kubernetes CSI registrar image repository
repository: registry.k8s.io/sig-storage/csi-node-driver-registrar
# -- Registrar image tag
tag: v2.13.0
provisioner:
# -- Kubernetes CSI provisioner image repository
repository: registry.k8s.io/sig-storage/csi-provisioner
# -- Provisioner image tag
tag: v5.1.0
snapshotter:
# -- Kubernetes CSI snapshotter image repository
repository: registry.k8s.io/sig-storage/csi-snapshotter
# -- Snapshotter image tag
tag: v8.2.0
attacher:
# -- Kubernetes CSI Attacher image repository
repository: registry.k8s.io/sig-storage/csi-attacher
# -- Attacher image tag
tag: v4.8.0
resizer:
# -- Kubernetes CSI resizer image repository
repository: registry.k8s.io/sig-storage/csi-resizer
# -- Resizer image tag
tag: v1.13.1
# -- Image pull policy
imagePullPolicy: IfNotPresent
# -- Labels to add to the CSI CephFS Deployments and DaemonSets Pods
cephfsPodLabels: #"key1=value1,key2=value2"
# -- Labels to add to the CSI NFS Deployments and DaemonSets Pods
nfsPodLabels: #"key1=value1,key2=value2"
# -- Labels to add to the CSI RBD Deployments and DaemonSets Pods
rbdPodLabels: #"key1=value1,key2=value2"
csiAddons:
# -- Enable CSIAddons
enabled: false
# -- CSIAddons sidecar image repository
repository: quay.io/csiaddons/k8s-sidecar
# -- CSIAddons sidecar image tag
tag: v0.12.0
nfs:
# -- Enable the nfs csi driver
enabled: false
topology:
# -- Enable topology based provisioning
enabled: false
# NOTE: the value here serves as an example and needs to be
# updated with node labels that define domains of interest
# -- domainLabels define which node labels to use as domains
# for CSI nodeplugins to advertise their domains
domainLabels:
# - kubernetes.io/hostname
# - topology.kubernetes.io/zone
# - topology.rook.io/rack
# -- Whether to skip any attach operation altogether for CephFS PVCs. See more details
# [here](https://kubernetes-csi.github.io/docs/skip-attach.html#skip-attach-with-csi-driver-object).
# If cephFSAttachRequired is set to false it skips the volume attachments and makes the creation
# of pods using the CephFS PVC fast. **WARNING** It's highly discouraged to use this for
# CephFS RWO volumes. Refer to this [issue](https://github.com/kubernetes/kubernetes/issues/103305) for more details.
cephFSAttachRequired: true
# -- Whether to skip any attach operation altogether for RBD PVCs. See more details
# [here](https://kubernetes-csi.github.io/docs/skip-attach.html#skip-attach-with-csi-driver-object).
# If set to false it skips the volume attachments and makes the creation of pods using the RBD PVC fast.
# **WARNING** It's highly discouraged to use this for RWO volumes as it can cause data corruption.
# csi-addons operations like Reclaimspace and PVC Keyrotation will also not be supported if set
# to false since we'll have no VolumeAttachments to determine which node the PVC is mounted on.
# Refer to this [issue](https://github.com/kubernetes/kubernetes/issues/103305) for more details.
rbdAttachRequired: true
# -- Whether to skip any attach operation altogether for NFS PVCs. See more details
# [here](https://kubernetes-csi.github.io/docs/skip-attach.html#skip-attach-with-csi-driver-object).
# If cephFSAttachRequired is set to false it skips the volume attachments and makes the creation
# of pods using the NFS PVC fast. **WARNING** It's highly discouraged to use this for
# NFS RWO volumes. Refer to this [issue](https://github.com/kubernetes/kubernetes/issues/103305) for more details.
nfsAttachRequired: true
# -- Enable discovery daemon
enableDiscoveryDaemon: false
# -- Set the discovery daemon device discovery interval (defaults to 60m)
discoveryDaemonInterval: 60m
# -- The timeout for ceph commands in seconds
cephCommandsTimeoutSeconds: "15"
# -- If true, run rook operator on the host network
useOperatorHostNetwork:
# -- If true, scale down the rook operator.
# This is useful for administrative actions where the rook operator must be scaled down, while using gitops style tooling
# to deploy your helm charts.
scaleDownOperator: false
## Rook Discover configuration
## toleration: NoSchedule, PreferNoSchedule or NoExecute
## tolerationKey: Set this to the specific key of the taint to tolerate
## tolerations: Array of tolerations in YAML format which will be added to agent deployment
## nodeAffinity: Set to labels of the node to match
discover:
# -- Toleration for the discover pods.
# Options: `NoSchedule`, `PreferNoSchedule` or `NoExecute`
toleration:
# -- The specific key of the taint to tolerate
tolerationKey:
# -- Array of tolerations in YAML format which will be added to discover deployment
tolerations:
# - key: key
# operator: Exists
# effect: NoSchedule
# -- The node labels for affinity of `discover-agent` [^1]
nodeAffinity:
# key1=value1,value2; key2=value3
#
# or
#
# requiredDuringSchedulingIgnoredDuringExecution:
# nodeSelectorTerms:
# - matchExpressions:
# - key: storage-node
# operator: Exists
# -- Labels to add to the discover pods
podLabels: # "key1=value1,key2=value2"
# -- Add resources to discover daemon pods
resources:
# - limits:
# memory: 512Mi
# - requests:
# cpu: 100m
# memory: 128Mi
# -- Custom label to identify node hostname. If not set `kubernetes.io/hostname` will be used
customHostnameLabel:
# -- Runs Ceph Pods as privileged to be able to write to `hostPaths` in OpenShift with SELinux restrictions.
hostpathRequiresPrivileged: false
# -- Whether to create all Rook pods to run on the host network, for example in environments where a CNI is not enabled
enforceHostNetwork: false
# -- Disable automatic orchestration when new devices are discovered.
disableDeviceHotplug: false
# -- The revision history limit for all pods created by Rook. If blank, the K8s default is 10.
revisionHistoryLimit:
# -- Blacklist certain disks according to the regex provided.
discoverDaemonUdev:
# -- imagePullSecrets option allows pulling docker images from a private docker registry. The option will be passed to all service accounts.
imagePullSecrets:
# - name: my-registry-secret
# -- Whether the OBC provisioner should watch the operator namespace; if not, the namespace of the cluster will be used
enableOBCWatchOperatorNamespace: true
# -- Specify the prefix for the OBC provisioner in place of the cluster namespace
# @default -- `ceph cluster namespace`
obcProvisionerNamePrefix:
# -- Many OBC additional config fields may be risky for administrators to allow users control over.
# The safe and default-allowed fields are 'maxObjects' and 'maxSize'.
# Other fields should be considered risky. To allow all additional configs, use this value:
# "maxObjects,maxSize,bucketMaxObjects,bucketMaxSize,bucketPolicy,bucketLifecycle,bucketOwner"
# @default -- "maxObjects,maxSize"
obcAllowAdditionalConfigFields: "maxObjects,maxSize"
monitoring:
# -- Enable monitoring. Requires Prometheus to be pre-installed.
# Enabling will also create RBAC rules to allow Operator to create ServiceMonitors
enabled: false

View File

@@ -1,20 +1,145 @@
use std::{
net::{IpAddr, Ipv4Addr},
sync::Arc,
};
use cidr::Ipv4Cidr;
use harmony::{
hardware::{FirewallGroup, HostCategory, Location, PhysicalHost, SwitchGroup},
infra::opnsense::OPNSenseManagementInterface,
inventory::Inventory,
maestro::Maestro,
modules::{dummy::{ErrorScore, PanicScore, SuccessScore}, k8s::deployment::K8sDeploymentScore},
topology::HAClusterTopology,
modules::{
http::HttpScore,
ipxe::IpxeScore,
okd::{
bootstrap_dhcp::OKDBootstrapDhcpScore,
bootstrap_load_balancer::OKDBootstrapLoadBalancerScore, dhcp::OKDDhcpScore,
dns::OKDDnsScore,
},
tftp::TftpScore,
},
topology::{LogicalHost, UnmanagedRouter, Url},
};
use harmony_macros::{ip, mac_address};
#[tokio::main]
async fn main() {
let inventory = Inventory::autoload();
let topology = HAClusterTopology::autoload();
let mut maestro = Maestro::new(inventory, topology);
let firewall = harmony::topology::LogicalHost {
ip: ip!("192.168.33.1"),
name: String::from("fw0"),
};
let opnsense = Arc::new(
harmony::infra::opnsense::OPNSenseFirewall::new(firewall, None, "root", "opnsense").await,
);
let lan_subnet = Ipv4Addr::new(192, 168, 33, 0);
let gateway_ipv4 = Ipv4Addr::new(192, 168, 33, 1);
let gateway_ip = IpAddr::V4(gateway_ipv4);
let topology = harmony::topology::HAClusterTopology {
domain_name: "ncd0.harmony.mcd".to_string(), // TODO: this must be set manually to match
// the value configured on the opnsense firewall
router: Arc::new(UnmanagedRouter::new(
gateway_ip,
Ipv4Cidr::new(lan_subnet, 24).unwrap(),
)),
load_balancer: opnsense.clone(),
firewall: opnsense.clone(),
tftp_server: opnsense.clone(),
http_server: opnsense.clone(),
dhcp_server: opnsense.clone(),
dns_server: opnsense.clone(),
control_plane: vec![
LogicalHost {
ip: ip!("192.168.33.20"),
name: "cp0".to_string(),
},
LogicalHost {
ip: ip!("192.168.33.21"),
name: "cp1".to_string(),
},
LogicalHost {
ip: ip!("192.168.33.22"),
name: "cp2".to_string(),
},
],
bootstrap_host: LogicalHost {
ip: ip!("192.168.33.66"),
name: "bootstrap".to_string(),
},
workers: vec![
LogicalHost {
ip: ip!("192.168.33.30"),
name: "wk0".to_string(),
},
LogicalHost {
ip: ip!("192.168.33.31"),
name: "wk1".to_string(),
},
LogicalHost {
ip: ip!("192.168.33.32"),
name: "wk2".to_string(),
},
],
switch: vec![],
};
let inventory = Inventory {
location: Location::new("I am mobile".to_string(), "earth".to_string()),
switch: SwitchGroup::from([]),
firewall: FirewallGroup::from([PhysicalHost::empty(HostCategory::Firewall)
.management(Arc::new(OPNSenseManagementInterface::new()))]),
storage_host: vec![],
worker_host: vec![
PhysicalHost::empty(HostCategory::Server)
.mac_address(mac_address!("C4:62:37:02:61:0F")),
PhysicalHost::empty(HostCategory::Server)
.mac_address(mac_address!("C4:62:37:02:61:26")),
// TODO: create the iPXE file, set the DNS static leases,
// bootstrap the nodes, start the Ceph cluster,
// then try installing the LAMP score
PhysicalHost::empty(HostCategory::Server)
.mac_address(mac_address!("C4:62:37:02:61:70")),
],
control_plane_host: vec![
PhysicalHost::empty(HostCategory::Server)
.mac_address(mac_address!("C4:62:37:02:60:FA")),
PhysicalHost::empty(HostCategory::Server)
.mac_address(mac_address!("C4:62:37:02:61:1A")),
PhysicalHost::empty(HostCategory::Server)
.mac_address(mac_address!("C4:62:37:01:BC:68")),
],
};
// TODO: regroup smaller scores into a larger one, e.g.:
// let okd_bootstrap_preparation();
let bootstrap_dhcp_score = OKDBootstrapDhcpScore::new(&topology, &inventory);
let bootstrap_load_balancer_score = OKDBootstrapLoadBalancerScore::new(&topology);
let dhcp_score = OKDDhcpScore::new(&topology, &inventory);
let dns_score = OKDDnsScore::new(&topology);
let load_balancer_score =
harmony::modules::okd::load_balancer::OKDLoadBalancerScore::new(&topology);
let tftp_score = TftpScore::new(Url::LocalFolder("./data/watchguard/tftpboot".to_string()));
let http_score = HttpScore::new(Url::LocalFolder(
"./data/watchguard/pxe-http-files".to_string(),
));
let ipxe_score = IpxeScore::new();
let mut maestro = Maestro::initialize(inventory, topology).await.unwrap();
maestro.register_all(vec![
Box::new(SuccessScore {}),
Box::new(ErrorScore {}),
Box::new(PanicScore {}),
Box::new(dns_score),
Box::new(bootstrap_dhcp_score),
Box::new(bootstrap_load_balancer_score),
Box::new(load_balancer_score),
Box::new(tftp_score),
Box::new(http_score),
Box::new(ipxe_score),
Box::new(dhcp_score),
]);
harmony_tui::init(maestro).await.unwrap();
}

View File

@@ -12,7 +12,7 @@ use harmony::{
modules::{
dummy::{ErrorScore, PanicScore, SuccessScore},
http::HttpScore,
okd::{dhcp::OKDDhcpScore, dns::OKDDnsScore},
okd::{dhcp::OKDDhcpScore, dns::OKDDnsScore, load_balancer::OKDLoadBalancerScore},
opnsense::OPNsenseShellCommandScore,
tftp::TftpScore,
},
@@ -78,14 +78,13 @@ async fn main() {
let dhcp_score = OKDDhcpScore::new(&topology, &inventory);
let dns_score = OKDDnsScore::new(&topology);
let load_balancer_score =
harmony::modules::okd::load_balancer::OKDLoadBalancerScore::new(&topology);
let load_balancer_score = OKDLoadBalancerScore::new(&topology);
let tftp_score = TftpScore::new(Url::LocalFolder("./data/watchguard/tftpboot".to_string()));
let http_score = HttpScore::new(Url::LocalFolder(
"./data/watchguard/pxe-http-files".to_string(),
));
let mut maestro = Maestro::new(inventory, topology);
let mut maestro = Maestro::initialize(inventory, topology).await.unwrap();
maestro.register_all(vec![
Box::new(dns_score),
Box::new(dhcp_score),

View File

@@ -1,23 +1,70 @@
use std::net::{SocketAddr, SocketAddrV4};
use harmony::{
inventory::Inventory,
maestro::Maestro,
modules::{
dns::DnsScore,
dummy::{ErrorScore, PanicScore, SuccessScore},
k8s::deployment::K8sDeploymentScore,
load_balancer::LoadBalancerScore,
},
topology::{
BackendServer, DummyInfra, HealthCheck, HttpMethod, HttpStatusCode, LoadBalancerService,
},
topology::HAClusterTopology,
};
use harmony_macros::ipv4;
#[tokio::main]
async fn main() {
let inventory = Inventory::autoload();
let topology = HAClusterTopology::autoload();
let mut maestro = Maestro::new(inventory, topology);
let topology = DummyInfra {};
let mut maestro = Maestro::initialize(inventory, topology).await.unwrap();
maestro.register_all(vec![
Box::new(SuccessScore {}),
Box::new(ErrorScore {}),
Box::new(PanicScore {}),
Box::new(DnsScore::new(vec![], None)),
Box::new(build_large_score()),
]);
harmony_tui::init(maestro).await.unwrap();
}
fn build_large_score() -> LoadBalancerScore {
let backend_server = BackendServer {
address: "192.168.0.0".to_string(),
port: 342,
};
let lb_service = LoadBalancerService {
backend_servers: vec![
backend_server.clone(),
backend_server.clone(),
backend_server.clone(),
],
listening_port: SocketAddr::V4(SocketAddrV4::new(ipv4!("192.168.0.0"), 49387)),
health_check: Some(HealthCheck::HTTP(
"/some_long_ass_path_to_see_how_it_is_displayed_but_it_has_to_be_even_longer"
.to_string(),
HttpMethod::GET,
HttpStatusCode::Success2xx,
)),
};
LoadBalancerScore {
public_services: vec![
lb_service.clone(),
lb_service.clone(),
lb_service.clone(),
lb_service.clone(),
lb_service.clone(),
lb_service.clone(),
],
private_services: vec![
lb_service.clone(),
lb_service.clone(),
lb_service.clone(),
lb_service.clone(),
lb_service.clone(),
lb_service.clone(),
],
}
}

View File

@@ -7,25 +7,47 @@ license.workspace = true
[dependencies]
libredfish = "0.1.1"
reqwest = {version = "0.11", features = ["blocking", "json"] }
reqwest = { version = "0.11", features = ["blocking", "json"] }
russh = "0.45.0"
rust-ipmi = "0.1.1"
semver = "1.0.23"
serde = { version = "1.0.209", features = ["derive"] }
serde_json = "1.0.127"
tokio = { workspace = true }
derive-new = { workspace = true }
log = { workspace = true }
env_logger = { workspace = true }
async-trait = { workspace = true }
cidr = { workspace = true }
tokio.workspace = true
derive-new.workspace = true
log.workspace = true
env_logger.workspace = true
async-trait.workspace = true
cidr.workspace = true
opnsense-config = { path = "../opnsense-config" }
opnsense-config-xml = { path = "../opnsense-config-xml" }
harmony_macros = { path = "../harmony_macros" }
harmony_types = { path = "../harmony_types" }
uuid = { workspace = true }
url = { workspace = true }
kube = { workspace = true }
k8s-openapi = { workspace = true }
serde_yaml = { workspace = true }
http = { workspace = true }
uuid.workspace = true
url.workspace = true
kube.workspace = true
k8s-openapi.workspace = true
serde_yaml.workspace = true
http.workspace = true
serde-value.workspace = true
inquire.workspace = true
helm-wrapper-rs = "0.4.0"
non-blank-string-rs = "1.0.4"
k3d-rs = { path = "../k3d" }
directories = "6.0.0"
lazy_static = "1.5.0"
dockerfile_builder = "0.1.5"
temp-file = "0.1.9"
convert_case.workspace = true
email_address = "0.2.9"
fqdn = { version = "0.4.6", features = [
"domain-label-cannot-start-or-end-with-hyphen",
"domain-label-length-limited-to-63",
"domain-name-without-special-chars",
"domain-name-length-limited-to-255",
"punycode",
"serde",
] }
temp-dir = "0.1.14"
typetag = "0.2.20"
dyn-clone = "1.0.19"

View File

@@ -0,0 +1,13 @@
use lazy_static::lazy_static;
use std::path::PathBuf;
lazy_static! {
pub static ref HARMONY_CONFIG_DIR: PathBuf = directories::BaseDirs::new()
.unwrap()
.data_dir()
.join("harmony");
pub static ref REGISTRY_URL: String =
std::env::var("HARMONY_REGISTRY_URL").unwrap_or_else(|_| "hub.nationtech.io".to_string());
pub static ref REGISTRY_PROJECT: String =
std::env::var("HARMONY_REGISTRY_PROJECT").unwrap_or_else(|_| "harmony".to_string());
}
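// A usage sketch (assumed, not part of this file): the registry endpoint can be
// overridden at runtime, e.g. `HARMONY_REGISTRY_URL=registry.example.com harmony`
// (binary name assumed), and the two statics joined as:
// let registry = format!("{}/{}", *REGISTRY_URL, *REGISTRY_PROJECT);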

View File

@@ -2,6 +2,8 @@ use std::sync::Arc;
use derive_new::new;
use harmony_types::net::MacAddress;
use serde::{Serialize, Serializer, ser::SerializeStruct};
use serde_value::Value;
pub type HostGroup = Vec<PhysicalHost>;
pub type SwitchGroup = Vec<Switch>;
@@ -75,10 +77,7 @@ impl PhysicalHost {
}
pub fn label(mut self, name: String, value: String) -> Self {
self.labels.push(Label {
_name: name,
_value: value,
});
self.labels.push(Label { name, value });
self
}
@@ -88,7 +87,49 @@ impl PhysicalHost {
}
}
#[derive(new)]
// Custom Serialize implementation for PhysicalHost
impl Serialize for PhysicalHost {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
// Determine the number of fields
let mut num_fields = 5; // category, network, storage, labels, management
if self.memory_size.is_some() {
num_fields += 1;
}
if self.cpu_count.is_some() {
num_fields += 1;
}
// Create a serialization structure
let mut state = serializer.serialize_struct("PhysicalHost", num_fields)?;
// Serialize the standard fields
state.serialize_field("category", &self.category)?;
state.serialize_field("network", &self.network)?;
state.serialize_field("storage", &self.storage)?;
state.serialize_field("labels", &self.labels)?;
// Serialize optional fields
if let Some(memory) = self.memory_size {
state.serialize_field("memory_size", &memory)?;
}
if let Some(cpu) = self.cpu_count {
state.serialize_field("cpu_count", &cpu)?;
}
let mgmt_data = self.management.serialize_management();
// pub management: Arc<dyn ManagementInterface>,
// Handle management interface - either as a field or flattened
state.serialize_field("management", &mgmt_data)?;
state.end()
}
}
#[derive(new, Serialize)]
pub struct ManualManagementInterface;
impl ManagementInterface for ManualManagementInterface {
@@ -97,11 +138,12 @@ impl ManagementInterface for ManualManagementInterface {
}
fn get_supported_protocol_names(&self) -> String {
todo!()
// todo!()
"none".to_string()
}
}
pub trait ManagementInterface: Send + Sync {
pub trait ManagementInterface: Send + Sync + SerializableManagement {
fn boot_to_pxe(&self);
fn get_supported_protocol_names(&self) -> String;
}
@@ -115,21 +157,49 @@ impl std::fmt::Debug for dyn ManagementInterface {
}
}
#[derive(Debug, Clone)]
// Define a trait for serializing management interfaces
pub trait SerializableManagement {
fn serialize_management(&self) -> Value;
}
// Provide a blanket implementation for all types that implement both ManagementInterface and Serialize
impl<T> SerializableManagement for T
where
T: ManagementInterface + Serialize,
{
fn serialize_management(&self) -> Value {
serde_value::to_value(self).expect("ManagementInterface should serialize successfully")
}
}
#[derive(Debug, Clone, Serialize)]
pub enum HostCategory {
Server,
Firewall,
Switch,
}
#[derive(Debug, new, Clone)]
#[derive(Debug, new, Clone, Serialize)]
pub struct NetworkInterface {
pub name: Option<String>,
pub mac_address: MacAddress,
pub speed: Option<u64>,
}
#[derive(Debug, new, Clone)]
#[cfg(test)]
use harmony_macros::mac_address;
#[cfg(test)]
impl NetworkInterface {
pub fn dummy() -> Self {
Self {
name: Some(String::new()),
mac_address: mac_address!("00:00:00:00:00:00"),
speed: Some(0),
}
}
}
#[derive(Debug, new, Clone, Serialize)]
pub enum StorageConnectionType {
Sata3g,
Sata6g,
@@ -137,13 +207,13 @@ pub enum StorageConnectionType {
Sas12g,
PCIE,
}
#[derive(Debug, Clone)]
#[derive(Debug, Clone, Serialize)]
pub enum StorageKind {
SSD,
NVME,
HDD,
}
#[derive(Debug, new, Clone)]
#[derive(Debug, new, Clone, Serialize)]
pub struct Storage {
pub connection: StorageConnectionType,
pub kind: StorageKind,
@@ -151,20 +221,33 @@ pub struct Storage {
pub serial: String,
}
#[derive(Debug, Clone)]
#[cfg(test)]
impl Storage {
pub fn dummy() -> Self {
Self {
connection: StorageConnectionType::Sata3g,
kind: StorageKind::SSD,
size: 0,
serial: String::new(),
}
}
}
#[derive(Debug, Clone, Serialize)]
pub struct Switch {
_interface: Vec<NetworkInterface>,
_management_interface: NetworkInterface,
}
#[derive(Debug, new, Clone)]
#[derive(Debug, new, Clone, Serialize)]
pub struct Label {
_name: String,
_value: String,
pub name: String,
pub value: String,
}
pub type Address = String;
#[derive(new, Debug)]
#[derive(new, Debug, Serialize)]
pub struct Location {
pub address: Address,
pub name: String,
@@ -178,3 +261,158 @@ impl Location {
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use serde::{Deserialize, Serialize};
use std::sync::Arc;
// Mock implementation of ManagementInterface
#[derive(Debug, Clone, Serialize, Deserialize)]
struct MockHPIlo {
ip: String,
username: String,
password: String,
firmware_version: String,
}
impl ManagementInterface for MockHPIlo {
fn boot_to_pxe(&self) {}
fn get_supported_protocol_names(&self) -> String {
String::new()
}
}
// Another mock implementation
#[derive(Debug, Clone, Serialize, Deserialize)]
struct MockDellIdrac {
hostname: String,
port: u16,
api_token: String,
}
impl ManagementInterface for MockDellIdrac {
fn boot_to_pxe(&self) {}
fn get_supported_protocol_names(&self) -> String {
String::new()
}
}
#[test]
fn test_serialize_physical_host_with_hp_ilo() {
// Create a PhysicalHost with HP iLO management
let host = PhysicalHost {
category: HostCategory::Server,
network: vec![NetworkInterface::dummy()],
management: Arc::new(MockHPIlo {
ip: "192.168.1.100".to_string(),
username: "admin".to_string(),
password: "password123".to_string(),
firmware_version: "2.5.0".to_string(),
}),
storage: vec![Storage::dummy()],
labels: vec![Label::new("datacenter".to_string(), "us-east".to_string())],
memory_size: Some(64_000_000),
cpu_count: Some(16),
};
// Serialize to JSON
let json = serde_json::to_string(&host).expect("Failed to serialize host");
// Check that the serialized JSON contains the HP iLO details
assert!(json.contains("192.168.1.100"));
assert!(json.contains("admin"));
assert!(json.contains("password123"));
assert!(json.contains("firmware_version"));
assert!(json.contains("2.5.0"));
// Parse back to verify structure (not the exact management interface)
let parsed: serde_json::Value = serde_json::from_str(&json).expect("Failed to parse JSON");
// Verify basic structure
assert_eq!(parsed["cpu_count"], 16);
assert_eq!(parsed["memory_size"], 64_000_000);
assert_eq!(parsed["network"][0]["name"], "");
}
#[test]
fn test_serialize_physical_host_with_dell_idrac() {
// Create a PhysicalHost with Dell iDRAC management
let host = PhysicalHost {
category: HostCategory::Server,
network: vec![NetworkInterface::dummy()],
management: Arc::new(MockDellIdrac {
hostname: "idrac-server01".to_string(),
port: 443,
api_token: "abcdef123456".to_string(),
}),
storage: vec![Storage::dummy()],
labels: vec![Label::new("env".to_string(), "production".to_string())],
memory_size: Some(128_000_000),
cpu_count: Some(32),
};
// Serialize to JSON
let json = serde_json::to_string(&host).expect("Failed to serialize host");
// Check that the serialized JSON contains the Dell iDRAC details
assert!(json.contains("idrac-server01"));
assert!(json.contains("443"));
assert!(json.contains("abcdef123456"));
// Parse back to verify structure
let parsed: serde_json::Value = serde_json::from_str(&json).expect("Failed to parse JSON");
// Verify basic structure
assert_eq!(parsed["cpu_count"], 32);
assert_eq!(parsed["memory_size"], 128_000_000);
assert_eq!(parsed["storage"][0]["path"], serde_json::Value::Null);
}
#[test]
fn test_different_management_implementations_produce_valid_json() {
// Create hosts with different management implementations
let host1 = PhysicalHost {
category: HostCategory::Server,
network: vec![],
management: Arc::new(MockHPIlo {
ip: "10.0.0.1".to_string(),
username: "root".to_string(),
password: "secret".to_string(),
firmware_version: "3.0.0".to_string(),
}),
storage: vec![],
labels: vec![],
memory_size: None,
cpu_count: None,
};
let host2 = PhysicalHost {
category: HostCategory::Server,
network: vec![],
management: Arc::new(MockDellIdrac {
hostname: "server02-idrac".to_string(),
port: 8443,
api_token: "token123".to_string(),
}),
storage: vec![],
labels: vec![],
memory_size: None,
cpu_count: None,
};
// Both should serialize successfully
let json1 = serde_json::to_string(&host1).expect("Failed to serialize host1");
let json2 = serde_json::to_string(&host2).expect("Failed to serialize host2");
// Both JSONs should be valid and parseable
let _: serde_json::Value = serde_json::from_str(&json1).expect("Invalid JSON for host1");
let _: serde_json::Value = serde_json::from_str(&json2).expect("Invalid JSON for host2");
// The JSONs should be different because they contain different management interfaces
assert_ne!(json1, json2);
}
}

View File

@@ -7,7 +7,6 @@ use super::{
data::{Id, Version},
executors::ExecutorError,
inventory::Inventory,
topology::Topology,
};
pub enum InterpretName {
@@ -16,9 +15,11 @@ pub enum InterpretName {
LoadBalancer,
Tftp,
Http,
Ipxe,
Dummy,
Panic,
OPNSense,
K3dInstallation,
}
impl std::fmt::Display for InterpretName {
@@ -29,15 +30,17 @@ impl std::fmt::Display for InterpretName {
InterpretName::LoadBalancer => f.write_str("LoadBalancer"),
InterpretName::Tftp => f.write_str("Tftp"),
InterpretName::Http => f.write_str("Http"),
InterpretName::Ipxe => f.write_str("iPXE"),
InterpretName::Dummy => f.write_str("Dummy"),
InterpretName::Panic => f.write_str("Panic"),
InterpretName::OPNSense => f.write_str("OPNSense"),
InterpretName::K3dInstallation => f.write_str("K3dInstallation"),
}
}
}
#[async_trait]
pub trait Interpret<T: Topology>: std::fmt::Debug + Send {
pub trait Interpret<T>: std::fmt::Debug + Send {
async fn execute(&self, inventory: &Inventory, topology: &T)
-> Result<Outcome, InterpretError>;
fn get_name(&self) -> InterpretName;

View File

@@ -1,9 +1,9 @@
use std::sync::{Arc, RwLock};
use std::sync::{Arc, Mutex, RwLock};
use log::info;
use log::{info, warn};
use super::{
interpret::{InterpretError, Outcome},
interpret::{InterpretError, InterpretStatus, Outcome},
inventory::Inventory,
score::Score,
topology::Topology,
@@ -15,6 +15,7 @@ pub struct Maestro<T: Topology> {
inventory: Inventory,
topology: T,
scores: Arc<RwLock<ScoreVec<T>>>,
topology_preparation_result: Mutex<Option<Outcome>>,
}
impl<T: Topology> Maestro<T> {
@@ -23,32 +24,32 @@ impl<T: Topology> Maestro<T> {
inventory,
topology,
scores: Arc::new(RwLock::new(Vec::new())),
topology_preparation_result: None.into(),
}
}
// Load the inventory and topology from the environment.
// This function is able to discover the context that it is running in, such as k8s clusters, aws cloud, linux host, etc.
// When the HARMONY_TOPOLOGY environment variable is not set, it will default to install k3s
// locally (lazily, if not installed yet, when the first execution occurs) and use that as a topology
// So, by default, the inventory is a single host that the binary is running on, and the
// topology is a single node k3s
//
// By default :
// - Linux => k3s
// - macos, windows => docker compose
//
// To run more complex cases like OKDHACluster, either provide the default target in the
// harmony infrastructure as code or as an environment variable
pub fn load_from_env() -> Self {
// Load env var HARMONY_TOPOLOGY
match std::env::var("HARMONY_TOPOLOGY") {
Ok(_) => todo!(),
Err(_) => todo!(),
}
pub async fn initialize(inventory: Inventory, topology: T) -> Result<Self, InterpretError> {
let instance = Self::new(inventory, topology);
instance.prepare_topology().await?;
Ok(instance)
}
pub fn start(&mut self) {
info!("Starting Maestro");
/// Ensures the associated Topology is ready for operations.
/// Delegates the readiness check and potential setup actions to the Topology.
pub async fn prepare_topology(&self) -> Result<Outcome, InterpretError> {
info!("Ensuring topology '{}' is ready...", self.topology.name());
let outcome = self.topology.ensure_ready().await?;
info!(
"Topology '{}' readiness check complete: {}",
self.topology.name(),
outcome.status
);
self.topology_preparation_result
.lock()
.unwrap()
.replace(outcome.clone());
Ok(outcome)
}
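// Hedged usage sketch (hypothetical caller): initialize() runs prepare_topology()
// before any score is interpreted, so the readiness warning in interpret() below
// never fires on this path.
//
// let maestro = Maestro::initialize(Inventory::autoload(), LocalhostTopology::new()).await?;
// maestro.interpret(Box::new(SuccessScore)).await?;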
pub fn register_all(&mut self, mut scores: ScoreVec<T>) {
@@ -56,7 +57,26 @@ impl<T: Topology> Maestro<T> {
score_mut.append(&mut scores);
}
fn is_topology_initialized(&self) -> bool {
let result = self.topology_preparation_result.lock().unwrap();
matches!(
result.as_ref().map(|outcome| &outcome.status),
Some(InterpretStatus::SUCCESS)
)
}
pub async fn interpret(&self, score: Box<dyn Score<T>>) -> Result<Outcome, InterpretError> {
if !self.is_topology_initialized() {
warn!(
"Launching interpret for score {} but Topology {} is not fully initialized!",
score.name(),
self.topology.name(),
);
}
info!("Running score {score:?}");
let interpret = score.create_interpret();
info!("Launching interpret {interpret:?}");

View File

@@ -1,3 +1,4 @@
pub mod config;
pub mod data;
pub mod executors;
pub mod filter;

View File

@@ -1,15 +1,37 @@
use std::collections::BTreeMap;
use serde::Serialize;
use serde_value::Value;
use super::{interpret::Interpret, topology::Topology};
pub trait Score<T: Topology>: std::fmt::Debug + Send + Sync + CloneBoxScore<T> {
pub trait Score<T: Topology>:
std::fmt::Debug + ScoreToString<T> + Send + Sync + CloneBoxScore<T> + SerializeScore<T>
{
fn create_interpret(&self) -> Box<dyn Interpret<T>>;
fn name(&self) -> String;
}
pub trait SerializeScore<T: Topology> {
fn serialize(&self) -> Value;
}
impl<'de, S, T> SerializeScore<T> for S
where
T: Topology,
S: Score<T> + Serialize,
{
fn serialize(&self) -> Value {
// TODO not sure if this is the right place to handle the error or it should bubble
// up?
serde_value::to_value(&self).expect("Score should serialize successfully")
}
}
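// Consequence of the blanket impl above: any Score that also derives serde::Serialize
// gets SerializeScore for free. Hedged example, mirroring the ScoreToString test below:
//
// let value = <SuccessScore as SerializeScore<HAClusterTopology>>::serialize(&SuccessScore);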
pub trait CloneBoxScore<T: Topology> {
fn clone_box(&self) -> Box<dyn Score<T>>;
}
impl<S, T> CloneBoxScore<T> for S
where
T: Topology,
@@ -19,3 +41,191 @@ where
Box::new(self.clone())
}
}
pub trait ScoreToString<T: Topology> {
fn print_score_details(&self) -> String;
fn format_value_as_string(&self, val: &Value, indent: usize) -> String;
fn format_map(&self, map: &BTreeMap<Value, Value>, indent: usize) -> String;
fn wrap_or_truncate(&self, s: &str, width: usize) -> Vec<String>;
}
impl<S, T> ScoreToString<T> for S
where
T: Topology,
S: Score<T> + 'static,
{
fn print_score_details(&self) -> String {
let mut output = String::new();
output += "\n";
output += &self.format_value_as_string(&self.serialize(), 0);
output += "\n";
output
}
fn format_map(&self, map: &BTreeMap<Value, Value>, indent: usize) -> String {
let pad = " ".repeat(indent * 2);
let mut output = String::new();
output += &format!(
"{}+--------------------------+--------------------------------------------------+\n",
pad
);
output += &format!("{}| {:<24} | {:<48} |\n", pad, "score_name", self.name());
output += &format!(
"{}+--------------------------+--------------------------------------------------+\n",
pad
);
for (k, v) in map {
let key_str = match k {
Value::String(s) => s.clone(),
other => format!("{:?}", other),
};
let formatted_val = self.format_value_as_string(v, indent + 1);
let lines = formatted_val.lines().map(|line| line.trim_start());
let wrapped_lines: Vec<_> = lines
.flat_map(|line| self.wrap_or_truncate(line.trim_start(), 48))
.collect();
if let Some(first) = wrapped_lines.first() {
output += &format!("{}| {:<24} | {:<48} |\n", pad, key_str, first);
for line in &wrapped_lines[1..] {
output += &format!("{}| {:<24} | {:<48} |\n", pad, "", line);
}
}
// let first_line = lines.next().unwrap_or("");
// output += &format!("{}| {:<24} | {:<48} |\n", pad, key_str, first_line);
//
// for line in lines {
// output += &format!("{}| {:<24} | {:<48} |\n", pad, "", line);
// }
}
output += &format!(
"{}+--------------------------+--------------------------------------------------+\n\n",
pad
);
output
}
fn wrap_or_truncate(&self, s: &str, width: usize) -> Vec<String> {
let mut lines = Vec::new();
let mut current = s;
while !current.is_empty() {
if current.len() <= width {
lines.push(current.to_string());
break;
}
// Try to wrap at whitespace if possible
let mut split_index = current[..width].rfind(' ').unwrap_or(width);
if split_index == 0 {
split_index = width;
}
lines.push(current[..split_index].trim_end().to_string());
current = current[split_index..].trim_start();
}
lines
}
fn format_value_as_string(&self, val: &Value, indent: usize) -> String {
let pad = " ".repeat(indent * 2);
let mut output = String::new();
match val {
Value::Bool(b) => output += &format!("{}{}\n", pad, b),
Value::U8(u) => output += &format!("{}{}\n", pad, u),
Value::U16(u) => output += &format!("{}{}\n", pad, u),
Value::U32(u) => output += &format!("{}{}\n", pad, u),
Value::U64(u) => output += &format!("{}{}\n", pad, u),
Value::I8(i) => output += &format!("{}{}\n", pad, i),
Value::I16(i) => output += &format!("{}{}\n", pad, i),
Value::I32(i) => output += &format!("{}{}\n", pad, i),
Value::I64(i) => output += &format!("{}{}\n", pad, i),
Value::F32(f) => output += &format!("{}{}\n", pad, f),
Value::F64(f) => output += &format!("{}{}\n", pad, f),
Value::Char(c) => output += &format!("{}{}\n", pad, c),
Value::String(s) => output += &format!("{}{:<48}\n", pad, s),
Value::Unit => output += &format!("{}<unit>\n", pad),
Value::Bytes(bytes) => output += &format!("{}{:?}\n", pad, bytes),
Value::Option(opt) => match opt {
Some(inner) => {
output += &format!("{}Option:\n", pad);
output += &self.format_value_as_string(inner, indent + 1);
}
None => output += &format!("{}None\n", pad),
},
Value::Newtype(inner) => {
output += &format!("{}Newtype:\n", pad);
output += &self.format_value_as_string(inner, indent + 1);
}
Value::Seq(seq) => {
if seq.is_empty() {
output += &format!("{}[]\n", pad);
} else {
output += &format!("{}[\n", pad);
for item in seq {
output += &self.format_value_as_string(item, indent + 1);
}
output += &format!("{}]\n", pad);
}
}
Value::Map(map) => {
if map.is_empty() {
output += &format!("{}<empty map>\n", pad);
} else if indent == 0 {
output += &self.format_map(map, indent);
} else {
for (k, v) in map {
let key_str = match k {
Value::String(s) => s.clone(),
other => format!("{:?}", other),
};
let val_str = self
.format_value_as_string(v, indent + 1)
.trim()
.to_string();
let val_lines: Vec<_> = val_str.lines().collect();
output +=
&format!("{}{}: {}\n", pad, key_str, val_lines.first().unwrap_or(&""));
for line in val_lines.iter().skip(1) {
output += &format!("{} {}\n", pad, line);
}
}
}
}
}
output
}
}
//TODO write test to check that the output is what it should be
//
#[cfg(test)]
mod tests {
use super::*;
use crate::modules::dns::DnsScore;
use crate::topology::HAClusterTopology;
#[test]
fn test_format_values_as_string() {
let dns_score = Box::new(DnsScore::new(vec![], None));
let print_score_output =
<DnsScore as ScoreToString<HAClusterTopology>>::print_score_details(&dns_score);
let expected_empty_dns_score_table = "\n+--------------------------+--------------------------------------------------+\n| score_name | DnsScore |\n+--------------------------+--------------------------------------------------+\n| dns_entries | [] |\n| register_dhcp_leases | None |\n+--------------------------+--------------------------------------------------+\n\n\n";
assert_eq!(print_score_output, expected_empty_dns_score_table);
}
}

View File

@@ -1,8 +1,11 @@
use async_trait::async_trait;
use harmony_macros::ip;
use harmony_types::net::MacAddress;
use log::info;
use crate::executors::ExecutorError;
use crate::interpret::InterpretError;
use crate::interpret::Outcome;
use super::DHCPStaticEntry;
use super::DhcpServer;
@@ -12,16 +15,16 @@ use super::DnsServer;
use super::Firewall;
use super::HttpServer;
use super::IpAddress;
use super::K8sclient;
use super::LoadBalancer;
use super::LoadBalancerService;
use super::LogicalHost;
use super::OcK8sclient;
use super::Router;
use super::TftpServer;
use super::Topology;
use super::Url;
use super::openshift::OpenshiftClient;
use super::k8s::K8sClient;
use std::sync::Arc;
#[derive(Debug, Clone)]
@@ -40,16 +43,24 @@ pub struct HAClusterTopology {
pub switch: Vec<LogicalHost>,
}
#[async_trait]
impl Topology for HAClusterTopology {
fn name(&self) -> &str {
todo!()
}
async fn ensure_ready(&self) -> Result<Outcome, InterpretError> {
todo!(
"ensure_ready, not entirely sure what it should do here, probably something like verify that the hosts are reachable and all services are up and ready."
)
}
}
#[async_trait]
impl OcK8sclient for HAClusterTopology {
async fn oc_client(&self) -> Result<Arc<OpenshiftClient>, kube::Error> {
Ok(Arc::new(OpenshiftClient::try_default().await?))
impl K8sclient for HAClusterTopology {
async fn k8s_client(&self) -> Result<Arc<K8sClient>, String> {
Ok(Arc::new(
K8sClient::try_default().await.map_err(|e| e.to_string())?,
))
}
}
@@ -157,6 +168,16 @@ impl DhcpServer for HAClusterTopology {
async fn commit_config(&self) -> Result<(), ExecutorError> {
self.dhcp_server.commit_config().await
}
async fn set_filename(&self, filename: &str) -> Result<(), ExecutorError> {
self.dhcp_server.set_filename(filename).await
}
async fn set_filename64(&self, filename64: &str) -> Result<(), ExecutorError> {
self.dhcp_server.set_filename64(filename64).await
}
async fn set_filenameipxe(&self, filenameipxe: &str) -> Result<(), ExecutorError> {
self.dhcp_server.set_filenameipxe(filenameipxe).await
}
}
#[async_trait]
@@ -215,7 +236,20 @@ impl HttpServer for HAClusterTopology {
}
#[derive(Debug)]
struct DummyInfra;
pub struct DummyInfra;
#[async_trait]
impl Topology for DummyInfra {
fn name(&self) -> &str {
todo!()
}
async fn ensure_ready(&self) -> Result<Outcome, InterpretError> {
let dummy_msg = "This is a dummy infrastructure that does nothing";
info!("{dummy_msg}");
Ok(Outcome::success(dummy_msg.to_string()))
}
}
const UNIMPLEMENTED_DUMMY_INFRA: &str = "This is a dummy infrastructure, no operation is supported";
@@ -269,6 +303,15 @@ impl DhcpServer for DummyInfra {
async fn set_boot_filename(&self, _boot_filename: &str) -> Result<(), ExecutorError> {
unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA)
}
async fn set_filename(&self, _filename: &str) -> Result<(), ExecutorError> {
unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA)
}
async fn set_filename64(&self, _filename: &str) -> Result<(), ExecutorError> {
unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA)
}
async fn set_filenameipxe(&self, _filenameipxe: &str) -> Result<(), ExecutorError> {
unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA)
}
fn get_ip(&self) -> IpAddress {
unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA)
}

View File

@@ -0,0 +1 @@
pub trait HelmCommand {}

View File

@@ -1,4 +1,5 @@
use derive_new::new;
use serde::Serialize;
use crate::hardware::PhysicalHost;
@@ -8,7 +9,7 @@ use super::LogicalHost;
///
/// This is the only construct that directly maps a logical host to a physical host.
/// It serves as a bridge between the logical cluster structure and the physical infrastructure.
#[derive(Debug, new, Clone)]
#[derive(Debug, new, Clone, Serialize)]
pub struct HostBinding {
/// Reference to the LogicalHost
pub logical_host: LogicalHost,

View File

@@ -0,0 +1,88 @@
use derive_new::new;
use k8s_openapi::NamespaceResourceScope;
use kube::{
Api, Client, Config, Error, Resource,
api::PostParams,
config::{KubeConfigOptions, Kubeconfig},
};
use log::error;
use serde::de::DeserializeOwned;
#[derive(new)]
pub struct K8sClient {
client: Client,
}
impl K8sClient {
pub async fn try_default() -> Result<Self, Error> {
Ok(Self {
client: Client::try_default().await?,
})
}
pub async fn apply_all<
K: Resource<Scope = NamespaceResourceScope>
+ std::fmt::Debug
+ Sync
+ DeserializeOwned
+ Default
+ serde::Serialize
+ Clone,
>(
&self,
resource: &Vec<K>,
) -> Result<Vec<K>, kube::Error>
where
<K as kube::Resource>::DynamicType: Default,
{
let mut result = vec![];
for r in resource.iter() {
let api: Api<K> = Api::all(self.client.clone());
result.push(api.create(&PostParams::default(), &r).await?);
}
Ok(result)
}
pub async fn apply_namespaced<K>(
&self,
resource: &Vec<K>,
ns: Option<&str>,
) -> Result<Vec<K>, Error>
where
K: Resource<Scope = NamespaceResourceScope>
+ Clone
+ std::fmt::Debug
+ DeserializeOwned
+ serde::Serialize
+ Default,
<K as kube::Resource>::DynamicType: Default,
{
let mut resources = Vec::new();
for r in resource.iter() {
let api: Api<K> = match ns {
Some(ns) => Api::namespaced(self.client.clone(), ns),
None => Api::default_namespaced(self.client.clone()),
};
resources.push(api.create(&PostParams::default(), &r).await?);
}
Ok(resources)
}
pub(crate) async fn from_kubeconfig(path: &str) -> Option<K8sClient> {
let k = match Kubeconfig::read_from(path) {
Ok(k) => k,
Err(e) => {
error!("Failed to load kubeconfig from {path} : {e}");
return None;
}
};
let config = match Config::from_custom_kubeconfig(k, &KubeConfigOptions::default()).await {
Ok(c) => c,
Err(e) => {
error!("Failed to build a client config from {path} : {e}");
return None;
}
};
let client = match Client::try_from(config) {
Ok(c) => c,
Err(e) => {
error!("Failed to create a client from {path} : {e}");
return None;
}
};
Some(K8sClient::new(client))
}
}
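// Hedged usage sketch: `example_apply_configmap` is hypothetical and only shows the
// call shape of apply_namespaced; it assumes a reachable cluster and a default
// kubeconfig.
async fn example_apply_configmap() -> Result<(), Error> {
use k8s_openapi::api::core::v1::ConfigMap;
let client = K8sClient::try_default().await?;
let cm = ConfigMap {
metadata: kube::core::ObjectMeta {
name: Some("harmony-example".to_string()),
..Default::default()
},
..Default::default()
};
// Creates the ConfigMap in the "default" namespace and returns the created objects.
let created = client.apply_namespaced(&vec![cm], Some("default")).await?;
assert_eq!(created.len(), 1);
Ok(())
}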

View File

@@ -0,0 +1,211 @@
use std::{process::Command, sync::Arc};
use async_trait::async_trait;
use inquire::Confirm;
use log::{info, warn};
use tokio::sync::OnceCell;
use crate::{
interpret::{InterpretError, Outcome},
inventory::Inventory,
maestro::Maestro,
modules::k3d::K3DInstallationScore,
topology::LocalhostTopology,
};
use super::{HelmCommand, K8sclient, Topology, k8s::K8sClient};
struct K8sState {
client: Arc<K8sClient>,
source: K8sSource,
message: String,
}
enum K8sSource {
LocalK3d,
Kubeconfig,
}
pub struct K8sAnywhereTopology {
k8s_state: OnceCell<Option<K8sState>>,
}
#[async_trait]
impl K8sclient for K8sAnywhereTopology {
async fn k8s_client(&self) -> Result<Arc<K8sClient>, String> {
let state = match self.k8s_state.get() {
Some(state) => state,
None => return Err("K8s state not initialized yet".to_string()),
};
let state = match state {
Some(state) => state,
None => return Err("K8s client initialized but empty".to_string()),
};
Ok(state.client.clone())
}
}
impl K8sAnywhereTopology {
pub fn new() -> Self {
Self {
k8s_state: OnceCell::new(),
}
}
fn is_helm_available(&self) -> Result<(), String> {
let version_result = Command::new("helm")
.arg("version")
.output()
.map_err(|e| format!("Failed to execute 'helm -version': {}", e))?;
if !version_result.status.success() {
return Err("Failed to run 'helm -version'".to_string());
}
// Print the version output
let version_output = String::from_utf8_lossy(&version_result.stdout);
println!("Helm version: {}", version_output.trim());
Ok(())
}
async fn try_load_system_kubeconfig(&self) -> Option<K8sClient> {
todo!("Use kube-rs default behavior to load system kubeconfig");
}
async fn try_load_kubeconfig(&self, path: &str) -> Option<K8sClient> {
K8sClient::from_kubeconfig(path).await
}
fn get_k3d_installation_score(&self) -> K3DInstallationScore {
K3DInstallationScore::default()
}
async fn try_install_k3d(&self) -> Result<(), InterpretError> {
let maestro = Maestro::initialize(Inventory::autoload(), LocalhostTopology::new()).await?;
let k3d_score = self.get_k3d_installation_score();
maestro.interpret(Box::new(k3d_score)).await?;
Ok(())
}
async fn try_get_or_install_k8s_client(&self) -> Result<Option<K8sState>, InterpretError> {
let k8s_anywhere_config = K8sAnywhereConfig {
kubeconfig: std::env::var("KUBECONFIG").ok(),
use_system_kubeconfig: std::env::var("HARMONY_USE_SYSTEM_KUBECONFIG")
.map_or_else(|_| false, |v| v.parse().ok().unwrap_or(false)),
autoinstall: std::env::var("HARMONY_AUTOINSTALL")
.map_or_else(|_| false, |v| v.parse().ok().unwrap_or(false)),
};
if k8s_anywhere_config.use_system_kubeconfig {
match self.try_load_system_kubeconfig().await {
Some(_client) => todo!(),
None => todo!(),
}
}
if let Some(kubeconfig) = k8s_anywhere_config.kubeconfig {
match self.try_load_kubeconfig(&kubeconfig).await {
Some(client) => {
return Ok(Some(K8sState {
client: Arc::new(client),
source: K8sSource::Kubeconfig,
message: format!("Loaded k8s client from kubeconfig {kubeconfig}"),
}));
}
None => {
return Err(InterpretError::new(format!(
"Failed to load kubeconfig from {kubeconfig}"
)));
}
}
}
info!("No kubernetes configuration found");
if !k8s_anywhere_config.autoinstall {
let confirmation = Confirm::new("Harmony autoinstall is not activated. Do you wish to launch autoinstallation?")
.with_default(false)
.prompt()
.expect("Unexpected prompt error");
if !confirmation {
warn!(
"Installation cancelled, K8sAnywhere could not initialize a valid Kubernetes client"
);
return Ok(None);
}
}
info!("Starting K8sAnywhere installation");
self.try_install_k3d().await?;
let k3d_score = self.get_k3d_installation_score();
// I feel like having to rely on the k3d_rs crate here is a smell
// I think we should have a way to interact more deeply with scores/interpret. Maybe the
// K3DInstallationScore should expose a method to get_client ? Not too sure what would be a
// good implementation due to the stateful nature of the k3d thing. Which is why I went
// with this solution for now
let k3d = k3d_rs::K3d::new(k3d_score.installation_path, Some(k3d_score.cluster_name));
let state = match k3d.get_client().await {
Ok(client) => K8sState {
client: Arc::new(K8sClient::new(client)),
source: K8sSource::LocalK3d,
message: "Successfully installed K3D cluster and acquired client".to_string(),
},
Err(_) => todo!(),
};
Ok(Some(state))
}
}
struct K8sAnywhereConfig {
/// The path of the KUBECONFIG file that Harmony should use to interact with the Kubernetes
/// cluster
///
/// Default : None
kubeconfig: Option<String>,
/// Whether to use the system KUBECONFIG, either the environment variable or the file in the
/// default or configured location
///
/// Default : false
use_system_kubeconfig: bool,
/// Whether to automatically install a Kubernetes cluster
///
/// When enabled, autoinstall will setup a K3D cluster on the localhost. https://k3d.io/stable/
///
/// Default : false
autoinstall: bool,
}
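// Hedged illustration of the environment contract documented above (values are
// examples only):
//   KUBECONFIG=/home/user/.kube/config -> kubeconfig: Some("/home/user/.kube/config")
//   HARMONY_USE_SYSTEM_KUBECONFIG=true -> use_system_kubeconfig: true
//   HARMONY_AUTOINSTALL=true           -> autoinstall: true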
#[async_trait]
impl Topology for K8sAnywhereTopology {
fn name(&self) -> &str {
"K8sAnywhereTopology"
}
async fn ensure_ready(&self) -> Result<Outcome, InterpretError> {
let k8s_state = self
.k8s_state
.get_or_try_init(|| self.try_get_or_install_k8s_client())
.await?;
let k8s_state: &K8sState = k8s_state.as_ref().ok_or(InterpretError::new(
"No K8s client could be found or installed".to_string(),
))?;
match self.is_helm_available() {
Ok(()) => Ok(Outcome::success(format!(
"{} + helm available",
k8s_state.message.clone()
))),
Err(e) => Err(InterpretError::new(format!("helm unavailable: {}", e))),
}
}
}
impl HelmCommand for K8sAnywhereTopology {}

View File

@@ -2,6 +2,7 @@ use std::{net::SocketAddr, str::FromStr};
use async_trait::async_trait;
use log::debug;
use serde::Serialize;
use super::{IpAddress, LogicalHost};
use crate::executors::ExecutorError;
@@ -36,20 +37,21 @@ impl std::fmt::Debug for dyn LoadBalancer {
f.write_fmt(format_args!("LoadBalancer {}", self.get_ip()))
}
}
#[derive(Debug, PartialEq, Clone)]
#[derive(Debug, PartialEq, Clone, Serialize)]
pub struct LoadBalancerService {
pub backend_servers: Vec<BackendServer>,
pub listening_port: SocketAddr,
pub health_check: Option<HealthCheck>,
}
#[derive(Debug, PartialEq, Clone)]
#[derive(Debug, PartialEq, Clone, Serialize)]
pub struct BackendServer {
// TODO should not be a string, probably IPAddress
pub address: String,
pub port: u16,
}
#[derive(Debug, Clone, PartialEq)]
#[derive(Debug, Clone, PartialEq, Serialize)]
pub enum HttpMethod {
GET,
POST,
@@ -91,14 +93,14 @@ impl std::fmt::Display for HttpMethod {
}
}
#[derive(Debug, Clone, PartialEq)]
#[derive(Debug, Clone, PartialEq, Serialize)]
pub enum HttpStatusCode {
Success2xx,
UserError4xx,
ServerError5xx,
}
#[derive(Debug, Clone, PartialEq)]
#[derive(Debug, Clone, PartialEq, Serialize)]
pub enum HealthCheck {
HTTP(String, HttpMethod, HttpStatusCode),
TCP(Option<u16>),

View File

@@ -0,0 +1,25 @@
use async_trait::async_trait;
use derive_new::new;
use crate::interpret::{InterpretError, Outcome};
use super::{HelmCommand, Topology};
#[derive(new)]
pub struct LocalhostTopology;
#[async_trait]
impl Topology for LocalhostTopology {
fn name(&self) -> &str {
"LocalHostTopology"
}
async fn ensure_ready(&self) -> Result<Outcome, InterpretError> {
Ok(Outcome::success(
"Localhost is Chuck Norris, always ready.".to_string(),
))
}
}
// TODO: Delete this, temp for test
impl HelmCommand for LocalhostTopology {}

View File

@@ -1,10 +1,15 @@
mod ha_cluster;
mod host_binding;
mod http;
mod k8s_anywhere;
mod localhost;
pub use k8s_anywhere::*;
pub use localhost::*;
pub mod k8s;
mod load_balancer;
pub mod openshift;
mod router;
mod tftp;
use async_trait::async_trait;
pub use ha_cluster::*;
pub use load_balancer::*;
pub use router::*;
@@ -12,15 +17,47 @@ mod network;
pub use host_binding::*;
pub use http::*;
pub use network::*;
use serde::Serialize;
pub use tftp::*;
mod helm_command;
pub use helm_command::*;
use std::net::IpAddr;
pub trait Topology {
fn name(&self) -> &str;
}
use super::interpret::{InterpretError, Outcome};
pub trait Capability {}
/// Represents a logical view of an infrastructure environment providing specific capabilities.
///
/// A Topology acts as a self-contained "package" responsible for managing access
/// to its underlying resources and ensuring they are in a ready state before use.
/// It defines the contract for the capabilities it provides through implemented
/// capability traits (e.g., `HasK8sCapability`, `HasDnsServer`).
#[async_trait]
pub trait Topology: Send + Sync {
/// Returns a unique identifier or name for this specific topology instance.
/// This helps differentiate between multiple instances of potentially the same type.
fn name(&self) -> &str;
/// Ensures that the topology and its required underlying components or services
/// are ready to provide their declared capabilities.
///
/// Implementations of this method MUST be idempotent. Subsequent calls after a
/// successful readiness check should ideally be cheap NO-OPs.
///
/// This method encapsulates the logic for:
/// 1. **Checking Current State:** Assessing if the required resources/services are already running and configured.
/// 2. **Discovery:** Identifying the runtime environment (e.g., local Docker, AWS, existing cluster).
/// 3. **Initialization/Bootstrapping:** Performing necessary setup actions if not already ready. This might involve:
/// * Making API calls.
/// * Running external commands (e.g., `k3d`, `docker`).
/// * **Internal Orchestration:** For complex topologies, this method might manage dependencies on other sub-topologies, ensuring *their* `ensure_ready` is called first. Using nested `Maestros` to run setup `Scores` against these sub-topologies is the recommended pattern for non-trivial bootstrapping, allowing reuse of Harmony's core orchestration logic.
///
/// # Returns
/// - `Ok(Outcome)`: Indicates the topology is now ready. The `Outcome` status might be `SUCCESS` if actions were taken, or `NOOP` if it was already ready. The message should provide context.
/// - `Err(TopologyError)`: Indicates the topology could not reach a ready state due to configuration issues, discovery failures, bootstrap errors, or unsupported environments.
async fn ensure_ready(&self) -> Result<Outcome, InterpretError>;
}
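// Hedged example: `NoopTopology` is hypothetical (not part of the crate) and only
// illustrates the idempotency contract of ensure_ready() described above.
#[derive(Debug)]
struct NoopTopology;
#[async_trait]
impl Topology for NoopTopology {
fn name(&self) -> &str {
"NoopTopology"
}
async fn ensure_ready(&self) -> Result<Outcome, InterpretError> {
// Nothing to bootstrap: repeated calls are cheap NO-OPs, as the contract requires.
Ok(Outcome::noop())
}
}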
pub type IpAddress = IpAddr;
@@ -30,6 +67,18 @@ pub enum Url {
Url(url::Url),
}
impl Serialize for Url {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: serde::Serializer,
{
match self {
Url::LocalFolder(path) => serializer.serialize_str(path),
Url::Url(url) => serializer.serialize_str(url.as_str()),
}
}
}
impl std::fmt::Display for Url {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
@@ -48,7 +97,7 @@ impl std::fmt::Display for Url {
/// - A control plane node
///
/// This abstraction focuses on the logical role and services, independent of the physical hardware.
#[derive(Debug, Clone)]
#[derive(Debug, Clone, Serialize)]
pub struct LogicalHost {
/// The IP address of this logical host.
pub ip: IpAddress,
@@ -130,3 +179,23 @@ fn increment_ip(ip: IpAddress, increment: u32) -> Option<IpAddress> {
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use serde_json;
#[test]
fn test_serialize_local_folder() {
let url = Url::LocalFolder("path/to/folder".to_string());
let serialized = serde_json::to_string(&url).unwrap();
assert_eq!(serialized, "\"path/to/folder\"");
}
#[test]
fn test_serialize_url() {
let url = Url::Url(url::Url::parse("https://example.com").unwrap());
let serialized = serde_json::to_string(&url).unwrap();
assert_eq!(serialized, "\"https://example.com/\"");
}
}

View File

@@ -2,10 +2,11 @@ use std::{net::Ipv4Addr, str::FromStr, sync::Arc};
use async_trait::async_trait;
use harmony_types::net::MacAddress;
use serde::Serialize;
use crate::executors::ExecutorError;
use super::{openshift::OpenshiftClient, IpAddress, LogicalHost};
use super::{IpAddress, LogicalHost, k8s::K8sClient};
#[derive(Debug)]
pub struct DHCPStaticEntry {
@@ -41,11 +42,10 @@ pub struct NetworkDomain {
pub name: String,
}
#[async_trait]
pub trait OcK8sclient: Send + Sync + std::fmt::Debug {
async fn oc_client(&self) -> Result<Arc<OpenshiftClient>, kube::Error>;
pub trait K8sclient: Send + Sync {
async fn k8s_client(&self) -> Result<Arc<K8sClient>, String>;
}
#[async_trait]
pub trait DhcpServer: Send + Sync + std::fmt::Debug {
async fn add_static_mapping(&self, entry: &DHCPStaticEntry) -> Result<(), ExecutorError>;
@@ -53,6 +53,9 @@ pub trait DhcpServer: Send + Sync + std::fmt::Debug {
async fn list_static_mappings(&self) -> Vec<(MacAddress, IpAddress)>;
async fn set_next_server(&self, ip: IpAddress) -> Result<(), ExecutorError>;
async fn set_boot_filename(&self, boot_filename: &str) -> Result<(), ExecutorError>;
async fn set_filename(&self, filename: &str) -> Result<(), ExecutorError>;
async fn set_filename64(&self, filename64: &str) -> Result<(), ExecutorError>;
async fn set_filenameipxe(&self, filenameipxe: &str) -> Result<(), ExecutorError>;
fn get_ip(&self) -> IpAddress;
fn get_host(&self) -> LogicalHost;
async fn commit_config(&self) -> Result<(), ExecutorError>;
@@ -62,11 +65,7 @@ pub trait DhcpServer: Send + Sync + std::fmt::Debug {
pub trait DnsServer: Send + Sync {
async fn register_dhcp_leases(&self, register: bool) -> Result<(), ExecutorError>;
async fn register_hosts(&self, hosts: Vec<DnsRecord>) -> Result<(), ExecutorError>;
fn remove_record(
&self,
name: &str,
record_type: DnsRecordType,
) -> Result<(), ExecutorError>;
fn remove_record(&self, name: &str, record_type: DnsRecordType) -> Result<(), ExecutorError>;
async fn list_records(&self) -> Vec<DnsRecord>;
fn get_ip(&self) -> IpAddress;
fn get_host(&self) -> LogicalHost;
@@ -117,7 +116,7 @@ pub enum Action {
Deny,
}
#[derive(Clone, Debug, PartialEq, Eq)]
#[derive(Clone, Debug, PartialEq, Eq, Serialize)]
pub enum DnsRecordType {
A,
AAAA,
@@ -138,7 +137,7 @@ impl std::fmt::Display for DnsRecordType {
}
}
#[derive(Clone, Debug, PartialEq, Eq)]
#[derive(Clone, Debug, PartialEq, Eq, Serialize)]
pub struct DnsRecord {
pub host: String,
pub domain: String,

View File

@@ -1,55 +0,0 @@
use k8s_openapi::NamespaceResourceScope;
use kube::{Api, Client, Error, Resource, api::PostParams};
use serde::de::DeserializeOwned;
pub struct OpenshiftClient {
client: Client,
}
impl OpenshiftClient {
pub async fn try_default() -> Result<Self, Error> {
Ok(Self {
client: Client::try_default().await?,
})
}
pub async fn apply_all<
K: Resource<Scope = NamespaceResourceScope>
+ std::fmt::Debug
+ Sync
+ DeserializeOwned
+ Default
+ serde::Serialize
+ Clone,
>(
&self,
resource: &Vec<K>,
) -> Result<Vec<K>, kube::Error>
where
<K as kube::Resource>::DynamicType: Default,
{
let mut result = vec![];
for r in resource.iter() {
let api: Api<K> = Api::all(self.client.clone());
result.push(api.create(&PostParams::default(), &r).await?);
}
Ok(result)
}
pub async fn apply_namespaced<K>(&self, resource: &Vec<K>) -> Result<K, Error>
where
K: Resource<Scope = NamespaceResourceScope>
+ Clone
+ std::fmt::Debug
+ DeserializeOwned
+ serde::Serialize
+ Default,
<K as kube::Resource>::DynamicType: Default,
{
for r in resource.iter() {
let api: Api<K> = Api::default_namespaced(self.client.clone());
api.create(&PostParams::default(), &r).await?;
}
todo!("")
}
}

View File

@@ -3,8 +3,9 @@ use crate::topology::IpAddress;
use derive_new::new;
use harmony_types::net::MacAddress;
use log::info;
use serde::Serialize;
#[derive(new)]
#[derive(new, Serialize)]
pub struct HPIlo {
ip_address: Option<IpAddress>,
mac_address: Option<MacAddress>,

View File

@@ -2,8 +2,9 @@ use crate::hardware::ManagementInterface;
use derive_new::new;
use harmony_types::net::MacAddress;
use log::info;
use serde::Serialize;
#[derive(new)]
#[derive(new, Serialize)]
pub struct IntelAmtManagement {
mac_address: MacAddress,
}

View File

@@ -69,4 +69,34 @@ impl DhcpServer for OPNSenseFirewall {
Ok(())
}
async fn set_filename(&self, filename: &str) -> Result<(), ExecutorError> {
{
let mut writable_opnsense = self.opnsense_config.write().await;
writable_opnsense.dhcp().set_filename(filename);
debug!("OPNsense dhcp server set filename {filename}");
}
Ok(())
}
async fn set_filename64(&self, filename: &str) -> Result<(), ExecutorError> {
{
let mut writable_opnsense = self.opnsense_config.write().await;
writable_opnsense.dhcp().set_filename64(filename);
debug!("OPNsense dhcp server set filename {filename}");
}
Ok(())
}
async fn set_filenameipxe(&self, filenameipxe: &str) -> Result<(), ExecutorError> {
{
let mut writable_opnsense = self.opnsense_config.write().await;
writable_opnsense.dhcp().set_filenameipxe(filenameipxe);
debug!("OPNsense dhcp server set filenameipxe {filenameipxe}");
}
Ok(())
}
}

View File

@@ -61,7 +61,7 @@ impl HttpServer for OPNSenseFirewall {
info!("Adding custom caddy config files");
config
.upload_files(
"../../../watchguard/caddy_config",
"./data/watchguard/caddy_config",
"/usr/local/etc/caddy/caddy.d/",
)
.await

View File

@@ -370,10 +370,13 @@ mod tests {
let result = get_servers_for_backend(&backend, &haproxy);
// Check the result
assert_eq!(result, vec![BackendServer {
address: "192.168.1.1".to_string(),
port: 80,
},]);
assert_eq!(
result,
vec![BackendServer {
address: "192.168.1.1".to_string(),
port: 80,
},]
);
}
#[test]
fn test_get_servers_for_backend_no_linked_servers() {
@@ -430,15 +433,18 @@ mod tests {
// Call the function
let result = get_servers_for_backend(&backend, &haproxy);
// Check the result
assert_eq!(result, vec![
BackendServer {
address: "some-hostname.test.mcd".to_string(),
port: 80,
},
BackendServer {
address: "192.168.1.2".to_string(),
port: 8080,
},
]);
assert_eq!(
result,
vec![
BackendServer {
address: "some-hostname.test.mcd".to_string(),
port: 80,
},
BackendServer {
address: "192.168.1.2".to_string(),
port: 8080,
},
]
);
}
}

View File

@@ -1,7 +1,8 @@
use crate::hardware::ManagementInterface;
use derive_new::new;
use serde::Serialize;
#[derive(new)]
#[derive(new, Serialize)]
pub struct OPNSenseManagementInterface {}
impl ManagementInterface for OPNSenseManagementInterface {

View File

@@ -0,0 +1,46 @@
use std::{collections::HashMap, str::FromStr};
use non_blank_string_rs::NonBlankString;
use serde::Serialize;
use url::Url;
use crate::{
modules::helm::chart::{HelmChartScore, HelmRepository},
score::Score,
topology::{HelmCommand, Topology},
};
#[derive(Debug, Serialize, Clone)]
pub struct CertManagerHelmScore {}
impl<T: Topology + HelmCommand> Score<T> for CertManagerHelmScore {
fn create_interpret(&self) -> Box<dyn crate::interpret::Interpret<T>> {
let mut values_overrides = HashMap::new();
values_overrides.insert(
NonBlankString::from_str("crds.enabled").unwrap(),
"true".to_string(),
);
let values_overrides = Some(values_overrides);
HelmChartScore {
namespace: Some(NonBlankString::from_str("cert-manager").unwrap()),
release_name: NonBlankString::from_str("cert-manager").unwrap(),
chart_name: NonBlankString::from_str("jetstack/cert-manager").unwrap(),
chart_version: None,
values_overrides,
values_yaml: None,
create_namespace: true,
install_only: true,
repository: Some(HelmRepository::new(
"jetstack".to_string(),
Url::parse("https://charts.jetstack.io").unwrap(),
true,
)),
}
.create_interpret()
}
fn name(&self) -> String {
format!("CertManagerHelmScore")
}
}

View File

@@ -0,0 +1,2 @@
mod helm;
pub use helm::*;

View File

@@ -1,7 +1,7 @@
use async_trait::async_trait;
use derive_new::new;
use log::info;
use serde::Serialize;
use crate::{
domain::{data::Version, interpret::InterpretStatus},
@@ -12,11 +12,14 @@ use crate::{
use crate::domain::score::Score;
#[derive(Debug, new, Clone)]
#[derive(Debug, new, Clone, Serialize)]
pub struct DhcpScore {
pub host_binding: Vec<HostBinding>,
pub next_server: Option<IpAddress>,
pub boot_filename: Option<String>,
pub filename: Option<String>,
pub filename64: Option<String>,
pub filenameipxe: Option<String>,
}
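// Hedged usage sketch: `example_pxe_score` is hypothetical; it shows how the new
// PXE-related fields line up in the derive_new constructor (values are illustrative).
fn example_pxe_score() -> DhcpScore {
DhcpScore::new(
Vec::new(), // host_binding: no static bindings
Some("192.168.1.10".parse().unwrap()), // next_server: the TFTP/HTTP boot host
Some("undionly.kpxe".to_string()), // boot_filename
Some("undionly.kpxe".to_string()), // filename: legacy BIOS clients
Some("ipxe.efi".to_string()), // filename64: UEFI clients
Some("http://192.168.1.10/boot.ipxe".to_string()), // filenameipxe: iPXE chainload target
)
}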
impl<T: Topology + DhcpServer> Score<T> for DhcpScore {
@@ -117,8 +120,44 @@ impl DhcpInterpret {
None => Outcome::noop(),
};
let filename_outcome = match &self.score.filename {
Some(filename) => {
dhcp_server.set_filename(&filename).await?;
Outcome::new(
InterpretStatus::SUCCESS,
format!("Dhcp Interpret Set filename to {filename}"),
)
}
None => Outcome::noop(),
};
let filename64_outcome = match &self.score.filename64 {
Some(filename64) => {
dhcp_server.set_filename64(&filename64).await?;
Outcome::new(
InterpretStatus::SUCCESS,
format!("Dhcp Interpret Set filename64 to {filename64}"),
)
}
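// For example: wrap_or_truncate("abcdef ghij", 6) yields ["abcdef", "ghij"]
// (wrapping at the space), while wrap_or_truncate("abcdefgh", 6) yields
// ["abcdef", "gh"] (a hard split when no space fits within the width).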
None => Outcome::noop(),
};
let filenameipxe_outcome = match &self.score.filenameipxe {
Some(filenameipxe) => {
dhcp_server.set_filenameipxe(&filenameipxe).await?;
Outcome::new(
InterpretStatus::SUCCESS,
format!("Dhcp Interpret Set filenameipxe to {filenameipxe}"),
)
}
None => Outcome::noop(),
};
if next_server_outcome.status == InterpretStatus::NOOP
&& boot_filename_outcome.status == InterpretStatus::NOOP
&& filename_outcome.status == InterpretStatus::NOOP
&& filename64_outcome.status == InterpretStatus::NOOP
&& filenameipxe_outcome.status == InterpretStatus::NOOP
{
return Ok(Outcome::noop());
}
@@ -126,15 +165,19 @@ impl DhcpInterpret {
Ok(Outcome::new(
InterpretStatus::SUCCESS,
format!(
"Dhcp Interpret Set next boot to {:?} and boot_filename to {:?}",
self.score.boot_filename, self.score.boot_filename
"Dhcp Interpret Set next boot to [{:?}], boot_filename to [{:?}], filename to [{:?}], filename64 to [{:?}], filenameipxe to [:{:?}]",
self.score.boot_filename,
self.score.boot_filename,
self.score.filename,
self.score.filename64,
self.score.filenameipxe
),
))
}
}
#[async_trait]
impl<T: Topology + DhcpServer> Interpret<T> for DhcpInterpret {
impl<T: DhcpServer> Interpret<T> for DhcpInterpret {
fn get_name(&self) -> InterpretName {
InterpretName::OPNSenseDHCP
}

View File

@@ -1,6 +1,7 @@
use async_trait::async_trait;
use derive_new::new;
use log::info;
use serde::Serialize;
use crate::{
data::Version,
@@ -10,7 +11,7 @@ use crate::{
topology::{DnsRecord, DnsServer, Topology},
};
#[derive(Debug, new, Clone)]
#[derive(Debug, new, Clone, Serialize)]
pub struct DnsScore {
dns_entries: Vec<DnsRecord>,
register_dhcp_leases: Option<bool>,

View File

@@ -1,4 +1,5 @@
use async_trait::async_trait;
use serde::Serialize;
use crate::{
data::Version,
@@ -10,7 +11,7 @@ use crate::{
/// Score that always errors. This is only useful for development/testing purposes. It does nothing
/// except returning Err(InterpretError) when interpreted.
#[derive(Debug, Clone)]
#[derive(Debug, Clone, Serialize)]
pub struct ErrorScore;
impl<T: Topology> Score<T> for ErrorScore {
@@ -28,7 +29,7 @@ impl<T: Topology> Score<T> for ErrorScore {
/// Score that always succeeds. This is only useful for development/testing purposes. It does nothing
/// except returning Ok(Outcome::success) when interpreted.
#[derive(Debug, Clone)]
#[derive(Debug, Clone, Serialize)]
pub struct SuccessScore;
impl<T: Topology> Score<T> for SuccessScore {
@@ -81,7 +82,7 @@ impl<T: Topology> Interpret<T> for DummyInterpret {
/// Score that always panics. This is only useful for development/testing purposes. It does nothing
/// except panic! with an error message when interpreted
#[derive(Debug, Clone)]
#[derive(Debug, Clone, Serialize)]
pub struct PanicScore;
impl<T: Topology> Score<T> for PanicScore {

View File

@@ -0,0 +1,256 @@
use crate::data::{Id, Version};
use crate::interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome};
use crate::inventory::Inventory;
use crate::score::Score;
use crate::topology::{HelmCommand, Topology};
use async_trait::async_trait;
use helm_wrapper_rs;
use helm_wrapper_rs::blocking::{DefaultHelmExecutor, HelmExecutor};
use log::{debug, info, warn};
pub use non_blank_string_rs::NonBlankString;
use serde::Serialize;
use std::collections::HashMap;
use std::path::Path;
use std::process::{Command, Output, Stdio};
use std::str::FromStr;
use temp_file::TempFile;
use url::Url;
#[derive(Debug, Clone, Serialize)]
pub struct HelmRepository {
name: String,
url: Url,
force_update: bool,
}
impl HelmRepository {
pub(crate) fn new(name: String, url: Url, force_update: bool) -> Self {
Self {
name,
url,
force_update,
}
}
}
#[derive(Debug, Clone, Serialize)]
pub struct HelmChartScore {
pub namespace: Option<NonBlankString>,
pub release_name: NonBlankString,
pub chart_name: NonBlankString,
pub chart_version: Option<NonBlankString>,
pub values_overrides: Option<HashMap<NonBlankString, String>>,
pub values_yaml: Option<String>,
pub create_namespace: bool,
/// Whether to run `helm upgrade --install` under the hood, or only install when not already present
pub install_only: bool,
pub repository: Option<HelmRepository>,
}
impl<T: Topology + HelmCommand> Score<T> for HelmChartScore {
fn create_interpret(&self) -> Box<dyn Interpret<T>> {
Box::new(HelmChartInterpret {
score: self.clone(),
})
}
fn name(&self) -> String {
format!("{} {} HelmChartScore", self.release_name, self.chart_name)
}
}
#[derive(Debug, Serialize)]
pub struct HelmChartInterpret {
pub score: HelmChartScore,
}
impl HelmChartInterpret {
fn add_repo(&self) -> Result<(), InterpretError> {
let repo = match &self.score.repository {
Some(repo) => repo,
None => {
info!("No Helm repository specified in the score. Skipping repository setup.");
return Ok(());
}
};
info!(
"Ensuring Helm repository exists: Name='{}', URL='{}', ForceUpdate={}",
repo.name, repo.url, repo.force_update
);
let mut add_args = vec!["repo", "add", &repo.name, repo.url.as_str()];
if repo.force_update {
add_args.push("--force-update");
}
let add_output = run_helm_command(&add_args)?;
let full_output = format!(
"{}\n{}",
String::from_utf8_lossy(&add_output.stdout),
String::from_utf8_lossy(&add_output.stderr)
);
match add_output.status.success() {
true => {
return Ok(());
}
false => {
return Err(InterpretError::new(format!(
"Failed to add helm repository!\n{full_output}"
)));
}
}
}
}
fn run_helm_command(args: &[&str]) -> Result<Output, InterpretError> {
let command_str = format!("helm {}", args.join(" "));
debug!("Got KUBECONFIG: `{}`", std::env::var("KUBECONFIG").unwrap());
debug!("Running Helm command: `{}`", command_str);
let output = Command::new("helm")
.args(args)
.stdout(Stdio::piped())
.stderr(Stdio::piped())
.output()
.map_err(|e| {
InterpretError::new(format!(
"Failed to execute helm command '{}': {}. Is helm installed and in PATH?",
command_str, e
))
})?;
if !output.status.success() {
let stdout = String::from_utf8_lossy(&output.stdout);
let stderr = String::from_utf8_lossy(&output.stderr);
warn!(
"Helm command `{}` failed with status: {}\nStdout:\n{}\nStderr:\n{}",
command_str, output.status, stdout, stderr
);
} else {
debug!(
"Helm command `{}` finished successfully. Status: {}",
command_str, output.status
);
}
Ok(output)
}
#[async_trait]
impl<T: Topology + HelmCommand> Interpret<T> for HelmChartInterpret {
async fn execute(
&self,
_inventory: &Inventory,
_topology: &T,
) -> Result<Outcome, InterpretError> {
let ns = self
.score
.namespace
.as_ref()
.unwrap_or_else(|| todo!("Get namespace from active kubernetes cluster"));
let tf: TempFile;
let yaml_path: Option<&Path> = match self.score.values_yaml.as_ref() {
Some(yaml_str) => {
tf = temp_file::with_contents(yaml_str.as_bytes());
Some(tf.path())
}
None => None,
};
self.add_repo()?;
let helm_executor = DefaultHelmExecutor::new_with_opts(
&NonBlankString::from_str("helm").unwrap(),
None,
900,
false,
false,
);
let mut helm_options = Vec::new();
if self.score.create_namespace {
helm_options.push(NonBlankString::from_str("--create-namespace").unwrap());
}
if self.score.install_only {
let chart_list = match helm_executor.list(Some(ns)) {
Ok(charts) => charts,
Err(e) => {
return Err(InterpretError::new(format!(
"Failed to list scores in namespace {:?} because of error : {}",
self.score.namespace, e
)));
}
};
if chart_list
.iter()
.any(|item| item.name == self.score.release_name.to_string())
{
info!(
"Release '{}' already exists in namespace '{}'. Skipping installation as install_only is true.",
self.score.release_name, ns
);
return Ok(Outcome::new(
InterpretStatus::SUCCESS,
format!(
"Helm Chart '{}' already installed to namespace {ns} and install_only=true",
self.score.release_name
),
));
} else {
info!(
"Release '{}' not found in namespace '{}'. Proceeding with installation.",
self.score.release_name, ns
);
}
}
let res = helm_executor.install_or_upgrade(
&ns,
&self.score.release_name,
&self.score.chart_name,
self.score.chart_version.as_ref(),
self.score.values_overrides.as_ref(),
yaml_path,
Some(&helm_options),
);
let status = match res {
Ok(status) => status,
Err(err) => return Err(InterpretError::new(err.to_string())),
};
match status {
helm_wrapper_rs::HelmDeployStatus::Deployed => Ok(Outcome::new(
InterpretStatus::SUCCESS,
"Helm Chart deployed".to_string(),
)),
helm_wrapper_rs::HelmDeployStatus::PendingInstall => Ok(Outcome::new(
InterpretStatus::RUNNING,
"Helm Chart Pending install".to_string(),
)),
helm_wrapper_rs::HelmDeployStatus::PendingUpgrade => Ok(Outcome::new(
InterpretStatus::RUNNING,
"Helm Chart pending upgrade".to_string(),
)),
helm_wrapper_rs::HelmDeployStatus::Failed => Err(InterpretError::new(
"Failed to install helm chart".to_string(),
)),
}
}
fn get_name(&self) -> InterpretName {
todo!()
}
fn get_version(&self) -> Version {
todo!()
}
fn get_status(&self) -> InterpretStatus {
todo!()
}
fn get_children(&self) -> Vec<Id> {
todo!()
}
}

View File

@@ -0,0 +1,379 @@
use async_trait::async_trait;
use log::debug;
use non_blank_string_rs::NonBlankString;
use serde::Serialize;
use std::collections::HashMap;
use std::env::temp_dir;
use std::ffi::OsStr;
use std::io::ErrorKind;
use std::path::{Path, PathBuf};
use std::process::{Command, Output};
use temp_dir::{self, TempDir};
use temp_file::TempFile;
use crate::data::{Id, Version};
use crate::interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome};
use crate::inventory::Inventory;
use crate::score::Score;
use crate::topology::{HelmCommand, K8sclient, Topology};
#[derive(Clone)]
pub struct HelmCommandExecutor {
pub env: HashMap<String, String>,
pub path: Option<PathBuf>,
pub args: Vec<String>,
pub api_versions: Option<Vec<String>>,
pub kube_version: String,
pub debug: Option<bool>,
pub globals: HelmGlobals,
pub chart: HelmChart,
}
#[derive(Clone)]
pub struct HelmGlobals {
pub chart_home: Option<PathBuf>,
pub config_home: Option<PathBuf>,
}
#[derive(Debug, Clone, Serialize)]
pub struct HelmChart {
pub name: String,
pub version: Option<String>,
pub repo: Option<String>,
pub release_name: Option<String>,
pub namespace: Option<String>,
pub additional_values_files: Vec<PathBuf>,
pub values_file: Option<PathBuf>,
pub values_inline: Option<String>,
pub include_crds: Option<bool>,
pub skip_hooks: Option<bool>,
pub api_versions: Option<Vec<String>>,
pub kube_version: Option<String>,
pub name_template: String,
pub skip_tests: Option<bool>,
pub debug: Option<bool>,
}
impl HelmCommandExecutor {
pub fn generate(mut self) -> Result<String, std::io::Error> {
if self.globals.chart_home.is_none() {
self.globals.chart_home = Some(PathBuf::from("charts"));
}
if self
.chart
.clone()
.chart_exists_locally(self.globals.chart_home.clone().unwrap())
.is_none()
{
if self.chart.repo.is_none() {
return Err(std::io::Error::new(
ErrorKind::Other,
"Chart doesn't exist locally and no repo specified",
));
}
self.clone().run_command(
self.chart
.clone()
.pull_command(self.globals.chart_home.clone().unwrap()),
)?;
}
let out = match self.clone().run_command(
self.chart
.clone()
.helm_args(self.globals.chart_home.clone().unwrap()),
) {
Ok(out) => out,
Err(e) => return Err(e),
};
// TODO: don't use unwrap here
let s = String::from_utf8(out.stdout).unwrap();
debug!("helm stderr: {}", String::from_utf8(out.stderr).unwrap());
debug!("helm status: {}", out.status);
debug!("helm output: {s}");
let clean = s.split_once("---").unwrap().1;
Ok(clean.to_string())
}
pub fn version(self) -> Result<String, std::io::Error> {
let out = match self.run_command(vec![
"version".to_string(),
"-c".to_string(),
"--short".to_string(),
]) {
Ok(out) => out,
Err(e) => return Err(e),
};
// TODO: don't use unwrap
Ok(String::from_utf8(out.stdout).unwrap())
}
pub fn run_command(mut self, mut args: Vec<String>) -> Result<Output, std::io::Error> {
if let Some(d) = self.debug {
if d {
args.push("--debug".to_string());
}
}
let path = if let Some(p) = self.path {
p
} else {
PathBuf::from("helm")
};
// Hold the TempDir guard so the directory survives until the command has run;
// temp_dir removes the directory when the guard is dropped.
let mut _config_tmp: Option<TempDir> = None;
let config_home = match self.globals.config_home {
Some(p) => p,
None => {
let td = TempDir::new()?;
let p = td.path().to_path_buf();
_config_tmp = Some(td);
p
}
};
// Keep this guard alive until the command below has run: TempFile deletes the
// file on drop, so binding it inside the match arm would remove the values file
// before helm could read it.
let _values_tmp: Option<TempFile> = match self.chart.values_inline.take() {
Some(yaml_str) => {
let tf = temp_file::with_contents(yaml_str.as_bytes());
self.chart
.additional_values_files
.push(PathBuf::from(tf.path()));
Some(tf)
}
None => None,
};
self.env.insert(
"HELM_CONFIG_HOME".to_string(),
config_home.to_str().unwrap().to_string(),
);
self.env.insert(
"HELM_CACHE_HOME".to_string(),
config_home.to_str().unwrap().to_string(),
);
self.env.insert(
"HELM_DATA_HOME".to_string(),
config_home.to_str().unwrap().to_string(),
);
Command::new(path).envs(self.env).args(args).output()
}
}
impl HelmChart {
pub fn chart_exists_locally(self, chart_home: PathBuf) -> Option<PathBuf> {
let chart_path = chart_home.join(&self.name);
if chart_path.exists() {
Some(chart_path)
} else {
None
}
}
pub fn pull_command(self, chart_home: PathBuf) -> Vec<String> {
let mut args = vec![
"pull".to_string(),
"--untar".to_string(),
"--untardir".to_string(),
chart_home.to_str().unwrap().to_string(),
];
match self.repo {
Some(r) => {
if r.starts_with("oci://") {
args.push(String::from(
r.trim_end_matches("/").to_string() + "/" + self.name.clone().as_str(),
));
} else {
args.push("--repo".to_string());
args.push(r.to_string());
args.push(self.name);
}
}
None => args.push(self.name),
};
match self.version {
Some(v) => {
args.push("--version".to_string());
args.push(v.to_string());
}
None => (),
}
args
}
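// For example (illustrative values, with chart_home = "charts"):
//   HTTP repo: helm pull --untar --untardir charts --repo https://charts.example.com nginx --version 1.2.3
//   OCI repo:  helm pull --untar --untardir charts oci://registry.example.com/charts/nginx --version 1.2.3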
pub fn helm_args(self, chart_home: PathBuf) -> Vec<String> {
let mut args: Vec<String> = vec!["template".to_string()];
match self.release_name {
Some(rn) => args.push(rn.to_string()),
None => args.push("--generate-name".to_string()),
}
args.push(chart_home.join(&self.name).to_str().unwrap().to_string());
if let Some(n) = self.namespace {
args.push("--namespace".to_string());
args.push(n.to_string());
}
if let Some(f) = self.values_file {
args.push("-f".to_string());
args.push(f.to_str().unwrap().to_string());
}
for f in self.additional_values_files {
args.push("-f".to_string());
args.push(f.to_str().unwrap().to_string());
}
if let Some(vv) = self.api_versions {
for v in vv {
args.push("--api-versions".to_string());
args.push(v);
}
}
if let Some(kv) = self.kube_version {
args.push("--kube-version".to_string());
args.push(kv);
}
if let Some(crd) = self.include_crds {
if crd {
args.push("--include-crds".to_string());
}
}
if let Some(st) = self.skip_tests {
if st {
args.push("--skip-tests".to_string());
}
}
if let Some(sh) = self.skip_hooks {
if sh {
args.push("--no-hooks".to_string());
}
}
if let Some(d) = self.debug {
if d {
args.push("--debug".to_string());
}
}
args
}
}
#[derive(Debug, Clone, Serialize)]
pub struct HelmChartScoreV2 {
pub chart: HelmChart,
}
impl<T: Topology + K8sclient + HelmCommand> Score<T> for HelmChartScoreV2 {
fn create_interpret(&self) -> Box<dyn Interpret<T>> {
Box::new(HelmChartInterpretV2 {
score: self.clone(),
})
}
fn name(&self) -> String {
format!(
"{} {} HelmChartScoreV2",
self.chart
.release_name
.clone()
.unwrap_or("Unknown".to_string()),
self.chart.name
)
}
}
#[derive(Debug, Serialize)]
pub struct HelmChartInterpretV2 {
pub score: HelmChartScoreV2,
}
impl HelmChartInterpretV2 {}
#[async_trait]
impl<T: Topology + K8sclient + HelmCommand> Interpret<T> for HelmChartInterpretV2 {
async fn execute(
&self,
_inventory: &Inventory,
_topology: &T,
) -> Result<Outcome, InterpretError> {
let ns = self
.score
.chart
.namespace
.as_ref()
.unwrap_or_else(|| todo!("Get namespace from active kubernetes cluster"));
let helm_executor = HelmCommandExecutor {
env: HashMap::new(),
path: None,
args: vec![],
api_versions: None,
kube_version: "v1.33.0".to_string(),
debug: Some(false),
globals: HelmGlobals {
chart_home: None,
config_home: None,
},
chart: self.score.chart.clone(),
};
// let mut helm_options = Vec::new();
// if self.score.create_namespace {
// helm_options.push(NonBlankString::from_str("--create-namespace").unwrap());
// }
let res = helm_executor.generate();
let output = match res {
Ok(output) => output,
Err(err) => return Err(InterpretError::new(err.to_string())),
};
// TODO: implement actually applying the YAML from the templating in the generate function to a k8s cluster, having trouble passing in straight YAML into the k8s client
// let k8s_resource = k8s_openapi::serde_json::from_str(output.as_str()).unwrap();
// let client = topology
// .k8s_client()
// .await
// .expect("Environment should provide enough information to instanciate a client")
// .apply_namespaced(&vec![output], Some(ns.to_string().as_str()));
// match client.apply_yaml(output) {
// Ok(_) => return Ok(Outcome::success("Helm chart deployed".to_string())),
// Err(e) => return Err(InterpretError::new(e)),
// }
Ok(Outcome::success("Helm chart deployed".to_string()))
}
fn get_name(&self) -> InterpretName {
todo!()
}
fn get_version(&self) -> Version {
todo!()
}
fn get_status(&self) -> InterpretStatus {
todo!()
}
fn get_children(&self) -> Vec<Id> {
todo!()
}
}

View File

@@ -0,0 +1,2 @@
pub mod chart;
pub mod command;

View File

@@ -1,5 +1,6 @@
use async_trait::async_trait;
use derive_new::new;
use serde::Serialize;
use crate::{
data::{Id, Version},
@@ -9,7 +10,7 @@ use crate::{
topology::{HttpServer, Topology, Url},
};
#[derive(Debug, new, Clone)]
#[derive(Debug, new, Clone, Serialize)]
pub struct HttpScore {
files_to_serve: Url,
}

View File

@@ -0,0 +1,66 @@
use async_trait::async_trait;
use derive_new::new;
use serde::Serialize;
use crate::{
data::{Id, Version},
interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
inventory::Inventory,
score::Score,
topology::Topology,
};
#[derive(Debug, new, Clone, Serialize)]
pub struct IpxeScore {
//files_to_serve: Url,
}
impl<T: Topology> Score<T> for IpxeScore {
fn create_interpret(&self) -> Box<dyn Interpret<T>> {
Box::new(IpxeInterpret::new(self.clone()))
}
fn name(&self) -> String {
"IpxeScore".to_string()
}
}
#[derive(Debug, new, Clone)]
pub struct IpxeInterpret {
_score: IpxeScore,
}
#[async_trait]
impl<T: Topology> Interpret<T> for IpxeInterpret {
async fn execute(
&self,
_inventory: &Inventory,
_topology: &T,
) -> Result<Outcome, InterpretError> {
/*
let http_server = &topology.http_server;
http_server.ensure_initialized().await?;
Ok(Outcome::success(format!(
"Http Server running and serving files from {}",
self.score.files_to_serve
)))
*/
todo!();
}
fn get_name(&self) -> InterpretName {
InterpretName::Ipxe
}
fn get_version(&self) -> Version {
todo!()
}
fn get_status(&self) -> InterpretStatus {
todo!()
}
fn get_children(&self) -> Vec<Id> {
todo!()
}
}

View File

@@ -0,0 +1,82 @@
use std::path::PathBuf;
use async_trait::async_trait;
use log::info;
use serde::Serialize;
use crate::{
config::HARMONY_CONFIG_DIR,
data::{Id, Version},
interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
inventory::Inventory,
score::Score,
topology::Topology,
};
#[derive(Debug, Clone, Serialize)]
pub struct K3DInstallationScore {
pub installation_path: PathBuf,
pub cluster_name: String,
}
impl Default for K3DInstallationScore {
fn default() -> Self {
Self {
installation_path: HARMONY_CONFIG_DIR.join("k3d"),
cluster_name: "harmony".to_string(),
}
}
}
impl<T: Topology> Score<T> for K3DInstallationScore {
fn create_interpret(&self) -> Box<dyn crate::interpret::Interpret<T>> {
Box::new(K3dInstallationInterpret {
score: self.clone(),
})
}
fn name(&self) -> String {
todo!()
}
}
#[derive(Debug)]
pub struct K3dInstallationInterpret {
score: K3DInstallationScore,
}
#[async_trait]
impl<T: Topology> Interpret<T> for K3dInstallationInterpret {
async fn execute(
&self,
_inventory: &Inventory,
_topology: &T,
) -> Result<Outcome, InterpretError> {
let k3d = k3d_rs::K3d::new(
self.score.installation_path.clone(),
Some(self.score.cluster_name.clone()),
);
match k3d.ensure_installed().await {
Ok(_client) => {
let msg = format!("k3d cluster {} is installed ", self.score.cluster_name);
info!("{msg}");
Ok(Outcome::success(msg))
}
Err(msg) => Err(InterpretError::new(format!(
"K3dInstallationInterpret failed to ensure k3d is installed : {msg}"
))),
}
}
fn get_name(&self) -> InterpretName {
InterpretName::K3dInstallation
}
fn get_version(&self) -> Version {
todo!()
}
fn get_status(&self) -> InterpretStatus {
todo!()
}
fn get_children(&self) -> Vec<Id> {
todo!()
}
}

View File

@@ -0,0 +1,2 @@
mod install;
pub use install::*;

View File

@@ -1,19 +1,26 @@
use k8s_openapi::api::apps::v1::Deployment;
use serde::Serialize;
use serde_json::json;
use crate::{interpret::Interpret, score::Score, topology::{OcK8sclient, Topology}};
use crate::{
interpret::Interpret,
score::Score,
topology::{K8sclient, Topology},
};
use super::resource::{K8sResourceInterpret, K8sResourceScore};
#[derive(Debug, Clone)]
#[derive(Debug, Clone, Serialize)]
pub struct K8sDeploymentScore {
pub name: String,
pub image: String,
pub namespace: Option<String>,
pub env_vars: serde_json::Value,
}
impl <T:Topology + OcK8sclient> Score<T> for K8sDeploymentScore {
impl<T: Topology + K8sclient> Score<T> for K8sDeploymentScore {
fn create_interpret(&self) -> Box<dyn Interpret<T>> {
let deployment: Deployment = serde_json::from_value(json!(
let deployment = json!(
{
"metadata": {
"name": self.name
@@ -33,18 +40,21 @@ impl <T:Topology + OcK8sclient> Score<T> for K8sDeploymentScore {
"spec": {
"containers": [
{
"image": self.image,
"name": self.image
"image": self.image,
"name": self.name,
"imagePullPolicy": "Always",
"env": self.env_vars,
}
]
}
}
}
}
))
.unwrap();
);
let deployment: Deployment = serde_json::from_value(deployment).unwrap();
Box::new(K8sResourceInterpret {
score: K8sResourceScore::single(deployment.clone()),
score: K8sResourceScore::single(deployment.clone(), self.namespace.clone()),
})
}
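// Hedged usage sketch (illustrative values): env_vars takes the raw Kubernetes
// container `env` array as JSON.
//
// let score = K8sDeploymentScore {
//     name: "demo-app".to_string(),
//     image: "nginx:1.27".to_string(),
//     namespace: Some("demo".to_string()),
//     env_vars: serde_json::json!([{ "name": "LOG_LEVEL", "value": "info" }]),
// };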

Some files were not shown because too many files have changed in this diff.