Compare commits

...

21 Commits

Author SHA1 Message Date
Ian Letourneau
2f7c4924c1 wip 2025-04-29 16:30:54 -04:00
254f392cb5 feat(HelmScore): Add values yaml option to helm chart score (#23)
Co-authored-by: tahahawa <tahahawa@gmail.com>
Reviewed-on: #23
2025-04-29 16:09:04 +00:00
db9c8d83e6 update adr 2025-04-28 15:09:11 -04:00
20551b4a80 adr for monitoring and alerting 2025-04-28 14:11:44 -04:00
5c026ae6dd chore: improved error message for helm unavailable 2025-04-28 10:11:57 -04:00
76c0cacc1b Merge pull request 'feat: LampScore implement dockerfile generation and image building' (#22) from feat/lampDocker into master
Reviewed-on: #22
Reviewed-by: wjro <wrolleman@nationtech.io>
2025-04-27 19:56:29 +00:00
f17948397f feat: escape PHP_ERROR_REPORTING value in Dockerfile
Escapes the value of the PHP_ERROR_REPORTING environment variable in the Dockerfile to prevent potential issues with shell interpretation. Uses EnvBuilder for a more structured approach.
2025-04-27 15:55:12 -04:00
16a665241e feat: LampScore implement dockerfile generation and image building
- Added `build_dockerfile` function to generate a Dockerfile based on the LAMP stack for the given project.
- Implemented `build_docker_image` to execute the docker build command and create the image.
- Configured user and permissions for apache.
- Included necessary apache configuration for security.
- Added error handling for docker build failures.
- Exposed port 80 for external access.
- Added basic serialization to Config struct.
2025-04-25 14:34:57 -04:00
065e3904b8 Merge pull request 'fix(k8s_anywhere): Ensure k3d cluster is started before use' (#21) from feat/k3d into master
Reviewed-on: #21
Reviewed-by: wjro <wrolleman@nationtech.io>
Reviewed-by: taha <taha@noreply.git.nationtech.io>
2025-04-25 16:46:28 +00:00
22752960f9 fix(k8s_anywhere): Ensure k3d cluster is started before use
- Refactor k3d cluster management to explicitly start the cluster.
- Introduce `start_cluster` function to ensure cluster is running before operations.
- Improve error handling and logging during cluster startup.
- Update `create_cluster` and other related functions to utilize the new startup mechanism.
- Enhance reliability and prevent potential issues caused by an uninitialized cluster.
- Add `run_k3d_command` to handle k3d commands with logging and error handling.
2025-04-25 12:45:02 -04:00
23971ecd7c Merge pull request 'feat: implement k3d cluster management' (#20) from feat/k3d into master
Reviewed-on: #20
Reviewed-by: wjro <wrolleman@nationtech.io>
Reviewed-by: taha <taha@noreply.git.nationtech.io>
2025-04-25 15:33:13 +00:00
fbcd3e4f7f feat: implement k3d cluster management
- Adds functionality to download, install, and manage k3d clusters.
- Includes methods for downloading the latest release, creating clusters, and verifying cluster existence.
- Implements `ensure_k3d_installed`, `get_latest_release_tag`, `download_latest_release`, `is_k3d_installed`, `verify_cluster_exists`, `create_cluster` and `create_kubernetes_client`.
- Provides a `get_client` method to access the Kubernetes client.
- Includes unit tests for download and installation.
- Adds handling for different operating systems.
- Improves error handling and logging.
- Introduces a `K3d` struct to encapsulate k3d cluster management logic.
- Adds the ability to specify the cluster name during K3d initialization.
2025-04-24 17:36:01 -04:00
d307893f15 fix: small-fixes (#19)
Reviewed-on: #19
Reviewed-by: johnride <jg@nationtech.io>
Co-authored-by: Taha Hawa <taha@taha.dev>
Co-committed-by: Taha Hawa <taha@taha.dev>
2025-04-24 18:47:47 +00:00
00c0566533 Merge pull request 'feat: introduce Maestro::initialize function that creates the maestro instance and ensure_ready the topology as well. Also refactor all relevant examples to use this new initialize function' (#18) from feat/maestroinitialize into master
Reviewed-on: #18
Reviewed-by: taha <taha@noreply.git.nationtech.io>
2025-04-24 17:43:31 +00:00
f5e3f1aaea feat: Add check.sh helper script to make sure code looks OK before pushing 2025-04-24 13:16:20 -04:00
508b97ca7c chore: Fix more warnings 2025-04-24 13:14:35 -04:00
80bdd0ee8a feat: introduce Maestro::initialize function that creates the maestro instance and ensure_ready the topology as well. Also refactor all relevant examples to use this new initialize function 2025-04-24 12:58:41 -04:00
6c06a4ae07 feat: update ensure_ready to check helm is available (#17)
I want to make sure the changes I'm working on in the ensure_ready don't break anything

Reviewed-on: #17
Reviewed-by: taha <taha@noreply.git.nationtech.io>
Co-authored-by: Willem <wrolleman@nationtech.io>
Co-committed-by: Willem <wrolleman@nationtech.io>
2025-04-24 15:51:28 +00:00
ad1aa897b1 Merge pull request 'chore: Fix all warnings in the project, ignore unused variables mostly' (#16) from chore/warnings into master
Reviewed-on: #16
Reviewed-by: wjro <wrolleman@nationtech.io>
2025-04-24 14:28:14 +00:00
dccc9c04f5 chore: Fix all warnings in the project, ignore unused variables mostly 2025-04-24 10:22:53 -04:00
9345e63a32 fix: couple of changes to get a test working 2025-04-23 15:31:02 -04:00
38 changed files with 933 additions and 185 deletions

138
Cargo.lock generated
View File

@@ -356,9 +356,9 @@ dependencies = [
[[package]]
name = "cc"
version = "1.2.19"
version = "1.2.20"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8e3a13707ac958681c13b39b458c073d0d9bc8a22cb1b2f4c8e55eb72c13f362"
checksum = "04da6a0d40b948dfc4fa8f5bbf402b0fc1a64a28dbf7d12ffd683550f2c1b63a"
dependencies = [
"shlex",
]
@@ -382,9 +382,9 @@ dependencies = [
[[package]]
name = "chrono"
version = "0.4.40"
version = "0.4.41"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1a7964611d71df112cb1730f2ee67324fcf4d0fc6606acbbe9bfe06df124637c"
checksum = "c469d952047f47f91b68d1cba3f10d63c11d73e4636f24f08daf0278abf01c4d"
dependencies = [
"android-tzdata",
"iana-time-zone",
@@ -519,7 +519,7 @@ version = "0.1.16"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f9d839f2a20b0aee515dc581a6172f2321f96cab76c1a38a4c584a194955390e"
dependencies = [
"getrandom 0.2.15",
"getrandom 0.2.16",
"once_cell",
"tiny-keccak",
]
@@ -795,6 +795,27 @@ dependencies = [
"subtle",
]
[[package]]
name = "directories"
version = "6.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "16f5094c54661b38d03bd7e50df373292118db60b585c08a411c6d840017fe7d"
dependencies = [
"dirs-sys",
]
[[package]]
name = "dirs-sys"
version = "0.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e01a3366d27ee9890022452ee61b2b63a67e6f13f58900b651ff5665f0bb1fab"
dependencies = [
"libc",
"option-ext",
"redox_users",
"windows-sys 0.59.0",
]
[[package]]
name = "displaydoc"
version = "0.2.5"
@@ -812,6 +833,28 @@ version = "0.3.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fea41bba32d969b513997752735605054bc0dfa92b4c56bf1189f2e174be7a10"
[[package]]
name = "dockerfile_builder"
version = "0.1.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0ac372e31c7dd054d0fc69ca96ca36ee8d1cf79881683ad6f783c47aba3dc6e2"
dependencies = [
"dockerfile_builder_macros",
"eyre",
]
[[package]]
name = "dockerfile_builder_macros"
version = "0.1.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b627d9019ce257916c7ada6f233cf22e1e5246b6d9426b20610218afb7fd3ec9"
dependencies = [
"eyre",
"proc-macro2",
"quote",
"syn",
]
[[package]]
name = "dyn-clone"
version = "1.0.19"
@@ -961,6 +1004,7 @@ dependencies = [
"harmony",
"harmony_macros",
"http 1.3.1",
"inquire",
"k8s-openapi",
"kube",
"log",
@@ -1245,9 +1289,9 @@ dependencies = [
[[package]]
name = "getrandom"
version = "0.2.15"
version = "0.2.16"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c4567c8db10ae91089c99af84c68c38da3ec2f087c3f82960bcdbf3656b6f4d7"
checksum = "335ff9f135e4384c8150d6f27c6daed433577f86b4750418338c01a1a2528592"
dependencies = [
"cfg-if",
"js-sys",
@@ -1340,14 +1384,18 @@ dependencies = [
"async-trait",
"cidr",
"derive-new",
"directories",
"dockerfile_builder",
"env_logger",
"harmony_macros",
"harmony_types",
"helm-wrapper-rs",
"http 1.3.1",
"inquire",
"k3d-rs",
"k8s-openapi",
"kube",
"lazy_static",
"libredfish",
"log",
"non-blank-string-rs",
@@ -1361,6 +1409,7 @@ dependencies = [
"serde-value",
"serde_json",
"serde_yaml",
"temp-file",
"tokio",
"url",
"uuid",
@@ -1372,6 +1421,7 @@ version = "0.1.0"
dependencies = [
"assert_cmd",
"clap",
"env_logger",
"harmony",
"harmony_tui",
"inquire",
@@ -2015,9 +2065,9 @@ checksum = "4a5f13b858c8d314ee3e8f639011f7ccefe71f97f96e50151fb991f267928e2c"
[[package]]
name = "jiff"
version = "0.2.8"
version = "0.2.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e5ad87c89110f55e4cd4dc2893a9790820206729eaf221555f742d540b0724a0"
checksum = "5a064218214dc6a10fbae5ec5fa888d80c45d611aba169222fc272072bf7aef6"
dependencies = [
"jiff-static",
"log",
@@ -2028,9 +2078,9 @@ dependencies = [
[[package]]
name = "jiff-static"
version = "0.2.8"
version = "0.2.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d076d5b64a7e2fe6f0743f02c43ca4a6725c0f904203bfe276a5b3e793103605"
checksum = "199b7932d97e325aff3a7030e141eafe7f2c6268e1d1b24859b753a627f45254"
dependencies = [
"proc-macro2",
"quote",
@@ -2083,6 +2133,7 @@ dependencies = [
"env_logger",
"futures-util",
"httptest",
"kube",
"log",
"octocrab",
"pretty_assertions",
@@ -2188,9 +2239,9 @@ checksum = "d750af042f7ef4f724306de029d18836c26c1765a54a6a3f094cbd23a7267ffa"
[[package]]
name = "libm"
version = "0.2.11"
version = "0.2.13"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8355be11b20d696c8f18f6cc018c4e372165b1fa8126cef092399c9951984ffa"
checksum = "c9627da5196e5d8ed0b0495e61e518847578da83483c37288316d9b2e03a7f72"
[[package]]
name = "libredfish"
@@ -2205,6 +2256,16 @@ dependencies = [
"serde_json",
]
[[package]]
name = "libredox"
version = "0.1.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c0ff37bd590ca25063e35af745c343cb7a0271906fb7b37e4813e8f79f00268d"
dependencies = [
"bitflags 2.9.0",
"libc",
]
[[package]]
name = "linux-raw-sys"
version = "0.4.15"
@@ -2570,6 +2631,12 @@ dependencies = [
"yaserde_derive",
]
[[package]]
name = "option-ext"
version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "04744f49eae99ab78e0d5c0b603ab218f515ea8cfe5a456d7629ad883a3b6e7d"
[[package]]
name = "ordered-float"
version = "2.10.1"
@@ -2881,7 +2948,7 @@ version = "0.2.21"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "85eae3c4ed2f50dcfe72643da4befc30deadb458a9b590d720cde2f2b1e97da9"
dependencies = [
"zerocopy 0.8.24",
"zerocopy 0.8.25",
]
[[package]]
@@ -3007,7 +3074,7 @@ version = "0.6.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c"
dependencies = [
"getrandom 0.2.15",
"getrandom 0.2.16",
]
[[package]]
@@ -3049,6 +3116,17 @@ dependencies = [
"bitflags 2.9.0",
]
[[package]]
name = "redox_users"
version = "0.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dd6f9d3d47bdd2ad6945c5015a226ec6155d0bcdfd8f7cd29f86b71f8de99d2b"
dependencies = [
"getrandom 0.2.16",
"libredox",
"thiserror 2.0.12",
]
[[package]]
name = "regex"
version = "1.11.1"
@@ -3182,7 +3260,7 @@ checksum = "a4689e6c2294d81e88dc6261c768b63bc4fcdb852be6d1352498b114f61383b7"
dependencies = [
"cc",
"cfg-if",
"getrandom 0.2.15",
"getrandom 0.2.16",
"libc",
"untrusted",
"windows-sys 0.52.0",
@@ -3729,9 +3807,9 @@ dependencies = [
[[package]]
name = "signal-hook-registry"
version = "1.4.2"
version = "1.4.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a9e9e0b4211b72e7b8b6e85c807d36c212bdb33ea8587f7569562a84df5465b1"
checksum = "9203b8055f63a2a00e2f593bb0510367fe707d7ff1e5c872de2f537b339e5410"
dependencies = [
"libc",
]
@@ -3919,9 +3997,9 @@ checksum = "13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292"
[[package]]
name = "syn"
version = "2.0.100"
version = "2.0.101"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b09a44accad81e1ba1cd74a32461ba89dee89095ba17b32f5d03683b1b1fc2a0"
checksum = "8ce2b7fc941b3a24138a0a7cf8e858bfc6a992e7978a068a5c760deb0ed43caf"
dependencies = [
"proc-macro2",
"quote",
@@ -4002,6 +4080,12 @@ version = "1.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369"
[[package]]
name = "temp-file"
version = "0.1.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b5ff282c3f91797f0acb021f3af7fffa8a78601f0f2fd0a9f79ee7dcf9a9af9e"
[[package]]
name = "tempfile"
version = "3.19.1"
@@ -4182,9 +4266,9 @@ dependencies = [
[[package]]
name = "tokio-util"
version = "0.7.14"
version = "0.7.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6b9590b93e6fcc1739458317cccd391ad3955e2bde8913edf6f95f9e65a8f034"
checksum = "66a539a9ad6d5d281510d5bd368c973d636c02dbf8a67300bfb6b950696ad7df"
dependencies = [
"bytes",
"futures-core",
@@ -5019,11 +5103,11 @@ dependencies = [
[[package]]
name = "zerocopy"
version = "0.8.24"
version = "0.8.25"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2586fea28e186957ef732a5f8b3be2da217d65c5969d4b1e17f973ebbe876879"
checksum = "a1702d9583232ddb9174e01bb7c15a2ab8fb1bc6f227aa1233858c351a3ba0cb"
dependencies = [
"zerocopy-derive 0.8.24",
"zerocopy-derive 0.8.25",
]
[[package]]
@@ -5039,9 +5123,9 @@ dependencies = [
[[package]]
name = "zerocopy-derive"
version = "0.8.24"
version = "0.8.25"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a996a8f63c5c4448cd959ac1bab0aaa3306ccfd060472f85943ee0750f0169be"
checksum = "28a6e20d751156648aa063f3800b706ee209a32c0b4d9f24be3d980b01be55ef"
dependencies = [
"proc-macro2",
"quote",

View File

@@ -0,0 +1,68 @@
# Architecture Decision Record: Monitoring and Alerting
Proposed by: Willem Rolleman
Date: April 28 2025
## Status
Proposed
## Context
A Harmony user should be able to initialize a monitoring stack easily, either on the first run of Harmony or in a way that integrates with existing projects and infrastructure, without creating multiple instances of the monitoring stack or overwriting existing alerts/configurations. The user also needs a simple way to configure the stack so that it watches their projects. There should be reasonable defaults that are easily customizable for each project.
## Decision
Create a MonitoringStack score that spins up a maestro to launch the monitoring stack, or skips the launch if the stack is already present.
The MonitoringStack score can be passed to the maestro in the vec! scores list.
## Rationale
Having the score launch a maestro allows the user to easily create a new monitoring stack and keeps its components grouped together. The MonitoringStack score can handle all the logic for adding alerts, ensuring that the stack is running, etc.
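As a rough illustration of the proposed usage (MonitoringStackScore is the score this ADR proposes and does not exist yet; the surrounding API follows the Maestro::initialize pattern introduced in this change set):
```rust
use harmony::{inventory::Inventory, maestro::Maestro, topology::K8sAnywhereTopology};

#[tokio::main]
async fn main() {
    let mut maestro = Maestro::initialize(Inventory::autoload(), K8sAnywhereTopology::new())
        .await
        .unwrap();
    maestro.register_all(vec![
        // MonitoringStackScore is the score proposed by this ADR; it does not exist yet.
        // It would install Grafana/Prometheus only when no stack is present, otherwise
        // join the existing stack and register this project's alerts.
        Box::new(MonitoringStackScore::default()),
    ]);
}
```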
## Alternatives considered
- ### Implement alerting and monitoring stack using existing HelmScore for each project
- **Pros**:
- Each project can choose whichever monitoring and alerting stack it prefers
- Less overhead in terms of core Harmony code
- can add Box::new(grafana::grafanascore(namespace))
- **Cons**:
- No default solution implemented
- Devs need to choose what they use
- Increases complexity of score projects
- Each project will create a new monitoring and alerting instance rather than joining the existing one
- ### Use OKD grafana and prometheus
- **Pros**:
- Minimal config to do in Harmony
- **Cons**:
- relies on OKD, so it will not work for local testing via k3d
- ### Create a monitoring and alerting crate similar to harmony tui
- **Pros**:
- Creates a default solution that can be implemented once by harmony
- can create a join function that will allow a project to connect to the existing solution
- eliminates risk of creating multiple instances of grafana or prometheus
- **Cons**:
- more complex than using a helm score
- management of values files for individual functions becomes more complicated, i.e. how do you create alerts for one project via helm install without overwriting the other projects' alerts
- ### Add monitoring to the Maestro struct so that whether the monitoring stack is used must be defined explicitly
- **Pros**:
- less for the user to define
- may be easier to set defaults
- **Cons**:
- feels counterintuitive
- would need to modify the structure of the maestro and how it operates, which seems like a bad idea
- unclear how to allow user to pass custom values/configs to the monitoring stack for subsequent projects
- ### Create a MonitoringStack score, added to the scores vec!, which loads a maestro to install the stack if it is not ready or to add custom endpoints/alerts to an existing stack
- **Pros**:
- Maestro already accepts a list of scores to initialize
- leaving out the monitoring score simply means the user does not want monitoring
- if the monitoring stack is already created, the MonitoringStack score doesn't necessarily need to be added to each project
- components of the monitoring stack are bundled together and can be expanded or modified from the same place
- **Cons**:
- maybe need to create

5
check.sh Normal file
View File

@@ -0,0 +1,5 @@
#!/bin/sh
set -e
cargo check --all-targets --all-features --keep-going
cargo fmt --check
cargo test

View File

@@ -2,14 +2,14 @@ use harmony::{
inventory::Inventory,
maestro::Maestro,
modules::dummy::{ErrorScore, PanicScore, SuccessScore},
topology::HAClusterTopology,
topology::LocalhostTopology,
};
#[tokio::main]
async fn main() {
let inventory = Inventory::autoload();
let topology = HAClusterTopology::autoload();
let mut maestro = Maestro::new(inventory, topology);
let topology = LocalhostTopology::new();
let mut maestro = Maestro::initialize(inventory, topology).await.unwrap();
maestro.register_all(vec![
Box::new(SuccessScore {}),

View File

@@ -18,3 +18,4 @@ kube = "0.98.0"
k8s-openapi = { version = "0.24.0", features = [ "v1_30" ] }
http = "1.2.0"
serde_yaml = "0.9.34"
inquire.workspace = true

View File

@@ -1,20 +1,32 @@
use std::collections::BTreeMap;
use harmony_macros::yaml;
use inquire::Confirm;
use k8s_openapi::{
api::{
apps::v1::{Deployment, DeploymentSpec},
core::v1::{Container, Node, Pod, PodSpec, PodTemplateSpec},
core::v1::{Container, PodSpec, PodTemplateSpec},
},
apimachinery::pkg::apis::meta::v1::LabelSelector,
};
use kube::{
Api, Client, Config, ResourceExt,
api::{ListParams, ObjectMeta, PostParams},
Api, Client, ResourceExt,
api::{ObjectMeta, PostParams},
};
#[tokio::main]
async fn main() {
let confirmation = Confirm::new(
"This will install various ressources to your default kubernetes cluster. Are you sure?",
)
.with_default(false)
.prompt()
.expect("Unexpected prompt error");
if !confirmation {
return;
}
let client = Client::try_default()
.await
.expect("Should instanciate client from defaults");
@@ -42,8 +54,7 @@ async fn main() {
// println!("found node {} status {:?}", n.name_any(), n.status.unwrap())
// }
let nginxdeployment = nginx_deployment_2();
let nginxdeployment = nginx_deployment_serde();
assert_eq!(nginx_deployment(), nginx_macro());
assert_eq!(nginx_deployment_2(), nginx_macro());
assert_eq!(nginx_deployment_serde(), nginx_macro());
let nginxdeployment = nginx_macro();
@@ -149,6 +160,7 @@ fn nginx_deployment_2() -> Deployment {
deployment
}
fn nginx_deployment() -> Deployment {
let deployment = Deployment {
metadata: ObjectMeta {

View File

@@ -1,12 +1,15 @@
use harmony::{
data::Version,
inventory::Inventory,
maestro::Maestro,
modules::lamp::{LAMPConfig, LAMPScore},
modules::lamp::{LAMPConfig, LAMPProfile, LAMPScore},
topology::{K8sAnywhereTopology, Url},
};
use std::collections::HashMap;
#[tokio::main]
async fn main() {
// let _ = env_logger::Builder::from_default_env().filter_level(log::LevelFilter::Info).try_init();
let lamp_stack = LAMPScore {
name: "harmony-lamp-demo".to_string(),
domain: Url::Url(url::Url::parse("https://lampdemo.harmony.nationtech.io").unwrap()),
@@ -15,9 +18,18 @@ async fn main() {
project_root: "./php".into(),
..Default::default()
},
profiles: HashMap::from([
("dev", LAMPProfile { ssl_enabled: false }),
("prod", LAMPProfile { ssl_enabled: true }),
]),
};
let mut maestro = Maestro::<K8sAnywhereTopology>::load_from_env();
let mut maestro = Maestro::<K8sAnywhereTopology>::initialize(
Inventory::autoload(),
K8sAnywhereTopology::new(),
)
.await
.unwrap();
maestro.register_all(vec![Box::new(lamp_stack)]);
harmony_tui::init(maestro).await.unwrap();
}

View File

@@ -1,10 +1,7 @@
use harmony::{
inventory::Inventory,
maestro::Maestro,
modules::{
dummy::{ErrorScore, PanicScore, SuccessScore},
k8s::deployment::K8sDeploymentScore,
},
modules::dummy::{ErrorScore, PanicScore, SuccessScore},
topology::HAClusterTopology,
};
@@ -12,7 +9,7 @@ use harmony::{
async fn main() {
let inventory = Inventory::autoload();
let topology = HAClusterTopology::autoload();
let mut maestro = Maestro::new(inventory, topology);
let mut maestro = Maestro::initialize(inventory, topology).await.unwrap();
maestro.register_all(vec![
// ADD scores :

View File

@@ -84,7 +84,7 @@ async fn main() {
let http_score = HttpScore::new(Url::LocalFolder(
"./data/watchguard/pxe-http-files".to_string(),
));
let mut maestro = Maestro::new(inventory, topology);
let mut maestro = Maestro::initialize(inventory, topology).await.unwrap();
maestro.register_all(vec![
Box::new(dns_score),
Box::new(dhcp_score),

View File

@@ -7,11 +7,9 @@ use harmony::{
dns::DnsScore,
dummy::{ErrorScore, PanicScore, SuccessScore},
load_balancer::LoadBalancerScore,
okd::load_balancer::OKDLoadBalancerScore,
},
topology::{
BackendServer, HAClusterTopology, HealthCheck, HttpMethod, HttpStatusCode,
LoadBalancerService,
BackendServer, DummyInfra, HealthCheck, HttpMethod, HttpStatusCode, LoadBalancerService,
},
};
use harmony_macros::ipv4;
@@ -19,8 +17,8 @@ use harmony_macros::ipv4;
#[tokio::main]
async fn main() {
let inventory = Inventory::autoload();
let topology = HAClusterTopology::autoload();
let mut maestro = Maestro::new(inventory, topology);
let topology = DummyInfra {};
let mut maestro = Maestro::initialize(inventory, topology).await.unwrap();
maestro.register_all(vec![
Box::new(SuccessScore {}),

View File

@@ -33,3 +33,8 @@ serde-value = { workspace = true }
inquire.workspace = true
helm-wrapper-rs = "0.4.0"
non-blank-string-rs = "1.0.4"
k3d-rs = { path = "../k3d" }
directories = "6.0.0"
lazy_static = "1.5.0"
dockerfile_builder = "0.1.5"
temp-file = "0.1.9"

View File

@@ -0,0 +1,9 @@
use lazy_static::lazy_static;
use std::path::PathBuf;
lazy_static! {
pub static ref HARMONY_CONFIG_DIR: PathBuf = directories::BaseDirs::new()
.unwrap()
.data_dir()
.join("harmony");
}

View File

@@ -7,7 +7,6 @@ use super::{
data::{Id, Version},
executors::ExecutorError,
inventory::Inventory,
topology::Topology,
};
pub enum InterpretName {
@@ -40,8 +39,12 @@ impl std::fmt::Display for InterpretName {
#[async_trait]
pub trait Interpret<T>: std::fmt::Debug + Send {
async fn execute(&self, inventory: &Inventory, topology: &T)
-> Result<Outcome, InterpretError>;
async fn execute(
&self,
inventory: &Inventory,
topology: &T,
profile: &String,
) -> Result<Outcome, InterpretError>;
fn get_name(&self) -> InterpretName;
fn get_version(&self) -> Version;
fn get_status(&self) -> InterpretStatus;

View File

@@ -16,18 +16,27 @@ pub struct Maestro<T: Topology> {
topology: T,
scores: Arc<RwLock<ScoreVec<T>>>,
topology_preparation_result: Mutex<Option<Outcome>>,
profile: String,
}
impl<T: Topology> Maestro<T> {
pub fn new(inventory: Inventory, topology: T) -> Self {
pub fn new(inventory: Inventory, topology: T, profile: String) -> Self {
Self {
inventory,
topology,
scores: Arc::new(RwLock::new(Vec::new())),
topology_preparation_result: None.into(),
profile,
}
}
pub async fn initialize(inventory: Inventory, topology: T) -> Result<Self, InterpretError> {
let profile = "dev".to_string(); // TODO: retrieve from env?
let instance = Self::new(inventory, topology, profile);
instance.prepare_topology().await?;
Ok(instance)
}
/// Ensures the associated Topology is ready for operations.
/// Delegates the readiness check and potential setup actions to the Topology.
pub async fn prepare_topology(&self) -> Result<Outcome, InterpretError> {
@@ -46,27 +55,6 @@ impl<T: Topology> Maestro<T> {
Ok(outcome)
}
// Load the inventory and inventory from environment.
// This function is able to discover the context that it is running in, such as k8s clusters, aws cloud, linux host, etc.
// When the HARMONY_TOPOLOGY environment variable is not set, it will default to install k3s
// locally (lazily, if not installed yet, when the first execution occurs) and use that as a topology
// So, by default, the inventory is a single host that the binary is running on, and the
// topology is a single node k3s
//
// By default :
// - Linux => k3s
// - macos, windows => docker compose
//
// To run more complex cases like OKDHACluster, either provide the default target in the
// harmony infrastructure as code or as an environment variable
pub fn load_from_env() -> Self {
// Load env var HARMONY_TOPOLOGY
match std::env::var("HARMONY_TOPOLOGY") {
Ok(_) => todo!(),
Err(_) => todo!(),
}
}
pub fn register_all(&mut self, mut scores: ScoreVec<T>) {
let mut score_mut = self.scores.write().expect("Should acquire lock");
score_mut.append(&mut scores);
@@ -93,9 +81,11 @@ impl<T: Topology> Maestro<T> {
);
}
info!("Running score {score:?}");
let interpret = score.create_interpret();
let interpret = score.apply_profile(&self.profile).create_interpret();
info!("Launching interpret {interpret:?}");
let result = interpret.execute(&self.inventory, &self.topology).await;
let result = interpret
.execute(&self.inventory, &self.topology, &self.profile)
.await;
info!("Got result {result:?}");
result
}

View File

@@ -1,3 +1,4 @@
pub mod config;
pub mod data;
pub mod executors;
pub mod filter;

View File

@@ -8,6 +8,9 @@ use super::{interpret::Interpret, topology::Topology};
pub trait Score<T: Topology>:
std::fmt::Debug + ScoreToString<T> + Send + Sync + CloneBoxScore<T> + SerializeScore<T>
{
fn apply_profile(&self, profile: &String) -> Box<dyn Score<T>> {
Box::new(self.clone())
}
fn create_interpret(&self) -> Box<dyn Interpret<T>>;
fn name(&self) -> String;
}
@@ -82,7 +85,7 @@ where
};
let formatted_val = self.format_value_as_string(v, indent + 1);
let mut lines = formatted_val.lines().map(|line| line.trim_start());
let lines = formatted_val.lines().map(|line| line.trim_start());
let wrapped_lines: Vec<_> = lines
.flat_map(|line| self.wrap_or_truncate(line.trim_start(), 48))
@@ -218,7 +221,7 @@ where
mod tests {
use super::*;
use crate::modules::dns::DnsScore;
use crate::topology::{self, HAClusterTopology};
use crate::topology::HAClusterTopology;
#[test]
fn test_format_values_as_string() {

View File

@@ -1,6 +1,7 @@
use async_trait::async_trait;
use harmony_macros::ip;
use harmony_types::net::MacAddress;
use log::info;
use crate::executors::ExecutorError;
use crate::interpret::InterpretError;
@@ -56,8 +57,10 @@ impl Topology for HAClusterTopology {
#[async_trait]
impl K8sclient for HAClusterTopology {
async fn k8s_client(&self) -> Result<Arc<K8sClient>, kube::Error> {
Ok(Arc::new(K8sClient::try_default().await?))
async fn k8s_client(&self) -> Result<Arc<K8sClient>, String> {
Ok(Arc::new(
K8sClient::try_default().await.map_err(|e| e.to_string())?,
))
}
}
@@ -223,7 +226,20 @@ impl HttpServer for HAClusterTopology {
}
#[derive(Debug)]
struct DummyInfra;
pub struct DummyInfra;
#[async_trait]
impl Topology for DummyInfra {
fn name(&self) -> &str {
todo!()
}
async fn ensure_ready(&self) -> Result<Outcome, InterpretError> {
let dummy_msg = "This is a dummy infrastructure that does nothing";
info!("{dummy_msg}");
Ok(Outcome::success(dummy_msg.to_string()))
}
}
const UNIMPLEMENTED_DUMMY_INFRA: &str = "This is a dummy infrastructure, no operation is supported";

View File

@@ -1,7 +1,9 @@
use derive_new::new;
use k8s_openapi::NamespaceResourceScope;
use kube::{Api, Client, Error, Resource, api::PostParams};
use serde::de::DeserializeOwned;
#[derive(new)]
pub struct K8sClient {
client: Client,
}

View File

@@ -1,4 +1,4 @@
use std::io;
use std::{process::Command, sync::Arc};
use async_trait::async_trait;
use inquire::Confirm;
@@ -13,25 +13,63 @@ use crate::{
topology::LocalhostTopology,
};
use super::{Topology, k8s::K8sClient};
use super::{HelmCommand, K8sclient, Topology, k8s::K8sClient};
struct K8sState {
client: K8sClient,
source: K8sSource,
client: Arc<K8sClient>,
_source: K8sSource,
message: String,
}
enum K8sSource {
RemoteCluster,
LocalK3d,
// TODO: Add variants for cloud providers like AwsEks, Gke, Aks
}
pub struct K8sAnywhereTopology {
k8s_state: OnceCell<Option<K8sState>>,
}
#[async_trait]
impl K8sclient for K8sAnywhereTopology {
async fn k8s_client(&self) -> Result<Arc<K8sClient>, String> {
let state = match self.k8s_state.get() {
Some(state) => state,
None => return Err("K8s state not initialized yet".to_string()),
};
let state = match state {
Some(state) => state,
None => return Err("K8s client initialized but empty".to_string()),
};
Ok(state.client.clone())
}
}
impl K8sAnywhereTopology {
pub fn new() -> Self {
Self {
k8s_state: OnceCell::new(),
}
}
fn is_helm_available(&self) -> Result<(), String> {
let version_result = Command::new("helm")
.arg("version")
.output()
.map_err(|e| format!("Failed to execute 'helm -version': {}", e))?;
if !version_result.status.success() {
return Err("Failed to run 'helm -version'".to_string());
}
// Print the version output
let version_output = String::from_utf8_lossy(&version_result.stdout);
println!("Helm version: {}", version_output.trim());
Ok(())
}
async fn try_load_system_kubeconfig(&self) -> Option<K8sClient> {
todo!("Use kube-rs default behavior to load system kubeconfig");
}
@@ -40,13 +78,15 @@ impl K8sAnywhereTopology {
todo!("Use kube-rs to load kubeconfig at path {path}");
}
async fn try_install_k3d(&self) -> Result<K8sClient, InterpretError> {
let maestro = Maestro::new(Inventory::autoload(), LocalhostTopology::new());
let k3d_score = K3DInstallationScore::new();
fn get_k3d_installation_score(&self) -> K3DInstallationScore {
K3DInstallationScore::default()
}
async fn try_install_k3d(&self) -> Result<(), InterpretError> {
let maestro = Maestro::initialize(Inventory::autoload(), LocalhostTopology::new()).await?;
let k3d_score = self.get_k3d_installation_score();
maestro.interpret(Box::new(k3d_score)).await?;
todo!(
"Create Maestro with LocalDockerTopology or something along these lines and run a K3dInstallationScore on it"
);
Ok(())
}
async fn try_get_or_install_k8s_client(&self) -> Result<Option<K8sState>, InterpretError> {
@@ -62,14 +102,14 @@ impl K8sAnywhereTopology {
if k8s_anywhere_config.use_system_kubeconfig {
match self.try_load_system_kubeconfig().await {
Some(client) => todo!(),
Some(_client) => todo!(),
None => todo!(),
}
}
if let Some(kubeconfig) = k8s_anywhere_config.kubeconfig {
match self.try_load_kubeconfig(&kubeconfig).await {
Some(client) => todo!(),
Some(_client) => todo!(),
None => todo!(),
}
}
@@ -91,14 +131,24 @@ impl K8sAnywhereTopology {
}
info!("Starting K8sAnywhere installation");
match self.try_install_k3d().await {
Ok(client) => Ok(Some(K8sState {
client,
source: K8sSource::LocalK3d,
self.try_install_k3d().await?;
let k3d_score = self.get_k3d_installation_score();
// I feel like having to rely on the k3d_rs crate here is a smell
// I think we should have a way to interact more deeply with scores/interpret. Maybe the
// K3DInstallationScore should expose a method to get_client ? Not too sure what would be a
// good implementation due to the stateful nature of the k3d thing. Which is why I went
// with this solution for now
let k3d = k3d_rs::K3d::new(k3d_score.installation_path, Some(k3d_score.cluster_name));
let state = match k3d.get_client().await {
Ok(client) => K8sState {
client: Arc::new(K8sClient::new(client)),
_source: K8sSource::LocalK3d,
message: "Successfully installed K3D cluster and acquired client".to_string(),
})),
},
Err(_) => todo!(),
}
};
Ok(Some(state))
}
}
@@ -126,19 +176,27 @@ struct K8sAnywhereConfig {
#[async_trait]
impl Topology for K8sAnywhereTopology {
fn name(&self) -> &str {
todo!()
"K8sAnywhereTopology"
}
async fn ensure_ready(&self) -> Result<Outcome, InterpretError> {
match self
let k8s_state = self
.k8s_state
.get_or_try_init(|| self.try_get_or_install_k8s_client())
.await?
{
Some(k8s_state) => Ok(Outcome::success(k8s_state.message.clone())),
None => Err(InterpretError::new(
"No K8s client could be found or installed".to_string(),
)),
.await?;
let k8s_state: &K8sState = k8s_state.as_ref().ok_or(InterpretError::new(
"No K8s client could be found or installed".to_string(),
))?;
match self.is_helm_available() {
Ok(()) => Ok(Outcome::success(format!(
"{} + helm available",
k8s_state.message.clone()
))),
Err(e) => Err(InterpretError::new(format!("helm unavailable: {}", e))),
}
}
}
impl HelmCommand for K8sAnywhereTopology {}

View File

@@ -3,7 +3,7 @@ use derive_new::new;
use crate::interpret::{InterpretError, Outcome};
use super::Topology;
use super::{HelmCommand, Topology};
#[derive(new)]
pub struct LocalhostTopology;
@@ -20,3 +20,6 @@ impl Topology for LocalhostTopology {
))
}
}
// TODO: Delete this, temp for test
impl HelmCommand for LocalhostTopology {}

View File

@@ -42,8 +42,8 @@ pub struct NetworkDomain {
pub name: String,
}
#[async_trait]
pub trait K8sclient: Send + Sync + std::fmt::Debug {
async fn k8s_client(&self) -> Result<Arc<K8sClient>, kube::Error>;
pub trait K8sclient: Send + Sync {
async fn k8s_client(&self) -> Result<Arc<K8sClient>, String>;
}
#[async_trait]

View File

@@ -370,13 +370,10 @@ mod tests {
let result = get_servers_for_backend(&backend, &haproxy);
// Check the result
assert_eq!(
result,
vec![BackendServer {
address: "192.168.1.1".to_string(),
port: 80,
},]
);
assert_eq!(result, vec![BackendServer {
address: "192.168.1.1".to_string(),
port: 80,
},]);
}
#[test]
fn test_get_servers_for_backend_no_linked_servers() {
@@ -433,18 +430,15 @@ mod tests {
// Call the function
let result = get_servers_for_backend(&backend, &haproxy);
// Check the result
assert_eq!(
result,
vec![
BackendServer {
address: "some-hostname.test.mcd".to_string(),
port: 80,
},
BackendServer {
address: "192.168.1.2".to_string(),
port: 8080,
},
]
);
assert_eq!(result, vec![
BackendServer {
address: "some-hostname.test.mcd".to_string(),
port: 80,
},
BackendServer {
address: "192.168.1.2".to_string(),
port: 8080,
},
]);
}
}

View File

@@ -75,6 +75,7 @@ impl<T: Topology> Interpret<T> for DummyInterpret {
&self,
_inventory: &Inventory,
_topology: &T,
_profile: &String,
) -> Result<Outcome, InterpretError> {
self.result.clone()
}
@@ -121,6 +122,7 @@ impl<T: Topology> Interpret<T> for PanicInterpret {
&self,
_inventory: &Inventory,
_topology: &T,
_profile: &String,
) -> Result<Outcome, InterpretError> {
panic!("Panic interpret always panics when executed")
}

View File

@@ -6,28 +6,31 @@ use crate::topology::{HelmCommand, Topology};
use async_trait::async_trait;
use helm_wrapper_rs;
use helm_wrapper_rs::blocking::{DefaultHelmExecutor, HelmExecutor};
use non_blank_string_rs::NonBlankString;
pub use non_blank_string_rs::NonBlankString;
use serde::Serialize;
use serde::de::DeserializeOwned;
use std::collections::HashMap;
use std::path::PathBuf;
use std::path::Path;
use temp_file::TempFile;
#[derive(Debug, Clone, Serialize)]
pub struct HelmChartScore {
pub namespace: Option<NonBlankString>,
pub release_name: NonBlankString,
pub chart_name: NonBlankString,
pub chart_version: NonBlankString,
pub chart_version: Option<NonBlankString>,
pub values_overrides: Option<HashMap<NonBlankString, String>>,
pub values_yaml: Option<String>,
}
impl<T: Topology> Score<T> for HelmChartScore {
impl<T: Topology + HelmCommand> Score<T> for HelmChartScore {
fn create_interpret(&self) -> Box<dyn Interpret<T>> {
todo!()
Box::new(HelmChartInterpret {
score: self.clone(),
})
}
fn name(&self) -> String {
"HelmChartScore".to_string()
format!("{} {} HelmChartScore", self.release_name, self.chart_name)
}
}
@@ -47,17 +50,28 @@ impl<T: Topology + HelmCommand> Interpret<T> for HelmChartInterpret {
.score
.namespace
.as_ref()
.unwrap_or(todo!("Get namespace from active kubernetes cluster"));
.unwrap_or_else(|| todo!("Get namespace from active kubernetes cluster"));
let tf: TempFile;
let yaml_path: Option<&Path> = match self.score.values_yaml.as_ref() {
Some(yaml_str) => {
tf = temp_file::with_contents(yaml_str.as_bytes());
Some(tf.path())
}
None => None,
};
let helm_executor = DefaultHelmExecutor::new();
let res = helm_executor.install_or_upgrade(
ns,
&ns,
&self.score.release_name,
&self.score.chart_name,
Some(&self.score.chart_version),
self.score.chart_version.as_ref(),
self.score.values_overrides.as_ref(),
None,
yaml_path,
None,
);
let status = match res {
Ok(status) => status,
Err(err) => return Err(InterpretError::new(err.to_string())),
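A minimal sketch of how the new values_yaml option on HelmChartScore might be used, based on the field types shown in the diff above (the module path and chart coordinates are illustrative assumptions):
```rust
// Illustrative module path; this change re-exports NonBlankString alongside HelmChartScore.
use harmony::modules::helm::chart::{HelmChartScore, NonBlankString};

fn grafana_score() -> HelmChartScore {
    HelmChartScore {
        namespace: Some("monitoring".parse::<NonBlankString>().unwrap()),
        release_name: "grafana".parse().unwrap(),
        chart_name: "grafana/grafana".parse().unwrap(),
        // chart_version is now Option; None lets helm pick the latest version.
        chart_version: None,
        values_overrides: None,
        // New in this change: inline values YAML, written to a temp file and passed
        // to helm install/upgrade by HelmChartInterpret.
        values_yaml: Some("persistence:\n  enabled: true\n".to_string()),
    }
}
```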

View File

@@ -1,7 +1,11 @@
use std::path::PathBuf;
use async_trait::async_trait;
use log::info;
use serde::Serialize;
use crate::{
config::HARMONY_CONFIG_DIR,
data::{Id, Version},
interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
inventory::Inventory,
@@ -10,26 +14,25 @@ use crate::{
};
#[derive(Debug, Clone, Serialize)]
pub struct K3DInstallationScore {}
pub struct K3DInstallationScore {
pub installation_path: PathBuf,
pub cluster_name: String,
}
impl K3DInstallationScore {
pub fn new() -> Self {
Self {}
impl Default for K3DInstallationScore {
fn default() -> Self {
Self {
installation_path: HARMONY_CONFIG_DIR.join("k3d"),
cluster_name: "harmony".to_string(),
}
}
}
impl<T: Topology> Score<T> for K3DInstallationScore {
fn create_interpret(&self) -> Box<dyn crate::interpret::Interpret<T>> {
todo!("
1. Decide if I create a new crate for k3d management, especially to avoid the ocrtograb dependency
2. Implement k3d management
3. Find latest tag
4. Download k3d to some path managed by harmony (or not?)
5. Bootstrap cluster
6. Get kubeconfig
7. Load kubeconfig in k8s anywhere
8. Complete k8sanywhere setup
")
Box::new(K3dInstallationInterpret {
score: self.clone(),
})
}
fn name(&self) -> String {
@@ -38,16 +41,31 @@ impl<T: Topology> Score<T> for K3DInstallationScore {
}
#[derive(Debug)]
struct K3dInstallationInterpret {}
pub struct K3dInstallationInterpret {
score: K3DInstallationScore,
}
#[async_trait]
impl<T: Topology> Interpret<T> for K3dInstallationInterpret {
async fn execute(
&self,
inventory: &Inventory,
topology: &T,
_inventory: &Inventory,
_topology: &T,
) -> Result<Outcome, InterpretError> {
todo!()
let k3d = k3d_rs::K3d::new(
self.score.installation_path.clone(),
Some(self.score.cluster_name.clone()),
);
match k3d.ensure_installed().await {
Ok(_client) => {
let msg = format!("k3d cluster {} is installed ", self.score.cluster_name);
info!("{msg}");
Ok(Outcome::success(msg))
}
Err(msg) => Err(InterpretError::new(format!(
"K3dInstallationInterpret failed to ensure k3d is installed : {msg}"
))),
}
}
fn get_name(&self) -> InterpretName {
InterpretName::K3dInstallation

View File

@@ -1,6 +1,9 @@
use std::collections::HashMap;
use std::fmt::Debug;
use std::path::{Path, PathBuf};
use async_trait::async_trait;
use log::info;
use serde::Serialize;
use crate::{
@@ -18,6 +21,7 @@ pub struct LAMPScore {
pub domain: Url,
pub config: LAMPConfig,
pub php_version: Version,
pub profiles: HashMap<&'static str, LAMPProfile>,
}
#[derive(Debug, Clone, Serialize)]
@@ -26,6 +30,11 @@ pub struct LAMPConfig {
pub ssl_enabled: bool,
}
#[derive(Debug, Clone, Serialize)]
pub struct LAMPProfile {
pub ssl_enabled: bool,
}
impl Default for LAMPConfig {
fn default() -> Self {
LAMPConfig {
@@ -35,9 +44,28 @@ impl Default for LAMPConfig {
}
}
impl<T: Topology> Score<T> for LAMPScore {
impl<T: Topology + K8sclient> Score<T> for LAMPScore {
fn apply_profile(&self, profile: &String) -> Box<dyn Score<T>> {
let profile = match self.profiles.get(profile.as_str()) {
Some(profile) => profile,
None => panic!("Not good"), // TODO: better handling
};
let config = LAMPConfig {
ssl_enabled: profile.ssl_enabled,
..self.config.clone()
};
Box::new(LAMPScore {
config,
..self.clone()
})
}
fn create_interpret(&self) -> Box<dyn Interpret<T>> {
todo!()
Box::new(LAMPInterpret {
score: self.clone(),
})
}
fn name(&self) -> String {
@@ -56,15 +84,29 @@ impl<T: Topology + K8sclient> Interpret<T> for LAMPInterpret {
&self,
inventory: &Inventory,
topology: &T,
profile: &String,
) -> Result<Outcome, InterpretError> {
let image_name = match self.build_docker_image() {
Ok(name) => name,
Err(e) => {
return Err(InterpretError::new(format!(
"Could not build LAMP docker image {e}"
)));
}
};
info!("LAMP docker image built {image_name}");
let deployment_score = K8sDeploymentScore {
name: <LAMPScore as Score<T>>::name(&self.score),
image: "local_image".to_string(),
image: image_name,
};
info!("LAMP deployment_score {deployment_score:?}");
todo!();
deployment_score
.apply_profile(profile)
.create_interpret()
.execute(inventory, topology)
.execute(inventory, topology, profile)
.await?;
todo!()
}
@@ -85,3 +127,164 @@ impl<T: Topology + K8sclient> Interpret<T> for LAMPInterpret {
todo!()
}
}
use dockerfile_builder::instruction::{CMD, COPY, ENV, EXPOSE, FROM, RUN, WORKDIR};
use dockerfile_builder::{Dockerfile, instruction_builder::EnvBuilder};
use std::fs;
impl LAMPInterpret {
pub fn build_dockerfile(
&self,
score: &LAMPScore,
) -> Result<PathBuf, Box<dyn std::error::Error>> {
let mut dockerfile = Dockerfile::new();
// Use the PHP version from the score to determine the base image
let php_version = score.php_version.to_string();
let php_major_minor = php_version
.split('.')
.take(2)
.collect::<Vec<&str>>()
.join(".");
// Base image selection - using official PHP image with Apache
dockerfile.push(FROM::from(format!("php:{}-apache", php_major_minor)));
// Set environment variables for PHP configuration
dockerfile.push(ENV::from("PHP_MEMORY_LIMIT=256M"));
dockerfile.push(ENV::from("PHP_MAX_EXECUTION_TIME=30"));
dockerfile.push(
EnvBuilder::builder()
.key("PHP_ERROR_REPORTING")
.value("\"E_ERROR | E_WARNING | E_PARSE\"")
.build()
.unwrap(),
);
// Install necessary PHP extensions and dependencies
dockerfile.push(RUN::from(
"apt-get update && \
apt-get install -y --no-install-recommends \
libfreetype6-dev \
libjpeg62-turbo-dev \
libpng-dev \
libzip-dev \
unzip \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/*",
));
dockerfile.push(RUN::from(
"docker-php-ext-configure gd --with-freetype --with-jpeg && \
docker-php-ext-install -j$(nproc) \
gd \
mysqli \
pdo_mysql \
zip \
opcache",
));
// Copy PHP configuration
dockerfile.push(RUN::from("mkdir -p /usr/local/etc/php/conf.d/"));
// Create and copy a custom PHP configuration
let php_config = r#"
memory_limit = ${PHP_MEMORY_LIMIT}
max_execution_time = ${PHP_MAX_EXECUTION_TIME}
error_reporting = ${PHP_ERROR_REPORTING}
display_errors = Off
log_errors = On
error_log = /dev/stderr
date.timezone = UTC
; Opcache configuration for production
opcache.enable=1
opcache.memory_consumption=128
opcache.interned_strings_buffer=8
opcache.max_accelerated_files=4000
opcache.revalidate_freq=2
opcache.fast_shutdown=1
"#;
// Save this configuration to a temporary file within the project root
let config_path = Path::new(&score.config.project_root).join("docker-php.ini");
fs::write(&config_path, php_config)?;
// Reference the file within the Docker context (where the build runs)
dockerfile.push(COPY::from(
"docker-php.ini /usr/local/etc/php/conf.d/docker-php.ini",
));
// Security hardening
dockerfile.push(RUN::from(
"a2enmod headers && \
a2enmod rewrite && \
sed -i 's/ServerTokens OS/ServerTokens Prod/' /etc/apache2/conf-enabled/security.conf && \
sed -i 's/ServerSignature On/ServerSignature Off/' /etc/apache2/conf-enabled/security.conf"
));
// Create a dedicated user for running Apache
dockerfile.push(RUN::from(
"groupadd -g 1000 appuser && \
useradd -u 1000 -g appuser -m -s /bin/bash appuser && \
chown -R appuser:appuser /var/www/html",
));
// Set the working directory
dockerfile.push(WORKDIR::from("/var/www/html"));
// Copy application code from the project root to the container
// Note: In Dockerfile, the COPY context is relative to the build context
// We'll handle the actual context in the build_docker_image method
dockerfile.push(COPY::from(". /var/www/html"));
// Fix permissions
dockerfile.push(RUN::from("chown -R appuser:appuser /var/www/html"));
// Expose Apache port
dockerfile.push(EXPOSE::from("80/tcp"));
// Set the default command
dockerfile.push(CMD::from("apache2-foreground"));
// Save the Dockerfile to disk in the project root
let dockerfile_path = Path::new(&score.config.project_root).join("Dockerfile");
fs::write(&dockerfile_path, dockerfile.to_string())?;
Ok(dockerfile_path)
}
pub fn build_docker_image(&self) -> Result<String, Box<dyn std::error::Error>> {
info!("Generating Dockerfile");
let dockerfile = self.build_dockerfile(&self.score)?;
info!(
"Building Docker image with file {} from root {}",
dockerfile.to_string_lossy(),
self.score.config.project_root.to_string_lossy()
);
let image_name = format!("{}-php-apache", self.score.name);
let project_root = &self.score.config.project_root;
let output = std::process::Command::new("docker")
.args([
"build",
"--file",
dockerfile.to_str().unwrap(),
"-t",
&image_name,
project_root.to_str().unwrap(),
])
.output()?;
if !output.status.success() {
return Err(format!(
"Failed to build Docker image: {}",
String::from_utf8_lossy(&output.stderr)
)
.into());
}
Ok(image_name)
}
}

View File

@@ -13,7 +13,7 @@ use crate::{
};
impl std::fmt::Display for OKDLoadBalancerScore {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
fn fmt(&self, _f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
todo!()
}
}

View File

@@ -2,15 +2,15 @@ use crate::data::Version;
#[derive(Debug, Clone)]
pub struct OKDUpgradeScore {
current_version: Version,
target_version: Version,
_current_version: Version,
_target_version: Version,
}
impl OKDUpgradeScore {
pub fn new() -> Self {
Self {
current_version: Version::from("4.17.0-okd-scos.0").unwrap(),
target_version: Version::from("").unwrap(),
_current_version: Version::from("4.17.0-okd-scos.0").unwrap(),
_target_version: Version::from("").unwrap(),
}
}
}

View File

@@ -27,7 +27,7 @@ pub struct OPNsenseShellCommandScore {
}
impl Serialize for OPNsenseShellCommandScore {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
fn serialize<S>(&self, _serializer: S) -> Result<S::Ok, S::Error>
where
S: serde::Serializer,
{

View File

@@ -17,7 +17,7 @@ pub struct OPNSenseLaunchUpgrade {
}
impl Serialize for OPNSenseLaunchUpgrade {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
fn serialize<S>(&self, _serializer: S) -> Result<S::Ok, S::Error>
where
S: serde::Serializer,
{

View File

@@ -12,6 +12,7 @@ harmony = { path = "../harmony" }
harmony_tui = { path = "../harmony_tui", optional = true }
inquire.workspace = true
tokio.workspace = true
env_logger.workspace = true
[features]

View File

@@ -99,6 +99,8 @@ pub async fn init<T: Topology + Send + Sync + 'static>(
return Err("Not compiled with interactive support".into());
}
let _ = env_logger::builder().try_init();
let scores_vec = maestro_scores_filter(&maestro, args.all, args.filter, args.number);
if scores_vec.len() == 0 {
@@ -147,7 +149,6 @@ mod test {
modules::dummy::{ErrorScore, PanicScore, SuccessScore},
topology::HAClusterTopology,
};
use harmony::{score::Score, topology::Topology};
fn init_test_maestro() -> Maestro<HAClusterTopology> {
let inventory = Inventory::autoload();

View File

@@ -41,7 +41,7 @@ pub mod tui {
/// async fn main() {
/// let inventory = Inventory::autoload();
/// let topology = HAClusterTopology::autoload();
/// let mut maestro = Maestro::new(inventory, topology);
/// let mut maestro = Maestro::new(inventory, topology, "local");
///
/// maestro.register_all(vec![
/// Box::new(SuccessScore {}),

View File

@@ -15,6 +15,7 @@ reqwest = { version = "0.12", features = ["stream"] }
url.workspace = true
sha2 = "0.10.8"
futures-util = "0.3.31"
kube.workspace = true
[dev-dependencies]
env_logger = { workspace = true }

View File

@@ -1,18 +1,23 @@
mod downloadable_asset;
use downloadable_asset::*;
use log::{debug, info};
use kube::Client;
use log::{debug, info, warn};
use std::path::PathBuf;
const K3D_BIN_FILE_NAME: &str = "k3d";
pub struct K3d {
base_dir: PathBuf,
cluster_name: Option<String>,
}
impl K3d {
pub fn new(base_dir: PathBuf) -> Self {
Self { base_dir }
pub fn new(base_dir: PathBuf, cluster_name: Option<String>) -> Self {
Self {
base_dir,
cluster_name,
}
}
async fn get_binary_for_current_platform(
@@ -24,7 +29,6 @@ impl K3d {
debug!("Detecting platform: OS={}, ARCH={}", os, arch);
// 2. Construct the binary name pattern based on platform
let binary_pattern = match (os, arch) {
("linux", "x86") => "k3d-linux-386",
("linux", "x86_64") => "k3d-linux-amd64",
@@ -38,7 +42,6 @@ impl K3d {
debug!("Looking for binary matching pattern: {}", binary_pattern);
// 3. Find the matching binary in release assets
let binary_asset = latest_release
.assets
.iter()
@@ -47,14 +50,12 @@ impl K3d {
let binary_url = binary_asset.browser_download_url.clone();
// 4. Find and parse the checksums file
let checksums_asset = latest_release
.assets
.iter()
.find(|asset| asset.name == "checksums.txt")
.expect("Checksums file not found in release assets");
// 5. Download and parse checksums file
let checksums_url = checksums_asset.browser_download_url.clone();
let body = reqwest::get(checksums_url)
@@ -65,7 +66,6 @@ impl K3d {
.unwrap();
println!("body: {body}");
// 6. Find the checksum for our binary
let checksum = body
.lines()
.find_map(|line| {
@@ -109,6 +109,252 @@ impl K3d {
Ok(latest_release)
}
/// Checks if k3d binary exists and is executable
///
/// Verifies that:
/// 1. The k3d binary exists in the base directory
/// 2. It has proper executable permissions (on Unix systems)
/// 3. It responds correctly to a simple command (`k3d --version`)
pub fn is_installed(&self) -> bool {
let binary_path = self.get_k3d_binary_path();
if !binary_path.exists() {
debug!("K3d binary not found at {:?}", binary_path);
return false;
}
if !self.ensure_binary_executable(&binary_path) {
return false;
}
self.can_execute_binary_check(&binary_path)
}
/// Verifies if the specified cluster is already created
///
/// Executes `k3d cluster list <cluster_name>` and checks for a successful response,
/// indicating that the cluster exists and is registered with k3d.
pub fn is_cluster_initialized(&self) -> bool {
let cluster_name = match self.get_cluster_name() {
Ok(name) => name,
Err(_) => {
debug!("Could not get cluster name, can't verify if cluster is initialized");
return false;
}
};
let binary_path = self.base_dir.join(K3D_BIN_FILE_NAME);
if !binary_path.exists() {
return false;
}
self.verify_cluster_exists(&binary_path, cluster_name)
}
fn get_cluster_name(&self) -> Result<&String, String> {
match &self.cluster_name {
Some(name) => Ok(name),
None => Err("No cluster name available".to_string()),
}
}
/// Creates a new k3d cluster with the specified name
///
/// This method:
/// 1. Creates a new k3d cluster using `k3d cluster create <cluster_name>`
/// 2. Waits for the cluster to initialize
/// 3. Returns a configured Kubernetes client connected to the cluster
///
/// # Returns
/// - `Ok(Client)` - Successfully created cluster and connected client
/// - `Err(String)` - Error message detailing what went wrong
pub async fn initialize_cluster(&self) -> Result<Client, String> {
let cluster_name = match self.get_cluster_name() {
Ok(name) => name,
Err(_) => return Err("Could not get cluster_name, cannot initialize".to_string()),
};
info!("Initializing k3d cluster '{}'", cluster_name);
self.create_cluster(cluster_name)?;
self.create_kubernetes_client().await
}
fn get_k3d_binary_path(&self) -> PathBuf {
self.base_dir.join(K3D_BIN_FILE_NAME)
}
fn get_k3d_binary(&self) -> Result<PathBuf, String> {
let path = self.get_k3d_binary_path();
if !path.exists() {
return Err(format!("K3d binary not found at {:?}", path));
}
Ok(path)
}
/// Ensures k3d is installed and the cluster is initialized
///
/// This method provides a complete setup flow:
/// 1. Checks if k3d is installed, downloads and installs it if needed
/// 2. Verifies if the specified cluster exists, creates it if not
/// 3. Returns a Kubernetes client connected to the cluster
///
/// # Returns
/// - `Ok(Client)` - Successfully ensured k3d and cluster are ready
/// - `Err(String)` - Error message if any step failed
pub async fn ensure_installed(&self) -> Result<Client, String> {
if !self.is_installed() {
info!("K3d is not installed, downloading latest release");
self.download_latest_release()
.await
.map_err(|e| format!("Failed to download k3d: {}", e))?;
if !self.is_installed() {
return Err("Failed to install k3d properly".to_string());
}
}
if !self.is_cluster_initialized() {
info!("Cluster is not initialized, initializing now");
return self.initialize_cluster().await;
}
self.start_cluster().await?;
info!("K3d and cluster are already properly set up");
self.create_kubernetes_client().await
}
// Private helper methods
#[cfg(not(target_os = "windows"))]
fn ensure_binary_executable(&self, binary_path: &PathBuf) -> bool {
use std::os::unix::fs::PermissionsExt;
let mut perms = match std::fs::metadata(binary_path) {
Ok(metadata) => metadata.permissions(),
Err(e) => {
debug!("Failed to get binary metadata: {}", e);
return false;
}
};
perms.set_mode(0o755);
if let Err(e) = std::fs::set_permissions(binary_path, perms) {
debug!("Failed to set executable permissions on k3d binary: {}", e);
return false;
}
true
}
#[cfg(target_os = "windows")]
fn ensure_binary_executable(&self, _binary_path: &PathBuf) -> bool {
// Windows doesn't use executable file permissions
true
}
fn can_execute_binary_check(&self, binary_path: &PathBuf) -> bool {
match std::process::Command::new(binary_path)
.arg("--version")
.output()
{
Ok(output) => {
if output.status.success() {
debug!("K3d binary is installed and working");
true
} else {
debug!("K3d binary check failed: {:?}", output);
false
}
}
Err(e) => {
debug!("Failed to execute K3d binary: {}", e);
false
}
}
}
fn verify_cluster_exists(&self, binary_path: &PathBuf, cluster_name: &str) -> bool {
match std::process::Command::new(binary_path)
.args(["cluster", "list", cluster_name, "--no-headers"])
.output()
{
Ok(output) => {
if output.status.success() && !output.stdout.is_empty() {
debug!("Cluster '{}' is initialized", cluster_name);
true
} else {
debug!("Cluster '{}' is not initialized", cluster_name);
false
}
}
Err(e) => {
debug!("Failed to check cluster initialization: {}", e);
false
}
}
}
pub fn run_k3d_command<I, S>(&self, args: I) -> Result<std::process::Output, String>
where
I: IntoIterator<Item = S>,
S: AsRef<std::ffi::OsStr>,
{
let binary_path = self.get_k3d_binary()?;
let output = std::process::Command::new(binary_path).args(args).output();
match output {
Ok(output) => {
let stderr = String::from_utf8_lossy(&output.stderr);
debug!("stderr : {}", stderr);
let stdout = String::from_utf8_lossy(&output.stdout);
debug!("stdout : {}", stdout);
Ok(output)
}
Err(e) => Err(format!("Failed to execute k3d command: {}", e)),
}
}
fn create_cluster(&self, cluster_name: &str) -> Result<(), String> {
let output = self.run_k3d_command(["cluster", "create", cluster_name])?;
if !output.status.success() {
let stderr = String::from_utf8_lossy(&output.stderr);
return Err(format!("Failed to create cluster: {}", stderr));
}
info!("Successfully created k3d cluster '{}'", cluster_name);
Ok(())
}
async fn create_kubernetes_client(&self) -> Result<Client, String> {
warn!("TODO this method is way too dumb, it should make sure that the client is connected to the k3d cluster actually represented by this instance, not just any default client");
Client::try_default()
.await
.map_err(|e| format!("Failed to create Kubernetes client: {}", e))
}
pub async fn get_client(&self) -> Result<Client, String> {
match self.is_cluster_initialized() {
true => Ok(self.create_kubernetes_client().await?),
false => Err("Cannot get client! Cluster not initialized yet".to_string()),
}
}
async fn start_cluster(&self) -> Result<(), String> {
let cluster_name = self.get_cluster_name()?;
let output = self.run_k3d_command(["cluster", "start", cluster_name])?;
if !output.status.success() {
let stderr = String::from_utf8_lossy(&output.stderr);
return Err(format!("Failed to start cluster: {}", stderr));
}
info!("Successfully started k3d cluster '{}'", cluster_name);
Ok(())
}
}
#[cfg(test)]
@@ -124,7 +370,7 @@ mod test {
assert_eq!(dir.join(K3D_BIN_FILE_NAME).exists(), false);
let k3d = K3d::new(dir.clone());
let k3d = K3d::new(dir.clone(), None);
let latest_release = k3d.get_latest_release_tag().await.unwrap();
let tag_regex = Regex::new(r"^v\d+\.\d+\.\d+$").unwrap();
@@ -138,7 +384,7 @@ mod test {
assert_eq!(dir.join(K3D_BIN_FILE_NAME).exists(), false);
let k3d = K3d::new(dir.clone());
let k3d = K3d::new(dir.clone(), None);
let bin_file_path = k3d.download_latest_release().await.unwrap();
assert_eq!(bin_file_path, dir.join(K3D_BIN_FILE_NAME));
assert_eq!(dir.join(K3D_BIN_FILE_NAME).exists(), true);
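A short usage sketch of the K3d API added above (the installation path and cluster name are illustrative; harmony itself passes HARMONY_CONFIG_DIR.join("k3d")):
```rust
use std::path::PathBuf;

#[tokio::main]
async fn main() -> Result<(), String> {
    // Illustrative path and cluster name.
    let k3d = k3d_rs::K3d::new(PathBuf::from("/tmp/harmony/k3d"), Some("harmony".to_string()));
    // Downloads the k3d binary if missing, creates or starts the cluster,
    // and returns a kube::Client connected to it.
    let client = k3d.ensure_installed().await?;
    println!("apiserver reachable: {}", client.apiserver_version().await.is_ok());
    Ok(())
}
```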

View File

@@ -23,7 +23,7 @@ pub struct Config {
}
impl Serialize for Config {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
fn serialize<S>(&self, _serializer: S) -> Result<S::Ok, S::Error>
where
S: serde::Serializer,
{

View File

@@ -10,11 +10,11 @@ mod test {
use std::net::Ipv4Addr;
use crate::Config;
use pretty_assertions::assert_eq;
#[cfg(opnsenseendtoend)]
#[tokio::test]
async fn test_public_sdk() {
use pretty_assertions::assert_eq;
let mac = "11:22:33:44:55:66";
let ip = Ipv4Addr::new(10, 100, 8, 200);
let hostname = "test_hostname";

View File

@@ -1,2 +1,3 @@
[package]
name = "example"
edition = "2024"