Merge branch 'master' into feat/settingUpNDC

This commit is contained in:
Jean-Gabriel Gill-Couture 2025-05-06 11:58:12 -04:00
commit 1fb7132c64
103 changed files with 8763 additions and 946 deletions

1803
Cargo.lock generated

File diff suppressed because it is too large Load Diff

View File

@ -9,6 +9,8 @@ members = [
"harmony_tui",
"opnsense-config",
"opnsense-config-xml",
"harmony_cli",
"k3d",
]
[workspace.package]
@ -21,21 +23,24 @@ log = "0.4.22"
env_logger = "0.11.5"
derive-new = "0.7.0"
async-trait = "0.1.82"
tokio = { version = "1.40.0", features = ["io-std", "fs"] }
tokio = { version = "1.40.0", features = ["io-std", "fs", "macros", "rt-multi-thread"] }
cidr = "0.2.3"
russh = "0.45.0"
russh-keys = "0.45.0"
rand = "0.8.5"
url = "2.5.4"
kube = "0.98.0"
k8s-openapi = { version = "0.24.0", features = [ "v1_30" ] }
k8s-openapi = { version = "0.24.0", features = ["v1_30"] }
serde_yaml = "0.9.34"
serde-value = "0.7.0"
http = "1.2.0"
inquire = "0.7.5"
convert_case = "0.8.0"
[workspace.dependencies.uuid]
version = "1.11.0"
features = [
"v4", # Lets you generate random UUIDs
"fast-rng", # Use a faster (but still sufficiently random) RNG
"macro-diagnostics", # Enable better diagnostics for compile-time UUIDs
"v4", # Lets you generate random UUIDs
"fast-rng", # Use a faster (but still sufficiently random) RNG
"macro-diagnostics", # Enable better diagnostics for compile-time UUIDs
]

View File

@ -1,13 +1,36 @@
### Watch the whole repo on every change
# Harmony : Open Infrastructure Orchestration
Due to the current setup being a mix of separate repositories with gitignore and rust workspace, a few options are required for cargo-watch to have the desired behavior :
## Quick demo
```sh
RUST_LOG=info cargo watch --ignore-nothing -w harmony -w private_repos/ -x 'run --bin nationtech'
```
`cargo run -p example-tui`
This will run the nationtech bin (likely `private_repos/nationtech/src/main.rs`) on any change in the harmony or private_repos folders.
This will launch Harmony's minimalist terminal ui which embeds a few demo scores.
Usage instructions will be displayed at the bottom of the TUI.
`cargo run --bin example-cli -- --help`
This is the harmony CLI, a minimal implementation
The current help text:
````
Usage: example-cli [OPTIONS]
Options:
-y, --yes Run score(s) or not
-f, --filter <FILTER> Filter query
-i, --interactive Run interactive TUI or not
-a, --all Run all or nth, defaults to all
-n, --number <NUMBER> Run nth matching, zero indexed [default: 0]
-l, --list list scores, will also be affected by run filter
-h, --help Print help
-V, --version Print version```
## Core architecture
![Harmony Core Architecture](docs/diagrams/Harmony_Core_Architecture.drawio.svg)
````
## Supporting a new field in OPNSense `config.xml`
Two steps:

33
adr/000-ADR-Template.md Normal file
View File

@ -0,0 +1,33 @@
# Architecture Decision Record: \<Title\>
Name: \<Name\>
Initial Date: \<Date\>
Last Updated Date: \<Date\>
## Status
Proposed/Pending/Accepted/Implemented
## Context
The problem, background, the "why" behind this decision/discussion
## Decision
Proposed solution to the problem
## Rationale
Reasoning behind the decision
## Consequences
Pros/Cons of chosen solution
## Alternatives considered
Pros/Cons of various proposed solutions considered
## Additional Notes

View File

@ -1,12 +1,18 @@
**Architecture Decision Record: Harmony Infrastructure Abstractions**
## Architecture Decision Record: Core Harmony Infrastructure Abstractions
**Status**: Proposed
## Status
**Context**: Harmony is an infrastructure orchestrator written in pure Rust, aiming to provide real portability of automation across different cloud providers and infrastructure setups. To achieve this, we need to define infrastructure abstractions that are provider-agnostic and flexible enough to accommodate various use cases.
Proposed
**Decision**: We will define our infrastructure abstractions using a domain-driven approach, focusing on the core logic of Harmony. These abstractions will only include the absolutely required elements for a specific resource, without referencing specific providers or implementations.
## Context
**Example: Database Abstraction**
Harmony is an infrastructure orchestrator written in pure Rust, aiming to provide real portability of automation across different cloud providers and infrastructure setups. To achieve this, we need to define infrastructure abstractions that are provider-agnostic and flexible enough to accommodate various use cases.
## Decision
We will define our infrastructure abstractions using a domain-driven approach, focusing on the core logic of Harmony. These abstractions will only include the absolutely required elements for a specific resource, without referencing specific providers or implementations.
### Example: Database Abstraction
To deploy a database to any cloud provider, we define an abstraction that includes essential elements such as:
```rust

19
adr/004-ipxe.md Normal file
View File

@ -0,0 +1,19 @@
# ADR: Use iPXE as the Primary Bootloader with Chaining for Architecture Independence
**Status:** Implemented
**Context:**
Harmony requires a flexible and unified bootloader solution to handle both BIOS and UEFI architectures. We need support for dynamic boot configurations, advanced network booting capabilities, and the ability to manage diskless machines.
**Decision:**
Adopt iPXE as the primary bootloader. For BIOS and UEFI clients, use chaining to load iPXE, ensuring all clients boot into a common iPXE environment.
**Consequences:**
- **Benefits:**
- Single configuration file for all architectures.
- Enables dynamic and scripted boot processes.
- Supports booting over various protocols (HTTP, HTTPS, iSCSI, SAN, etc.).
- Allows diskless machines with networked root filesystems.
- **Trade-offs:**
- Adds a dependency on iPXE.
- Requires proper configuration and maintenance of iPXE.

View File

@ -0,0 +1,80 @@
# Architecture Decision Record: Interactive project setup for automated delivery pipeline of various codebases
## Status
Proposal
## Context
Many categories of developers, of which we will focus on LAMP (Linux Apache, MySQL, PHP) developers at first, are underserved by modern delivery tools.
Most of these projects are developed with a small team, small budget, but still are mission critical to their users.
We believe that Harmony, with its end-to-end infrastructure orchestration approach, enables relatively easy integration for this category of projects in a modern delivery pipeline that is opinionated enough that the development team is not overwhelmed by choices, but also flexible enough to allow them to deploy their application according to their habits. This includes local development, managed dedicated servers, virtualized environments, manual dashboards like CPanel, cloud providers, etc.
To enable this, we need to provide an easy way for developers to step on to the harmony pipeline without disrupting their workflow.
This ADR will outline the approach taken to go from a LAMP project to be standalone, to a LAMP project using harmony that can benefit from all the enterprise grade features of our opinionated delivery pipeline including :
- Automated environment provisioning (local, staging, uat, prod)
- Infrastructure optimized for the delivery stage
- Production with automated backups
- Automated domain names for early stages, configured domain name for production
- SSL certificates
- Secret management
- SSO integration
- IDP, IDS security
- Monitoring, logging
- Artifact registry
- Automated deployment and rollback
- Dependency management (databases, configuration, scripts)
## Decision
# Custom Rust DSL
We decided to develop a rust based DSL. Even though this means people might be "afraid of Rust", we believe the numerous advantages are worth the risk.
The main selection criteria are:
- Robustness : the application/infrastructure definition should not be fragile to typos or versioning. Rust's robust dependency management (cargo) and type safety are best in class for robustness
- Flexibility : Writing the definition in a standard programming language empowers users to easily leverage the internals of harmony to adapt the code to their needs.
- Extensibility : Once again, a standard programming language enables easily importing a configuration, or multiple configurations, create reusable bits, and build upon the different components to really take control over a complex multi-project deployment without going crazy because of a typo in a yaml definition that changed 4 years ago
## Consequences
### Positive
- Complete control over the syntax and semantics of the DSL, tailored specifically to our needs.
- Potential for better performance optimizations as we can implement exactly what is required without additional abstractions.
### Negative
- Higher initial development cost due to building a new language from scratch.
- Steeper learning curve for developers who need to use the DSL.
- Lack of an existing community and ecosystem, which could slow down adoption.
- Increased maintenance overhead as the DSL needs to be updated and supported internally.
## Alternatives considered
### Score spec
We considered integrating with the score-spec project : https://github.com/score-spec/spec
The idea was to benefit from an existing community and ecosystem. The motivations to consider score were the following :
- It is a CNCF project, which helps a lot with adoption and community building
- It already supports important targets for us including docker-compose and k8s
- It provides a way to define the application's infrastructure at the correct level of abstraction for us to deploy it anywhere -- that is the goal of the score-spec project
- Once we evolve, we can simply have a score compatible provider that allows any project with a score spec to be deployed on the harmony stack
- Score was built with enterprise use-cases in mind : Humanitec platform engineering customers
Positive Consequences
- Score Community is growing, using harmony will be very easy for them
Negative Consequences
- Score is not that big yet, and mostly used by Humanitec's clients (I guess), which is a hard to penetrate environment

View File

@ -0,0 +1,88 @@
# Architecture Decision Record: Keycloak for Secret Management
## Status
Proposed
### TODO [#3](https://git.nationtech.io/NationTech/harmony/issues/3):
Before accepting this proposal we need to run a POC to validate this potential issue :
**Keycloak Misuse**: Using Keycloak primarily as a secrets manager is inappropriate, as it's designed for identity and access management (IAM), not secrets management. This creates scalability and functionality limitations.
## Context
Our infrastructure orchestration requires a robust secret management system as part of our automation project to support secure transitions from development to production environments. Key considerations include:
1. **User lifecycle management**: In enterprise settings, developers and administrators frequently join and leave projects, creating security risks if their access to secrets isn't properly revoked.
2. **Security limitations with file-based solutions**: Traditional encrypted file-based approaches (like SOPS) present challenges with user revocation, as departed users can retain local copies of encrypted files and continue accessing secrets indefinitely.
3. **Authentication integration**: Our solution needs to integrate with existing identity providers to leverage centralized authentication systems already in place.
4. **Diverse user base**: Our solution must work well for both enterprise teams and small organizations/individual developers without imposing excessive complexity.
5. **Operational simplicity**: The solution should minimize the cognitive overhead required to manage secrets securely.
## Decision
We will implement Keycloak as our secret management solution with the following workflow:
1. Projects will contain only configuration metadata indicating where secrets are stored and which identity provider to use.
2. When a developer runs the project locally, they will be automatically prompted to authenticate via SSO (browser-based or TOTP).
3. Upon successful authentication, the application will use the developer's credentials to retrieve appropriate secrets from the centralized Keycloak server.
4. When a developer leaves the organization, their SSO access is revoked, automatically removing their ability to access secrets.
For smaller organizations and individual developers, we will provide a fully managed Keycloak instance with:
- A free tier supporting a limited number of secrets or API calls
- Simplified setup that hides complexity through our automation tooling
- Potential for paid tiers as usage scales
### Why Keycloak
We wanted a solution that met these criteria:
- Fully open source
- Mature
- Integrates with various SSO providers
- Supports secrets
- As easy as possible to deploy on our existing K8s infrastructure
- Supports any kind of secret, not just K8s
Other considered options :
- Vault : not fully open source
- SOPS : no SSO integration, makes user lifecycle harder
- AWS Secrets : vendor lock-in and cost
- Bitwarden : SSO feature not fully open source
## Consequences
### Positive
1. **Improved security posture**: Secret access is tied directly to centralized identity management, reducing the risk of leaked credentials from former team members.
2. **Simplified developer onboarding**: New team members can immediately access appropriate secrets without manual sharing of encrypted files or keys.
3. **Transparent authentication**: SSO integration creates a seamless experience that leverages existing organizational authentication systems.
4. **Centralized audit capability**: All secret access can be logged and monitored in a single location.
5. **Scalable for different organization sizes**: The managed service option makes enterprise-grade secret management accessible to smaller teams.
### Challenges
1. **Network dependency**: Developers require network connectivity to access secrets, which could complicate offline development scenarios.
2. **Operational overhead**: While hidden from most users, we will need to maintain a reliable managed Keycloak service.
3. **Migration complexity**: Existing projects using file-based secret solutions will require migration assistance.
4. **Potential for clipboard leakage**: While more difficult than with file-based solutions, determined users could still manually copy secrets they've legitimately accessed.
5. **Service availability concerns**: Dependency on the centralized secret service creates a potential single point of failure.
6. **Implementation complexity**: Integrating with various SSO providers and creating a seamless developer experience will require significant initial engineering effort.

View File

@ -0,0 +1,65 @@
## Architecture Decision Record: Default Runtime for Managed Workloads
### Status
Proposed
### Context
Our infrastructure orchestrator manages workloads requiring a Kubernetes-compatible runtime environment.
**Requirements**
- Cross-platform (Linux, Windows, macOS)
- Kubernetes compatibility
- Lightweight, easy setup with minimal dependencies
- Clean host teardown and minimal residue
- Well-maintained and actively supported
### Decision
We select **k3d (k3s in Docker)** as our default runtime environment across all supported platforms (Linux, Windows, macOS).
### Rationale
- **Consistency Across Platforms:**
One solution for all platforms simplifies development, supports documentation, and reduces complexity.
- **Simplified Setup and Teardown:**
k3d runs Kubernetes clusters in Docker containers, allowing quick setup, teardown, and minimal host residue.
- **Leveraging Existing Container Ecosystem:**
Docker/container runtimes are widely adopted, making their presence and familiarity common among users.
- **Kubernetes Compatibility:**
k3s (within k3d) is fully Kubernetes-certified, ensuring compatibility with standard Kubernetes tools and manifests.
- **Active Maintenance and Community:**
k3d and k3s both have active communities and are well-maintained.
### Consequences
#### Positive
- **Uniform User Experience:** Users have a consistent setup experience across all platforms.
- **Reduced Support Overhead:** Standardizing runtime simplifies support, documentation, and troubleshooting.
- **Clean Isolation:** Containerization allows developers to easily clean up clusters without affecting host systems.
- **Facilitates Multi-Cluster Development:** Easy creation and management of multiple clusters concurrently.
#### Negative
- **Docker Dependency:** Requires Docker (or compatible runtime) on all platforms.
- **Potential Overhead:** Slight performance/resource overhead compared to native k3s.
- **Docker Licensing Considerations:** Enterprise licensing of Docker Desktop could introduce additional considerations.
### Alternatives Considered
- **Native k3s (Linux) / k3d (Windows/macOS):** Original proposal. Rejected for greater simplicity and consistency.
- **Minikube, MicroK8s, Kind:** Rejected due to complexity, resource usage, or narrower use-case focus.
- **Docker Compose, Podman Desktop:** Rejected due to lack of orchestration or current limited k3d compatibility.
### Future Work
- Evaluate Podman Desktop or other container runtimes to avoid Docker dependency.
- Continuously monitor k3d maturity and stability.
- Investigate WebAssembly (WASM) runtimes as emerging alternatives for containerized workloads.

View File

@ -0,0 +1,62 @@
## Architecture Decision Record: Data Representation and UI Rendering for Score Types
**Status:** Proposed
**TL;DR:** `Score` types will be serialized (using `serde`) for presentation in UIs. This decouples data definition from presentation, improving scalability and reducing complexity for developers defining `Score` types. New UI types only need to handle existing field types, and new `Score` types don't require UI changes as long as they use existing field types. Adding a new field type *does* require updates to all UIs.
**Key benefits:** Scalability, reduced complexity for `Score` authors, decoupling of data and presentation.
**Key trade-off:** Adding new field types requires updating all UIs.
---
**Context:**
Harmony is a pure Rust infrastructure orchestrator focused on compile-time safety and providing a developer-friendly, Ansible-module-like experience for defining infrastructure configurations via "Scores". These Scores (e.g., `LAMPScore`) are Rust structs composed of specific, strongly-typed fields (e.g., `VersionField`, `UrlField`, `PathField`) which are validated at compile-time using macros (`Version!`, `Url!`, etc.).
A key requirement is displaying the configuration defined in these Scores across various user interfaces (Web UI, TUI, potentially Mobile UI, etc.) in a consistent and type-safe manner. As the number of Score types is expected to grow significantly (hundreds or thousands), we need a scalable approach for rendering their data that avoids tightly coupling Score definitions to specific UI implementations.
The primary challenge is preventing the need for every `Score` struct author to implement multiple display traits (e.g., `Display`, `WebDisplay`, `TuiDisplay`) for every potential UI target. This would create an N x M complexity problem (N Scores * M UI types) and place an unreasonable burden on Score developers, hindering scalability and maintainability.
**Decision:**
1. **Mandatory Serialization:** All `Score` structs *must* implement `serde::Serialize` and `serde::Deserialize`. They *will not* be required to implement `std::fmt::Display` or any custom UI-specific display traits (e.g., `WebDisplay`, `TuiDisplay`).
2. **Field-Level Rendering:** Responsibility for rendering data will reside within the UI components. Each UI (Web, TUI, etc.) will implement logic to display *individual field types* (e.g., `UrlField`, `VersionField`, `IpAddressField`, `SecretField`).
3. **Data Access via Serialization:** UIs will primarily interact with `Score` data through its serialized representation (e.g., JSON obtained via `serde_json`). This provides a standardized interface for UIs to consume the data structure agnostic of the specific `Score` type. Alternatively, UIs *could* potentially use reflection or specific visitor patterns on the `Score` struct itself, but serialization is the preferred decoupling mechanism.
**Rationale:**
1. **Decoupling Data from Presentation:** This decision cleanly separates the data definition (`Score` structs and their fields) from the presentation logic (UI rendering). `Score` authors can focus solely on defining the data and its structure, while UI developers focus on how to best present known data *types*.
2. **Scalability:** This approach scales significantly better than requiring display trait implementations on Scores:
* Adding a *new Score type* requires *no changes* to existing UI code, provided it uses existing field types.
* Adding a *new UI type* requires implementing rendering logic only for the defined set of *field types*, not for every individual `Score` type. This reduces the N x M complexity to N + M complexity (approximately).
3. **Simplicity for Score Authors:** Requiring only `serde::Serialize + Deserialize` (which can often be derived automatically with `#[derive(Serialize, Deserialize)]`) is a much lower burden than implementing custom rendering logic for multiple, potentially unknown, UI targets.
4. **Leverages Rust Ecosystem Standards:** `serde` is the de facto standard for serialization and deserialization in Rust. Relying on it aligns with common Rust practices and benefits from its robustness, performance, and extensive tooling.
5. **Consistency for UIs:** Serialization provides a consistent, structured format (like JSON) for UIs to consume data, regardless of the underlying `Score` struct's complexity or composition.
6. **Flexibility for UI Implementation:** UIs can choose the best way to render each field type based on their capabilities (e.g., a `UrlField` might be a clickable link in a Web UI, plain text in a TUI; a `SecretField` might be masked).
**Consequences:**
**Positive:**
* Greatly improved scalability for adding new Score types and UI targets.
* Strong separation of concerns between data definition and presentation.
* Reduced implementation burden and complexity for Score authors.
* Consistent mechanism for UIs to access and interpret Score data.
* Aligns well with the Hexagonal Architecture (ADR-002) by treating UIs as adapters interacting with the application core via a defined port (the serialized data contract).
**Negative:**
* Adding a *new field type* (e.g., `EmailField`) requires updates to *all* existing UI implementations to support rendering it.
* UI components become dependent on the set of defined field types and need comprehensive logic to handle each one appropriately.
* Potential minor overhead of serialization/deserialization compared to direct function calls (though likely negligible for UI purposes).
* Requires careful design and management of the standard library of field types.
**Alternatives Considered:**
1. **`Score` Implements `std::fmt::Display`:**
* _Rejected:_ Too simplistic. Only suitable for basic text rendering, doesn't cater to structured UIs (Web, etc.), and doesn't allow type-specific rendering logic (e.g., masking secrets). Doesn't scale to multiple UI formats.
2. **`Score` Implements Multiple Custom Display Traits (`WebDisplay`, `TuiDisplay`, etc.):**
* _Rejected:_ Leads directly to the N x M complexity problem. Tightly couples Score definitions to specific UI implementations. Places an excessive burden on Score authors, hindering adoption and scalability.
3. **Generic Display Trait with Context (`Score` implements `DisplayWithContext<UIContext>`):**
* _Rejected:_ More flexible than multiple traits, but still requires Score authors to implement potentially complex rendering logic within the `Score` definition itself. The `Score` would still need awareness of different UI contexts, leading to undesirable coupling. Managing context types adds complexity.

View File

@ -0,0 +1,61 @@
# Architecture Decision Record: Helm and Kustomize Handling
Name: Taha Hawa
Initial Date: 2025-04-15
Last Updated Date: 2025-04-15
## Status
Proposed
## Context
We need to find a way to handle Helm charts and deploy them to a Kubernetes cluster. Helm has a lot of extra functionality that we may or may not need. Kustomize handles Helm charts by inflating them and applying them as vanilla Kubernetes yaml. How should Harmony handle it?
## Decision
In order to move quickly and efficiently, Harmony should handle Helm charts similarly to how Kustomize does: invoke Helm to inflate/render the charts with the needed inputs, and deploy the rendered artifacts to Kubernetes as if it were vanilla manifests.
## Rationale
A lot of Helm's features aren't strictly necessary and would add unneeded overhead. This is likely the fastest way to go from zero to deployed. Other tools (e.g. Kustomize) already do this. Kustomize has tooling for patching and modifying k8s manifests before deploying, and Harmony should have that power too, even if it's not what Helm typically intends.
Perhaps in future also have a Kustomize resource in Harmony? Which could handle Helm charts for Harmony as well/instead.
## Consequences
**Pros**:
- Much easier (and faster) than implementing all of Helm's featureset
- Can potentially re-use code from K8sResource already present in Harmony
- Harmony retains more control over how the deployment goes after rendering (i.e. can act like Kustomize, or leverage Kustomize itself to modify deployments after rendering/inflation)
- Reduce (unstable) surface of dealing with Helm binary
**Cons**:
- Lose some Helm functionality
- Potentially lose some compatibility with Helm
## Alternatives considered
- ### Implement Helm resource/client fully in Harmony
- **Pros**:
- Retain full compatibility with Helm as a tool
- Retain full functionality of Helm
- **Cons**:
- Longer dev time
- More complex integration
- Dealing with larger (unstable) surface of Helm as a binary
- ### Leverage Kustomize to deal with Helm charts
- **Pros**:
- Already has a good, minimal inflation solution built
- Powerful post-processing/patching
- Can integrate with `kubectl`
- **Cons**:
- Unstable binary tool/surface to deal with
- Still requires Helm to be installed as well as Kustomize
- Not all Helm features supported
## Additional Notes

View File

@ -0,0 +1,68 @@
# Architecture Decision Record: Monitoring and Alerting
Proposed by: Willem Rolleman
Date: April 28 2025
## Status
Proposed
## Context
A harmony user should be able to initialize a monitoring stack easily, either at the first run of Harmony, or that integrates with existing projects and infra without creating multiple instances of the monitoring stack or overwriting existing alerts/configurations. The user also needs a simple way to configure the stack so that it watches the projects. There should be reasonable defaults configured that are easily customizable for each project
## Decision
Create MonitoringStack score that creates a maestro to launch the monitoring stack or not if it is already present.
The MonitoringStack score can be passed to the maestro in the vec! scores list
## Rationale
Having the score launch a maestro will allow the user to easily create a new monitoring stack and keeps components grouped together. The MonitoringScore can handle all the logic for adding alerts, ensuring that the stack is running etc.
## Alternatives considered
- ### Implement alerting and monitoring stack using existing HelmScore for each project
- **Pros**:
- Each project can choose to use the monitoring and alerting stack that they choose
- Less overhead in terms of care harmony code
- can add Box::new(grafana::grafanascore(namespace))
- **Cons**:
- No default solution implemented
- Dev needs to choose what they use
- Increases complexity of score projects
- Each project will create a new monitoring and alerting instance rather than joining the existing one
- ### Use OKD grafana and prometheus
- **Pros**:
- Minimal config to do in Harmony
- **Cons**:
- relies on OKD so will not work for local testing via k3d
- ### Create a monitoring and alerting crate similar to harmony tui
- **Pros**:
- Creates a default solution that can be implemented once by harmony
- can create a join function that will allow a project to connect to the existing solution
- eliminates risk of creating multiple instances of grafana or prometheus
- **Cons**:
- more complex than using a helm score
- management of values files for individual functions becomes more complicated, i.e. how do you create alerts for one project via helm install that doesn't overwrite the other alerts
- ### Add monitoring to Maestro struct so whether the monitoring stack is used must be defined
- **Pros**:
- less for the user to define
- may be easier to set defaults
- **Cons**:
- feels counterintuitive
- would need to modify the structure of the maestro and how it operates which seems like a bad idea
- unclear how to allow user to pass custom values/configs to the monitoring stack for subsequent projects
- ### Create MonitoringStack score to add to scores vec! which loads a maestro to install stack if not ready or add custom endpoints/alerts to existing stack
- **Pros**:
- Maestro already accepts a list of scores to initialize
- leaving out the monitoring score simply means the user does not want monitoring
- if the monitoring stack is already created, the MonitoringStack score doesn't necessarily need to be added to each project
- components of the monitoring stack are bundled together and can be expanded or modified from the same place
- **Cons**:
- maybe need to create

View File

@ -0,0 +1,360 @@
# Here is the current condensed architecture sample for Harmony's core abstractions
```rust
use std::process::Command;
/// Marker trait for capabilities a topology can expose.
pub trait Capability {}
/// Capability of running arbitrary commands on the target topology.
pub trait CommandCapability: Capability {
/// Executes `command` with `args`; returns stdout text on success, stderr text on failure.
fn execute_command(&self, command: &str, args: &[&str]) -> Result<String, String>;
}
/// Capability of applying and reading Kubernetes resources on the target topology.
pub trait KubernetesCapability: Capability {
/// Applies a raw YAML manifest to the cluster.
fn apply_manifest(&self, manifest: &str) -> Result<(), String>;
/// Fetches a resource by type and name; returns its YAML representation as text.
fn get_resource(&self, resource_type: &str, name: &str) -> Result<String, String>;
}
/// A deployment target; concrete topologies additionally implement capability traits.
pub trait Topology {
/// Human-readable identifier of this topology.
fn name(&self) -> &str;
}
/// A declarative unit of work that compiles into an executable `Interpret` for topology `T`.
pub trait Score<T: Topology> {
/// Compiles this score into an interpreter bound to topology type `T`.
fn compile(&self) -> Result<Box<dyn Interpret<T>>, String>;
/// Name of this score.
fn name(&self) -> &str;
}
/// A plain Linux host reachable at `host`, addressed by a friendly `name`.
pub struct LinuxHostTopology {
name: String,
host: String,
}
impl Capability for LinuxHostTopology {}
impl LinuxHostTopology {
/// Creates a topology for the Linux machine at `host`.
pub fn new(name: String, host: String) -> Self {
Self { name, host }
}
}
impl Topology for LinuxHostTopology {
fn name(&self) -> &str {
&self.name
}
}
impl CommandCapability for LinuxHostTopology {
    /// Runs `command` with `args` against this host and returns captured stdout,
    /// or the captured stderr as the error when the process exits non-zero.
    fn execute_command(&self, command: &str, args: &[&str]) -> Result<String, String> {
        println!("Executing on {}: {} {:?}", self.host, command, args);
        // In a real implementation, this would SSH to the host and execute the command
        let result = Command::new(command)
            .args(args)
            .output()
            .map_err(|e| e.to_string())?;
        match result.status.success() {
            true => Ok(String::from_utf8_lossy(&result.stdout).to_string()),
            false => Err(String::from_utf8_lossy(&result.stderr).to_string()),
        }
    }
}
/// A k3d (k3s-in-Docker) cluster named `cluster_name`, hosted on a Linux machine.
pub struct K3DTopology {
name: String,
linux_host: LinuxHostTopology,
cluster_name: String,
}
impl Capability for K3DTopology {}
impl K3DTopology {
/// Creates a topology for the k3d cluster `cluster_name` running on `linux_host`.
pub fn new(name: String, linux_host: LinuxHostTopology, cluster_name: String) -> Self {
Self {
name,
linux_host,
cluster_name,
}
}
}
impl Topology for K3DTopology {
fn name(&self) -> &str {
&self.name
}
}
impl CommandCapability for K3DTopology {
/// Delegates command execution to the underlying Linux host.
fn execute_command(&self, command: &str, args: &[&str]) -> Result<String, String> {
self.linux_host.execute_command(command, args)
}
}
impl KubernetesCapability for K3DTopology {
    /// Writes `manifest` to a temp file on the host, then applies it to the
    /// k3d cluster with `kubectl apply` against the `k3d-<cluster>` context.
    fn apply_manifest(&self, manifest: &str) -> Result<(), String> {
        println!("Applying manifest to K3D cluster '{}'", self.cluster_name);
        // Write manifest to a temporary file
        let temp_file = "/tmp/manifest-harmony-temp.yaml";
        // Use the linux_host directly to avoid capability trait bounds.
        // BUG FIX: the previous version ran `cat > file` with no stdin attached,
        // so the manifest content was never actually written. Feed the content
        // through a quoted heredoc so it is written verbatim (no expansion).
        self.linux_host.execute_command(
            "bash",
            &[
                "-c",
                &format!("cat > {} <<'HARMONY_EOF'\n{}\nHARMONY_EOF", temp_file, manifest),
            ],
        )?;
        // Apply with kubectl
        self.linux_host.execute_command("kubectl", &[
            "--context",
            &format!("k3d-{}", self.cluster_name),
            "apply",
            "-f",
            temp_file,
        ])?;
        Ok(())
    }

    /// Fetches `resource_type`/`name` from the cluster as YAML text.
    fn get_resource(&self, resource_type: &str, name: &str) -> Result<String, String> {
        println!(
            "Getting resource {}/{} from K3D cluster '{}'",
            resource_type, name, self.cluster_name
        );
        self.linux_host.execute_command("kubectl", &[
            "--context",
            &format!("k3d-{}", self.cluster_name),
            "get",
            resource_type,
            name,
            "-o",
            "yaml",
        ])
    }
}
/// Score describing a single shell command to run on the target topology.
pub struct CommandScore {
    name: String,
    command: String,
    args: Vec<String>,
}
impl CommandScore {
    pub fn new(name: String, command: String, args: Vec<String>) -> Self {
        Self {
            name,
            command,
            args,
        }
    }
}
/// Executable form of a score, produced by `Score::compile`; `Maestro`
/// drives it against the topology.
pub trait Interpret<T: Topology> {
    fn execute(&self, topology: &T) -> Result<String, String>;
}
/// Interpret for `CommandScore`. Owns a copy of the command data so that
/// `execute` can actually run it against any `CommandCapability` topology.
struct CommandInterpret {
    command: String,
    args: Vec<String>,
}
impl<T> Interpret<T> for CommandInterpret
where
    T: Topology + CommandCapability,
{
    fn execute(&self, topology: &T) -> Result<String, String> {
        // BUG FIX: this was `todo!()`, which panicked at runtime during
        // orchestration. Delegate to the topology's command capability.
        let args: Vec<&str> = self.args.iter().map(String::as_str).collect();
        topology.execute_command(&self.command, &args)
    }
}
impl<T> Score<T> for CommandScore
where
    T: Topology + CommandCapability,
{
    /// Compiles this score into an interpret carrying a copy of the command.
    fn compile(&self) -> Result<Box<dyn Interpret<T>>, String> {
        Ok(Box::new(CommandInterpret {
            command: self.command.clone(),
            args: self.args.clone(),
        }))
    }
    fn name(&self) -> &str {
        &self.name
    }
}
/// Score describing a raw Kubernetes manifest to apply to the topology.
#[derive(Clone)]
pub struct K8sResourceScore {
    name: String,
    manifest: String,
}
impl K8sResourceScore {
    pub fn new(name: String, manifest: String) -> Self {
        Self { name, manifest }
    }
}
/// Interpret for `K8sResourceScore`: applies the score's manifest on any
/// topology exposing `KubernetesCapability`.
struct K8sResourceInterpret {
    score: K8sResourceScore,
}
impl<T: Topology + KubernetesCapability> Interpret<T> for K8sResourceInterpret {
    fn execute(&self, topology: &T) -> Result<String, String> {
        // BUG FIX: this was `todo!()`, which panicked at runtime during
        // orchestration. Apply the manifest through the topology.
        topology.apply_manifest(&self.score.manifest)?;
        Ok(format!("Applied manifest for score '{}'", self.score.name))
    }
}
impl<T> Score<T> for K8sResourceScore
where
    T: Topology + KubernetesCapability,
{
    // Signature normalized: `Box<dyn Interpret<T>>` is identical to the
    // previous `Box<(dyn Interpret<T> + 'static)>` ('static is the default).
    fn compile(&self) -> Result<Box<dyn Interpret<T>>, String> {
        Ok(Box::new(K8sResourceInterpret {
            score: self.clone(),
        }))
    }
    fn name(&self) -> &str {
        &self.name
    }
}
/// Binds a topology to the set of scores that will be enforced on it. The
/// `Score<T>` bound on registration is what makes incompatible pairings a
/// compile-time error.
pub struct Maestro<T: Topology> {
    topology: T,
    scores: Vec<Box<dyn Score<T>>>,
}
impl<T: Topology> Maestro<T> {
    /// Creates a maestro with no scores registered yet.
    pub fn new(topology: T) -> Self {
        Self {
            topology,
            scores: Vec::new(),
        }
    }
    /// Registers a score proven (by its trait bounds) to be compatible with
    /// this maestro's topology.
    pub fn register_score<S>(&mut self, score: S)
    where
        S: Score<T> + 'static,
    {
        println!(
            "Registering score '{}' for topology '{}'",
            score.name(),
            self.topology.name()
        );
        self.scores.push(Box::new(score));
    }
    /// Compiles and executes every registered score in registration order,
    /// stopping at the first failure.
    pub fn orchestrate(&self) -> Result<(), String> {
        println!("Orchestrating topology '{}'", self.topology.name());
        self.scores.iter().try_for_each(|score| {
            score.compile()?.execute(&self.topology).map(|_output| ())
        })
    }
}
fn main() {
let linux_host = LinuxHostTopology::new("dev-machine".to_string(), "localhost".to_string());
let mut linux_maestro = Maestro::new(linux_host);
linux_maestro.register_score(CommandScore::new(
"check-disk".to_string(),
"df".to_string(),
vec!["-h".to_string()],
));
linux_maestro.orchestrate().unwrap();
// This would fail to compile if we tried to register a K8sResourceScore
// because LinuxHostTopology doesn't implement KubernetesCapability
//linux_maestro.register_score(K8sResourceScore::new(
// "...".to_string(),
// "...".to_string(),
//));
// Create a K3D topology which has both Command and Kubernetes capabilities
let k3d_host = LinuxHostTopology::new("k3d-host".to_string(), "localhost".to_string());
let k3d_topology = K3DTopology::new(
"dev-cluster".to_string(),
k3d_host,
"devcluster".to_string(),
);
// Create a maestro for the K3D topology
let mut k3d_maestro = Maestro::new(k3d_topology);
// We can register both command scores and kubernetes scores
k3d_maestro.register_score(CommandScore::new(
"check-nodes".to_string(),
"kubectl".to_string(),
vec!["get".to_string(), "nodes".to_string()],
));
k3d_maestro.register_score(K8sResourceScore::new(
"deploy-nginx".to_string(),
r#"
apiVersion: apps/v1
kind: Deployment
metadata:
name: nginx
spec:
replicas: 1
selector:
matchLabels:
app: nginx
template:
metadata:
labels:
app: nginx
spec:
containers:
- name: nginx
image: nginx:latest
ports:
- containerPort: 80
"#
.to_string(),
));
// Orchestrate both topologies
linux_maestro.orchestrate().unwrap();
k3d_maestro.orchestrate().unwrap();
}
```
## Technical take
The key insight is that we might not need a complex TypeMap or runtime capability checking. Instead, we should leverage Rust's trait system to express capability requirements directly in the type system.
By clarifying the problem and focusing on type-level solutions rather than runtime checks, we can likely arrive at a simpler, more robust design that leverages the strengths of Rust's type system.
## Philosophical Shifts
1. **From Runtime to Compile-Time**: Move capability checking from runtime to compile-time.
2. **From Objects to Functions**: Think of scores less as objects and more as functions that transform topologies.
3. **From Homogeneous to Heterogeneous API**: Embrace different API paths for different capability combinations rather than trying to force everything through a single interface.
4. **From Complex to Simple**: Focus on making common cases simple, even if it means less abstraction for uncommon cases.
## High level concepts
The high-level concepts have so far evolved towards the following definitions.
Topology -> Has -> Capabilities
Score -> Defines -> Work to be done / desired state
Interpret -> Requires -> Capabilities to execute a Score
Maestro -> Enforces -> Compatibility (through the type system at compile time)
## Why Harmony
The compile-time safety is paramount here. Harmony's main goal is to make the entire software delivery pipeline robust. Current IaC tools are very hard to work with and require complex setups to test and debug real code.
Leveraging Rust's compiler allows us to shift left much of the complexity and frustration that comes with tools like Ansible, which is YAML-based and quickly becomes brittle at scale, or Terraform, where a successful `terraform plan` makes you think everything is correct, only for `terraform apply` to fail horribly and leave you with tens or hundreds of resources to clean up manually.
Of course, this requires a significant effort to get to the point where we have actually implemented all the logic.
But using Rust and a Type-Driven Design approach, we believe we are providing a much more robust foundation for our customers' and users' deployments anywhere.
Also, having the full power of a mature programming language like Rust enables organizations and the community to customize their deployment any way they want, build upon it in a reliable way that has been evolved and proven over decades of enterprise dependency management, API definitions, etc.
===
Given all this c

View File

@ -0,0 +1,10 @@
[package]
name = "example-topology"
edition = "2024"
version.workspace = true
readme.workspace = true
license.workspace = true
publish = false
[dependencies]
rand.workspace = true

View File

@ -0,0 +1,232 @@
// Basic traits from your example
/// Marker trait for deployment targets in this sketch.
trait Topology {}
/// NOTE(review): declaring `get_interpret<T: Topology>` here means an
/// implementor may NOT add extra capability bounds on `T` in its impl
/// (Rust rejects stricter impl bounds, E0276) — see the `CommandScore`
/// and `FileScore` impls below, which both hit this wall.
trait Score: Clone + std::fmt::Debug {
    fn get_interpret<T: Topology>(&self) -> Box<dyn Interpret<T>>;
    fn name(&self) -> String;
}
/// Executable form of a score for a given topology type.
trait Interpret<T: Topology> {
    fn execute(&self);
}
struct Maestro<T: Topology> {
topology: T
}
impl<T: Topology> Maestro<T> {
pub fn new(topology: T) -> Self {
Maestro { topology }
}
pub fn register_score<S: Score + 'static>(&self, score: S) {
println!("Registering score: {}", score.name());
}
pub fn execute_score<S: Score + 'static>(&self, score: S) {
println!("Executing score: {}", score.name());
score.get_interpret::<T>().execute();
}
}
// Capability traits - these are used to enforce requirements
trait CommandExecution {
fn execute_command(&self, command: &[String]) -> Result<String, String>;
}
trait FileSystem {
fn read_file(&self, path: &str) -> Result<String, String>;
fn write_file(&self, path: &str, content: &str) -> Result<(), String>;
}
// A concrete topology implementation
#[derive(Clone, Debug)]
struct LinuxHostTopology {
hostname: String,
}
impl Topology for LinuxHostTopology {}
// Implement the capabilities for LinuxHostTopology
impl CommandExecution for LinuxHostTopology {
fn execute_command(&self, command: &[String]) -> Result<String, String> {
println!("Executing command on {}: {:?}", self.hostname, command);
// In a real implementation, this would use std::process::Command
Ok(format!("Command executed successfully on {}", self.hostname))
}
}
impl FileSystem for LinuxHostTopology {
fn read_file(&self, path: &str) -> Result<String, String> {
println!("Reading file {} on {}", path, self.hostname);
Ok(format!("Content of {} on {}", path, self.hostname))
}
fn write_file(&self, path: &str, content: &str) -> Result<(), String> {
println!("Writing to file {} on {}: {}", path, self.hostname, content);
Ok(())
}
}
// Another topology that doesn't support command execution
#[derive(Clone, Debug)]
struct BareMetalTopology {
device_id: String,
}
impl Topology for BareMetalTopology {}
impl FileSystem for BareMetalTopology {
fn read_file(&self, path: &str) -> Result<String, String> {
println!("Reading file {} on device {}", path, self.device_id);
Ok(format!("Content of {} on device {}", path, self.device_id))
}
fn write_file(&self, path: &str, content: &str) -> Result<(), String> {
println!("Writing to file {} on device {}: {}", path, self.device_id, content);
Ok(())
}
}
// CommandScore implementation
#[derive(Clone, Debug)]
struct CommandScore {
name: String,
args: Vec<String>,
}
impl CommandScore {
pub fn new(name: String, args: Vec<String>) -> Self {
CommandScore { name, args }
}
}
impl Score for CommandScore {
    // NOTE(review): this does NOT compile. The trait declares
    // `get_interpret<T: Topology>`, and an impl cannot add the stricter
    // bounds `CommandExecution + 'static` (error E0276: impl has stricter
    // requirements than trait). The intent — constraining T per score — is
    // only expressible by putting the bound on the trait itself, e.g.
    // `trait Score<T: Topology>` as in the other sketches. Kept as-is to
    // document the dead end.
    fn get_interpret<T: Topology + CommandExecution + 'static>(&self) -> Box<dyn Interpret<T>> {
        // This is the key part: we constrain T to implement CommandExecution
        // If T doesn't implement CommandExecution, this will fail to compile
        Box::new(CommandInterpret::<T>::new(self.clone()))
    }
    fn name(&self) -> String {
        self.name.clone()
    }
}
// CommandInterpret implementation
struct CommandInterpret<T: Topology + CommandExecution> {
score: CommandScore,
_marker: std::marker::PhantomData<T>,
}
impl<T: Topology + CommandExecution> CommandInterpret<T> {
pub fn new(score: CommandScore) -> Self {
CommandInterpret {
score,
_marker: std::marker::PhantomData,
}
}
}
impl<T: Topology + CommandExecution> Interpret<T> for CommandInterpret<T> {
fn execute(&self) {
println!("Command interpret is executing: {:?}", self.score.args);
// In a real implementation, you would call the topology's execute_command method
// topology.execute_command(&self.score.args);
}
}
// FileScore implementation - a different type of score that requires FileSystem capability
#[derive(Clone, Debug)]
struct FileScore {
name: String,
path: String,
content: Option<String>,
}
impl FileScore {
pub fn new_read(name: String, path: String) -> Self {
FileScore { name, path, content: None }
}
pub fn new_write(name: String, path: String, content: String) -> Self {
FileScore { name, path, content: Some(content) }
}
}
impl Score for FileScore {
    fn get_interpret<T: Topology>(&self) -> Box<dyn Interpret<T>> {
        // NOTE(review): this does NOT compile. `FileInterpret<T>` requires
        // `T: FileSystem`, but `T` is only bound by `Topology` here, so the
        // `T: FileSystem` obligation is unsatisfied (error E0277). The
        // original comment claimed this "constrains T" — it cannot; the
        // bound must live on the `Score` trait signature itself.
        Box::new(FileInterpret::<T>::new(self.clone()))
    }
    fn name(&self) -> String {
        self.name.clone()
    }
}
// FileInterpret implementation
struct FileInterpret<T: Topology + FileSystem> {
score: FileScore,
_marker: std::marker::PhantomData<T>,
}
impl<T: Topology + FileSystem> FileInterpret<T> {
pub fn new(score: FileScore) -> Self {
FileInterpret {
score,
_marker: std::marker::PhantomData,
}
}
}
impl<T: Topology + FileSystem> Interpret<T> for FileInterpret<T> {
fn execute(&self) {
match &self.score.content {
Some(content) => {
println!("File interpret is writing to {}: {}", self.score.path, content);
// In a real implementation: topology.write_file(&self.score.path, content);
},
None => {
println!("File interpret is reading from {}", self.score.path);
// In a real implementation: let content = topology.read_file(&self.score.path);
}
}
}
}
fn main() {
// Create our topologies
let linux = LinuxHostTopology { hostname: "server1.example.com".to_string() };
let bare_metal = BareMetalTopology { device_id: "device001".to_string() };
// Create our maestros
let linux_maestro = Maestro::new(linux);
let bare_metal_maestro = Maestro::new(bare_metal);
// Create scores
let command_score = CommandScore::new(
"List Files".to_string(),
vec!["ls".to_string(), "-la".to_string()]
);
let file_read_score = FileScore::new_read(
"Read Config".to_string(),
"/etc/config.json".to_string()
);
// This will work because LinuxHostTopology implements CommandExecution
linux_maestro.execute_score(command_score.clone());
// This will work because LinuxHostTopology implements FileSystem
linux_maestro.execute_score(file_read_score.clone());
// This will work because BareMetalTopology implements FileSystem
bare_metal_maestro.execute_score(file_read_score);
// This would NOT compile because BareMetalTopology doesn't implement CommandExecution:
// bare_metal_maestro.execute_score(command_score);
// The error would occur at compile time, ensuring type safety
println!("All scores executed successfully!");
}

View File

@ -0,0 +1,314 @@
mod main_gemini25pro;
use std::process::Command;
pub trait Capability {}
pub trait CommandCapability: Capability {
fn execute_command(&self, command: &str, args: &[&str]) -> Result<String, String>;
}
pub trait KubernetesCapability: Capability {
fn apply_manifest(&self, manifest: &str) -> Result<(), String>;
fn get_resource(&self, resource_type: &str, name: &str) -> Result<String, String>;
}
pub trait Topology {
fn name(&self) -> &str;
}
pub trait Score<T: Topology> {
fn compile(&self) -> Result<Box<dyn Interpret<T>>, String>;
fn name(&self) -> &str;
}
pub struct LinuxHostTopology {
name: String,
host: String,
}
impl Capability for LinuxHostTopology {}
impl LinuxHostTopology {
pub fn new(name: String, host: String) -> Self {
Self { name, host }
}
}
impl Topology for LinuxHostTopology {
fn name(&self) -> &str {
&self.name
}
}
impl CommandCapability for LinuxHostTopology {
fn execute_command(&self, command: &str, args: &[&str]) -> Result<String, String> {
println!("Executing on {}: {} {:?}", self.host, command, args);
// In a real implementation, this would SSH to the host and execute the command
let output = Command::new(command)
.args(args)
.output()
.map_err(|e| e.to_string())?;
if output.status.success() {
Ok(String::from_utf8_lossy(&output.stdout).to_string())
} else {
Err(String::from_utf8_lossy(&output.stderr).to_string())
}
}
}
pub struct K3DTopology {
name: String,
linux_host: LinuxHostTopology,
cluster_name: String,
}
impl Capability for K3DTopology {}
impl K3DTopology {
pub fn new(name: String, linux_host: LinuxHostTopology, cluster_name: String) -> Self {
Self {
name,
linux_host,
cluster_name,
}
}
}
impl Topology for K3DTopology {
fn name(&self) -> &str {
&self.name
}
}
impl CommandCapability for K3DTopology {
fn execute_command(&self, command: &str, args: &[&str]) -> Result<String, String> {
self.linux_host.execute_command(command, args)
}
}
impl KubernetesCapability for K3DTopology {
    /// Applies a Kubernetes manifest to the K3D cluster by writing it to a
    /// temporary file on the host and running `kubectl apply -f` against it.
    fn apply_manifest(&self, manifest: &str) -> Result<(), String> {
        println!("Applying manifest to K3D cluster '{}'", self.cluster_name);
        // Write manifest to a temporary file.
        let temp_file = "/tmp/manifest-harmony-temp.yaml";
        // BUG FIX: `cat > file` was previously run with no input at all, so
        // the temp file was created empty and `manifest` was never used.
        // Feed the content through a quoted heredoc; the quoted delimiter
        // ('HARMONY_EOF') suppresses shell expansion inside the manifest.
        // NOTE: breaks if the manifest itself contains the delimiter line.
        self.linux_host.execute_command(
            "bash",
            &[
                "-c",
                &format!("cat > {} <<'HARMONY_EOF'\n{}\nHARMONY_EOF", temp_file, manifest),
            ],
        )?;
        // Apply with kubectl against this cluster's context.
        self.linux_host.execute_command("kubectl", &[
            "--context",
            &format!("k3d-{}", self.cluster_name),
            "apply",
            "-f",
            temp_file,
        ])?;
        Ok(())
    }
    /// Fetches a single resource from the cluster as YAML
    /// (`kubectl get <type> <name> -o yaml`).
    fn get_resource(&self, resource_type: &str, name: &str) -> Result<String, String> {
        println!(
            "Getting resource {}/{} from K3D cluster '{}'",
            resource_type, name, self.cluster_name
        );
        self.linux_host.execute_command("kubectl", &[
            "--context",
            &format!("k3d-{}", self.cluster_name),
            "get",
            resource_type,
            name,
            "-o",
            "yaml",
        ])
    }
}
pub struct CommandScore {
name: String,
command: String,
args: Vec<String>,
}
impl CommandScore {
pub fn new(name: String, command: String, args: Vec<String>) -> Self {
Self {
name,
command,
args,
}
}
}
pub trait Interpret<T: Topology> {
fn execute(&self, topology: &T) -> Result<String, String>;
}
/// Interpret for `CommandScore`. Owns a copy of the command data so that
/// `execute` can actually run it against any `CommandCapability` topology.
struct CommandInterpret {
    command: String,
    args: Vec<String>,
}
impl<T> Interpret<T> for CommandInterpret
where
    T: Topology + CommandCapability,
{
    fn execute(&self, topology: &T) -> Result<String, String> {
        // BUG FIX: this was `todo!()`, which panicked at runtime during
        // orchestration. Delegate to the topology's command capability.
        let args: Vec<&str> = self.args.iter().map(String::as_str).collect();
        topology.execute_command(&self.command, &args)
    }
}
impl<T> Score<T> for CommandScore
where
    T: Topology + CommandCapability,
{
    /// Compiles this score into an interpret carrying a copy of the command.
    fn compile(&self) -> Result<Box<dyn Interpret<T>>, String> {
        Ok(Box::new(CommandInterpret {
            command: self.command.clone(),
            args: self.args.clone(),
        }))
    }
    fn name(&self) -> &str {
        &self.name
    }
}
#[derive(Clone)]
pub struct K8sResourceScore {
name: String,
manifest: String,
}
impl K8sResourceScore {
pub fn new(name: String, manifest: String) -> Self {
Self { name, manifest }
}
}
/// Interpret for `K8sResourceScore`: applies the score's manifest on any
/// topology exposing `KubernetesCapability`.
struct K8sResourceInterpret {
    score: K8sResourceScore,
}
impl<T: Topology + KubernetesCapability> Interpret<T> for K8sResourceInterpret {
    fn execute(&self, topology: &T) -> Result<String, String> {
        // BUG FIX: this was `todo!()`, which panicked at runtime during
        // orchestration. Apply the manifest through the topology.
        topology.apply_manifest(&self.score.manifest)?;
        Ok(format!("Applied manifest for score '{}'", self.score.name))
    }
}
impl<T> Score<T> for K8sResourceScore
where
    T: Topology + KubernetesCapability,
{
    // Signature normalized: `Box<dyn Interpret<T>>` is identical to the
    // previous `Box<(dyn Interpret<T> + 'static)>` ('static is the default).
    fn compile(&self) -> Result<Box<dyn Interpret<T>>, String> {
        Ok(Box::new(K8sResourceInterpret {
            score: self.clone(),
        }))
    }
    fn name(&self) -> &str {
        &self.name
    }
}
pub struct Maestro<T: Topology> {
topology: T,
scores: Vec<Box<dyn Score<T>>>,
}
impl<T: Topology> Maestro<T> {
pub fn new(topology: T) -> Self {
Self {
topology,
scores: Vec::new(),
}
}
pub fn register_score<S>(&mut self, score: S)
where
S: Score<T> + 'static,
{
println!(
"Registering score '{}' for topology '{}'",
score.name(),
self.topology.name()
);
self.scores.push(Box::new(score));
}
pub fn orchestrate(&self) -> Result<(), String> {
println!("Orchestrating topology '{}'", self.topology.name());
for score in &self.scores {
let interpret = score.compile()?;
interpret.execute(&self.topology)?;
}
Ok(())
}
}
fn main() {
let linux_host = LinuxHostTopology::new("dev-machine".to_string(), "localhost".to_string());
let mut linux_maestro = Maestro::new(linux_host);
linux_maestro.register_score(CommandScore::new(
"check-disk".to_string(),
"df".to_string(),
vec!["-h".to_string()],
));
linux_maestro.orchestrate().unwrap();
// This would fail to compile if we tried to register a K8sResourceScore
// because LinuxHostTopology doesn't implement KubernetesCapability
//linux_maestro.register_score(K8sResourceScore::new(
// "...".to_string(),
// "...".to_string(),
//));
// Create a K3D topology which has both Command and Kubernetes capabilities
let k3d_host = LinuxHostTopology::new("k3d-host".to_string(), "localhost".to_string());
let k3d_topology = K3DTopology::new(
"dev-cluster".to_string(),
k3d_host,
"devcluster".to_string(),
);
// Create a maestro for the K3D topology
let mut k3d_maestro = Maestro::new(k3d_topology);
// We can register both command scores and kubernetes scores
k3d_maestro.register_score(CommandScore::new(
"check-nodes".to_string(),
"kubectl".to_string(),
vec!["get".to_string(), "nodes".to_string()],
));
k3d_maestro.register_score(K8sResourceScore::new(
"deploy-nginx".to_string(),
r#"
apiVersion: apps/v1
kind: Deployment
metadata:
name: nginx
spec:
replicas: 1
selector:
matchLabels:
app: nginx
template:
metadata:
labels:
app: nginx
spec:
containers:
- name: nginx
image: nginx:latest
ports:
- containerPort: 80
"#
.to_string(),
));
// Orchestrate both topologies
linux_maestro.orchestrate().unwrap();
k3d_maestro.orchestrate().unwrap();
}

View File

@ -0,0 +1,323 @@
use std::marker::PhantomData;
use std::process::Command;
// ===== Capability Traits =====
/// Base trait for all capabilities
pub trait Capability {}
/// Capability for executing shell commands on a host
pub trait CommandCapability: Capability {
fn execute_command(&self, command: &str, args: &[&str]) -> Result<String, String>;
}
/// Capability for interacting with a Kubernetes cluster
pub trait KubernetesCapability: Capability {
fn apply_manifest(&self, manifest: &str) -> Result<(), String>;
fn get_resource(&self, resource_type: &str, name: &str) -> Result<String, String>;
}
// ===== Topology Traits =====
/// Base trait for all topologies
pub trait Topology {
// Base topology methods that don't depend on capabilities
fn name(&self) -> &str;
}
// ===== Score Traits =====
/// Generic Score trait with an associated Capability type
pub trait Score<T: Topology> {
fn apply(&self, topology: &T) -> Result<(), String>;
fn name(&self) -> &str;
}
// ===== Concrete Topologies =====
/// A topology representing a Linux host
pub struct LinuxHostTopology {
name: String,
host: String,
}
impl LinuxHostTopology {
pub fn new(name: String, host: String) -> Self {
Self { name, host }
}
}
impl Topology for LinuxHostTopology {
fn name(&self) -> &str {
&self.name
}
}
impl CommandCapability for LinuxHostTopology {
fn execute_command(&self, command: &str, args: &[&str]) -> Result<String, String> {
println!("Executing on {}: {} {:?}", self.host, command, args);
// In a real implementation, this would SSH to the host and execute the command
let output = Command::new(command)
.args(args)
.output()
.map_err(|e| e.to_string())?;
if output.status.success() {
Ok(String::from_utf8_lossy(&output.stdout).to_string())
} else {
Err(String::from_utf8_lossy(&output.stderr).to_string())
}
}
}
/// A topology representing a K3D Kubernetes cluster
pub struct K3DTopology {
name: String,
linux_host: LinuxHostTopology,
cluster_name: String,
}
impl K3DTopology {
pub fn new(name: String, linux_host: LinuxHostTopology, cluster_name: String) -> Self {
Self {
name,
linux_host,
cluster_name,
}
}
}
impl Topology for K3DTopology {
fn name(&self) -> &str {
&self.name
}
}
impl CommandCapability for K3DTopology {
fn execute_command(&self, command: &str, args: &[&str]) -> Result<String, String> {
// Delegate to the underlying Linux host
self.linux_host.execute_command(command, args)
}
}
impl KubernetesCapability for K3DTopology {
    /// Applies a Kubernetes manifest by writing it to a uniquely named temp
    /// file on the host and running `kubectl apply -f` against it.
    fn apply_manifest(&self, manifest: &str) -> Result<(), String> {
        println!("Applying manifest to K3D cluster '{}'", self.cluster_name);
        // Write manifest to a temporary file (random suffix avoids clashes
        // between concurrent applies).
        let temp_file = format!("/tmp/manifest-{}.yaml", rand::random::<u32>());
        // BUG FIX: `cat > file` was previously run with no input at all, so
        // the temp file was created empty and `manifest` was never used.
        // Feed the content through a quoted heredoc; the quoted delimiter
        // ('HARMONY_EOF') suppresses shell expansion inside the manifest.
        // NOTE: breaks if the manifest itself contains the delimiter line.
        self.execute_command(
            "bash",
            &[
                "-c",
                &format!("cat > {} <<'HARMONY_EOF'\n{}\nHARMONY_EOF", temp_file, manifest),
            ],
        )?;
        // Apply with kubectl
        self.execute_command(
            "kubectl",
            &["--context", &format!("k3d-{}", self.cluster_name), "apply", "-f", &temp_file]
        )?;
        Ok(())
    }
    /// Fetches a single resource as YAML (`kubectl get <type> <name> -o yaml`).
    fn get_resource(&self, resource_type: &str, name: &str) -> Result<String, String> {
        println!("Getting resource {}/{} from K3D cluster '{}'", resource_type, name, self.cluster_name);
        self.execute_command(
            "kubectl",
            &[
                "--context",
                &format!("k3d-{}", self.cluster_name),
                "get",
                resource_type,
                name,
                "-o",
                "yaml",
            ]
        )
    }
}
// ===== Concrete Scores =====
/// A score that executes commands on a topology
pub struct CommandScore {
name: String,
command: String,
args: Vec<String>,
}
impl CommandScore {
pub fn new(name: String, command: String, args: Vec<String>) -> Self {
Self { name, command, args }
}
}
/// A `CommandScore` can target any topology able to run shell commands.
impl<T> Score<T> for CommandScore
where
    T: Topology + CommandCapability,
{
    fn apply(&self, topology: &T) -> Result<(), String> {
        println!("Applying CommandScore '{}' to topology '{}'", self.name, topology.name());
        // Borrow each owned argument as &str for the capability call.
        let borrowed: Vec<&str> = self.args.iter().map(String::as_str).collect();
        topology.execute_command(&self.command, &borrowed).map(|_output| ())
    }
    fn name(&self) -> &str {
        &self.name
    }
}
/// A score that applies Kubernetes resources to a topology
pub struct K8sResourceScore {
name: String,
manifest: String,
}
impl K8sResourceScore {
pub fn new(name: String, manifest: String) -> Self {
Self { name, manifest }
}
}
impl<T> Score<T> for K8sResourceScore
where
T: Topology + KubernetesCapability
{
fn apply(&self, topology: &T) -> Result<(), String> {
println!("Applying K8sResourceScore '{}' to topology '{}'", self.name, topology.name());
topology.apply_manifest(&self.manifest)
}
fn name(&self) -> &str {
&self.name
}
}
// ===== Maestro Orchestrator =====
/// Type-safe orchestrator that enforces capability requirements at compile time
pub struct Maestro<T: Topology> {
topology: T,
scores: Vec<Box<dyn ScoreWrapper<T>>>,
}
/// A trait object wrapper that hides the specific Score type but preserves its
/// capability requirements
trait ScoreWrapper<T: Topology> {
    fn apply(&self, topology: &T) -> Result<(), String>;
    fn name(&self) -> &str;
}
/// Implementation of ScoreWrapper for any Score that works with topology T.
/// The capability bounds were already proven when `Score<T>` was
/// implemented; this blanket impl only erases the concrete score type so
/// heterogeneous scores can live in one `Vec<Box<dyn ScoreWrapper<T>>>`.
impl<T, S> ScoreWrapper<T> for S
where
    T: Topology,
    S: Score<T> + 'static
{
    fn apply(&self, topology: &T) -> Result<(), String> {
        // Fully-qualified call avoids ambiguity with ScoreWrapper::apply.
        <S as Score<T>>::apply(self, topology)
    }
    fn name(&self) -> &str {
        <S as Score<T>>::name(self)
    }
}
impl<T: Topology> Maestro<T> {
pub fn new(topology: T) -> Self {
Self {
topology,
scores: Vec::new(),
}
}
/// Register a score that is compatible with this topology's capabilities
pub fn register_score<S>(&mut self, score: S)
where
S: Score<T> + 'static
{
println!("Registering score '{}' for topology '{}'", score.name(), self.topology.name());
self.scores.push(Box::new(score));
}
/// Apply all registered scores to the topology
pub fn orchestrate(&self) -> Result<(), String> {
println!("Orchestrating topology '{}'", self.topology.name());
for score in &self.scores {
score.apply(&self.topology)?;
}
Ok(())
}
}
// ===== Example Usage =====
fn main() {
// Create a Linux host topology
let linux_host = LinuxHostTopology::new(
"dev-machine".to_string(),
"localhost".to_string()
);
// Create a maestro for the Linux host
let mut linux_maestro = Maestro::new(linux_host);
// Register a command score that works with any topology having CommandCapability
linux_maestro.register_score(CommandScore::new(
"check-disk".to_string(),
"df".to_string(),
vec!["-h".to_string()]
));
// This would fail to compile if we tried to register a K8sResourceScore
// because LinuxHostTopology doesn't implement KubernetesCapability
// linux_maestro.register_score(K8sResourceScore::new(...));
// Create a K3D topology which has both Command and Kubernetes capabilities
let k3d_host = LinuxHostTopology::new(
"k3d-host".to_string(),
"localhost".to_string()
);
let k3d_topology = K3DTopology::new(
"dev-cluster".to_string(),
k3d_host,
"devcluster".to_string()
);
// Create a maestro for the K3D topology
let mut k3d_maestro = Maestro::new(k3d_topology);
// We can register both command scores and kubernetes scores
k3d_maestro.register_score(CommandScore::new(
"check-nodes".to_string(),
"kubectl".to_string(),
vec!["get".to_string(), "nodes".to_string()]
));
k3d_maestro.register_score(K8sResourceScore::new(
"deploy-nginx".to_string(),
r#"
apiVersion: apps/v1
kind: Deployment
metadata:
name: nginx
spec:
replicas: 1
selector:
matchLabels:
app: nginx
template:
metadata:
labels:
app: nginx
spec:
containers:
- name: nginx
image: nginx:latest
ports:
- containerPort: 80
"#.to_string()
));
// Orchestrate both topologies
linux_maestro.orchestrate().unwrap();
k3d_maestro.orchestrate().unwrap();
}

View File

@ -0,0 +1,369 @@
// Import necessary items (though for this example, few are needed beyond std)
use std::fmt;
// --- Error Handling ---
// A simple error type for demonstration purposes. In a real app, use `thiserror` or `anyhow`.
/// Demo error type; a production system would use `thiserror` or `anyhow`
/// (as the file-level comment already notes).
#[derive(Debug)]
enum OrchestrationError {
    CommandFailed(String),
    KubeClientError(String),
    TopologySetupFailed(String),
}
impl fmt::Display for OrchestrationError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Map each variant to its label once, then format uniformly.
        let (label, detail) = match self {
            OrchestrationError::CommandFailed(e) => ("Command execution failed", e),
            OrchestrationError::KubeClientError(e) => ("Kubernetes client error", e),
            OrchestrationError::TopologySetupFailed(e) => ("Topology setup failed", e),
        };
        write!(f, "{}: {}", label, detail)
    }
}
impl std::error::Error for OrchestrationError {}
// Shared fallible-result alias used throughout this sketch.
type Result<T> = std::result::Result<T, Box<dyn std::error::Error>>;
// --- 1. Capability Specification (as Traits) ---
/// Capability trait representing the ability to run Linux commands.
/// This follows the "Parse, Don't Validate" idea implicitly - if you have an object
/// implementing this, you know you *can* run commands, no need to check later.
trait LinuxOperations {
fn run_command(&self, command: &str) -> Result<String>;
}
/// A mock Kubernetes client trait for demonstration.
trait KubeClient {
fn apply_manifest(&self, manifest: &str) -> Result<()>;
fn get_pods(&self, namespace: &str) -> Result<Vec<String>>;
}
/// Mock implementation of a KubeClient.
struct MockKubeClient {
cluster_name: String,
}
impl KubeClient for MockKubeClient {
fn apply_manifest(&self, manifest: &str) -> Result<()> {
println!(
"[{}] Applying Kubernetes manifest:\n---\n{}\n---",
self.cluster_name, manifest
);
// Simulate success or failure
if manifest.contains("invalid") {
Err(Box::new(OrchestrationError::KubeClientError(
"Invalid manifest content".into(),
)))
} else {
Ok(())
}
}
fn get_pods(&self, namespace: &str) -> Result<Vec<String>> {
println!(
"[{}] Getting pods in namespace '{}'",
self.cluster_name, namespace
);
Ok(vec![
format!("pod-a-12345-{}-{}", namespace, self.cluster_name),
format!("pod-b-67890-{}-{}", namespace, self.cluster_name),
])
}
}
/// Capability trait representing access to a Kubernetes cluster.
/// This follows Rust Embedded WG's "Zero-Cost Abstractions" - the trait itself
/// adds no runtime overhead, only compile-time structure.
trait KubernetesCluster {
    // Provides access to a Kubernetes client instance.
    // Using `impl Trait` in return position for flexibility.
    // NOTE(review): return-position `impl Trait` in a trait (RPITIT) is only
    // stable since Rust 1.75 — confirm the workspace MSRV before adopting.
    // Also note `Result` here is the file's alias with a boxed dyn error.
    fn get_kube_client(&self) -> Result<impl KubeClient>;
}
// --- 2. Topology Implementations ---
// Topologies implement the capabilities they provide.
/// Represents a basic Linux host.
#[derive(Debug, Clone)]
struct LinuxHostTopology {
hostname: String,
// In a real scenario: SSH connection details, etc.
}
impl LinuxHostTopology {
fn new(hostname: &str) -> Self {
println!("Initializing LinuxHostTopology for {}", hostname);
Self {
hostname: hostname.to_string(),
}
}
}
// LinuxHostTopology provides LinuxOperations capability.
impl LinuxOperations for LinuxHostTopology {
fn run_command(&self, command: &str) -> Result<String> {
println!("[{}] Running command: '{}'", self.hostname, command);
// Simulate command execution (e.g., via SSH)
if command.starts_with("fail") {
Err(Box::new(OrchestrationError::CommandFailed(format!(
"Command '{}' failed",
command
))))
} else {
Ok(format!("Output of '{}' on {}", command, self.hostname))
}
}
}
/// Represents a K3D (Kubernetes in Docker) cluster running on a host.
#[derive(Debug, Clone)]
struct K3DTopology {
cluster_name: String,
host_os: String, // Example: might implicitly run commands on the underlying host
// In a real scenario: Kubeconfig path, Docker client, etc.
}
impl K3DTopology {
fn new(cluster_name: &str) -> Self {
println!("Initializing K3DTopology for cluster {}", cluster_name);
Self {
cluster_name: cluster_name.to_string(),
host_os: "Linux".to_string(), // Assume k3d runs on Linux for this example
}
}
}
// K3DTopology provides KubernetesCluster capability.
impl KubernetesCluster for K3DTopology {
fn get_kube_client(&self) -> Result<impl KubeClient> {
println!("[{}] Creating mock Kubernetes client", self.cluster_name);
// In a real scenario, this would initialize a client using kubeconfig etc.
Ok(MockKubeClient {
cluster_name: self.cluster_name.clone(),
})
}
}
// K3DTopology *also* provides LinuxOperations (e.g., for running commands inside nodes or on the host managing k3d).
impl LinuxOperations for K3DTopology {
fn run_command(&self, command: &str) -> Result<String> {
println!(
"[{} on {} host] Running command: '{}'",
self.cluster_name, self.host_os, command
);
// Simulate command execution (maybe `docker exec` or similar)
if command.starts_with("fail") {
Err(Box::new(OrchestrationError::CommandFailed(format!(
"Command '{}' failed within k3d context",
command
))))
} else {
Ok(format!(
"Output of '{}' within k3d cluster {}",
command, self.cluster_name
))
}
}
}
// --- 3. Score Implementations ---
// Scores require capabilities via trait bounds on their execution logic.
/// Base trait for identifying scores. Could be empty or hold metadata.
trait Score {
    /// Human-readable identifier used in registration and execution logs.
    fn name(&self) -> &'static str;
    // We don't put execute here, as its signature depends on required capabilities.
}
/// A score that runs a shell command on a Linux host.
#[derive(Debug)]
struct CommandScore {
    /// The shell command line executed via `LinuxOperations`.
    command: String,
}
impl Score for CommandScore {
    fn name(&self) -> &'static str {
        "CommandScore"
    }
}
impl CommandScore {
    /// Wraps a shell command into a reusable score.
    fn new(command: &str) -> Self {
        let command = command.to_string();
        CommandScore { command }
    }
    /// Execute method is generic over T, but requires T implements LinuxOperations.
    /// This follows the "Scores as Polymorphic Functions" idea.
    fn execute<T: LinuxOperations + ?Sized>(&self, topology: &T) -> Result<()> {
        println!("Executing Score: {}", Score::name(self));
        // Run the command and, on success, log its output; errors propagate as-is.
        topology.run_command(&self.command).map(|output| {
            println!("Command Score Output: {}", output);
        })
    }
}
/// A score that applies a Kubernetes resource manifest.
#[derive(Debug)]
struct K8sResourceScore {
    /// Path (or content placeholder) of the manifest to apply.
    manifest_path: String,
}
impl Score for K8sResourceScore {
    fn name(&self) -> &'static str {
        "K8sResourceScore"
    }
}
impl K8sResourceScore {
    /// Wraps a manifest path into a reusable score.
    fn new(manifest_path: &str) -> Self {
        K8sResourceScore {
            manifest_path: manifest_path.to_owned(),
        }
    }
    /// Execute method requires T implements KubernetesCluster.
    fn execute<T: KubernetesCluster + ?Sized>(&self, topology: &T) -> Result<()> {
        println!("Executing Score: {}", Score::name(self));
        let client = topology.get_kube_client()?;
        // Simulate reading the manifest file by synthesizing a Pod manifest.
        let manifest_content = format!(
            "apiVersion: v1\nkind: Pod\nmetadata:\n name: my-pod-from-{}",
            self.manifest_path
        );
        client.apply_manifest(&manifest_content)?;
        println!(
            "K8s Resource Score applied manifest: {}",
            self.manifest_path
        );
        Ok(())
    }
}
// --- 4. Maestro (The Orchestrator) ---
// This version of Maestro uses a helper trait (`ScoreRunner`) to enable
// storing heterogeneous scores while preserving compile-time checks.
/// A helper trait to erase the specific capability requirements *after*
/// the compiler has verified them, allowing storage in a Vec.
/// The verification happens in the blanket impls below.
trait ScoreRunner<T> {
    // T is the concrete Topology type
    /// Runs the score against the given topology.
    fn run(&self, topology: &T) -> Result<()>;
    /// Forwards the score's display name for logging.
    fn name(&self) -> &'static str;
}
// Blanket implementation: A CommandScore can be run on any Topology T
// *if and only if* T implements LinuxOperations.
// The compiler checks this bound when `add_score` is called.
impl<T: LinuxOperations> ScoreRunner<T> for CommandScore {
    fn run(&self, topology: &T) -> Result<()> {
        self.execute(topology) // Call the capability-specific execute method
    }
    fn name(&self) -> &'static str {
        Score::name(self)
    }
}
// Blanket implementation: A K8sResourceScore can be run on any Topology T
// *if and only if* T implements KubernetesCluster.
impl<T: KubernetesCluster> ScoreRunner<T> for K8sResourceScore {
    fn run(&self, topology: &T) -> Result<()> {
        self.execute(topology) // Call the capability-specific execute method
    }
    fn name(&self) -> &'static str {
        Score::name(self)
    }
}
/// The Maestro orchestrator, strongly typed to a specific Topology `T`.
struct Maestro<T> {
    topology: T,
    // Stores type-erased runners, but addition is type-safe.
    scores: Vec<Box<dyn ScoreRunner<T>>>,
}
impl<T> Maestro<T> {
    /// Creates a new Maestro instance bound to a specific topology.
    fn new(topology: T) -> Self {
        println!("Maestro initialized.");
        Self {
            topology,
            scores: Vec::new(),
        }
    }
    /// Adds a score to the Maestro.
    /// **Compile-time check happens here!**
    /// The `S: ScoreRunner<T>` bound ensures that the score `S` provides an
    /// implementation of `ScoreRunner` *for the specific topology type `T`*.
    /// The blanket impls above ensure this is only possible if `T` has the
    /// required capabilities for `S`.
    /// This directly follows the "Theoretical Example: The Compiler as an Ally".
    fn add_score<S>(&mut self, score: S)
    where
        S: Score + ScoreRunner<T> + 'static, // S must be runnable on *this* T
    {
        println!("Registering score: {}", Score::name(&score));
        self.scores.push(Box::new(score));
    }
    /// Runs all registered scores sequentially on the topology,
    /// logging each outcome and collecting every result.
    fn run_all(&self) -> Vec<Result<()>> {
        println!("\n--- Running all scores ---");
        let mut results = Vec::with_capacity(self.scores.len());
        for runner in &self.scores {
            println!("---");
            let outcome = runner.run(&self.topology);
            match &outcome {
                Ok(_) => println!("Score '{}' completed successfully.", runner.name()),
                Err(e) => eprintln!("Score '{}' failed: {}", runner.name(), e),
            }
            results.push(outcome);
        }
        results
    }
}
// --- 5. Example Usage ---
/// Demo entry point: runs compatible scores on two topologies and shows
/// (via the commented-out line) how incompatible combinations fail to compile.
fn main() {
    println!("=== Scenario 1: Linux Host Topology ===");
    let linux_host = LinuxHostTopology::new("server1.example.com");
    let mut maestro_linux = Maestro::new(linux_host);
    // Add scores compatible with LinuxHostTopology (which has LinuxOperations)
    maestro_linux.add_score(CommandScore::new("uname -a"));
    maestro_linux.add_score(CommandScore::new("ls -l /tmp"));
    // *** Compile-time Error Example ***
    // Try adding a score that requires KubernetesCluster capability.
    // This line WILL NOT COMPILE because LinuxHostTopology does not implement KubernetesCluster,
    // therefore K8sResourceScore does not implement ScoreRunner<LinuxHostTopology>.
    // maestro_linux.add_score(K8sResourceScore::new("my-app.yaml"));
    // Uncomment the line above to see the compiler error! The error message will
    // likely point to the `ScoreRunner<LinuxHostTopology>` bound not being satisfied
    // for `K8sResourceScore`.
    let results_linux = maestro_linux.run_all();
    println!("\nLinux Host Results: {:?}", results_linux);
    println!("\n=== Scenario 2: K3D Topology ===");
    let k3d_cluster = K3DTopology::new("dev-cluster");
    let mut maestro_k3d = Maestro::new(k3d_cluster);
    // Add scores compatible with K3DTopology (which has LinuxOperations AND KubernetesCluster)
    maestro_k3d.add_score(CommandScore::new("pwd")); // Uses LinuxOperations
    maestro_k3d.add_score(K8sResourceScore::new("nginx-deployment.yaml")); // Uses KubernetesCluster
    maestro_k3d.add_score(K8sResourceScore::new("invalid-service.yaml")); // Test error case
    maestro_k3d.add_score(CommandScore::new("fail please")); // Test error case
    let results_k3d = maestro_k3d.run_all();
    println!("\nK3D Cluster Results: {:?}", results_k3d);
    println!("\n=== Compile-Time Safety Demonstrated ===");
    println!("(Check the commented-out line in the code for the compile error example)");
}

View File

@ -0,0 +1,492 @@
use std::any::Any;
use std::fmt::Debug;
use std::process::Command;
/// Marker trait: every concrete capability trait extends this.
pub trait Capability {}
/// Ability to run a program with arguments on some host.
pub trait CommandCapability: Capability {
    // NOTE(review): `&Vec<String>` would idiomatically be `&[String]`, but
    // changing it here would break every implementor — left as-is.
    fn execute_command(&self, command: &str, args: &Vec<String>) -> Result<String, String>;
}
/// Ability to manage Kubernetes resources.
pub trait KubernetesCapability: Capability {
    /// Applies a YAML manifest to the cluster.
    fn apply_manifest(&self, manifest: &str) -> Result<(), String>;
    /// Fetches a named resource's representation.
    fn get_resource(&self, resource_type: &str, name: &str) -> Result<String, String>;
}
/// A named infrastructure target that scores run against.
pub trait Topology {
    fn name(&self) -> &str;
}
/// Compiled, executable form of a score, specialized to a topology type.
pub trait Interpret<T: Topology> {
    fn execute(&self, topology: &T) -> Result<String, String>;
}
// --- Score Definition Structs (Concrete) ---
// CommandScore struct remains the same
/// Declarative description of a shell command invocation.
#[derive(Debug, Clone)] // Added Debug/Clone for easier handling
pub struct CommandScore {
    /// Display name used in logs and the TUI.
    name: String,
    /// Program to invoke.
    command: String,
    /// Arguments passed to the program.
    args: Vec<String>,
}
impl CommandScore {
    /// Bundles the pieces of a command invocation into a score definition.
    pub fn new(name: String, command: String, args: Vec<String>) -> Self {
        CommandScore { name, command, args }
    }
}
// K8sResourceScore struct remains the same
/// Declarative description of a Kubernetes manifest to apply.
#[derive(Debug, Clone)]
pub struct K8sResourceScore {
    /// Display name used in logs and the TUI.
    name: String,
    /// Raw YAML manifest content.
    manifest: String,
}
impl K8sResourceScore {
    /// Pairs a display name with the manifest it will apply.
    pub fn new(name: String, manifest: String) -> Self {
        K8sResourceScore { name, manifest }
    }
}
// --- Metadata / Base Score Trait (Non-Generic) ---
// Trait for common info and enabling downcasting later if needed
/// Object-safe metadata view of a score: name, downcasting, and cloning.
/// Lets the TUI hold scores without knowing their topology requirements.
pub trait ScoreDefinition: Debug + Send + Sync {
    fn name(&self) -> &str;
    // Method to allow downcasting
    fn as_any(&self) -> &dyn Any;
    // Optional: Could add methods for description, parameters etc.
    // fn description(&self) -> &str;
    // Optional but potentially useful: A way to clone the definition
    fn box_clone(&self) -> Box<dyn ScoreDefinition>;
}
// Implement Clone for Box<dyn ScoreDefinition>
// (trait objects can't derive Clone, so delegate to box_clone).
impl Clone for Box<dyn ScoreDefinition> {
    fn clone(&self) -> Self {
        self.box_clone()
    }
}
// Implement ScoreDefinition for your concrete score types
impl ScoreDefinition for CommandScore {
    fn name(&self) -> &str {
        &self.name
    }
    fn as_any(&self) -> &dyn Any {
        self
    }
    fn box_clone(&self) -> Box<dyn ScoreDefinition> {
        Box::new(self.clone())
    }
}
impl ScoreDefinition for K8sResourceScore {
    fn name(&self) -> &str {
        &self.name
    }
    fn as_any(&self) -> &dyn Any {
        self
    }
    fn box_clone(&self) -> Box<dyn ScoreDefinition> {
        Box::new(self.clone())
    }
}
// --- Score Compatibility Trait (Generic over T) ---
// This remains largely the same, ensuring compile-time checks
/// A score that is provably runnable on topology type `T`:
/// it can compile itself into an `Interpret<T>` executor.
pub trait Score<T: Topology>: ScoreDefinition {
    // No need for name() here, it's in ScoreDefinition
    fn compile(&self) -> Result<Box<dyn Interpret<T>>, String>;
}
// --- Implementations of Score<T> (Crucial Link) ---
// CommandScore implements Score<T> for any T with CommandCapability
impl<T> Score<T> for CommandScore
where
    T: Topology + CommandCapability + 'static, // Added 'static bound often needed for Box<dyn>
    // Self: ScoreDefinition // This bound is implicit now
{
    /// Snapshots the command data into an executable interpreter.
    fn compile(&self) -> Result<Box<dyn Interpret<T>>, String> {
        // Pass necessary data from self to CommandInterpret
        Ok(Box::new(CommandInterpret {
            command: self.command.clone(),
            args: self.args.clone(),
        }))
    }
}
// K8sResourceScore implements Score<T> for any T with KubernetesCapability
impl<T> Score<T> for K8sResourceScore
where
    T: Topology + KubernetesCapability + 'static,
    // Self: ScoreDefinition
{
    /// Snapshots the manifest into an executable interpreter.
    fn compile(&self) -> Result<Box<dyn Interpret<T>>, String> {
        Ok(Box::new(K8sResourceInterpret {
            manifest: self.manifest.clone(), // Pass needed data
        }))
    }
}
// --- Interpret Implementations ---
// Need to hold the actual data now
/// Executable form of `CommandScore`: owns the command data it runs.
struct CommandInterpret {
    command: String,
    args: Vec<String>,
}
// BUG FIX: the impl previously declared an unconstrained lifetime
// (`impl<'a, T> ...`) that appeared nowhere in the trait, self type, or
// predicates — rejected by rustc (E0207). The lifetime was unused, so it
// is simply removed.
impl<T> Interpret<T> for CommandInterpret
where
    T: Topology + CommandCapability,
{
    /// Delegates to the topology's command capability using the stored data.
    fn execute(&self, topology: &T) -> Result<String, String> {
        topology.execute_command(&self.command, &self.args)
    }
}
/// Executable form of `K8sResourceScore`: owns the manifest it applies.
struct K8sResourceInterpret {
    manifest: String,
}
impl<T: Topology + KubernetesCapability> Interpret<T> for K8sResourceInterpret {
    /// Applies the stored manifest and reports which topology received it.
    fn execute(&self, topology: &T) -> Result<String, String> {
        topology.apply_manifest(&self.manifest)?;
        // apply_manifest returns Result<(), String>, adapt if needed
        Ok(format!("Applied manifest for {}", topology.name())) // Example success message
    }
}
// --- Maestro ---
// Maestro remains almost identical, leveraging the Score<T> bound
/// Orchestrator bound to one concrete topology `T`; only scores proven
/// compatible with `T` (via `Score<T>`) can be registered.
pub struct Maestro<T: Topology> {
    topology: T,
    // Stores Score<T> trait objects, ensuring compatibility
    scores: Vec<Box<dyn Score<T>>>,
}
impl<T: Topology + 'static> Maestro<T> { // Often need T: 'static here
    pub fn new(topology: T) -> Self {
        Self {
            topology,
            scores: Vec::new(),
        }
    }
    // This method signature is key - it takes a concrete S
    // and the compiler checks if S implements Score<T>
    /// Type-safe registration: fails to *compile* (not run) when `S` is not
    /// a `Score<T>` for this Maestro's topology.
    pub fn register_score<S>(&mut self, score: S) -> Result<(), String>
    where
        S: Score<T> + ScoreDefinition + Clone + 'static, // Ensure S is a Score for *this* T
        // We might need S: Clone if we want to store Box::new(score)
        // Alternatively, accept Box<dyn ScoreDefinition> and try to downcast/wrap
    {
        println!(
            "Registering score '{}' for topology '{}'",
            score.name(),
            self.topology.name()
        );
        // The compiler has already guaranteed that S implements Score<T>
        // We need to box it as dyn Score<T>
        self.scores.push(Box::new(score));
        Ok(())
    }
    // Alternative registration if you have Box<dyn ScoreDefinition>
    // NOTE(review): the where-clause below forces T to carry *every* capability
    // any known score might need, which defeats per-score capability checking;
    // prefer the generic `register_score` above.
    pub fn register_score_definition(&mut self, score_def: Box<dyn ScoreDefinition>) -> Result<(), String>
    where
        T: Topology + CommandCapability + KubernetesCapability + 'static, // Example: list all needed caps here, or use generics + downcasting
    {
        println!(
            "Attempting to register score '{}' for topology '{}'",
            score_def.name(),
            self.topology.name()
        );
        // Downcast to check concrete type and then check compatibility
        if let Some(cs) = score_def.as_any().downcast_ref::<CommandScore>() {
            // Check if T satisfies CommandScore's requirements (CommandCapability)
            // This check is somewhat manual or needs restructuring if we avoid listing all caps
            // A simpler way is to just try to create the Box<dyn Score<T>>
            let boxed_score: Box<dyn Score<T>> = Box::new(cs.clone()); // This relies on the blanket impls
            self.scores.push(boxed_score);
            Ok(())
        } else if let Some(ks) = score_def.as_any().downcast_ref::<K8sResourceScore>() {
            // Check if T satisfies K8sResourceScore's requirements (KubernetesCapability)
            let boxed_score: Box<dyn Score<T>> = Box::new(ks.clone());
            self.scores.push(boxed_score);
            Ok(())
        } else {
            Err(format!("Score '{}' is of an unknown type or incompatible", score_def.name()))
        }
        // This downcasting approach in Maestro slightly undermines the full compile-time
        // check unless designed carefully. The generic `register_score<S: Score<T>>` is safer.
    }
    /// Compiles then executes every registered score, in registration order,
    /// stopping at the first failure.
    pub fn orchestrate(&self) -> Result<(), String> {
        println!("Orchestrating topology '{}'", self.topology.name());
        for score in &self.scores {
            println!("Compiling score '{}'", score.name()); // Use name() from ScoreDefinition
            let interpret = score.compile()?;
            println!("Executing score '{}'", score.name());
            interpret.execute(&self.topology)?;
        }
        Ok(())
    }
}
// --- TUI Example ---
/// One selectable catalog entry: metadata only, NOT an executable `Score<T>`.
struct ScoreItem {
    // Holds the definition/metadata, NOT the Score<T> trait object
    definition: Box<dyn ScoreDefinition>,
}
/// Minimal TUI model: a catalog of score definitions the user can browse
/// and hand off to a concrete `Maestro<T>`.
struct HarmonyTui {
    // List of available score *definitions*
    available_scores: Vec<ScoreItem>,
    // Example: Maybe maps topology names to Maestros
    // maestros: HashMap<String, Box<dyn Any>>, // Storing Maestros generically is another challenge!
}
impl HarmonyTui {
    /// Creates an empty catalog.
    fn new() -> Self {
        HarmonyTui { available_scores: vec![] }
    }
    /// Adds a score definition to the selectable catalog.
    fn add_available_score(&mut self, score_def: Box<dyn ScoreDefinition>) {
        self.available_scores.push(ScoreItem { definition: score_def });
    }
    /// Prints the catalog with its selection indices.
    fn display_scores(&self) {
        println!("Available Scores:");
        for (i, item) in self.available_scores.iter().enumerate() {
            println!("{}: {}", i, item.definition.name());
        }
    }
    /// BUG FIX: the previous body was the incomplete expression
    /// `score.definition.` and did not compile. A bare `ScoreDefinition`
    /// cannot be executed — execution requires a `Maestro<T>` bound to a
    /// topology (see `add_selected_score_to_maestro`) — so this now just
    /// reports the selection.
    fn execute_score(&self, score: ScoreItem) {
        println!(
            "Score '{}' selected; register it with a Maestro to execute it.",
            score.definition.name()
        );
    }
    // Example: Function to add a selected score to a specific Maestro
    // This function would need access to the Maestros and handle the types
    fn add_selected_score_to_maestro<T>(
        &self,
        score_index: usize,
        maestro: &mut Maestro<T>
    ) -> Result<(), String>
    where
        T: Topology + CommandCapability + KubernetesCapability + 'static, // Adjust bounds as needed
    {
        let score_item = self.available_scores.get(score_index)
            .ok_or("Invalid score index")?;
        // We have Box<dyn ScoreDefinition>, need to add to Maestro<T>
        // Easiest is to downcast and call the generic register_score
        if let Some(cs) = score_item.definition.as_any().downcast_ref::<CommandScore>() {
            // Compiler checks if CommandScore: Score<T> via register_score's bound
            maestro.register_score(cs.clone())?;
            Ok(())
        } else if let Some(ks) = score_item.definition.as_any().downcast_ref::<K8sResourceScore>() {
            // Compiler checks if K8sResourceScore: Score<T> via register_score's bound
            maestro.register_score(ks.clone())?;
            Ok(())
        } else {
            Err(format!("Cannot add score '{}': Unknown type or check Maestro compatibility", score_item.definition.name()))
        }
    }
}
/// A k3d cluster reached through an underlying Linux host.
pub struct K3DTopology {
    /// Topology display name.
    name: String,
    /// Host used to run `bash`/`kubectl` commands.
    linux_host: LinuxHostTopology,
    /// k3d cluster name (kubectl context is `k3d-<cluster_name>`).
    cluster_name: String,
}
impl Capability for K3DTopology {}
impl K3DTopology {
    /// Bundles a host and a k3d cluster name into a topology.
    pub fn new(name: String, linux_host: LinuxHostTopology, cluster_name: String) -> Self {
        Self {
            name,
            linux_host,
            cluster_name,
        }
    }
}
impl Topology for K3DTopology {
    fn name(&self) -> &str {
        &self.name
    }
}
impl CommandCapability for K3DTopology {
    /// Commands are delegated verbatim to the underlying Linux host.
    fn execute_command(&self, command: &str, args: &Vec<String>) -> Result<String, String> {
        self.linux_host.execute_command(command, args)
    }
}
impl KubernetesCapability for K3DTopology {
    fn apply_manifest(&self, manifest: &str) -> Result<(), String> {
        println!("Applying manifest to K3D cluster '{}'", self.cluster_name);
        // Write manifest to a temporary file on the host.
        let temp_file = "/tmp/manifest-harmony-temp.yaml";
        // BUG FIX: the previous version ran `cat > {temp_file}` without ever
        // feeding it the manifest, so kubectl applied an empty file. Embed the
        // content in a quoted heredoc so it reaches the file verbatim.
        let write_script = format!(
            "cat > {} <<'HARMONY_EOF'\n{}\nHARMONY_EOF",
            temp_file, manifest
        );
        self.linux_host
            .execute_command("bash", &vec!["-c".to_string(), write_script])?;
        // Apply with kubectl against this cluster's k3d context.
        self.linux_host.execute_command(
            "kubectl",
            &vec![
                "--context".to_string(),
                format!("k3d-{}", self.cluster_name),
                "apply".to_string(),
                "-f".to_string(),
                temp_file.to_string(),
            ],
        )?;
        Ok(())
    }
    /// Fetches a resource as YAML via `kubectl get -o yaml`.
    fn get_resource(&self, resource_type: &str, name: &str) -> Result<String, String> {
        println!(
            "Getting resource {}/{} from K3D cluster '{}'",
            resource_type, name, self.cluster_name
        );
        self.linux_host.execute_command(
            "kubectl",
            &vec![
                "--context".to_string(),
                format!("k3d-{}", self.cluster_name),
                "get".to_string(),
                resource_type.to_string(),
                name.to_string(),
                "-o".to_string(),
                "yaml".to_string(),
            ],
        )
    }
}
/// A (possibly remote) Linux host commands can be executed on.
pub struct LinuxHostTopology {
    /// Topology display name.
    name: String,
    /// Hostname the commands target (execution is local in this demo).
    host: String,
}
impl Capability for LinuxHostTopology {}
impl LinuxHostTopology {
    /// Pairs a display name with a target hostname.
    pub fn new(name: String, host: String) -> Self {
        LinuxHostTopology { name, host }
    }
}
impl Topology for LinuxHostTopology {
    fn name(&self) -> &str {
        self.name.as_str()
    }
}
impl CommandCapability for LinuxHostTopology {
    /// Runs the command locally via std::process and returns stdout on
    /// success or stderr on failure.
    fn execute_command(&self, command: &str, args: &Vec<String>) -> Result<String, String> {
        println!("Executing on {}: {} {:?}", self.host, command, args);
        // In a real implementation, this would SSH to the host and execute the command
        let output = Command::new(command)
            .args(args)
            .output()
            .map_err(|e| e.to_string())?;
        if !output.status.success() {
            return Err(String::from_utf8_lossy(&output.stderr).to_string());
        }
        Ok(String::from_utf8_lossy(&output.stdout).to_string())
    }
}
// --- Main Function Adapted ---
/// End-to-end demo: builds a Linux and a K3D Maestro, registers scores both
/// directly and via the TUI catalog, then orchestrates both.
fn main() {
    // --- Linux Host ---
    let linux_host = LinuxHostTopology::new("dev-machine".to_string(), "localhost".to_string());
    let mut linux_maestro = Maestro::new(linux_host);
    let df_score = CommandScore::new(
        "check-disk".to_string(),
        "df".to_string(),
        vec!["-h".to_string()],
    );
    // Registration uses the generic method, compiler checks CommandScore: Score<LinuxHostTopology>
    linux_maestro.register_score(df_score.clone()).unwrap(); // clone needed if df_score used later
    // --- K3D Host ---
    let k3d_host = LinuxHostTopology::new("k3d-host".to_string(), "localhost".to_string());
    let k3d_topology = K3DTopology::new(
        "dev-cluster".to_string(),
        k3d_host,
        "devcluster".to_string(),
    );
    let mut k3d_maestro = Maestro::new(k3d_topology);
    let nodes_score = CommandScore::new(
        "check-nodes".to_string(),
        "kubectl".to_string(),
        vec!["get".to_string(), "nodes".to_string()],
    );
    let nginx_score = K8sResourceScore::new(
        "deploy-nginx".to_string(),
        // ... manifest string ...
        r#"..."#.to_string(),
    );
    // Compiler checks CommandScore: Score<K3DTopology>
    k3d_maestro.register_score(nodes_score.clone()).unwrap();
    // Compiler checks K8sResourceScore: Score<K3DTopology>
    k3d_maestro.register_score(nginx_score.clone()).unwrap();
    // --- TUI Example Usage ---
    let mut tui = HarmonyTui::new();
    // Add score *definitions* to the TUI
    tui.add_available_score(Box::new(df_score));
    tui.add_available_score(Box::new(nodes_score));
    tui.add_available_score(Box::new(nginx_score));
    tui.display_scores();
    // NOTE(review): add_selected_score_to_maestro requires T: CommandCapability
    // + KubernetesCapability, and LinuxHostTopology lacks KubernetesCapability —
    // so the two linux_maestro calls below are rejected at *compile* time, not
    // at runtime as the comments further down suggest. Verify intended behavior.
    // Simulate user selecting score 0 (check-disk) and adding to linux_maestro
    match tui.add_selected_score_to_maestro(0, &mut linux_maestro) {
        Ok(_) => println!("Successfully registered check-disk to linux_maestro via TUI selection"),
        Err(e) => println!("Failed: {}", e), // Should succeed
    }
    // Simulate user selecting score 2 (deploy-nginx) and adding to linux_maestro
    match tui.add_selected_score_to_maestro(2, &mut linux_maestro) {
        Ok(_) => println!("Successfully registered deploy-nginx to linux_maestro via TUI selection"), // Should fail!
        Err(e) => println!("Correctly failed to add deploy-nginx to linux_maestro: {}", e),
        // The failure happens inside add_selected_score_to_maestro because the
        // maestro.register_score(ks.clone()) call fails the trait bound check
        // K8sResourceScore: Score<LinuxHostTopology> is false.
    }
    // Simulate user selecting score 2 (deploy-nginx) and adding to k3d_maestro
    match tui.add_selected_score_to_maestro(2, &mut k3d_maestro) {
        Ok(_) => println!("Successfully registered deploy-nginx to k3d_maestro via TUI selection"), // Should succeed
        Err(e) => println!("Failed: {}", e),
    }
    // --- Orchestration ---
    println!("\n--- Orchestrating Linux Maestro ---");
    linux_maestro.orchestrate().unwrap();
    println!("\n--- Orchestrating K3D Maestro ---");
    k3d_maestro.orchestrate().unwrap();
}

View File

@ -0,0 +1,129 @@
// NOTE(review): this whole file is a design sketch that does NOT compile as
// written; the blocking issues are flagged inline. Keeping it for the record
// of the design discussion only.
use std::marker::PhantomData; // NOTE(review): unused import — remove or use.
// Capability Trait Hierarchy
pub trait Capability {}
// Specific Capability Traits
pub trait ShellAccess: Capability {}
pub trait ContainerRuntime: Capability {}
pub trait KubernetesAccess: Capability {}
pub trait FileSystemAccess: Capability {}
// Topology Trait - Defines the core interface for infrastructure topologies
pub trait Topology {
    // Associated type meant to advertise the topology's capability set.
    type Capabilities: Capability;
    fn name(&self) -> &str;
}
// Score Trait - Defines the core interface for infrastructure transformation
pub trait Score {
    type RequiredCapabilities: Capability;
    type OutputTopology: Topology;
    fn apply<T: Topology>(&self, topology: T) -> Result<Self::OutputTopology, String>;
}
// Linux Host Topology
pub struct LinuxHostTopology;
impl Topology for LinuxHostTopology {
    // NOTE(review): `dyn ShellAccess + FileSystemAccess` is invalid — a trait
    // object may name only one non-auto trait (E0225).
    type Capabilities = dyn ShellAccess + FileSystemAccess;
    fn name(&self) -> &str {
        "Linux Host"
    }
}
impl ShellAccess for LinuxHostTopology {}
impl FileSystemAccess for LinuxHostTopology {}
// K3D Topology
pub struct K3DTopology;
impl Topology for K3DTopology {
    // NOTE(review): same E0225 problem as above (three non-auto traits).
    type Capabilities = dyn ContainerRuntime + KubernetesAccess + ShellAccess;
    fn name(&self) -> &str {
        "K3D Kubernetes Cluster"
    }
}
impl ContainerRuntime for K3DTopology {}
impl KubernetesAccess for K3DTopology {}
impl ShellAccess for K3DTopology {}
// Command Score - A score that requires shell access
pub struct CommandScore {
    command: String,
}
impl Score for CommandScore {
    type RequiredCapabilities = dyn ShellAccess;
    type OutputTopology = LinuxHostTopology;
    // NOTE(review): an impl method may not add a where-clause the trait's
    // `apply` does not declare (E0276); the bound belongs on the trait.
    fn apply<T: Topology>(&self, _topology: T) -> Result<Self::OutputTopology, String>
    where
        T: ShellAccess
    {
        // Simulate command execution
        println!("Executing command: {}", self.command);
        Ok(LinuxHostTopology)
    }
}
// Kubernetes Resource Score
pub struct K8sResourceScore {
    resource_definition: String,
}
impl Score for K8sResourceScore {
    type RequiredCapabilities = dyn KubernetesAccess;
    type OutputTopology = K3DTopology;
    // NOTE(review): `T: dyn KubernetesAccess` is not valid bound syntax
    // (should be `T: KubernetesAccess`), and the extra where-clause hits
    // E0276 as above.
    fn apply<T: Topology>(&self, _topology: T) -> Result<Self::OutputTopology, String>
    where
        T: dyn KubernetesAccess
    {
        // Simulate Kubernetes resource application
        println!("Applying K8s resource: {}", self.resource_definition);
        Ok(K3DTopology)
    }
}
// Maestro - The orchestration coordinator
pub struct Maestro;
impl Maestro {
    // Type-safe score application
    // NOTE(review): `T: S::RequiredCapabilities` is invalid — an associated
    // type cannot be used as a trait bound. Making RequiredCapabilities a
    // generic parameter of Score (as in the sibling sketches) avoids this.
    pub fn apply_score<T, S>(topology: T, score: S) -> Result<S::OutputTopology, String>
    where
        T: Topology,
        S: Score,
        T: S::RequiredCapabilities
    {
        score.apply(topology)
    }
}
fn main() {
    // Example usage demonstrating type-driven design
    let linux_host = LinuxHostTopology;
    let k3d_cluster = K3DTopology;
    // Command score on Linux host
    let command_score = CommandScore {
        command: "echo 'Hello, World!'".to_string(),
    };
    // NOTE(review): `result` / `k8s_result` are never read (unused warnings).
    let result = Maestro::apply_score(linux_host, command_score)
        .expect("Command score application failed");
    // K8s resource score on K3D cluster
    let k8s_score = K8sResourceScore {
        resource_definition: "apiVersion: v1\nkind: Pod\n...".to_string(),
    };
    let k8s_result = Maestro::apply_score(k3d_cluster, k8s_score)
        .expect("K8s resource score application failed");
}

View File

@ -0,0 +1,155 @@
mod main_right;
mod main_claude;
// Capability Traits
/// Marker trait every capability extends.
trait Capability {}
/// Ability to execute shell commands.
trait LinuxOperations: Capability {
    fn execute_command(&self, command: &str) -> Result<String, String>;
}
/// Ability to create/delete Kubernetes resources.
trait KubernetesOperations: Capability {
    fn create_resource(&self, resource: &str) -> Result<String, String>;
    fn delete_resource(&self, resource: &str) -> Result<String, String>;
}
// Topology Implementations
struct LinuxHostTopology;
impl LinuxOperations for LinuxHostTopology {
    fn execute_command(&self, command: &str) -> Result<String, String> {
        // Implementation for executing commands on a Linux host
        Ok(format!("Executed command: {}", command))
    }
}
impl Capability for LinuxHostTopology {}
struct K3DTopology;
impl KubernetesOperations for K3DTopology {
    fn create_resource(&self, resource: &str) -> Result<String, String> {
        // Implementation for creating Kubernetes resources in K3D
        Ok(format!("Created resource: {}", resource))
    }
    fn delete_resource(&self, resource: &str) -> Result<String, String> {
        // Implementation for deleting Kubernetes resources in K3D
        Ok(format!("Deleted resource: {}", resource))
    }
}
impl Capability for K3DTopology {}
// Score Implementations
struct K8sResourceScore {
    resource: String,
}
// NOTE(review): the `Score<T>` trait below requires `T: Capability + 'static`;
// this impl supplies only `T: KubernetesOperations` (no `'static`), so it does
// not satisfy the trait's declared bounds (E0277).
impl<T> Score<T> for K8sResourceScore
where
    T: KubernetesOperations,
{
    fn execute(&self, topology: &T) -> Result<String, String> {
        topology.create_resource(&self.resource)
    }
}
struct CommandScore {
    command: String,
}
impl<T> Score<T> for CommandScore
where
    T: LinuxOperations + 'static,
{
    fn execute(&self, topology: &T) -> Result<String, String> {
        topology.execute_command(&self.command)
    }
}
// Score Trait
/// A score executable against any capability-bearing topology `T`.
trait Score<T>
where
    T: Capability + 'static,
{
    fn execute(&self, topology: &T) -> Result<String, String>;
}
// Maestro Implementation
// NOTE(review): this design does not compile/work as intended:
// `Box<dyn Capability>` does not itself implement `Capability`, so
// `Score<Box<dyn Capability>>` violates the Score trait's bound; and the
// runtime downcast in execute_scores can never match (see below).
struct Maestro {
    scores: Vec<Box<dyn Score<Box<dyn Capability>>>>,
}
impl Maestro {
    fn new() -> Self {
        Maestro { scores: Vec::new() }
    }
    // NOTE(review): pushes `Box::new(score)` where `score` is already a
    // `Box<T>`, producing `Box<Box<T>>`, which does not implement the stored
    // trait-object type. Callers in main() also pass unboxed values.
    fn register_score<T>(&mut self, score: Box<T>)
    where
        T: Score<Box<dyn Capability>> + 'static,
    {
        self.scores.push(Box::new(score));
    }
    fn execute_scores<T>(&self, topology: &T) -> Result<Vec<String>, String>
    where
        T: Capability + 'static,
    {
        let mut results = Vec::new();
        for score in &self.scores {
            // NOTE(review): the stored Any type is never `Box<dyn Score<T>>`,
            // so this downcast_ref always returns None — no score ever runs.
            if let Some(score) = score.as_any().downcast_ref::<Box<dyn Score<T>>>() {
                results.push(score.execute(topology)?);
            }
        }
        Ok(results)
    }
}
// Helper trait for downcasting
/// Blanket `Any`-access shim for every `'static` type.
trait AsAny {
    fn as_any(&self) -> &dyn std::any::Any;
}
impl<T: 'static> AsAny for T {
    fn as_any(&self) -> &dyn std::any::Any {
        self
    }
}
// Main Function
/// Demo driver for the (broken) type-erased Maestro above.
fn main() {
    let mut maestro = Maestro::new();
    let k8s_score = K8sResourceScore {
        resource: "deployment.yaml".to_string(),
    };
    // NOTE(review): register_score expects `Box<T>` but receives a bare value
    // here and below — these calls do not type-check as written.
    maestro.register_score(k8s_score);
    let command_score = CommandScore {
        command: "ls -l".to_string(),
    };
    maestro.register_score(command_score);
    let linux_topology = LinuxHostTopology;
    let k3d_topology = K3DTopology;
    // NOTE(review): even if registration compiled, execute_scores' downcast
    // never matches, so both result lists would be empty.
    let linux_results = maestro.execute_scores(&linux_topology).unwrap();
    println!("Linux Topology Results:");
    for result in linux_results {
        println!("{}", result);
    }
    let k3d_results = maestro.execute_scores(&k3d_topology).unwrap();
    println!("K3D Topology Results:");
    for result in k3d_results {
        println!("{}", result);
    }
}

View File

@ -0,0 +1,9 @@
# Workspace member holding one of the topology/capability design experiments.
[package]
name = "example-topology2"
edition = "2024"
version.workspace = true
readme.workspace = true
license.workspace = true
publish = false
# No dependencies yet.
[dependencies]

View File

@ -0,0 +1,183 @@
// Clean capability-based design using type parameters
/// Marker trait every capability extends.
trait Capability {}
/// Kubernetes operations a topology may offer.
trait K8sCapability: Capability {
    fn deploy_k8s_resource(&self, resource_yaml: &str);
    fn execute_kubectl(&self, command: &str) -> String;
}
/// Host-level Linux operations a topology may offer.
trait LinuxCapability: Capability {
    fn execute_command(&self, command: &str, args: &[&str]);
    fn download_file(&self, url: &str, destination: &str) -> Result<(), String>;
}
/// Load-balancer management a topology may offer.
trait LoadBalancerCapability: Capability {
    fn configure_load_balancer(&self, services: &[&str], port: u16);
    fn get_load_balancer_status(&self) -> String;
}
// Score trait with capability type parameter
/// A score parameterized by the (possibly unsized, e.g. `dyn`) capability
/// it needs to run.
trait Score<C: ?Sized> {
    fn execute(&self, capability: &C) -> String;
}
// Topology implementations with marker trait
trait Topology {}
/// k3d cluster: Kubernetes only.
struct K3DTopology {}
impl Topology for K3DTopology {}
impl Capability for K3DTopology {}
impl K8sCapability for K3DTopology {
    // Stubs: bodies intentionally `todo!()` in this sketch, so the unused
    // parameters trigger rustc warnings until implemented.
    fn deploy_k8s_resource(&self, resource_yaml: &str) {
        todo!()
    }
    fn execute_kubectl(&self, command: &str) -> String {
        todo!()
    }
    // Implementation...
}
/// Plain Linux host: shell/file operations only.
struct LinuxTopology {}
impl Topology for LinuxTopology {}
impl Capability for LinuxTopology {}
impl LinuxCapability for LinuxTopology {
    fn execute_command(&self, command: &str, args: &[&str]) {
        todo!()
    }
    fn download_file(&self, url: &str, destination: &str) -> Result<(), String> {
        todo!()
    }
    // Implementation...
}
/// HA OKD cluster: offers Kubernetes, Linux AND load-balancer capabilities.
struct OKDHaClusterTopology {}
impl Topology for OKDHaClusterTopology {}
impl Capability for OKDHaClusterTopology {}
impl K8sCapability for OKDHaClusterTopology {
    fn deploy_k8s_resource(&self, resource_yaml: &str) {
        todo!()
    }
    fn execute_kubectl(&self, command: &str) -> String {
        todo!()
    }
    // Implementation...
}
impl LinuxCapability for OKDHaClusterTopology {
    fn execute_command(&self, command: &str, args: &[&str]) {
        todo!()
    }
    fn download_file(&self, url: &str, destination: &str) -> Result<(), String> {
        todo!()
    }
    // Implementation...
}
impl LoadBalancerCapability for OKDHaClusterTopology {
    fn configure_load_balancer(&self, services: &[&str], port: u16) {
        todo!()
    }
    fn get_load_balancer_status(&self) -> String {
        todo!()
    }
    // Implementation...
}
// Score implementations
/// Deploys a LAMP stack; needs only Kubernetes access.
struct LAMPScore {}
impl Score<dyn K8sCapability> for LAMPScore {
    fn execute(&self, capability: &dyn K8sCapability) -> String {
        todo!()
        // Implementation...
    }
}
/// Runs a downloaded binary; needs only Linux host access.
struct BinaryScore {}
impl Score<dyn LinuxCapability> for BinaryScore {
    fn execute(&self, capability: &dyn LinuxCapability) -> String {
        todo!()
        // Implementation...
    }
}
/// Configures a load balancer; needs load-balancer access.
struct LoadBalancerScore {}
impl Score<dyn LoadBalancerCapability> for LoadBalancerScore {
    fn execute(&self, capability: &dyn LoadBalancerCapability) -> String {
        todo!()
        // Implementation...
    }
}
// Generic Maestro
/// Orchestrator bound to one topology; scores are stored as closures that
/// already carry their capability requirement (see the extensions below).
struct Maestro<T> {
    topology: T,
    scores: Vec<Box<dyn FnMut(&T) -> String>>,
}
impl<T: 'static> Maestro<T> {
    /// Wraps a topology with an empty score list.
    fn new(topology: T) -> Self {
        Maestro {
            topology,
            scores: Vec::new(),
        }
    }
    /// Executes every registered score in order, collecting their outputs.
    fn interpret_all(&mut self) -> Vec<String> {
        let topology = &self.topology;
        let mut outputs = Vec::with_capacity(self.scores.len());
        for score in self.scores.iter_mut() {
            outputs.push(score(topology));
        }
        outputs
    }
}
// Capability-specific extensions
// Each impl exists only when T has the matching capability, so registering an
// incompatible score is a compile error (see main()).
impl<T: K8sCapability + 'static> Maestro<T> {
    /// Registers a Kubernetes score; the closure upcasts T to the dyn capability.
    fn register_k8s_score<S: Score<dyn K8sCapability> + 'static>(&mut self, score: S) {
        let score_box = Box::new(move |topology: &T| {
            score.execute(topology as &dyn K8sCapability)
        });
        self.scores.push(score_box);
    }
}
impl<T: LinuxCapability + 'static> Maestro<T> {
    /// Registers a Linux-host score.
    fn register_linux_score<S: Score<dyn LinuxCapability> + 'static>(&mut self, score: S) {
        let score_box = Box::new(move |topology: &T| {
            score.execute(topology as &dyn LinuxCapability)
        });
        self.scores.push(score_box);
    }
}
impl<T: LoadBalancerCapability + 'static> Maestro<T> {
    /// Registers a load-balancer score.
    fn register_lb_score<S: Score<dyn LoadBalancerCapability> + 'static>(&mut self, score: S) {
        let score_box = Box::new(move |topology: &T| {
            score.execute(topology as &dyn LoadBalancerCapability)
        });
        self.scores.push(score_box);
    }
}
/// Demonstrates that score registration is gated by topology capabilities
/// at compile time (see the commented-out incompatible calls).
fn main() {
    // Example usage
    let k3d = K3DTopology {};
    let mut k3d_maestro = Maestro::new(k3d);
    // These will compile because K3D implements K8sCapability
    k3d_maestro.register_k8s_score(LAMPScore {});
    // This would not compile because K3D doesn't implement LoadBalancerCapability
    // k3d_maestro.register_lb_score(LoadBalancerScore {});
    let linux = LinuxTopology {};
    let mut linux_maestro = Maestro::new(linux);
    // This will compile because Linux implements LinuxCapability
    linux_maestro.register_linux_score(BinaryScore {});
    // This would not compile because Linux doesn't implement K8sCapability
    // linux_maestro.register_k8s_score(LAMPScore {});
}

View File

@ -0,0 +1,324 @@
/// Walks through running each score on every topology that offers the
/// capability it needs; the commented-out lines show rejected combinations.
fn main() {
    // Create various topologies
    let okd_topology = OKDHaClusterTopology::new();
    let k3d_topology = K3DTopology::new();
    let linux_topology = LinuxTopology::new();
    // Create scores
    let lamp_score = LAMPScore::new("MySQL 8.0", "PHP 8.1", "Apache 2.4");
    let binary_score = BinaryScore::new("https://example.com/binary", vec!["--arg1", "--arg2"]);
    let load_balancer_score = LoadBalancerScore::new(vec!["service1", "service2"], 80);
    // Example 1: Running LAMP stack on OKD
    println!("\n=== Deploying LAMP stack on OKD cluster ===");
    lamp_score.execute(&okd_topology);
    // Example 2: Running LAMP stack on K3D
    println!("\n=== Deploying LAMP stack on K3D cluster ===");
    lamp_score.execute(&k3d_topology);
    // Example 3: Running binary on Linux host
    println!("\n=== Running binary on Linux host ===");
    binary_score.execute(&linux_topology);
    // Example 4: Running binary on OKD (which can also run Linux commands)
    println!("\n=== Running binary on OKD host ===");
    binary_score.execute(&okd_topology);
    // Example 5: Load balancer configuration on OKD
    println!("\n=== Configuring load balancer on OKD ===");
    load_balancer_score.execute(&okd_topology);
    // The following would not compile:
    // load_balancer_score.execute(&k3d_topology); // K3D doesn't implement LoadBalancerCapability
    // lamp_score.execute(&linux_topology); // Linux doesn't implement K8sCapability
}
// Base Topology trait
/// Common interface every infrastructure target implements.
trait Topology {
fn name(&self) -> &str;
}
// Define capabilities
/// Ability to deploy Kubernetes resources and run kubectl.
trait K8sCapability {
fn deploy_k8s_resource(&self, resource_yaml: &str);
fn execute_kubectl(&self, command: &str) -> String;
}
/// OKD extends plain Kubernetes with the `oc` client.
trait OKDCapability: K8sCapability {
fn execute_oc(&self, command: &str) -> String;
}
/// Ability to run shell commands and fetch files on a Linux host.
trait LinuxCapability {
fn execute_command(&self, command: &str, args: &[&str]) -> String;
fn download_file(&self, url: &str, destination: &str) -> Result<(), String>;
}
/// Ability to front services with a load balancer.
trait LoadBalancerCapability {
fn configure_load_balancer(&self, services: &[&str], port: u16);
fn get_load_balancer_status(&self) -> String;
}
/// Ability to open/close firewall ports.
trait FirewallCapability {
fn open_port(&self, port: u16, protocol: &str);
fn close_port(&self, port: u16, protocol: &str);
}
/// Ability to publish a hostname route for a service.
trait RouterCapability {
fn configure_route(&self, service: &str, hostname: &str);
}
// Topology implementations
/// Highly-available OKD cluster: implements every capability trait in this
/// demo (k8s, oc, Linux, load balancer, firewall, router).
struct OKDHaClusterTopology {
cluster_name: String,
}
impl OKDHaClusterTopology {
fn new() -> Self {
Self {
cluster_name: "okd-ha-cluster".to_string(),
}
}
}
impl Topology for OKDHaClusterTopology {
fn name(&self) -> &str {
&self.cluster_name
}
}
// All capability impls below only print what they would do and return
// canned values — this file is a design exploration, not real automation.
impl K8sCapability for OKDHaClusterTopology {
fn deploy_k8s_resource(&self, resource_yaml: &str) {
println!("Deploying K8s resource on OKD cluster: {}", resource_yaml);
}
fn execute_kubectl(&self, command: &str) -> String {
println!("Executing kubectl command on OKD cluster: {}", command);
"kubectl command output".to_string()
}
}
impl OKDCapability for OKDHaClusterTopology {
fn execute_oc(&self, command: &str) -> String {
println!("Executing oc command on OKD cluster: {}", command);
"oc command output".to_string()
}
}
impl LinuxCapability for OKDHaClusterTopology {
// NOTE(review): declared to return String but ends in todo!() — calling
// this panics at runtime after printing the message.
fn execute_command(&self, command: &str, args: &[&str]) -> String {
println!(
"Executing command '{}' with args {:?} on OKD node",
command, args
);
todo!()
}
fn download_file(&self, url: &str, destination: &str) -> Result<(), String> {
println!(
"Downloading file from {} to {} on OKD node",
url, destination
);
Ok(())
}
}
impl LoadBalancerCapability for OKDHaClusterTopology {
fn configure_load_balancer(&self, services: &[&str], port: u16) {
println!(
"Configuring load balancer for services {:?} on port {} in OKD",
services, port
);
}
fn get_load_balancer_status(&self) -> String {
"OKD Load Balancer: HEALTHY".to_string()
}
}
impl FirewallCapability for OKDHaClusterTopology {
fn open_port(&self, port: u16, protocol: &str) {
println!(
"Opening port {} with protocol {} on OKD firewall",
port, protocol
);
}
fn close_port(&self, port: u16, protocol: &str) {
println!(
"Closing port {} with protocol {} on OKD firewall",
port, protocol
);
}
}
impl RouterCapability for OKDHaClusterTopology {
fn configure_route(&self, service: &str, hostname: &str) {
println!(
"Configuring route for service {} with hostname {} on OKD",
service, hostname
);
}
}
/// Local K3D development cluster; provides only Kubernetes capabilities.
struct K3DTopology {
    cluster_name: String,
}

impl K3DTopology {
    fn new() -> Self {
        K3DTopology {
            cluster_name: String::from("k3d-local"),
        }
    }
}

impl Topology for K3DTopology {
    fn name(&self) -> &str {
        self.cluster_name.as_str()
    }
}

impl K8sCapability for K3DTopology {
    /// Simulates applying a manifest against the K3D cluster.
    fn deploy_k8s_resource(&self, resource_yaml: &str) {
        println!("Deploying K8s resource on K3D cluster: {}", resource_yaml);
    }

    /// Simulates a kubectl invocation and returns canned output.
    fn execute_kubectl(&self, command: &str) -> String {
        println!("Executing kubectl command on K3D cluster: {}", command);
        String::from("kubectl command output from K3D")
    }
}
/// Bare Linux host; provides only Linux shell capabilities.
struct LinuxTopology {
    hostname: String,
}

impl LinuxTopology {
    fn new() -> Self {
        LinuxTopology {
            hostname: String::from("linux-host"),
        }
    }
}

impl Topology for LinuxTopology {
    fn name(&self) -> &str {
        self.hostname.as_str()
    }
}

impl LinuxCapability for LinuxTopology {
    /// Prints the intended invocation, then panics: this stub is not
    /// implemented yet (`todo!`).
    fn execute_command(&self, command: &str, args: &[&str]) -> String {
        println!(
            "Executing command '{}' with args {:?} on Linux host",
            command, args
        );
        todo!()
    }

    /// Simulates fetching `url` into `destination`; always succeeds.
    fn download_file(&self, url: &str, destination: &str) -> Result<(), String> {
        println!(
            "Downloading file from {} to {} on Linux host",
            url, destination
        );
        Ok(())
    }
}
// Score implementations
/// Deploys a LAMP (Linux/Apache/MySQL/PHP) stack onto any k8s-capable topology.
struct LAMPScore {
    mysql_version: String,
    php_version: String,
    apache_version: String,
}

impl LAMPScore {
    fn new(mysql_version: &str, php_version: &str, apache_version: &str) -> Self {
        Self {
            mysql_version: mysql_version.to_owned(),
            php_version: php_version.to_owned(),
            apache_version: apache_version.to_owned(),
        }
    }

    /// Applies the four LAMP manifests in order, then reports pod status.
    /// NOTE(review): the version fields are stored but not used here yet.
    fn execute<T: K8sCapability>(&self, topology: &T) {
        for manifest in [
            "mysql-deployment.yaml",
            "php-deployment.yaml",
            "apache-deployment.yaml",
            "lamp-service.yaml",
        ] {
            topology.deploy_k8s_resource(manifest);
        }
        let status = topology.execute_kubectl("get pods -l app=lamp");
        println!("LAMP deployment status: {}", status);
    }
}
/// Downloads a binary to the target host and runs it with fixed arguments.
struct BinaryScore {
    url: String,
    args: Vec<String>,
}

impl BinaryScore {
    fn new(url: &str, args: Vec<&str>) -> Self {
        Self {
            url: url.to_owned(),
            args: args.into_iter().map(String::from).collect(),
        }
    }

    /// Fetches the binary to /tmp, then executes it on the Linux-capable topology.
    fn execute<T: LinuxCapability>(&self, topology: &T) {
        let destination = "/tmp/binary";
        if let Err(e) = topology.download_file(&self.url, destination) {
            println!("Failed to download binary: {}", e);
            return;
        }
        println!("Binary downloaded successfully");
        // Convert owned args back to borrowed &str for the capability API.
        let args: Vec<&str> = self.args.iter().map(String::as_str).collect();
        topology.execute_command(destination, &args);
        println!("Binary execution completed");
    }
}
/// Configures a load balancer for a list of services on one port.
struct LoadBalancerScore {
    services: Vec<String>,
    port: u16,
}

impl LoadBalancerScore {
    fn new(services: Vec<&str>, port: u16) -> Self {
        Self {
            services: services.into_iter().map(String::from).collect(),
            port,
        }
    }

    /// Pushes the configuration to the topology and prints its status.
    fn execute<T: LoadBalancerCapability>(&self, topology: &T) {
        println!("Configuring load balancer for services");
        // Borrowed view of the owned service names for the capability API.
        let services: Vec<&str> = self.services.iter().map(String::as_str).collect();
        topology.configure_load_balancer(&services, self.port);
        let status = topology.get_load_balancer_status();
        println!("Load balancer status: {}", status);
    }
}

View File

@ -0,0 +1,34 @@
// Design exploration: Score carries an associated Topology type while
// Maestro stores a type-erased Box<dyn Topology>.
fn main() {}
/// Marker trait for deployment targets.
trait Topology {}
struct DummyTopology {}
impl Topology for DummyTopology {}
impl Topology for LampTopology {}
struct LampTopology {}
/// Orchestrator owning a type-erased topology.
struct Maestro {
topology: Box<dyn Topology>,
}
/// A unit of work tied to a specific topology type via an associated type.
trait Score {
type Topology: Topology;
fn execute(&self, topology: &Self::Topology);
}
/// A score bound to `LampTopology` through the `Score` associated type.
struct K8sScore {}

impl Score for K8sScore {
    type Topology = LampTopology;

    // Fixed: the original signature was `&Box<dyn Self::Topology>`, which is
    // invalid Rust (`Self::Topology` is an associated type, not a trait) and
    // does not match the trait's `fn execute(&self, topology: &Self::Topology)`.
    fn execute(&self, _topology: &Self::Topology) {
        todo!()
    }
}
impl Maestro {
/// Run a score against the stored topology.
/// NOTE(review): `&self.topology` is `&Box<dyn Topology>` while the score
/// expects `&T` — this exploration likely does not compile as written;
/// confirm before reusing the pattern.
pub fn execute<T: Topology>(&self, score: Box<dyn Score<Topology = T>>) {
score.execute(&self.topology);
}
}

View File

@ -0,0 +1,76 @@
/// Demo: Maestro is generic over its topology type, so only scores whose
/// `RequiredTopology` matches can be executed (checked at compile time).
///
/// NOTE(review): `lamp_topology` is created but never used below;
/// `DockerTopology::topology_type` is `todo!()`, so the execute call will
/// panic at runtime when K8sScore prints the topology type.
fn main() {
// Example usage
let lamp_topology = LampTopology {};
let k8s_score = K8sScore {};
let docker_topology = DockerTopology{};
// Type-safe execution
let maestro = Maestro::new(Box::new(docker_topology));
maestro.execute(&k8s_score); // This will work
// This would fail at compile time if we tried:
// let dummy_topology = DummyTopology {};
// let maestro = Maestro::new(Box::new(dummy_topology));
// maestro.execute(&k8s_score); // Error: expected LampTopology, found DummyTopology
}
// Base trait for all topologies
trait Topology {
// Common topology methods could go here
/// Human-readable kind of this topology (e.g. "LAMP", "Dummy").
fn topology_type(&self) -> &str;
}
struct DummyTopology {}
impl Topology for DummyTopology {
fn topology_type(&self) -> &str { "Dummy" }
}
struct LampTopology {}
impl Topology for LampTopology {
fn topology_type(&self) -> &str { "LAMP" }
}
struct DockerTopology {}
impl Topology for DockerTopology {
// NOTE(review): unimplemented stub — calling this panics via todo!().
fn topology_type(&self) -> &str {
todo!("DockerTopology")
}
}
// The Score trait with an associated type for the required topology
/// A unit of work that can only run on its `RequiredTopology` type.
trait Score {
type RequiredTopology: Topology + ?Sized;
fn execute(&self, topology: &Self::RequiredTopology);
fn score_type(&self) -> &str;
}
// A score that can only run on a DockerTopology (via RequiredTopology).
struct K8sScore {}

impl Score for K8sScore {
    type RequiredTopology = DockerTopology;

    fn execute(&self, topology: &Self::RequiredTopology) {
        let kind = topology.topology_type();
        println!("Executing K8sScore on {} topology", kind);
        // Implementation details...
    }

    fn score_type(&self) -> &str {
        "K8s"
    }
}
// A generic maestro bound to one topology type. Only scores whose
// `RequiredTopology` matches `T` can be executed — enforced at compile time.
struct Maestro<T: Topology + ?Sized> {
    topology: Box<T>,
}

impl<T: Topology + ?Sized> Maestro<T> {
    pub fn new(topology: Box<T>) -> Self {
        Self { topology }
    }

    /// Run a score that requires this maestro's exact topology type.
    pub fn execute<S>(&self, score: &S)
    where
        S: Score<RequiredTopology = T>,
    {
        println!("Maestro executing {} score", score.score_type());
        score.execute(&*self.topology);
    }
}

View File

@ -0,0 +1,360 @@
/// Demo: fully dynamic dispatch. Scores are boxed `dyn Score` values and each
/// one decides at runtime (via downcasting) whether a topology supports it,
/// returning Err instead of failing to compile.
fn main() {
// Create topologies
let okd_topology = OKDHaClusterTopology::new();
let k3d_topology = K3DTopology::new();
let linux_topology = LinuxTopology::new();
// Create scores - boxing them as trait objects for dynamic dispatch
let scores: Vec<Box<dyn Score>> = vec![
Box::new(LAMPScore::new("MySQL 8.0", "PHP 8.1", "Apache 2.4")),
Box::new(BinaryScore::new("https://example.com/binary", vec!["--arg1", "--arg2"])),
Box::new(LoadBalancerScore::new(vec!["service1", "service2"], 80)),
];
// Running scores on OKD topology (which has all capabilities)
println!("\n=== Running all scores on OKD HA Cluster ===");
for score in &scores {
match score.execute(&okd_topology) {
Ok(result) => println!("Score executed successfully: {}", result),
Err(e) => println!("Failed to execute score: {}", e),
}
}
// Running scores on K3D topology (only has K8s capability)
println!("\n=== Running scores on K3D Cluster ===");
for score in &scores {
match score.execute(&k3d_topology) {
Ok(result) => println!("Score executed successfully: {}", result),
Err(e) => println!("Failed to execute score: {}", e),
}
}
// Running scores on Linux topology (only has Linux capability)
println!("\n=== Running scores on Linux Host ===");
for score in &scores {
match score.execute(&linux_topology) {
Ok(result) => println!("Score executed successfully: {}", result),
Err(e) => println!("Failed to execute score: {}", e),
}
}
}
// Base Topology trait
/// Every topology is `Any` so scores can downcast it at runtime.
trait Topology: Any {
fn name(&self) -> &str;
// This method allows us to get type information at runtime
fn as_any(&self) -> &dyn Any;
}
// Use Any trait for runtime type checking
use std::any::Any;
// Define capabilities
trait K8sCapability {
fn deploy_k8s_resource(&self, resource_yaml: &str);
fn execute_kubectl(&self, command: &str) -> String;
}
trait OKDCapability: K8sCapability {
fn execute_oc(&self, command: &str) -> String;
}
trait LinuxCapability {
fn execute_command(&self, command: &str, args: &[&str]);
fn download_file(&self, url: &str, destination: &str) -> Result<(), String>;
}
trait LoadBalancerCapability {
fn configure_load_balancer(&self, services: &[&str], port: u16);
fn get_load_balancer_status(&self) -> String;
}
// Base Score trait with dynamic dispatch
/// Object-safe score: capability checks happen at runtime and failures are
/// reported through the Result instead of a compile error.
trait Score {
// Generic execute method that takes any topology
fn execute(&self, topology: &dyn Topology) -> Result<String, String>;
// Optional method to get score type for better error messages
fn score_type(&self) -> &str;
}
// Topology implementations
/// OKD HA cluster: implements k8s, oc, Linux, and load-balancer capabilities.
/// All impls print their intent and return canned values (demo code).
struct OKDHaClusterTopology {
cluster_name: String,
}
impl OKDHaClusterTopology {
fn new() -> Self {
Self { cluster_name: "okd-ha-cluster".to_string() }
}
}
impl Topology for OKDHaClusterTopology {
fn name(&self) -> &str {
&self.cluster_name
}
// Enables runtime downcasting by scores.
fn as_any(&self) -> &dyn Any {
self
}
}
impl K8sCapability for OKDHaClusterTopology {
fn deploy_k8s_resource(&self, resource_yaml: &str) {
println!("Deploying K8s resource on OKD cluster: {}", resource_yaml);
}
fn execute_kubectl(&self, command: &str) -> String {
println!("Executing kubectl command on OKD cluster: {}", command);
"kubectl command output".to_string()
}
}
impl OKDCapability for OKDHaClusterTopology {
fn execute_oc(&self, command: &str) -> String {
println!("Executing oc command on OKD cluster: {}", command);
"oc command output".to_string()
}
}
impl LinuxCapability for OKDHaClusterTopology {
fn execute_command(&self, command: &str, args: &[&str]) {
println!("Executing command '{}' with args {:?} on OKD node", command, args);
}
fn download_file(&self, url: &str, destination: &str) -> Result<(), String> {
println!("Downloading file from {} to {} on OKD node", url, destination);
Ok(())
}
}
impl LoadBalancerCapability for OKDHaClusterTopology {
fn configure_load_balancer(&self, services: &[&str], port: u16) {
println!("Configuring load balancer for services {:?} on port {} in OKD", services, port);
}
fn get_load_balancer_status(&self) -> String {
"OKD Load Balancer: HEALTHY".to_string()
}
}
/// Local K3D cluster for the dynamic-dispatch demo; k8s-capable only.
struct K3DTopology {
    cluster_name: String,
}

impl K3DTopology {
    fn new() -> Self {
        K3DTopology {
            cluster_name: String::from("k3d-local"),
        }
    }
}

impl Topology for K3DTopology {
    fn name(&self) -> &str {
        self.cluster_name.as_str()
    }

    /// Exposes `self` as `&dyn Any` so scores can downcast at run time.
    fn as_any(&self) -> &dyn Any {
        self
    }
}

impl K8sCapability for K3DTopology {
    fn deploy_k8s_resource(&self, resource_yaml: &str) {
        println!("Deploying K8s resource on K3D cluster: {}", resource_yaml);
    }

    fn execute_kubectl(&self, command: &str) -> String {
        println!("Executing kubectl command on K3D cluster: {}", command);
        String::from("kubectl command output from K3D")
    }
}
/// Plain Linux host for the dynamic-dispatch demo; Linux-capable only.
struct LinuxTopology {
    hostname: String,
}

impl LinuxTopology {
    fn new() -> Self {
        LinuxTopology {
            hostname: String::from("linux-host"),
        }
    }
}

impl Topology for LinuxTopology {
    fn name(&self) -> &str {
        self.hostname.as_str()
    }

    /// Exposes `self` as `&dyn Any` so scores can downcast at run time.
    fn as_any(&self) -> &dyn Any {
        self
    }
}

impl LinuxCapability for LinuxTopology {
    fn execute_command(&self, command: &str, args: &[&str]) {
        println!("Executing command '{}' with args {:?} on Linux host", command, args);
    }

    fn download_file(&self, url: &str, destination: &str) -> Result<(), String> {
        println!("Downloading file from {} to {} on Linux host", url, destination);
        Ok(())
    }
}
// Score implementations using dynamic capability checks
/// Deploys a LAMP stack on any topology that can be downcast to a k8s-capable type.
struct LAMPScore {
mysql_version: String,
php_version: String,
apache_version: String,
}
impl LAMPScore {
fn new(mysql_version: &str, php_version: &str, apache_version: &str) -> Self {
Self {
mysql_version: mysql_version.to_string(),
php_version: php_version.to_string(),
apache_version: apache_version.to_string(),
}
}
// Helper method for typesafe execution
fn execute_with_k8s(&self, topology: &dyn K8sCapability) -> String {
println!("Deploying LAMP stack with MySQL {}, PHP {}, Apache {}",
self.mysql_version, self.php_version, self.apache_version);
// Deploy MySQL
topology.deploy_k8s_resource("mysql-deployment.yaml");
// Deploy PHP
topology.deploy_k8s_resource("php-deployment.yaml");
// Deploy Apache
topology.deploy_k8s_resource("apache-deployment.yaml");
// Create service
topology.deploy_k8s_resource("lamp-service.yaml");
// Check deployment
let status = topology.execute_kubectl("get pods -l app=lamp");
format!("LAMP deployment status: {}", status)
}
}
impl Score for LAMPScore {
// NOTE(review): despite the comment below, this downcasts to *concrete*
// topology types, not to the K8sCapability trait — every new k8s-capable
// topology requires editing this match chain (and those of other scores).
fn execute(&self, topology: &dyn Topology) -> Result<String, String> {
// Try to downcast to K8sCapability
if let Some(k8s_topology) = topology.as_any().downcast_ref::<OKDHaClusterTopology>() {
Ok(self.execute_with_k8s(k8s_topology))
} else if let Some(k8s_topology) = topology.as_any().downcast_ref::<K3DTopology>() {
Ok(self.execute_with_k8s(k8s_topology))
} else {
Err(format!("LAMPScore requires K8sCapability but topology {} doesn't provide it",
topology.name()))
}
}
fn score_type(&self) -> &str {
"LAMP"
}
}
/// Downloads and runs a binary on any topology downcastable to a Linux-capable type.
struct BinaryScore {
url: String,
args: Vec<String>,
}
impl BinaryScore {
fn new(url: &str, args: Vec<&str>) -> Self {
Self {
url: url.to_string(),
args: args.iter().map(|s| s.to_string()).collect(),
}
}
// Helper method for typesafe execution
fn execute_with_linux(&self, topology: &dyn LinuxCapability) -> Result<String, String> {
let destination = "/tmp/binary";
// Download the binary
println!("Preparing to run binary from {}", self.url);
match topology.download_file(&self.url, destination) {
Ok(_) => {
println!("Binary downloaded successfully");
// Convert args to slice of &str
let args: Vec<&str> = self.args.iter().map(|s| s.as_str()).collect();
// Execute the binary
topology.execute_command(destination, &args);
Ok("Binary execution completed successfully".to_string())
},
Err(e) => {
Err(format!("Failed to download binary: {}", e))
}
}
}
}
impl Score for BinaryScore {
// NOTE(review): downcasts to concrete topology types rather than the
// LinuxCapability trait — new Linux-capable topologies require editing this chain.
fn execute(&self, topology: &dyn Topology) -> Result<String, String> {
// Try to downcast to LinuxCapability
if let Some(linux_topology) = topology.as_any().downcast_ref::<OKDHaClusterTopology>() {
self.execute_with_linux(linux_topology)
} else if let Some(linux_topology) = topology.as_any().downcast_ref::<LinuxTopology>() {
self.execute_with_linux(linux_topology)
} else {
Err(format!("BinaryScore requires LinuxCapability but topology {} doesn't provide it",
topology.name()))
}
}
fn score_type(&self) -> &str {
"Binary"
}
}
/// Configures a load balancer; only succeeds on topologies downcastable to
/// a load-balancer-capable concrete type.
struct LoadBalancerScore {
services: Vec<String>,
port: u16,
}
impl LoadBalancerScore {
fn new(services: Vec<&str>, port: u16) -> Self {
Self {
services: services.iter().map(|s| s.to_string()).collect(),
port,
}
}
// Helper method for typesafe execution
fn execute_with_lb(&self, topology: &dyn LoadBalancerCapability) -> String {
println!("Configuring load balancer for services");
// Convert services to slice of &str
let services: Vec<&str> = self.services.iter().map(|s| s.as_str()).collect();
// Configure load balancer
topology.configure_load_balancer(&services, self.port);
// Check status
let status = topology.get_load_balancer_status();
format!("Load balancer configured successfully. Status: {}", status)
}
}
impl Score for LoadBalancerScore {
fn execute(&self, topology: &dyn Topology) -> Result<String, String> {
// Only OKDHaClusterTopology implements LoadBalancerCapability
if let Some(lb_topology) = topology.as_any().downcast_ref::<OKDHaClusterTopology>() {
Ok(self.execute_with_lb(lb_topology))
} else {
Err(format!("LoadBalancerScore requires LoadBalancerCapability but topology {} doesn't provide it",
topology.name()))
}
}
fn score_type(&self) -> &str {
"LoadBalancer"
}
}

5
check.sh Normal file
View File

@ -0,0 +1,5 @@
#!/bin/sh
# CI-style gate: abort on the first failing command.
set -e
# Type-check every target and feature combo; --keep-going surfaces all errors at once.
cargo check --all-targets --all-features --keep-going
# Verify formatting without modifying files.
cargo fmt --check
# Run the full test suite.
cargo test

File diff suppressed because one or more lines are too long

After

Width:  |  Height:  |  Size: 1.1 MiB

19
examples/cli/Cargo.toml Normal file
View File

@ -0,0 +1,19 @@
# Example crate exercising the Harmony CLI; publish = false keeps it off crates.io.
[package]
name = "example-cli"
edition = "2024"
version.workspace = true
readme.workspace = true
license.workspace = true
publish = false
[dependencies]
harmony = { path = "../../harmony" }
harmony_cli = { path = "../../harmony_cli" }
harmony_types = { path = "../../harmony_types" }
cidr = { workspace = true }
tokio = { workspace = true }
harmony_macros = { path = "../../harmony_macros" }
log = { workspace = true }
env_logger = { workspace = true }
url = { workspace = true }
# assert_cmd: helper for driving the built binary from tests — confirm usage in this crate's tests.
assert_cmd = "2.0.16"

20
examples/cli/src/main.rs Normal file
View File

@ -0,0 +1,20 @@
use harmony::{
inventory::Inventory,
maestro::Maestro,
modules::dummy::{ErrorScore, PanicScore, SuccessScore},
topology::LocalhostTopology,
};
/// Minimal CLI example: registers the three dummy scores (success, error,
/// panic) against the localhost topology and hands control to the Harmony CLI.
#[tokio::main]
async fn main() {
let inventory = Inventory::autoload();
let topology = LocalhostTopology::new();
let mut maestro = Maestro::initialize(inventory, topology).await.unwrap();
maestro.register_all(vec![
Box::new(SuccessScore {}),
Box::new(ErrorScore {}),
Box::new(PanicScore {}),
]);
harmony_cli::init(maestro, None).await.unwrap();
}

View File

@ -18,3 +18,4 @@ kube = "0.98.0"
k8s-openapi = { version = "0.24.0", features = [ "v1_30" ] }
http = "1.2.0"
serde_yaml = "0.9.34"
inquire.workspace = true

View File

@ -1,20 +1,32 @@
use std::collections::BTreeMap;
use harmony_macros::yaml;
use inquire::Confirm;
use k8s_openapi::{
api::{
apps::v1::{Deployment, DeploymentSpec},
core::v1::{Container, Node, Pod, PodSpec, PodTemplateSpec},
core::v1::{Container, PodSpec, PodTemplateSpec},
},
apimachinery::pkg::apis::meta::v1::LabelSelector,
};
use kube::{
Api, Client, Config, ResourceExt,
api::{ListParams, ObjectMeta, PostParams},
Api, Client, ResourceExt,
api::{ObjectMeta, PostParams},
};
#[tokio::main]
async fn main() {
let confirmation = Confirm::new(
"This will install various ressources to your default kubernetes cluster. Are you sure?",
)
.with_default(false)
.prompt()
.expect("Unexpected prompt error");
if !confirmation {
return;
}
let client = Client::try_default()
.await
.expect("Should instanciate client from defaults");
@ -42,8 +54,7 @@ async fn main() {
// println!("found node {} status {:?}", n.name_any(), n.status.unwrap())
// }
let nginxdeployment = nginx_deployment_2();
let nginxdeployment = nginx_deployment_serde();
assert_eq!(nginx_deployment(), nginx_macro());
assert_eq!(nginx_deployment_2(), nginx_macro());
assert_eq!(nginx_deployment_serde(), nginx_macro());
let nginxdeployment = nginx_macro();
@ -149,6 +160,7 @@ fn nginx_deployment_2() -> Deployment {
deployment
}
fn nginx_deployment() -> Deployment {
let deployment = Deployment {
metadata: ObjectMeta {

18
examples/lamp/Cargo.toml Normal file
View File

@ -0,0 +1,18 @@
[package]
name = "example-lamp"
edition = "2024"
version.workspace = true
readme.workspace = true
license.workspace = true
publish = false
[dependencies]
harmony = { path = "../../harmony" }
harmony_cli = { path = "../../harmony_cli" }
harmony_types = { path = "../../harmony_types" }
cidr = { workspace = true }
tokio = { workspace = true }
harmony_macros = { path = "../../harmony_macros" }
log = { workspace = true }
env_logger = { workspace = true }
url = { workspace = true }

View File

@ -0,0 +1,85 @@
<?php
// MariaDB storage filler: ensures a scratch database/table exist and lets the
// user insert 1 GB of random data per click to exercise storage provisioning.
ini_set('display_errors', 1);
error_reporting(E_ALL);

// Connection settings come from the environment (set by the deployment).
$host = getenv('MYSQL_HOST') ?: '';
$user = getenv('MYSQL_USER') ?: 'root';
$pass = getenv('MYSQL_PASSWORD') ?: '';
$db = 'testfill';
$charset = 'utf8mb4';

$dsn = "mysql:host=$host;charset=$charset";
$options = [
    PDO::ATTR_ERRMODE => PDO::ERRMODE_EXCEPTION,
    PDO::ATTR_DEFAULT_FETCH_MODE => PDO::FETCH_ASSOC,
];

try {
    $pdo = new PDO($dsn, $user, $pass, $options);
    $pdo->exec("CREATE DATABASE IF NOT EXISTS `$db`");
    $pdo->exec("USE `$db`");
    $pdo->exec("
        CREATE TABLE IF NOT EXISTS filler (
            id INT AUTO_INCREMENT PRIMARY KEY,
            data LONGBLOB
        )
    ");
} catch (\PDOException $e) {
    die("❌ DB connection failed: " . $e->getMessage());
}

// Returns [total size in GB, total row count, average row size in MB] for $db.
// $db is an internal constant (not user input), so interpolating it is safe here.
function getDbStats($pdo, $db) {
    $stmt = $pdo->query("
        SELECT
            ROUND(SUM(data_length + index_length) / 1024 / 1024 / 1024, 2) AS total_size_gb,
            SUM(table_rows) AS total_rows
        FROM information_schema.tables
        WHERE table_schema = '$db'
    ");
    $result = $stmt->fetch();
    $sizeGb = $result['total_size_gb'] ?? '0';
    $rows = $result['total_rows'] ?? '0';
    $avgMb = ($rows > 0) ? round(($sizeGb * 1024) / $rows, 2) : 0;
    return [$sizeGb, $rows, $avgMb];
}

list($dbSize, $rowCount, $avgRowMb) = getDbStats($pdo, $db);
$message = '';

if ($_SERVER['REQUEST_METHOD'] === 'POST' && isset($_POST['fill'])) {
    $iterations = 1024;
    $data = str_repeat(random_bytes(1024), 1024); // 1MB
    $stmt = $pdo->prepare("INSERT INTO filler (data) VALUES (:data)");
    // Fix: the original issued 1024 individually-autocommitted INSERTs.
    // A single transaction commits once, which is far faster and atomic
    // (a mid-loop failure no longer leaves a partial fill behind).
    $pdo->beginTransaction();
    try {
        for ($i = 0; $i < $iterations; $i++) {
            $stmt->execute([':data' => $data]);
        }
        $pdo->commit();
    } catch (\PDOException $e) {
        $pdo->rollBack();
        die("❌ Insert failed: " . $e->getMessage());
    }
    list($dbSize, $rowCount, $avgRowMb) = getDbStats($pdo, $db);
    $message = "<p style='color: green;'>✅ 1GB inserted into MariaDB successfully.</p>";
}
?>
<!DOCTYPE html>
<html>
<head>
<title>MariaDB Filler</title>
</head>
<body>
<h1>MariaDB Storage Filler</h1>
<?= $message ?>
<ul>
<li><strong>📦 MariaDB Used Size:</strong> <?= $dbSize ?> GB</li>
<li><strong>📊 Total Rows:</strong> <?= $rowCount ?></li>
<li><strong>📐 Average Row Size:</strong> <?= $avgRowMb ?> MB</li>
</ul>
<form method="post">
<button name="fill" value="1" type="submit">Insert 1GB into DB</button>
</form>
</body>
</html>

46
examples/lamp/src/main.rs Normal file
View File

@ -0,0 +1,46 @@
use harmony::{
    data::Version,
    inventory::Inventory,
    maestro::Maestro,
    modules::lamp::{LAMPConfig, LAMPScore},
    topology::{K8sAnywhereTopology, Url},
};

/// LAMP example entry point.
#[tokio::main]
async fn main() {
    // This here is the whole configuration to
    // - setup a local K3D cluster
    // - Build a docker image with the PHP project builtin and production grade settings
    // - Deploy a mariadb database using a production grade helm chart
    // - Deploy the new container using a kubernetes deployment
    // - Configure networking between the PHP container and the database
    // - Provision a public route and an SSL certificate automatically on production environments
    //
    // Enjoy :)
    let lamp_stack = LAMPScore {
        name: "harmony-lamp-demo".to_string(),
        domain: Url::Url(url::Url::parse("https://lampdemo.harmony.nationtech.io").unwrap()),
        php_version: Version::from("8.4.4").unwrap(),
        // This config can be extended as needed for more complicated configurations
        config: LAMPConfig {
            project_root: "./php".into(),
            // Fixed: `format!("2Gi")` had no interpolation (clippy::useless_format);
            // a plain string conversion says the same thing without the macro.
            database_size: "2Gi".to_string().into(),
            ..Default::default()
        },
    };

    // You can choose the type of Topology you want, we suggest starting with the
    // K8sAnywhereTopology as it is the most automatic one that enables you to easily deploy
    // locally, to development environment from a CI, to staging, and to production with settings
    // that automatically adapt to each environment grade.
    let mut maestro = Maestro::<K8sAnywhereTopology>::initialize(
        Inventory::autoload(),
        K8sAnywhereTopology::new(),
    )
    .await
    .unwrap();
    maestro.register_all(vec![Box::new(lamp_stack)]);

    // Here we bootstrap the CLI, this gives some nice features if you need them
    harmony_cli::init(maestro, None).await.unwrap();
}
// That's it, end of the infra as code.

View File

@ -19,7 +19,6 @@ use harmony::{
},
tftp::TftpScore,
},
score::Score,
topology::{LogicalHost, UnmanagedRouter, Url},
};
use harmony_macros::{ip, mac_address};
@ -131,7 +130,7 @@ async fn main() {
"./data/watchguard/pxe-http-files".to_string(),
));
let ipxe_score = IpxeScore::new();
let mut maestro = Maestro::new(inventory, topology);
let mut maestro = Maestro::initialize(inventory, topology).await.unwrap();
maestro.register_all(vec![
Box::new(dns_score),
Box::new(bootstrap_dhcp_score),

View File

@ -12,7 +12,7 @@ use harmony::{
modules::{
dummy::{ErrorScore, PanicScore, SuccessScore},
http::HttpScore,
okd::{dhcp::OKDDhcpScore, dns::OKDDnsScore},
okd::{dhcp::OKDDhcpScore, dns::OKDDnsScore, load_balancer::OKDLoadBalancerScore},
opnsense::OPNsenseShellCommandScore,
tftp::TftpScore,
},
@ -78,14 +78,13 @@ async fn main() {
let dhcp_score = OKDDhcpScore::new(&topology, &inventory);
let dns_score = OKDDnsScore::new(&topology);
let load_balancer_score =
harmony::modules::okd::load_balancer::OKDLoadBalancerScore::new(&topology);
let load_balancer_score = OKDLoadBalancerScore::new(&topology);
let tftp_score = TftpScore::new(Url::LocalFolder("./data/watchguard/tftpboot".to_string()));
let http_score = HttpScore::new(Url::LocalFolder(
"./data/watchguard/pxe-http-files".to_string(),
));
let mut maestro = Maestro::new(inventory, topology);
let mut maestro = Maestro::initialize(inventory, topology).await.unwrap();
maestro.register_all(vec![
Box::new(dns_score),
Box::new(dhcp_score),

View File

@ -1,20 +1,70 @@
use std::net::{SocketAddr, SocketAddrV4};
use harmony::{
inventory::Inventory,
maestro::Maestro,
modules::{dummy::{ErrorScore, PanicScore, SuccessScore}, k8s::deployment::K8sDeploymentScore},
topology::HAClusterTopology,
modules::{
dns::DnsScore,
dummy::{ErrorScore, PanicScore, SuccessScore},
load_balancer::LoadBalancerScore,
},
topology::{
BackendServer, DummyInfra, HealthCheck, HttpMethod, HttpStatusCode, LoadBalancerService,
},
};
use harmony_macros::ipv4;
#[tokio::main]
async fn main() {
let inventory = Inventory::autoload();
let topology = HAClusterTopology::autoload();
let mut maestro = Maestro::new(inventory, topology);
let topology = DummyInfra {};
let mut maestro = Maestro::initialize(inventory, topology).await.unwrap();
maestro.register_all(vec![
Box::new(SuccessScore {}),
Box::new(ErrorScore {}),
Box::new(PanicScore {}),
Box::new(DnsScore::new(vec![], None)),
Box::new(build_large_score()),
]);
harmony_tui::init(maestro).await.unwrap();
}
/// Builds a LoadBalancerScore big enough to exercise the TUI layout:
/// six public and six private services, each with three identical backends
/// and a deliberately long health-check path.
fn build_large_score() -> LoadBalancerScore {
    let backend_server = BackendServer {
        address: "192.168.0.0".to_string(),
        port: 342,
    };
    let lb_service = LoadBalancerService {
        // vec![value; n] clones the value, same result as pushing clones by hand.
        backend_servers: vec![backend_server; 3],
        listening_port: SocketAddr::V4(SocketAddrV4::new(ipv4!("192.168.0.0"), 49387)),
        health_check: Some(HealthCheck::HTTP(
            "/some_long_ass_path_to_see_how_it_is_displayed_but_it_has_to_be_even_longer"
                .to_string(),
            HttpMethod::GET,
            HttpStatusCode::Success2xx,
        )),
    };
    LoadBalancerScore {
        public_services: vec![lb_service.clone(); 6],
        private_services: vec![lb_service; 6],
    }
}

View File

@ -7,25 +7,35 @@ license.workspace = true
[dependencies]
libredfish = "0.1.1"
reqwest = {version = "0.11", features = ["blocking", "json"] }
reqwest = { version = "0.11", features = ["blocking", "json"] }
russh = "0.45.0"
rust-ipmi = "0.1.1"
semver = "1.0.23"
serde = { version = "1.0.209", features = ["derive"] }
serde_json = "1.0.127"
tokio = { workspace = true }
derive-new = { workspace = true }
log = { workspace = true }
env_logger = { workspace = true }
async-trait = { workspace = true }
cidr = { workspace = true }
tokio.workspace = true
derive-new.workspace = true
log.workspace = true
env_logger.workspace = true
async-trait.workspace = true
cidr.workspace = true
opnsense-config = { path = "../opnsense-config" }
opnsense-config-xml = { path = "../opnsense-config-xml" }
harmony_macros = { path = "../harmony_macros" }
harmony_types = { path = "../harmony_types" }
uuid = { workspace = true }
url = { workspace = true }
kube = { workspace = true }
k8s-openapi = { workspace = true }
serde_yaml = { workspace = true }
http = { workspace = true }
uuid.workspace = true
url.workspace = true
kube.workspace = true
k8s-openapi.workspace = true
serde_yaml.workspace = true
http.workspace = true
serde-value.workspace = true
inquire.workspace = true
helm-wrapper-rs = "0.4.0"
non-blank-string-rs = "1.0.4"
k3d-rs = { path = "../k3d" }
directories = "6.0.0"
lazy_static = "1.5.0"
dockerfile_builder = "0.1.5"
temp-file = "0.1.9"
convert_case.workspace = true

View File

@ -0,0 +1,13 @@
use lazy_static::lazy_static;
use std::path::PathBuf;

lazy_static! {
    /// Directory where Harmony keeps its local state (platform data dir + "harmony").
    pub static ref HARMONY_CONFIG_DIR: PathBuf = directories::BaseDirs::new()
        // `expect` instead of bare `unwrap`: a failure here means the platform
        // could not resolve the user's home/data directories, and the panic
        // message should say so instead of being anonymous.
        .expect("could not determine user base directories (is HOME set?)")
        .data_dir()
        .join("harmony");
    /// Container registry host; overridable via HARMONY_REGISTRY_URL.
    pub static ref REGISTRY_URL: String = std::env::var("HARMONY_REGISTRY_URL")
        .unwrap_or_else(|_| "hub.nationtech.io".to_string());
    /// Registry project/namespace; overridable via HARMONY_REGISTRY_PROJECT.
    pub static ref REGISTRY_PROJECT: String =
        std::env::var("HARMONY_REGISTRY_PROJECT").unwrap_or_else(|_| "harmony".to_string());
}

View File

@ -2,6 +2,8 @@ use std::sync::Arc;
use derive_new::new;
use harmony_types::net::MacAddress;
use serde::{Serialize, Serializer, ser::SerializeStruct};
use serde_value::Value;
pub type HostGroup = Vec<PhysicalHost>;
pub type SwitchGroup = Vec<Switch>;
@ -75,10 +77,7 @@ impl PhysicalHost {
}
pub fn label(mut self, name: String, value: String) -> Self {
self.labels.push(Label {
_name: name,
_value: value,
});
self.labels.push(Label { name, value });
self
}
@ -88,7 +87,49 @@ impl PhysicalHost {
}
}
#[derive(new)]
// Custom Serialize implementation for PhysicalHost
/// Hand-written because `management` is a trait object serialized through
/// `SerializableManagement`, and the two Option fields are emitted only when present.
impl Serialize for PhysicalHost {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
// Determine the number of fields
let mut num_fields = 5; // category, network, storage, labels, management
if self.memory_size.is_some() {
num_fields += 1;
}
if self.cpu_count.is_some() {
num_fields += 1;
}
// Create a serialization structure
let mut state = serializer.serialize_struct("PhysicalHost", num_fields)?;
// Serialize the standard fields
state.serialize_field("category", &self.category)?;
state.serialize_field("network", &self.network)?;
state.serialize_field("storage", &self.storage)?;
state.serialize_field("labels", &self.labels)?;
// Serialize optional fields
if let Some(memory) = self.memory_size {
state.serialize_field("memory_size", &memory)?;
}
if let Some(cpu) = self.cpu_count {
state.serialize_field("cpu_count", &cpu)?;
}
// Trait objects can't derive Serialize; convert through the helper trait
// into a serde_value::Value first.
let mgmt_data = self.management.serialize_management();
// pub management: Arc<dyn ManagementInterface>,
// Handle management interface - either as a field or flattened
state.serialize_field("management", &mgmt_data)?;
state.end()
}
}
/// Management interface for hosts without a remote management controller;
/// management operations must be carried out manually by an operator.
#[derive(new, Serialize)]
pub struct ManualManagementInterface;
impl ManagementInterface for ManualManagementInterface {
@ -102,7 +143,7 @@ impl ManagementInterface for ManualManagementInterface {
}
}
pub trait ManagementInterface: Send + Sync {
pub trait ManagementInterface: Send + Sync + SerializableManagement {
fn boot_to_pxe(&self);
fn get_supported_protocol_names(&self) -> String;
}
@ -116,21 +157,49 @@ impl std::fmt::Debug for dyn ManagementInterface {
}
}
#[derive(Debug, Clone)]
/// Bridges `dyn ManagementInterface` to serde: converts a concrete management
/// interface into a `serde_value::Value` so it can be embedded in
/// `PhysicalHost`'s serialized form.
pub trait SerializableManagement {
    fn serialize_management(&self) -> Value;
}
// Provide a blanket implementation for all types that implement both
// ManagementInterface and Serialize, so concrete controllers only need
// #[derive(Serialize)] to participate.
impl<T> SerializableManagement for T
where
    T: ManagementInterface + Serialize,
{
    fn serialize_management(&self) -> Value {
        // Panics only if the concrete type's Serialize impl itself fails,
        // which would be a bug in that implementation.
        serde_value::to_value(self).expect("ManagementInterface should serialize successfully")
    }
}
/// Broad classification of a physical host's role in the infrastructure.
#[derive(Debug, Clone, Serialize)]
pub enum HostCategory {
    Server,
    Firewall,
    Switch,
}
#[derive(Debug, new, Clone)]
/// A physical network interface on a host.
#[derive(Debug, new, Clone, Serialize)]
pub struct NetworkInterface {
    // OS-level interface name when known (e.g. "eth0").
    pub name: Option<String>,
    pub mac_address: MacAddress,
    // Link speed; unit is not stated in SOURCE — presumably Mb/s, TODO confirm.
    pub speed: Option<u64>,
}
#[derive(Debug, new, Clone)]
#[cfg(test)]
use harmony_macros::mac_address;
#[cfg(test)]
impl NetworkInterface {
    /// Test fixture: empty name, all-zero MAC address, zero link speed.
    pub fn dummy() -> Self {
        NetworkInterface {
            name: Some("".to_string()),
            speed: Some(0),
            mac_address: mac_address!("00:00:00:00:00:00"),
        }
    }
}
#[derive(Debug, new, Clone, Serialize)]
pub enum StorageConnectionType {
Sata3g,
Sata6g,
@ -138,13 +207,13 @@ pub enum StorageConnectionType {
Sas12g,
PCIE,
}
#[derive(Debug, Clone)]
/// Physical storage media technology.
#[derive(Debug, Clone, Serialize)]
pub enum StorageKind {
    SSD,
    NVME,
    HDD,
}
#[derive(Debug, new, Clone)]
#[derive(Debug, new, Clone, Serialize)]
pub struct Storage {
pub connection: StorageConnectionType,
pub kind: StorageKind,
@ -152,20 +221,33 @@ pub struct Storage {
pub serial: String,
}
#[derive(Debug, Clone)]
#[cfg(test)]
impl Storage {
    /// Zero-valued Storage fixture for tests (SATA-3G SSD, size 0, empty serial).
    pub fn dummy() -> Self {
        Self {
            connection: StorageConnectionType::Sata3g,
            kind: StorageKind::SSD,
            size: 0,
            serial: String::new(),
        }
    }
}
/// A network switch with its data-plane and management interfaces.
/// Fields are underscore-prefixed: currently only set at construction time.
#[derive(Debug, Clone, Serialize)]
pub struct Switch {
    _interface: Vec<NetworkInterface>,
    _management_interface: NetworkInterface,
}
#[derive(Debug, new, Clone)]
#[derive(Debug, new, Clone, Serialize)]
pub struct Label {
_name: String,
_value: String,
pub name: String,
pub value: String,
}
pub type Address = String;
#[derive(new, Debug)]
#[derive(new, Debug, Serialize)]
pub struct Location {
pub address: Address,
pub name: String,
@ -179,3 +261,158 @@ impl Location {
}
}
}
#[cfg(test)]
mod tests {
    use super::*;
    use serde::{Deserialize, Serialize};
    use std::sync::Arc;

    // Mock implementation of ManagementInterface
    #[derive(Debug, Clone, Serialize, Deserialize)]
    struct MockHPIlo {
        ip: String,
        username: String,
        password: String,
        firmware_version: String,
    }

    impl ManagementInterface for MockHPIlo {
        fn boot_to_pxe(&self) {}
        fn get_supported_protocol_names(&self) -> String {
            String::new()
        }
    }

    // Another mock implementation
    #[derive(Debug, Clone, Serialize, Deserialize)]
    struct MockDellIdrac {
        hostname: String,
        port: u16,
        api_token: String,
    }

    impl ManagementInterface for MockDellIdrac {
        fn boot_to_pxe(&self) {}
        fn get_supported_protocol_names(&self) -> String {
            String::new()
        }
    }

    #[test]
    fn test_serialize_physical_host_with_hp_ilo() {
        // Create a PhysicalHost with HP iLO management
        let host = PhysicalHost {
            category: HostCategory::Server,
            network: vec![NetworkInterface::dummy()],
            management: Arc::new(MockHPIlo {
                ip: "192.168.1.100".to_string(),
                username: "admin".to_string(),
                password: "password123".to_string(),
                firmware_version: "2.5.0".to_string(),
            }),
            storage: vec![Storage::dummy()],
            labels: vec![Label::new("datacenter".to_string(), "us-east".to_string())],
            memory_size: Some(64_000_000),
            cpu_count: Some(16),
        };

        // Serialize to JSON
        let json = serde_json::to_string(&host).expect("Failed to serialize host");

        // Check that the serialized JSON contains the HP iLO details
        assert!(json.contains("192.168.1.100"));
        assert!(json.contains("admin"));
        assert!(json.contains("password123"));
        assert!(json.contains("firmware_version"));
        assert!(json.contains("2.5.0"));

        // Parse back to verify structure (not the exact management interface)
        let parsed: serde_json::Value = serde_json::from_str(&json).expect("Failed to parse JSON");

        // Verify basic structure
        assert_eq!(parsed["cpu_count"], 16);
        assert_eq!(parsed["memory_size"], 64_000_000);
        assert_eq!(parsed["network"][0]["name"], "");
    }

    #[test]
    fn test_serialize_physical_host_with_dell_idrac() {
        // Create a PhysicalHost with Dell iDRAC management
        let host = PhysicalHost {
            category: HostCategory::Server,
            network: vec![NetworkInterface::dummy()],
            management: Arc::new(MockDellIdrac {
                hostname: "idrac-server01".to_string(),
                port: 443,
                api_token: "abcdef123456".to_string(),
            }),
            storage: vec![Storage::dummy()],
            labels: vec![Label::new("env".to_string(), "production".to_string())],
            memory_size: Some(128_000_000),
            cpu_count: Some(32),
        };

        // Serialize to JSON
        let json = serde_json::to_string(&host).expect("Failed to serialize host");

        // Check that the serialized JSON contains the Dell iDRAC details
        assert!(json.contains("idrac-server01"));
        assert!(json.contains("443"));
        assert!(json.contains("abcdef123456"));

        // Parse back to verify structure
        let parsed: serde_json::Value = serde_json::from_str(&json).expect("Failed to parse JSON");

        // Verify basic structure. Fixed: the old assertion checked
        // parsed["storage"][0]["path"] against Null, but Storage has no "path"
        // field and indexing a missing key always yields Null, so it could
        // never fail. Assert on fields Storage actually serializes instead.
        assert_eq!(parsed["cpu_count"], 32);
        assert_eq!(parsed["memory_size"], 128_000_000);
        assert_eq!(parsed["storage"][0]["serial"], "");
        assert_eq!(parsed["storage"][0]["size"], 0);
    }

    #[test]
    fn test_different_management_implementations_produce_valid_json() {
        // Create hosts with different management implementations
        let host1 = PhysicalHost {
            category: HostCategory::Server,
            network: vec![],
            management: Arc::new(MockHPIlo {
                ip: "10.0.0.1".to_string(),
                username: "root".to_string(),
                password: "secret".to_string(),
                firmware_version: "3.0.0".to_string(),
            }),
            storage: vec![],
            labels: vec![],
            memory_size: None,
            cpu_count: None,
        };

        let host2 = PhysicalHost {
            category: HostCategory::Server,
            network: vec![],
            management: Arc::new(MockDellIdrac {
                hostname: "server02-idrac".to_string(),
                port: 8443,
                api_token: "token123".to_string(),
            }),
            storage: vec![],
            labels: vec![],
            memory_size: None,
            cpu_count: None,
        };

        // Both should serialize successfully
        let json1 = serde_json::to_string(&host1).expect("Failed to serialize host1");
        let json2 = serde_json::to_string(&host2).expect("Failed to serialize host2");

        // Both JSONs should be valid and parseable
        let _: serde_json::Value = serde_json::from_str(&json1).expect("Invalid JSON for host1");
        let _: serde_json::Value = serde_json::from_str(&json2).expect("Invalid JSON for host2");

        // The JSONs should be different because they contain different management interfaces
        assert_ne!(json1, json2);
    }
}

View File

@ -7,7 +7,6 @@ use super::{
data::{Id, Version},
executors::ExecutorError,
inventory::Inventory,
topology::HAClusterTopology,
};
pub enum InterpretName {
@ -20,6 +19,7 @@ pub enum InterpretName {
Dummy,
Panic,
OPNSense,
K3dInstallation,
}
impl std::fmt::Display for InterpretName {
@ -34,17 +34,15 @@ impl std::fmt::Display for InterpretName {
InterpretName::Dummy => f.write_str("Dummy"),
InterpretName::Panic => f.write_str("Panic"),
InterpretName::OPNSense => f.write_str("OPNSense"),
InterpretName::K3dInstallation => f.write_str("K3dInstallation"),
}
}
}
#[async_trait]
pub trait Interpret: std::fmt::Debug + Send {
async fn execute(
&self,
inventory: &Inventory,
topology: &HAClusterTopology,
) -> Result<Outcome, InterpretError>;
pub trait Interpret<T>: std::fmt::Debug + Send {
async fn execute(&self, inventory: &Inventory, topology: &T)
-> Result<Outcome, InterpretError>;
fn get_name(&self) -> InterpretName;
fn get_version(&self) -> Version;
fn get_status(&self) -> InterpretStatus;

View File

@ -1,41 +1,82 @@
use std::sync::{Arc, RwLock};
use std::sync::{Arc, Mutex, RwLock};
use log::info;
use log::{info, warn};
use super::{
interpret::{Interpret, InterpretError, Outcome},
interpret::{InterpretError, InterpretStatus, Outcome},
inventory::Inventory,
score::Score,
topology::HAClusterTopology,
topology::Topology,
};
type ScoreVec = Vec<Box<dyn Score>>;
type ScoreVec<T> = Vec<Box<dyn Score<T>>>;
pub struct Maestro {
pub struct Maestro<T: Topology> {
inventory: Inventory,
topology: HAClusterTopology,
scores: Arc<RwLock<ScoreVec>>,
topology: T,
scores: Arc<RwLock<ScoreVec<T>>>,
topology_preparation_result: Mutex<Option<Outcome>>,
}
impl Maestro {
pub fn new(inventory: Inventory, topology: HAClusterTopology) -> Self {
impl<T: Topology> Maestro<T> {
pub fn new(inventory: Inventory, topology: T) -> Self {
Self {
inventory,
topology,
scores: Arc::new(RwLock::new(Vec::new())),
topology_preparation_result: None.into(),
}
}
pub fn start(&mut self) {
info!("Starting Maestro");
/// Convenience constructor: builds a Maestro and immediately prepares the
/// topology, failing fast if the readiness check errors out.
pub async fn initialize(inventory: Inventory, topology: T) -> Result<Self, InterpretError> {
    let instance = Self::new(inventory, topology);
    instance.prepare_topology().await?;
    Ok(instance)
}
pub fn register_all(&mut self, mut scores: ScoreVec) {
/// Ensures the associated Topology is ready for operations.
/// Delegates the readiness check and potential setup actions to the Topology.
pub async fn prepare_topology(&self) -> Result<Outcome, InterpretError> {
    info!("Ensuring topology '{}' is ready...", self.topology.name());
    let outcome = self.topology.ensure_ready().await?;
    info!(
        "Topology '{}' readiness check complete: {}",
        self.topology.name(),
        outcome.status
    );
    // Remember the outcome so interpret() can warn when a score is run
    // against a topology that was never (successfully) prepared.
    self.topology_preparation_result
        .lock()
        .unwrap()
        .replace(outcome.clone());
    Ok(outcome)
}
/// Appends the given scores to the shared score list.
pub fn register_all(&mut self, mut scores: ScoreVec<T>) {
    let mut score_mut = self.scores.write().expect("Should acquire lock");
    score_mut.append(&mut scores);
}
pub async fn interpret(&self, score: Box<dyn Score>) -> Result<Outcome, InterpretError> {
/// True only when `prepare_topology` has run and reported SUCCESS.
fn is_topology_initialized(&self) -> bool {
    // None means prepare_topology was never run; any non-SUCCESS status
    // counts as not initialized.
    self.topology_preparation_result
        .lock()
        .unwrap()
        .as_ref()
        .map(|outcome| matches!(outcome.status, InterpretStatus::SUCCESS))
        .unwrap_or(false)
}
pub async fn interpret(&self, score: Box<dyn Score<T>>) -> Result<Outcome, InterpretError> {
if !self.is_topology_initialized() {
warn!(
"Launching interpret for score {} but Topology {} is not fully initialized!",
score.name(),
self.topology.name(),
);
}
info!("Running score {score:?}");
let interpret = score.create_interpret();
info!("Launching interpret {interpret:?}");
@ -44,7 +85,7 @@ impl Maestro {
result
}
pub fn scores(&self) -> Arc<RwLock<ScoreVec>> {
/// Shared handle to the registered scores.
pub fn scores(&self) -> Arc<RwLock<ScoreVec<T>>> {
    self.scores.clone()
}
}

View File

@ -1,3 +1,4 @@
pub mod config;
pub mod data;
pub mod executors;
pub mod filter;

View File

@ -1,7 +1,231 @@
use super::interpret::Interpret;
use std::collections::BTreeMap;
pub trait Score: std::fmt::Debug + Send + Sync {
fn create_interpret(&self) -> Box<dyn Interpret>;
use serde::Serialize;
use serde_value::Value;
use super::{interpret::Interpret, topology::Topology};
pub trait Score<T: Topology>:
std::fmt::Debug + ScoreToString<T> + Send + Sync + CloneBoxScore<T> + SerializeScore<T>
{
fn create_interpret(&self) -> Box<dyn Interpret<T>>;
fn name(&self) -> String;
fn clone_box(&self) -> Box<dyn Score>;
}
/// Object-safe serialization hook for trait-object scores: converts the
/// score into a `serde_value::Value` for display and inspection.
pub trait SerializeScore<T: Topology> {
    fn serialize(&self) -> Value;
}
// Blanket implementation: any Score that also implements serde::Serialize
// gets SerializeScore for free. The previous `'de` lifetime parameter was
// unused and unconstrained (rejected by rustc as E0207) — removed.
impl<S, T> SerializeScore<T> for S
where
    T: Topology,
    S: Score<T> + Serialize,
{
    fn serialize(&self) -> Value {
        // TODO not sure if this is the right place to handle the error or it
        // should bubble up?
        serde_value::to_value(self).expect("Score should serialize successfully")
    }
}
/// Enables cloning of boxed `dyn Score` trait objects.
pub trait CloneBoxScore<T: Topology> {
    fn clone_box(&self) -> Box<dyn Score<T>>;
}
// Blanket implementation: every 'static Clone score gets clone_box for free.
impl<S, T> CloneBoxScore<T> for S
where
    T: Topology,
    S: Score<T> + Clone + 'static,
{
    fn clone_box(&self) -> Box<dyn Score<T>> {
        Box::new(self.clone())
    }
}
/// Pretty-printing for scores: renders the serialized score as an indented
/// ASCII table. Implemented via a blanket impl so all scores inherit it.
pub trait ScoreToString<T: Topology> {
    fn print_score_details(&self) -> String;
    fn format_value_as_string(&self, val: &Value, indent: usize) -> String;
    fn format_map(&self, map: &BTreeMap<Value, Value>, indent: usize) -> String;
    fn wrap_or_truncate(&self, s: &str, width: usize) -> Vec<String>;
}
impl<S, T> ScoreToString<T> for S
where
T: Topology,
S: Score<T> + 'static,
{
/// Renders this score as a human-readable table, framed by blank lines.
fn print_score_details(&self) -> String {
    let rendered = self.format_value_as_string(&self.serialize(), 0);
    format!("\n{}\n", rendered)
}
/// Renders a map as a two-column ASCII table headed by the score name.
/// Values are recursively formatted, then wrapped to the 48-char column.
fn format_map(&self, map: &BTreeMap<Value, Value>, indent: usize) -> String {
    let pad = " ".repeat(indent * 2);
    // Horizontal rule shared by the header rows (hoisted: was duplicated).
    let separator = format!(
        "{}+--------------------------+--------------------------------------------------+\n",
        pad
    );
    let mut output = String::new();
    output += &separator;
    output += &format!("{}| {:<24} | {:<48} |\n", pad, "score_name", self.name());
    output += &separator;

    for (k, v) in map {
        let key_str = match k {
            Value::String(s) => s.clone(),
            other => format!("{:?}", other),
        };
        let formatted_val = self.format_value_as_string(v, indent + 1);
        let lines = formatted_val.lines().map(|line| line.trim_start());
        let wrapped_lines: Vec<_> = lines
            .flat_map(|line| self.wrap_or_truncate(line.trim_start(), 48))
            .collect();

        // First wrapped line sits beside the key; continuation lines get an
        // empty key column.
        if let Some(first) = wrapped_lines.first() {
            output += &format!("{}| {:<24} | {:<48} |\n", pad, key_str, first);
            for line in &wrapped_lines[1..] {
                output += &format!("{}| {:<24} | {:<48} |\n", pad, "", line);
            }
        }
    }

    // Footer rule plus a trailing blank line after the table.
    output += &format!(
        "{}+--------------------------+--------------------------------------------------+\n\n",
        pad
    );
    output
}
/// Splits `s` into chunks of at most `width` bytes, preferring to break at
/// spaces. ASCII behavior is unchanged; multi-byte UTF-8 input no longer
/// panics (the old `current[..width]` slice could land mid-character).
fn wrap_or_truncate(&self, s: &str, width: usize) -> Vec<String> {
    let mut lines = Vec::new();
    let mut current = s;
    while !current.is_empty() {
        if current.len() <= width {
            lines.push(current.to_string());
            break;
        }
        // Largest index <= width that is a valid char boundary.
        let mut boundary = width;
        while boundary > 0 && !current.is_char_boundary(boundary) {
            boundary -= 1;
        }
        if boundary == 0 {
            // A single char wider than `width` bytes: take the whole char so
            // the loop always makes progress.
            boundary = current
                .chars()
                .next()
                .map(|c| c.len_utf8())
                .unwrap_or(current.len());
        }
        // Try to wrap at whitespace if possible
        let mut split_index = current[..boundary].rfind(' ').unwrap_or(boundary);
        if split_index == 0 {
            split_index = boundary;
        }
        lines.push(current[..split_index].trim_end().to_string());
        current = current[split_index..].trim_start();
    }
    lines
}
/// Recursively renders a `serde_value::Value` as indented text. A map at the
/// top level (indent == 0) becomes the full score table via `format_map`;
/// nested maps fall back to "key: value" lines.
fn format_value_as_string(&self, val: &Value, indent: usize) -> String {
    let pad = " ".repeat(indent * 2);
    let mut output = String::new();
    match val {
        Value::Bool(b) => output += &format!("{}{}\n", pad, b),
        Value::U8(u) => output += &format!("{}{}\n", pad, u),
        Value::U16(u) => output += &format!("{}{}\n", pad, u),
        Value::U32(u) => output += &format!("{}{}\n", pad, u),
        Value::U64(u) => output += &format!("{}{}\n", pad, u),
        Value::I8(i) => output += &format!("{}{}\n", pad, i),
        Value::I16(i) => output += &format!("{}{}\n", pad, i),
        Value::I32(i) => output += &format!("{}{}\n", pad, i),
        Value::I64(i) => output += &format!("{}{}\n", pad, i),
        Value::F32(f) => output += &format!("{}{}\n", pad, f),
        Value::F64(f) => output += &format!("{}{}\n", pad, f),
        Value::Char(c) => output += &format!("{}{}\n", pad, c),
        // Strings are left-padded to the table's 48-char value column.
        Value::String(s) => output += &format!("{}{:<48}\n", pad, s),
        Value::Unit => output += &format!("{}<unit>\n", pad),
        Value::Bytes(bytes) => output += &format!("{}{:?}\n", pad, bytes),
        Value::Option(opt) => match opt {
            Some(inner) => {
                output += &format!("{}Option:\n", pad);
                output += &self.format_value_as_string(inner, indent + 1);
            }
            None => output += &format!("{}None\n", pad),
        },
        Value::Newtype(inner) => {
            output += &format!("{}Newtype:\n", pad);
            output += &self.format_value_as_string(inner, indent + 1);
        }
        Value::Seq(seq) => {
            if seq.is_empty() {
                output += &format!("{}[]\n", pad);
            } else {
                output += &format!("{}[\n", pad);
                for item in seq {
                    output += &self.format_value_as_string(item, indent + 1);
                }
                output += &format!("{}]\n", pad);
            }
        }
        Value::Map(map) => {
            if map.is_empty() {
                output += &format!("{}<empty map>\n", pad);
            } else if indent == 0 {
                // Top-level map: render as the full score table.
                output += &self.format_map(map, indent);
            } else {
                // Nested map: simple "key: value" lines, with continuation
                // lines indented under the key.
                for (k, v) in map {
                    let key_str = match k {
                        Value::String(s) => s.clone(),
                        other => format!("{:?}", other),
                    };
                    let val_str = self
                        .format_value_as_string(v, indent + 1)
                        .trim()
                        .to_string();
                    let val_lines: Vec<_> = val_str.lines().collect();
                    output +=
                        &format!("{}{}: {}\n", pad, key_str, val_lines.first().unwrap_or(&""));
                    for line in val_lines.iter().skip(1) {
                        output += &format!("{} {}\n", pad, line);
                    }
                }
            }
        }
    }
    output
}
//TODO write test to check that the output is what it should be
//
#[cfg(test)]
mod tests {
    use super::*;
    use crate::modules::dns::DnsScore;
    use crate::topology::HAClusterTopology;

    #[test]
    fn test_format_values_as_string() {
        let dns_score = Box::new(DnsScore::new(vec![], None));
        // Fully-qualified call pins the Topology type parameter for the
        // blanket ScoreToString impl.
        let print_score_output =
            <DnsScore as ScoreToString<HAClusterTopology>>::print_score_details(&dns_score);
        // NOTE(review): this expected literal appears to have had its column
        // padding collapsed in transit (format_map pads with {:<24}/{:<48});
        // confirm against the real fixture before trusting this assertion.
        let expected_empty_dns_score_table = "\n+--------------------------+--------------------------------------------------+\n| score_name | DnsScore |\n+--------------------------+--------------------------------------------------+\n| dns_entries | [] |\n| register_dhcp_leases | None |\n+--------------------------+--------------------------------------------------+\n\n\n";
        assert_eq!(print_score_output, expected_empty_dns_score_table);
    }
}

View File

@ -1,8 +1,11 @@
use async_trait::async_trait;
use harmony_macros::ip;
use harmony_types::net::MacAddress;
use log::info;
use crate::executors::ExecutorError;
use crate::interpret::InterpretError;
use crate::interpret::Outcome;
use super::DHCPStaticEntry;
use super::DhcpServer;
@ -12,14 +15,16 @@ use super::DnsServer;
use super::Firewall;
use super::HttpServer;
use super::IpAddress;
use super::K8sclient;
use super::LoadBalancer;
use super::LoadBalancerService;
use super::LogicalHost;
use super::Router;
use super::TftpServer;
use super::Topology;
use super::Url;
use super::openshift::OpenshiftClient;
use super::k8s::K8sClient;
use std::sync::Arc;
#[derive(Debug, Clone)]
@ -38,11 +43,28 @@ pub struct HAClusterTopology {
pub switch: Vec<LogicalHost>,
}
impl HAClusterTopology {
pub async fn oc_client(&self) -> Result<Arc<OpenshiftClient>, kube::Error> {
Ok(Arc::new(OpenshiftClient::try_default().await?))
#[async_trait]
impl Topology for HAClusterTopology {
    /// Human-readable topology name. Was todo!(), which panicked as soon as
    /// Maestro logged the name; return the type name, consistent with
    /// K8sAnywhereTopology and LocalhostTopology.
    fn name(&self) -> &str {
        "HAClusterTopology"
    }

    async fn ensure_ready(&self) -> Result<Outcome, InterpretError> {
        todo!(
            "ensure_ready, not entirely sure what it should do here, probably something like verify that the hosts are reachable and all services are up and ready."
        )
    }
}
#[async_trait]
impl K8sclient for HAClusterTopology {
    /// Builds a fresh Kubernetes client from the default environment config,
    /// stringifying any kube error for the caller.
    async fn k8s_client(&self) -> Result<Arc<K8sClient>, String> {
        match K8sClient::try_default().await {
            Ok(client) => Ok(Arc::new(client)),
            Err(e) => Err(e.to_string()),
        }
    }
}
impl HAClusterTopology {
pub fn autoload() -> Self {
let dummy_infra = Arc::new(DummyInfra {});
let dummy_host = LogicalHost {
@ -67,7 +89,167 @@ impl HAClusterTopology {
}
}
struct DummyInfra;
// DnsServer is provided by pure delegation to the topology's configured
// dns_server backend.
#[async_trait]
impl DnsServer for HAClusterTopology {
    async fn register_dhcp_leases(&self, register: bool) -> Result<(), ExecutorError> {
        self.dns_server.register_dhcp_leases(register).await
    }

    async fn register_hosts(&self, hosts: Vec<DnsRecord>) -> Result<(), ExecutorError> {
        self.dns_server.register_hosts(hosts).await
    }

    fn remove_record(&self, name: &str, record_type: DnsRecordType) -> Result<(), ExecutorError> {
        self.dns_server.remove_record(name, record_type)
    }

    async fn list_records(&self) -> Vec<DnsRecord> {
        self.dns_server.list_records().await
    }

    fn get_ip(&self) -> IpAddress {
        self.dns_server.get_ip()
    }

    fn get_host(&self) -> LogicalHost {
        self.dns_server.get_host()
    }

    async fn commit_config(&self) -> Result<(), ExecutorError> {
        self.dns_server.commit_config().await
    }
}
// LoadBalancer is provided by pure delegation to the topology's configured
// load_balancer backend.
#[async_trait]
impl LoadBalancer for HAClusterTopology {
    fn get_ip(&self) -> IpAddress {
        self.load_balancer.get_ip()
    }

    fn get_host(&self) -> LogicalHost {
        self.load_balancer.get_host()
    }

    async fn add_service(&self, service: &LoadBalancerService) -> Result<(), ExecutorError> {
        self.load_balancer.add_service(service).await
    }

    async fn remove_service(&self, service: &LoadBalancerService) -> Result<(), ExecutorError> {
        self.load_balancer.remove_service(service).await
    }

    async fn list_services(&self) -> Vec<LoadBalancerService> {
        self.load_balancer.list_services().await
    }

    async fn ensure_initialized(&self) -> Result<(), ExecutorError> {
        self.load_balancer.ensure_initialized().await
    }

    async fn commit_config(&self) -> Result<(), ExecutorError> {
        self.load_balancer.commit_config().await
    }

    async fn reload_restart(&self) -> Result<(), ExecutorError> {
        self.load_balancer.reload_restart().await
    }
}
// DhcpServer is provided by pure delegation to the topology's configured
// dhcp_server backend.
#[async_trait]
impl DhcpServer for HAClusterTopology {
    async fn add_static_mapping(&self, entry: &DHCPStaticEntry) -> Result<(), ExecutorError> {
        self.dhcp_server.add_static_mapping(entry).await
    }

    async fn remove_static_mapping(&self, mac: &MacAddress) -> Result<(), ExecutorError> {
        self.dhcp_server.remove_static_mapping(mac).await
    }

    async fn list_static_mappings(&self) -> Vec<(MacAddress, IpAddress)> {
        self.dhcp_server.list_static_mappings().await
    }

    async fn set_next_server(&self, ip: IpAddress) -> Result<(), ExecutorError> {
        self.dhcp_server.set_next_server(ip).await
    }

    async fn set_boot_filename(&self, boot_filename: &str) -> Result<(), ExecutorError> {
        self.dhcp_server.set_boot_filename(boot_filename).await
    }

    fn get_ip(&self) -> IpAddress {
        self.dhcp_server.get_ip()
    }

    fn get_host(&self) -> LogicalHost {
        self.dhcp_server.get_host()
    }

    async fn commit_config(&self) -> Result<(), ExecutorError> {
        self.dhcp_server.commit_config().await
    }

    async fn set_filename(&self, filename: &str) -> Result<(), ExecutorError> {
        self.dhcp_server.set_filename(filename).await
    }

    async fn set_filename64(&self, filename64: &str) -> Result<(), ExecutorError> {
        self.dhcp_server.set_filename64(filename64).await
    }

    async fn set_filenameipxe(&self, filenameipxe: &str) -> Result<(), ExecutorError> {
        self.dhcp_server.set_filenameipxe(filenameipxe).await
    }
}
// TftpServer is provided by pure delegation to the topology's configured
// tftp_server backend.
#[async_trait]
impl TftpServer for HAClusterTopology {
    async fn serve_files(&self, url: &Url) -> Result<(), ExecutorError> {
        self.tftp_server.serve_files(url).await
    }

    fn get_ip(&self) -> IpAddress {
        self.tftp_server.get_ip()
    }

    async fn set_ip(&self, ip: IpAddress) -> Result<(), ExecutorError> {
        self.tftp_server.set_ip(ip).await
    }

    async fn ensure_initialized(&self) -> Result<(), ExecutorError> {
        self.tftp_server.ensure_initialized().await
    }

    async fn commit_config(&self) -> Result<(), ExecutorError> {
        self.tftp_server.commit_config().await
    }

    async fn reload_restart(&self) -> Result<(), ExecutorError> {
        self.tftp_server.reload_restart().await
    }
}
// Router is delegated to the topology's router backend (synchronous API).
impl Router for HAClusterTopology {
    fn get_gateway(&self) -> super::IpAddress {
        self.router.get_gateway()
    }

    fn get_cidr(&self) -> cidr::Ipv4Cidr {
        self.router.get_cidr()
    }

    fn get_host(&self) -> LogicalHost {
        self.router.get_host()
    }
}
// HttpServer delegates every operation to the topology's configured
// http_server backend, consistent with the Dns/Dhcp/Tftp impls above. The
// previous version delegated only serve_files and left the other methods as
// unimplemented!() with the DummyInfra message — an apparent copy-paste slip.
#[async_trait]
impl HttpServer for HAClusterTopology {
    async fn serve_files(&self, url: &Url) -> Result<(), ExecutorError> {
        self.http_server.serve_files(url).await
    }

    fn get_ip(&self) -> IpAddress {
        self.http_server.get_ip()
    }

    async fn ensure_initialized(&self) -> Result<(), ExecutorError> {
        self.http_server.ensure_initialized().await
    }

    async fn commit_config(&self) -> Result<(), ExecutorError> {
        self.http_server.commit_config().await
    }

    async fn reload_restart(&self) -> Result<(), ExecutorError> {
        self.http_server.reload_restart().await
    }
}
/// Stand-in infrastructure: operations either log a no-op message or panic
/// with UNIMPLEMENTED_DUMMY_INFRA. Useful for wiring tests and demos.
#[derive(Debug)]
pub struct DummyInfra;
#[async_trait]
impl Topology for DummyInfra {
    /// Was todo!(): returning a static name avoids panicking when Maestro
    /// logs the topology name during prepare_topology.
    fn name(&self) -> &str {
        "DummyInfra"
    }

    async fn ensure_ready(&self) -> Result<Outcome, InterpretError> {
        let dummy_msg = "This is a dummy infrastructure that does nothing";
        info!("{dummy_msg}");
        Ok(Outcome::success(dummy_msg.to_string()))
    }
}
const UNIMPLEMENTED_DUMMY_INFRA: &str = "This is a dummy infrastructure, no operation is supported";
@ -219,11 +401,7 @@ impl DnsServer for DummyInfra {
async fn register_hosts(&self, _hosts: Vec<DnsRecord>) -> Result<(), ExecutorError> {
unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA)
}
fn remove_record(
&mut self,
_name: &str,
_record_type: DnsRecordType,
) -> Result<(), ExecutorError> {
fn remove_record(&self, _name: &str, _record_type: DnsRecordType) -> Result<(), ExecutorError> {
unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA)
}
async fn list_records(&self) -> Vec<DnsRecord> {

View File

@ -0,0 +1 @@
/// Capability marker: topologies implementing this can run helm commands.
/// Currently empty — acts purely as a compile-time capability flag.
pub trait HelmCommand {}

View File

@ -1,4 +1,5 @@
use derive_new::new;
use serde::Serialize;
use crate::hardware::PhysicalHost;
@ -8,7 +9,7 @@ use super::LogicalHost;
///
/// This is the only construct that directly maps a logical host to a physical host.
/// It serves as a bridge between the logical cluster structure and the physical infrastructure.
#[derive(Debug, new, Clone)]
#[derive(Debug, new, Clone, Serialize)]
pub struct HostBinding {
/// Reference to the LogicalHost
pub logical_host: LogicalHost,

View File

@ -1,12 +1,14 @@
use derive_new::new;
use k8s_openapi::NamespaceResourceScope;
use kube::{Api, Client, Error, Resource, api::PostParams};
use serde::de::DeserializeOwned;
pub struct OpenshiftClient {
#[derive(new)]
pub struct K8sClient {
client: Client,
}
impl OpenshiftClient {
impl K8sClient {
pub async fn try_default() -> Result<Self, Error> {
Ok(Self {
client: Client::try_default().await?,
@ -36,7 +38,7 @@ impl OpenshiftClient {
Ok(result)
}
pub async fn apply_namespaced<K>(&self, resource: &Vec<K>) -> Result<K, Error>
pub async fn apply_namespaced<K>(&self, resource: &Vec<K>, ns: Option<&str>) -> Result<K, Error>
where
K: Resource<Scope = NamespaceResourceScope>
+ Clone
@ -47,7 +49,10 @@ impl OpenshiftClient {
<K as kube::Resource>::DynamicType: Default,
{
for r in resource.iter() {
let api: Api<K> = Api::default_namespaced(self.client.clone());
let api: Api<K> = match ns {
Some(ns) => Api::namespaced(self.client.clone(), ns),
None => Api::default_namespaced(self.client.clone()),
};
api.create(&PostParams::default(), &r).await?;
}
todo!("")

View File

@ -0,0 +1,202 @@
use std::{process::Command, sync::Arc};
use async_trait::async_trait;
use inquire::Confirm;
use log::{info, warn};
use tokio::sync::OnceCell;
use crate::{
interpret::{InterpretError, Outcome},
inventory::Inventory,
maestro::Maestro,
modules::k3d::K3DInstallationScore,
topology::LocalhostTopology,
};
use super::{HelmCommand, K8sclient, Topology, k8s::K8sClient};
/// Cached result of Kubernetes client acquisition.
struct K8sState {
    client: Arc<K8sClient>,
    // Where the client came from; currently informational only.
    _source: K8sSource,
    // Human-readable summary surfaced in ensure_ready's Outcome.
    message: String,
}

/// Origin of the Kubernetes client.
enum K8sSource {
    LocalK3d,
}

/// Topology that obtains a Kubernetes cluster "anywhere": from an existing
/// kubeconfig, or by auto-installing a local K3D cluster.
pub struct K8sAnywhereTopology {
    // Lazily initialized once by ensure_ready; Some(None) means
    // initialization ran but produced no client (user declined install).
    k8s_state: OnceCell<Option<K8sState>>,
}
#[async_trait]
impl K8sclient for K8sAnywhereTopology {
    /// Returns the cached client; errors if `ensure_ready` has not yet
    /// initialized the state, or if initialization yielded no client.
    async fn k8s_client(&self) -> Result<Arc<K8sClient>, String> {
        // ok_or_else + ? replaces the previous pair of nested matches.
        let state = self
            .k8s_state
            .get()
            .ok_or_else(|| "K8s state not initialized yet".to_string())?
            .as_ref()
            .ok_or_else(|| "K8s client initialized but empty".to_string())?;
        Ok(state.client.clone())
    }
}
impl K8sAnywhereTopology {
    pub fn new() -> Self {
        Self {
            k8s_state: OnceCell::new(),
        }
    }

    /// Checks that the `helm` binary is on PATH and runs, logging its version.
    fn is_helm_available(&self) -> Result<(), String> {
        // Fixed: the command executed is `helm version`; the error messages
        // previously said 'helm -version'.
        let version_result = Command::new("helm")
            .arg("version")
            .output()
            .map_err(|e| format!("Failed to execute 'helm version': {}", e))?;

        if !version_result.status.success() {
            return Err("Failed to run 'helm version'".to_string());
        }

        // Log through the logger like the rest of this file (was println!).
        let version_output = String::from_utf8_lossy(&version_result.stdout);
        info!("Helm version: {}", version_output.trim());

        Ok(())
    }

    async fn try_load_system_kubeconfig(&self) -> Option<K8sClient> {
        todo!("Use kube-rs default behavior to load system kubeconfig");
    }

    async fn try_load_kubeconfig(&self, path: &str) -> Option<K8sClient> {
        todo!("Use kube-rs to load kubeconfig at path {path}");
    }

    fn get_k3d_installation_score(&self) -> K3DInstallationScore {
        K3DInstallationScore::default()
    }

    /// Installs a local K3D cluster by running the K3DInstallationScore on a
    /// LocalhostTopology.
    async fn try_install_k3d(&self) -> Result<(), InterpretError> {
        let maestro = Maestro::initialize(Inventory::autoload(), LocalhostTopology::new()).await?;
        let k3d_score = self.get_k3d_installation_score();
        maestro.interpret(Box::new(k3d_score)).await?;
        Ok(())
    }

    /// Resolves a Kubernetes client from (in order): the system kubeconfig,
    /// HARMONY_KUBECONFIG, or a freshly installed local K3D cluster.
    /// Returns Ok(None) when the user declines autoinstallation.
    async fn try_get_or_install_k8s_client(&self) -> Result<Option<K8sState>, InterpretError> {
        let k8s_anywhere_config = K8sAnywhereConfig {
            // env::var already yields an owned String; the previous
            // .map(|v| v.to_string()) was redundant.
            kubeconfig: std::env::var("HARMONY_KUBECONFIG").ok(),
            use_system_kubeconfig: std::env::var("HARMONY_USE_SYSTEM_KUBECONFIG")
                .map_or_else(|_| false, |v| v.parse().ok().unwrap_or(false)),
            autoinstall: std::env::var("HARMONY_AUTOINSTALL")
                .map_or_else(|_| false, |v| v.parse().ok().unwrap_or(false)),
        };

        if k8s_anywhere_config.use_system_kubeconfig {
            match self.try_load_system_kubeconfig().await {
                Some(_client) => todo!(),
                None => todo!(),
            }
        }

        if let Some(kubeconfig) = k8s_anywhere_config.kubeconfig {
            match self.try_load_kubeconfig(&kubeconfig).await {
                Some(_client) => todo!(),
                None => todo!(),
            }
        }

        info!("No kubernetes configuration found");
        if !k8s_anywhere_config.autoinstall {
            let confirmation = Confirm::new( "Harmony autoinstallation is not activated, do you wish to launch autoinstallation? : ")
                .with_default(false)
                .prompt()
                .expect("Unexpected prompt error");
            if !confirmation {
                warn!(
                    "Installation cancelled, K8sAnywhere could not initialize a valid Kubernetes client"
                );
                return Ok(None);
            }
        }

        info!("Starting K8sAnywhere installation");
        self.try_install_k3d().await?;
        let k3d_score = self.get_k3d_installation_score();

        // I feel like having to rely on the k3d_rs crate here is a smell
        // I think we should have a way to interact more deeply with scores/interpret. Maybe the
        // K3DInstallationScore should expose a method to get_client ? Not too sure what would be a
        // good implementation due to the stateful nature of the k3d thing. Which is why I went
        // with this solution for now
        let k3d = k3d_rs::K3d::new(k3d_score.installation_path, Some(k3d_score.cluster_name));
        let state = match k3d.get_client().await {
            Ok(client) => K8sState {
                client: Arc::new(K8sClient::new(client)),
                _source: K8sSource::LocalK3d,
                message: "Successfully installed K3D cluster and acquired client".to_string(),
            },
            Err(_) => todo!(),
        };

        Ok(Some(state))
    }
}
/// Environment-driven configuration for K8sAnywhereTopology, read from
/// HARMONY_KUBECONFIG, HARMONY_USE_SYSTEM_KUBECONFIG and HARMONY_AUTOINSTALL.
struct K8sAnywhereConfig {
    /// The path of the KUBECONFIG file that Harmony should use to interact with the Kubernetes
    /// cluster
    ///
    /// Default : None
    kubeconfig: Option<String>,
    /// Whether to use the system KUBECONFIG, either the environment variable or the file in the
    /// default or configured location
    ///
    /// Default : false
    use_system_kubeconfig: bool,
    /// Whether to install automatically a kubernetes cluster
    ///
    /// When enabled, autoinstall will setup a K3D cluster on the localhost. https://k3d.io/stable/
    ///
    /// Default: false
    /// NOTE(review): this doc previously said "true", but
    /// try_get_or_install_k8s_client falls back to false when the env var is
    /// unset — confirm the intended default.
    autoinstall: bool,
}
#[async_trait]
impl Topology for K8sAnywhereTopology {
    fn name(&self) -> &str {
        "K8sAnywhereTopology"
    }

    /// Lazily initializes (or installs) a Kubernetes client, then verifies
    /// that helm is available — both are required before running scores.
    async fn ensure_ready(&self) -> Result<Outcome, InterpretError> {
        let k8s_state = self
            .k8s_state
            .get_or_try_init(|| self.try_get_or_install_k8s_client())
            .await?;
        // ok_or_else: build the error lazily instead of allocating it on the
        // success path (was ok_or).
        let k8s_state: &K8sState = k8s_state.as_ref().ok_or_else(|| {
            InterpretError::new("No K8s client could be found or installed".to_string())
        })?;
        match self.is_helm_available() {
            Ok(()) => Ok(Outcome::success(format!(
                // No clone needed: format! borrows the message.
                "{} + helm available",
                k8s_state.message
            ))),
            Err(e) => Err(InterpretError::new(format!("helm unavailable: {}", e))),
        }
    }
}
impl HelmCommand for K8sAnywhereTopology {}

View File

@ -2,6 +2,7 @@ use std::{net::SocketAddr, str::FromStr};
use async_trait::async_trait;
use log::debug;
use serde::Serialize;
use super::{IpAddress, LogicalHost};
use crate::executors::ExecutorError;
@ -36,20 +37,21 @@ impl std::fmt::Debug for dyn LoadBalancer {
f.write_fmt(format_args!("LoadBalancer {}", self.get_ip()))
}
}
#[derive(Debug, PartialEq, Clone)]
#[derive(Debug, PartialEq, Clone, Serialize)]
pub struct LoadBalancerService {
pub backend_servers: Vec<BackendServer>,
pub listening_port: SocketAddr,
pub health_check: Option<HealthCheck>,
}
#[derive(Debug, PartialEq, Clone)]
#[derive(Debug, PartialEq, Clone, Serialize)]
pub struct BackendServer {
// TODO should not be a string, probably IPAddress
pub address: String,
pub port: u16,
}
#[derive(Debug, Clone, PartialEq)]
#[derive(Debug, Clone, PartialEq, Serialize)]
pub enum HttpMethod {
GET,
POST,
@ -91,14 +93,14 @@ impl std::fmt::Display for HttpMethod {
}
}
#[derive(Debug, Clone, PartialEq)]
#[derive(Debug, Clone, PartialEq, Serialize)]
pub enum HttpStatusCode {
Success2xx,
UserError4xx,
ServerError5xx,
}
#[derive(Debug, Clone, PartialEq)]
#[derive(Debug, Clone, PartialEq, Serialize)]
pub enum HealthCheck {
HTTP(String, HttpMethod, HttpStatusCode),
TCP(Option<u16>),

View File

@ -0,0 +1,25 @@
use async_trait::async_trait;
use derive_new::new;
use crate::interpret::{InterpretError, Outcome};
use super::{HelmCommand, Topology};
/// Topology representing the local machine itself; no provisioning is needed.
#[derive(new)]
pub struct LocalhostTopology;

#[async_trait]
impl Topology for LocalhostTopology {
    fn name(&self) -> &str {
        "LocalHostTopology"
    }

    /// The local host requires no setup, so readiness always succeeds.
    async fn ensure_ready(&self) -> Result<Outcome, InterpretError> {
        let message = "Localhost is Chuck Norris, always ready.".to_string();
        Ok(Outcome::success(message))
    }
}

// TODO: Delete this, temp for test
impl HelmCommand for LocalhostTopology {}

View File

@ -1,10 +1,15 @@
mod ha_cluster;
mod host_binding;
mod http;
mod k8s_anywhere;
mod localhost;
pub use k8s_anywhere::*;
pub use localhost::*;
pub mod k8s;
mod load_balancer;
pub mod openshift;
mod router;
mod tftp;
use async_trait::async_trait;
pub use ha_cluster::*;
pub use load_balancer::*;
pub use router::*;
@ -12,23 +17,73 @@ mod network;
pub use host_binding::*;
pub use http::*;
pub use network::*;
use serde::Serialize;
pub use tftp::*;
use std::{net::IpAddr, sync::Arc};
mod helm_command;
pub use helm_command::*;
use std::net::IpAddr;
use super::interpret::{InterpretError, Outcome};
/// Represents a logical view of an infrastructure environment providing specific capabilities.
///
/// A Topology acts as a self-contained "package" responsible for managing access
/// to its underlying resources and ensuring they are in a ready state before use.
/// It defines the contract for the capabilities it provides through implemented
/// capability traits (e.g., `HasK8sCapability`, `HasDnsServer`).
#[async_trait]
pub trait Topology: Send + Sync {
/// Returns a unique identifier or name for this specific topology instance.
/// This helps differentiate between multiple instances of potentially the same type.
fn name(&self) -> &str;
/// Ensures that the topology and its required underlying components or services
/// are ready to provide their declared capabilities.
///
/// Implementations of this method MUST be idempotent. Subsequent calls after a
/// successful readiness check should ideally be cheap NO-OPs.
///
/// This method encapsulates the logic for:
/// 1. **Checking Current State:** Assessing if the required resources/services are already running and configured.
/// 2. **Discovery:** Identifying the runtime environment (e.g., local Docker, AWS, existing cluster).
/// 3. **Initialization/Bootstrapping:** Performing necessary setup actions if not already ready. This might involve:
/// * Making API calls.
/// * Running external commands (e.g., `k3d`, `docker`).
/// * **Internal Orchestration:** For complex topologies, this method might manage dependencies on other sub-topologies, ensuring *their* `ensure_ready` is called first. Using nested `Maestros` to run setup `Scores` against these sub-topologies is the recommended pattern for non-trivial bootstrapping, allowing reuse of Harmony's core orchestration logic.
///
/// # Returns
/// - `Ok(Outcome)`: Indicates the topology is now ready. The `Outcome` status might be `SUCCESS` if actions were taken, or `NOOP` if it was already ready. The message should provide context.
/// - `Err(TopologyError)`: Indicates the topology could not reach a ready state due to configuration issues, discovery failures, bootstrap errors, or unsupported environments.
async fn ensure_ready(&self) -> Result<Outcome, InterpretError>;
}
pub type IpAddress = IpAddr;
#[derive(Debug, Clone)]
pub enum Url {
LocalFolder(String),
Remote(url::Url),
Url(url::Url),
}
/// Serializes `Url` as a plain string: local folders emit their raw path,
/// parsed URLs emit their canonical string form.
impl Serialize for Url {
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: serde::Serializer,
    {
        match self {
            Url::LocalFolder(path) => serializer.serialize_str(path),
            // `as_str()` already returns `&str`; the previous `&url.as_str()`
            // was a needless double reference (clippy::needless_borrow).
            Url::Url(url) => serializer.serialize_str(url.as_str()),
        }
    }
}
impl std::fmt::Display for Url {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
Url::LocalFolder(path) => write!(f, "{}", path),
Url::Remote(url) => write!(f, "{}", url),
Url::Url(url) => write!(f, "{}", url),
}
}
}
@ -42,7 +97,7 @@ impl std::fmt::Display for Url {
/// - A control plane node
///
/// This abstraction focuses on the logical role and services, independent of the physical hardware.
#[derive(Debug, Clone)]
#[derive(Debug, Clone, Serialize)]
pub struct LogicalHost {
/// The IP address of this logical host.
pub ip: IpAddress,
@ -124,3 +179,23 @@ fn increment_ip(ip: IpAddress, increment: u32) -> Option<IpAddress> {
}
}
}
#[cfg(test)]
mod tests {
    use super::*;
    use serde_json;

    /// A local folder serializes to its bare path string.
    #[test]
    fn test_serialize_local_folder() {
        let input = Url::LocalFolder("path/to/folder".to_string());
        assert_eq!(
            serde_json::to_string(&input).unwrap(),
            "\"path/to/folder\""
        );
    }

    /// A parsed URL serializes to its canonical form (note the trailing
    /// slash the `url` crate adds on normalization).
    #[test]
    fn test_serialize_url() {
        let parsed = url::Url::parse("https://example.com").unwrap();
        assert_eq!(
            serde_json::to_string(&Url::Url(parsed)).unwrap(),
            "\"https://example.com/\""
        );
    }
}

View File

@ -1,11 +1,12 @@
use std::{net::Ipv4Addr, str::FromStr};
use std::{net::Ipv4Addr, str::FromStr, sync::Arc};
use async_trait::async_trait;
use harmony_types::net::MacAddress;
use serde::Serialize;
use crate::executors::ExecutorError;
use super::{IpAddress, LogicalHost};
use super::{IpAddress, LogicalHost, k8s::K8sClient};
#[derive(Debug)]
pub struct DHCPStaticEntry {
@ -40,9 +41,13 @@ impl std::fmt::Debug for dyn Firewall {
pub struct NetworkDomain {
pub name: String,
}
#[async_trait]
pub trait K8sclient: Send + Sync {
async fn k8s_client(&self) -> Result<Arc<K8sClient>, String>;
}
#[async_trait]
pub trait DhcpServer: Send + Sync {
pub trait DhcpServer: Send + Sync + std::fmt::Debug {
async fn add_static_mapping(&self, entry: &DHCPStaticEntry) -> Result<(), ExecutorError>;
async fn remove_static_mapping(&self, mac: &MacAddress) -> Result<(), ExecutorError>;
async fn list_static_mappings(&self) -> Vec<(MacAddress, IpAddress)>;
@ -56,21 +61,11 @@ pub trait DhcpServer: Send + Sync {
async fn commit_config(&self) -> Result<(), ExecutorError>;
}
impl std::fmt::Debug for dyn DhcpServer {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.write_fmt(format_args!("DhcpServer {}", self.get_ip()))
}
}
#[async_trait]
pub trait DnsServer: Send + Sync {
async fn register_dhcp_leases(&self, register: bool) -> Result<(), ExecutorError>;
async fn register_hosts(&self, hosts: Vec<DnsRecord>) -> Result<(), ExecutorError>;
fn remove_record(
&mut self,
name: &str,
record_type: DnsRecordType,
) -> Result<(), ExecutorError>;
fn remove_record(&self, name: &str, record_type: DnsRecordType) -> Result<(), ExecutorError>;
async fn list_records(&self) -> Vec<DnsRecord>;
fn get_ip(&self) -> IpAddress;
fn get_host(&self) -> LogicalHost;
@ -121,7 +116,7 @@ pub enum Action {
Deny,
}
#[derive(Clone, Debug, PartialEq, Eq)]
#[derive(Clone, Debug, PartialEq, Eq, Serialize)]
pub enum DnsRecordType {
A,
AAAA,
@ -142,7 +137,7 @@ impl std::fmt::Display for DnsRecordType {
}
}
#[derive(Clone, Debug, PartialEq, Eq)]
#[derive(Clone, Debug, PartialEq, Eq, Serialize)]
pub struct DnsRecord {
pub host: String,
pub domain: String,
@ -258,7 +253,7 @@ mod test {
}
fn remove_record(
&mut self,
&self,
_name: &str,
_record_type: DnsRecordType,
) -> Result<(), ExecutorError> {

View File

@ -3,8 +3,9 @@ use crate::topology::IpAddress;
use derive_new::new;
use harmony_types::net::MacAddress;
use log::info;
use serde::Serialize;
#[derive(new)]
#[derive(new, Serialize)]
pub struct HPIlo {
ip_address: Option<IpAddress>,
mac_address: Option<MacAddress>,

View File

@ -2,8 +2,9 @@ use crate::hardware::ManagementInterface;
use derive_new::new;
use harmony_types::net::MacAddress;
use log::info;
use serde::Serialize;
#[derive(new)]
#[derive(new, Serialize)]
pub struct IntelAmtManagement {
mac_address: MacAddress,
}

View File

@ -30,7 +30,7 @@ impl DnsServer for OPNSenseFirewall {
}
fn remove_record(
&mut self,
&self,
_name: &str,
_record_type: crate::topology::DnsRecordType,
) -> Result<(), ExecutorError> {

View File

@ -22,7 +22,7 @@ impl HttpServer for OPNSenseFirewall {
.await
.map_err(|e| ExecutorError::UnexpectedError(e.to_string()))?;
}
Url::Remote(_url) => todo!(),
Url::Url(_url) => todo!(),
}
Ok(())
}

View File

@ -1,7 +1,8 @@
use crate::hardware::ManagementInterface;
use derive_new::new;
use serde::Serialize;
#[derive(new)]
#[derive(new, Serialize)]
pub struct OPNSenseManagementInterface {}
impl ManagementInterface for OPNSenseManagementInterface {

View File

@ -22,7 +22,7 @@ impl TftpServer for OPNSenseFirewall {
.await
.map_err(|e| ExecutorError::UnexpectedError(e.to_string()))?;
}
Url::Remote(url) => todo!("This url is not supported yet {url}"),
Url::Url(url) => todo!("This url is not supported yet {url}"),
}
Ok(())
}

View File

@ -1,19 +1,18 @@
use std::sync::Arc;
use async_trait::async_trait;
use derive_new::new;
use log::info;
use serde::Serialize;
use crate::{
domain::{data::Version, interpret::InterpretStatus},
interpret::{Interpret, InterpretError, InterpretName, Outcome},
inventory::Inventory,
topology::{DHCPStaticEntry, HAClusterTopology, HostBinding, IpAddress},
topology::{DHCPStaticEntry, DhcpServer, HostBinding, IpAddress, Topology},
};
use crate::domain::score::Score;
#[derive(Debug, new, Clone)]
#[derive(Debug, new, Clone, Serialize)]
pub struct DhcpScore {
pub host_binding: Vec<HostBinding>,
pub next_server: Option<IpAddress>,
@ -23,18 +22,14 @@ pub struct DhcpScore {
pub filenameipxe: Option<String>,
}
impl Score for DhcpScore {
fn create_interpret(&self) -> Box<dyn Interpret> {
impl<T: Topology + DhcpServer> Score<T> for DhcpScore {
fn create_interpret(&self) -> Box<dyn Interpret<T>> {
Box::new(DhcpInterpret::new(self.clone()))
}
fn name(&self) -> String {
"DhcpScore".to_string()
}
fn clone_box(&self) -> Box<dyn Score> {
Box::new(self.clone())
}
}
// https://docs.opnsense.org/manual/dhcp.html#advanced-settings
@ -55,10 +50,10 @@ impl DhcpInterpret {
status: InterpretStatus::QUEUED,
}
}
async fn add_static_entries(
async fn add_static_entries<D: DhcpServer>(
&self,
_inventory: &Inventory,
topology: &HAClusterTopology,
dhcp_server: &D,
) -> Result<Outcome, InterpretError> {
let dhcp_entries: Vec<DHCPStaticEntry> = self
.score
@ -81,7 +76,6 @@ impl DhcpInterpret {
.collect();
info!("DHCPStaticEntry : {:?}", dhcp_entries);
let dhcp_server = Arc::new(topology.dhcp_server.clone());
info!("DHCP server : {:?}", dhcp_server);
let number_new_entries = dhcp_entries.len();
@ -99,14 +93,13 @@ impl DhcpInterpret {
))
}
async fn set_pxe_options(
async fn set_pxe_options<D: DhcpServer>(
&self,
_inventory: &Inventory,
topology: &HAClusterTopology,
dhcp_server: &D,
) -> Result<Outcome, InterpretError> {
let next_server_outcome = match self.score.next_server {
Some(next_server) => {
let dhcp_server = Arc::new(topology.dhcp_server.clone());
dhcp_server.set_next_server(next_server).await?;
Outcome::new(
InterpretStatus::SUCCESS,
@ -118,7 +111,6 @@ impl DhcpInterpret {
let boot_filename_outcome = match &self.score.boot_filename {
Some(boot_filename) => {
let dhcp_server = Arc::new(topology.dhcp_server.clone());
dhcp_server.set_boot_filename(&boot_filename).await?;
Outcome::new(
InterpretStatus::SUCCESS,
@ -130,7 +122,6 @@ impl DhcpInterpret {
let filename_outcome = match &self.score.filename {
Some(filename) => {
let dhcp_server = Arc::new(topology.dhcp_server.clone());
dhcp_server.set_filename(&filename).await?;
Outcome::new(
InterpretStatus::SUCCESS,
@ -142,7 +133,6 @@ impl DhcpInterpret {
let filename64_outcome = match &self.score.filename64 {
Some(filename64) => {
let dhcp_server = Arc::new(topology.dhcp_server.clone());
dhcp_server.set_filename64(&filename64).await?;
Outcome::new(
InterpretStatus::SUCCESS,
@ -154,7 +144,6 @@ impl DhcpInterpret {
let filenameipxe_outcome = match &self.score.filenameipxe {
Some(filenameipxe) => {
let dhcp_server = Arc::new(topology.dhcp_server.clone());
dhcp_server.set_filenameipxe(&filenameipxe).await?;
Outcome::new(
InterpretStatus::SUCCESS,
@ -184,7 +173,7 @@ impl DhcpInterpret {
}
#[async_trait]
impl Interpret for DhcpInterpret {
impl<T: DhcpServer> Interpret<T> for DhcpInterpret {
fn get_name(&self) -> InterpretName {
InterpretName::OPNSenseDHCP
}
@ -204,15 +193,15 @@ impl Interpret for DhcpInterpret {
async fn execute(
&self,
inventory: &Inventory,
topology: &HAClusterTopology,
topology: &T,
) -> Result<Outcome, InterpretError> {
info!("Executing {} on inventory {inventory:?}", self.get_name());
info!("Executing DhcpInterpret on inventory {inventory:?}");
self.set_pxe_options(inventory, topology).await?;
self.add_static_entries(inventory, topology).await?;
topology.dhcp_server.commit_config().await?;
topology.commit_config().await?;
Ok(Outcome::new(
InterpretStatus::SUCCESS,

View File

@ -1,33 +1,30 @@
use async_trait::async_trait;
use derive_new::new;
use log::info;
use serde::Serialize;
use crate::{
data::Version,
interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
inventory::Inventory,
score::Score,
topology::{DnsRecord, HAClusterTopology},
topology::{DnsRecord, DnsServer, Topology},
};
#[derive(Debug, new, Clone)]
#[derive(Debug, new, Clone, Serialize)]
pub struct DnsScore {
dns_entries: Vec<DnsRecord>,
register_dhcp_leases: Option<bool>,
}
impl Score for DnsScore {
fn create_interpret(&self) -> Box<dyn Interpret> {
impl<T: Topology + DnsServer> Score<T> for DnsScore {
fn create_interpret(&self) -> Box<dyn Interpret<T>> {
Box::new(DnsInterpret::new(self.clone()))
}
fn name(&self) -> String {
"DnsScore".to_string()
}
fn clone_box(&self) -> Box<dyn Score> {
Box::new(self.clone())
}
}
// https://docs.opnsense.org/manual/dhcp.html#advanced-settings
@ -48,12 +45,11 @@ impl DnsInterpret {
status: InterpretStatus::QUEUED,
}
}
async fn serve_dhcp_entries(
async fn serve_dhcp_entries<T: DnsServer>(
&self,
_inventory: &Inventory,
topology: &HAClusterTopology,
dns: &T,
) -> Result<Outcome, InterpretError> {
let dns = topology.dns_server.clone();
if let Some(register) = self.score.register_dhcp_leases {
dns.register_dhcp_leases(register).await?;
}
@ -64,15 +60,12 @@ impl DnsInterpret {
))
}
async fn ensure_hosts_registered(
async fn ensure_hosts_registered<D: DnsServer>(
&self,
topology: &HAClusterTopology,
dns_server: &D,
) -> Result<Outcome, InterpretError> {
let entries = &self.score.dns_entries;
topology
.dns_server
.ensure_hosts_registered(entries.clone())
.await?;
dns_server.ensure_hosts_registered(entries.clone()).await?;
Ok(Outcome::new(
InterpretStatus::SUCCESS,
@ -85,7 +78,7 @@ impl DnsInterpret {
}
#[async_trait]
impl Interpret for DnsInterpret {
impl<T: Topology + DnsServer> Interpret<T> for DnsInterpret {
fn get_name(&self) -> InterpretName {
InterpretName::OPNSenseDns
}
@ -105,14 +98,17 @@ impl Interpret for DnsInterpret {
async fn execute(
&self,
inventory: &Inventory,
topology: &HAClusterTopology,
topology: &T,
) -> Result<Outcome, InterpretError> {
info!("Executing {} on inventory {inventory:?}", self.get_name());
info!(
"Executing {} on inventory {inventory:?}",
<DnsInterpret as Interpret<T>>::get_name(self)
);
self.serve_dhcp_entries(inventory, topology).await?;
self.ensure_hosts_registered(&topology).await?;
self.ensure_hosts_registered(topology).await?;
topology.dns_server.commit_config().await?;
topology.commit_config().await?;
Ok(Outcome::new(
InterpretStatus::SUCCESS,

View File

@ -1,20 +1,21 @@
use async_trait::async_trait;
use serde::Serialize;
use crate::{
data::Version,
interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
inventory::Inventory,
score::Score,
topology::HAClusterTopology,
topology::Topology,
};
/// Score that always errors. This is only useful for development/testing purposes. It does nothing
/// except returning Err(InterpretError) when interpreted.
#[derive(Debug, Clone)]
#[derive(Debug, Clone, Serialize)]
pub struct ErrorScore;
impl Score for ErrorScore {
fn create_interpret(&self) -> Box<dyn crate::interpret::Interpret> {
impl<T: Topology> Score<T> for ErrorScore {
fn create_interpret(&self) -> Box<dyn crate::interpret::Interpret<T>> {
Box::new(DummyInterpret {
result: Err(InterpretError::new("Error Score default error".to_string())),
status: InterpretStatus::QUEUED,
@ -24,19 +25,15 @@ impl Score for ErrorScore {
fn name(&self) -> String {
"ErrorScore".to_string()
}
fn clone_box(&self) -> Box<dyn Score> {
Box::new(self.clone())
}
}
/// Score that always succeeds. This is only useful for development/testing purposes. It does nothing
/// except returning Ok(Outcome::success) when interpreted.
#[derive(Debug, Clone)]
#[derive(Debug, Clone, Serialize)]
pub struct SuccessScore;
impl Score for SuccessScore {
fn create_interpret(&self) -> Box<dyn crate::interpret::Interpret> {
impl<T: Topology> Score<T> for SuccessScore {
fn create_interpret(&self) -> Box<dyn Interpret<T>> {
Box::new(DummyInterpret {
result: Ok(Outcome::success("SuccessScore default success".to_string())),
status: InterpretStatus::QUEUED,
@ -46,10 +43,6 @@ impl Score for SuccessScore {
fn name(&self) -> String {
"SuccessScore".to_string()
}
fn clone_box(&self) -> Box<dyn Score> {
Box::new(self.clone())
}
}
/// An interpret that only returns the result it is given when built. It does nothing else. Only
@ -61,7 +54,7 @@ struct DummyInterpret {
}
#[async_trait]
impl Interpret for DummyInterpret {
impl<T: Topology> Interpret<T> for DummyInterpret {
fn get_name(&self) -> InterpretName {
InterpretName::Dummy
}
@ -81,7 +74,7 @@ impl Interpret for DummyInterpret {
async fn execute(
&self,
_inventory: &Inventory,
_topology: &HAClusterTopology,
_topology: &T,
) -> Result<Outcome, InterpretError> {
self.result.clone()
}
@ -89,21 +82,17 @@ impl Interpret for DummyInterpret {
/// Score that always panics. This is only useful for development/testing purposes. It does nothing
/// except panic! with an error message when interpreted
#[derive(Debug, Clone)]
#[derive(Debug, Clone, Serialize)]
pub struct PanicScore;
impl Score for PanicScore {
fn create_interpret(&self) -> Box<dyn crate::interpret::Interpret> {
impl<T: Topology> Score<T> for PanicScore {
fn create_interpret(&self) -> Box<dyn Interpret<T>> {
Box::new(PanicInterpret {})
}
fn name(&self) -> String {
"PanicScore".to_string()
}
fn clone_box(&self) -> Box<dyn Score> {
Box::new(self.clone())
}
}
/// An interpret that always panics when executed. Useful for development/testing purposes.
@ -111,7 +100,7 @@ impl Score for PanicScore {
struct PanicInterpret;
#[async_trait]
impl Interpret for PanicInterpret {
impl<T: Topology> Interpret<T> for PanicInterpret {
fn get_name(&self) -> InterpretName {
InterpretName::Panic
}
@ -131,7 +120,7 @@ impl Interpret for PanicInterpret {
async fn execute(
&self,
_inventory: &Inventory,
_topology: &HAClusterTopology,
_topology: &T,
) -> Result<Outcome, InterpretError> {
panic!("Panic interpret always panics when executed")
}

View File

@ -0,0 +1,157 @@
use crate::data::{Id, Version};
use crate::interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome};
use crate::inventory::Inventory;
use crate::score::Score;
use crate::topology::{HelmCommand, Topology};
use async_trait::async_trait;
use helm_wrapper_rs;
use helm_wrapper_rs::blocking::{DefaultHelmExecutor, HelmExecutor};
use log::info;
pub use non_blank_string_rs::NonBlankString;
use serde::Serialize;
use std::collections::HashMap;
use std::path::Path;
use std::str::FromStr;
use temp_file::TempFile;
/// Declarative description of a Helm chart release managed by Harmony.
#[derive(Debug, Clone, Serialize)]
pub struct HelmChartScore {
    /// Namespace to deploy into. `None` currently has no fallback: the
    /// interpret hits a `todo!()` when it is absent, so callers should set it.
    pub namespace: Option<NonBlankString>,
    /// Helm release name, unique within the namespace.
    pub release_name: NonBlankString,
    /// Chart reference passed to the Helm executor.
    pub chart_name: NonBlankString,
    /// Optional chart version pin; forwarded as-is to the executor.
    pub chart_version: Option<NonBlankString>,
    /// Individual value overrides, forwarded to the executor as a map.
    pub values_overrides: Option<HashMap<NonBlankString, String>>,
    /// Full values.yaml content; written to a temp file and passed by path.
    pub values_yaml: Option<String>,
    /// When true, `--create-namespace` is added to the Helm options.
    pub create_namespace: bool,
    /// Whether to run `helm upgrade --install` under the hood or only install when not present
    pub install_only: bool,
}
impl<T: Topology + HelmCommand> Score<T> for HelmChartScore {
    /// Builds the interpret that drives the Helm executor for this score.
    fn create_interpret(&self) -> Box<dyn Interpret<T>> {
        let interpret = HelmChartInterpret {
            score: self.clone(),
        };
        Box::new(interpret)
    }

    /// Human-readable identifier combining release and chart names.
    fn name(&self) -> String {
        format!("{} {} HelmChartScore", self.release_name, self.chart_name)
    }
}
/// Interpret that installs or upgrades the Helm release described by `score`.
#[derive(Debug, Serialize)]
pub struct HelmChartInterpret {
    pub score: HelmChartScore,
}

#[async_trait]
impl<T: Topology + HelmCommand> Interpret<T> for HelmChartInterpret {
    /// Runs the Helm install/upgrade. Neither the inventory nor the topology
    /// is used yet: the executor shells out using the ambient environment.
    async fn execute(
        &self,
        _inventory: &Inventory,
        _topology: &T,
    ) -> Result<Outcome, InterpretError> {
        // NOTE(review): a missing namespace panics via todo!() — callers must
        // provide one until cluster-derived namespaces are implemented.
        let ns = self
            .score
            .namespace
            .as_ref()
            .unwrap_or_else(|| todo!("Get namespace from active kubernetes cluster"));

        // `tf` is declared in the outer scope so the temp file outlives the
        // borrowed `yaml_path` and is only deleted after the Helm call.
        let tf: TempFile;
        let yaml_path: Option<&Path> = match self.score.values_yaml.as_ref() {
            Some(yaml_str) => {
                tf = temp_file::with_contents(yaml_str.as_bytes());
                Some(tf.path())
            }
            None => None,
        };

        let helm_executor = DefaultHelmExecutor::new();
        let mut helm_options = Vec::new();
        if self.score.create_namespace {
            helm_options.push(NonBlankString::from_str("--create-namespace").unwrap());
        }

        // install_only: list existing releases first and short-circuit with a
        // SUCCESS outcome when the release is already present.
        if self.score.install_only {
            let chart_list = match helm_executor.list(Some(ns)) {
                Ok(charts) => charts,
                Err(e) => {
                    return Err(InterpretError::new(format!(
                        "Failed to list scores in namespace {:?} because of error : {}",
                        self.score.namespace, e
                    )));
                }
            };

            if chart_list
                .iter()
                .any(|item| item.name == self.score.release_name.to_string())
            {
                info!(
                    "Release '{}' already exists in namespace '{}'. Skipping installation as install_only is true.",
                    self.score.release_name, ns
                );
                return Ok(Outcome::new(
                    InterpretStatus::SUCCESS,
                    format!(
                        "Helm Chart '{}' already installed to namespace {ns} and install_only=true",
                        self.score.release_name
                    ),
                ));
            } else {
                info!(
                    "Release '{}' not found in namespace '{}'. Proceeding with installation.",
                    self.score.release_name, ns
                );
            }
        }

        // Delegate the actual `helm upgrade --install` to the executor.
        let res = helm_executor.install_or_upgrade(
            &ns,
            &self.score.release_name,
            &self.score.chart_name,
            self.score.chart_version.as_ref(),
            self.score.values_overrides.as_ref(),
            yaml_path,
            Some(&helm_options),
        );

        let status = match res {
            Ok(status) => status,
            Err(err) => return Err(InterpretError::new(err.to_string())),
        };

        // Map Helm's deploy status onto Harmony's interpret outcome:
        // pending states are reported as RUNNING, failure as an error.
        match status {
            helm_wrapper_rs::HelmDeployStatus::Deployed => Ok(Outcome::new(
                InterpretStatus::SUCCESS,
                "Helm Chart deployed".to_string(),
            )),
            helm_wrapper_rs::HelmDeployStatus::PendingInstall => Ok(Outcome::new(
                InterpretStatus::RUNNING,
                "Helm Chart Pending install".to_string(),
            )),
            helm_wrapper_rs::HelmDeployStatus::PendingUpgrade => Ok(Outcome::new(
                InterpretStatus::RUNNING,
                "Helm Chart pending upgrade".to_string(),
            )),
            helm_wrapper_rs::HelmDeployStatus::Failed => Err(InterpretError::new(
                "Failed to install helm chart".to_string(),
            )),
        }
    }

    // Interpret metadata is not implemented yet; calling any of these panics.
    fn get_name(&self) -> InterpretName {
        todo!()
    }

    fn get_version(&self) -> Version {
        todo!()
    }

    fn get_status(&self) -> InterpretStatus {
        todo!()
    }

    fn get_children(&self) -> Vec<Id> {
        todo!()
    }
}

View File

@ -0,0 +1 @@
pub mod chart;

View File

@ -1,31 +1,28 @@
use async_trait::async_trait;
use derive_new::new;
use serde::Serialize;
use crate::{
data::{Id, Version},
interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
inventory::Inventory,
score::Score,
topology::{HAClusterTopology, Url},
topology::{HttpServer, Topology, Url},
};
#[derive(Debug, new, Clone)]
#[derive(Debug, new, Clone, Serialize)]
pub struct HttpScore {
files_to_serve: Url,
}
impl Score for HttpScore {
fn create_interpret(&self) -> Box<dyn Interpret> {
impl<T: Topology + HttpServer> Score<T> for HttpScore {
fn create_interpret(&self) -> Box<dyn Interpret<T>> {
Box::new(HttpInterpret::new(self.clone()))
}
fn name(&self) -> String {
"HttpScore".to_string()
}
fn clone_box(&self) -> Box<dyn Score> {
Box::new(self.clone())
}
}
#[derive(Debug, new, Clone)]
@ -34,13 +31,12 @@ pub struct HttpInterpret {
}
#[async_trait]
impl Interpret for HttpInterpret {
impl<T: Topology + HttpServer> Interpret<T> for HttpInterpret {
async fn execute(
&self,
_inventory: &Inventory,
topology: &HAClusterTopology,
http_server: &T,
) -> Result<Outcome, InterpretError> {
let http_server = &topology.http_server;
http_server.ensure_initialized().await?;
// http_server.set_ip(topology.router.get_gateway()).await?;
http_server.serve_files(&self.score.files_to_serve).await?;

View File

@ -1,44 +1,41 @@
use async_trait::async_trait;
use derive_new::new;
use serde::Serialize;
use crate::{
data::{Id, Version},
interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
inventory::Inventory,
score::Score,
topology::HAClusterTopology,
topology::Topology,
};
#[derive(Debug, new, Clone)]
#[derive(Debug, new, Clone, Serialize)]
pub struct IpxeScore {
//files_to_serve: Url,
}
impl Score for IpxeScore {
fn create_interpret(&self) -> Box<dyn Interpret> {
impl<T: Topology> Score<T> for IpxeScore {
fn create_interpret(&self) -> Box<dyn Interpret<T>> {
Box::new(IpxeInterpret::new(self.clone()))
}
fn name(&self) -> String {
"IpxeScore".to_string()
}
fn clone_box(&self) -> Box<dyn Score> {
Box::new(self.clone())
}
}
#[derive(Debug, new, Clone)]
pub struct IpxeInterpret {
score: IpxeScore,
_score: IpxeScore,
}
#[async_trait]
impl Interpret for IpxeInterpret {
impl<T: Topology> Interpret<T> for IpxeInterpret {
async fn execute(
&self,
_inventory: &Inventory,
_topology: &HAClusterTopology,
_topology: &T,
) -> Result<Outcome, InterpretError> {
/*
let http_server = &topology.http_server;
@ -48,12 +45,7 @@ impl Interpret for IpxeInterpret {
self.score.files_to_serve
)))
*/
Ok(Outcome::success(format!(
"Success running {}",
self.score.name()
)))
//Ok(Outcome::success("Success".to_string()))
todo!();
}
fn get_name(&self) -> InterpretName {

View File

@ -0,0 +1,82 @@
use std::path::PathBuf;
use async_trait::async_trait;
use log::info;
use serde::Serialize;
use crate::{
config::HARMONY_CONFIG_DIR,
data::{Id, Version},
interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
inventory::Inventory,
score::Score,
topology::Topology,
};
/// Score describing a local K3D cluster installation.
#[derive(Debug, Clone, Serialize)]
pub struct K3DInstallationScore {
    /// Directory where the k3d installation assets live.
    pub installation_path: PathBuf,
    /// Name of the k3d cluster to create or reuse.
    pub cluster_name: String,
}
impl Default for K3DInstallationScore {
    /// Defaults to a cluster named "harmony" rooted under the Harmony
    /// configuration directory.
    fn default() -> Self {
        Self {
            installation_path: HARMONY_CONFIG_DIR.join("k3d"),
            cluster_name: "harmony".to_string(),
        }
    }
}
impl<T: Topology> Score<T> for K3DInstallationScore {
    /// Builds the interpret that ensures the k3d cluster is installed.
    fn create_interpret(&self) -> Box<dyn crate::interpret::Interpret<T>> {
        Box::new(K3dInstallationInterpret {
            score: self.clone(),
        })
    }

    /// Human-readable score name, consistent with the other scores in this
    /// crate (e.g. `DhcpScore`, `DnsScore`). Previously `todo!()`, which
    /// panicked whenever the score's name was requested for display.
    fn name(&self) -> String {
        "K3DInstallationScore".to_string()
    }
}
/// Interpret that makes sure the k3d cluster described by `score` exists.
#[derive(Debug)]
pub struct K3dInstallationInterpret {
    score: K3DInstallationScore,
}
#[async_trait]
impl<T: Topology> Interpret<T> for K3dInstallationInterpret {
    /// Ensures k3d is installed and the configured cluster is available.
    /// Inventory and topology are unused: the installation is purely local.
    async fn execute(
        &self,
        _inventory: &Inventory,
        _topology: &T,
    ) -> Result<Outcome, InterpretError> {
        let k3d = k3d_rs::K3d::new(
            self.score.installation_path.clone(),
            Some(self.score.cluster_name.clone()),
        );
        match k3d.ensure_installed().await {
            // The returned client is discarded here; callers that need a
            // client obtain one separately (see K8sAnywhereTopology).
            Ok(_client) => {
                let msg = format!("k3d cluster {} is installed ", self.score.cluster_name);
                info!("{msg}");
                Ok(Outcome::success(msg))
            }
            Err(msg) => Err(InterpretError::new(format!(
                "K3dInstallationInterpret failed to ensure k3d is installed : {msg}"
            ))),
        }
    }
    fn get_name(&self) -> InterpretName {
        InterpretName::K3dInstallation
    }
    // Remaining metadata accessors are not implemented yet; calling them panics.
    fn get_version(&self) -> Version {
        todo!()
    }
    fn get_status(&self) -> InterpretStatus {
        todo!()
    }
    fn get_children(&self) -> Vec<Id> {
        todo!()
    }
}

View File

@ -0,0 +1,2 @@
mod install;
pub use install::*;

View File

@ -1,19 +1,26 @@
use k8s_openapi::api::apps::v1::Deployment;
use serde::Serialize;
use serde_json::json;
use crate::{interpret::Interpret, score::Score};
use crate::{
interpret::Interpret,
score::Score,
topology::{K8sclient, Topology},
};
use super::resource::{K8sResourceInterpret, K8sResourceScore};
#[derive(Debug, Clone)]
#[derive(Debug, Clone, Serialize)]
pub struct K8sDeploymentScore {
pub name: String,
pub image: String,
pub namespace: Option<String>,
pub env_vars: serde_json::Value,
}
impl Score for K8sDeploymentScore {
fn create_interpret(&self) -> Box<dyn Interpret> {
let deployment: Deployment = serde_json::from_value(json!(
impl<T: Topology + K8sclient> Score<T> for K8sDeploymentScore {
fn create_interpret(&self) -> Box<dyn Interpret<T>> {
let deployment = json!(
{
"metadata": {
"name": self.name
@ -33,26 +40,25 @@ impl Score for K8sDeploymentScore {
"spec": {
"containers": [
{
"image": self.image,
"name": self.image
"image": self.image,
"name": self.name,
"imagePullPolicy": "IfNotPresent",
"env": self.env_vars,
}
]
}
}
}
}
))
.unwrap();
);
let deployment: Deployment = serde_json::from_value(deployment).unwrap();
Box::new(K8sResourceInterpret {
score: K8sResourceScore::single(deployment.clone()),
score: K8sResourceScore::single(deployment.clone(), self.namespace.clone()),
})
}
fn name(&self) -> String {
"K8sDeploymentScore".to_string()
}
fn clone_box(&self) -> Box<dyn Score> {
Box::new(self.clone())
}
}

View File

@ -1,2 +1,3 @@
pub mod deployment;
pub mod namespace;
pub mod resource;

View File

@ -0,0 +1,46 @@
use k8s_openapi::api::core::v1::Namespace;
use non_blank_string_rs::NonBlankString;
use serde::Serialize;
use serde_json::json;
use crate::{
interpret::Interpret,
score::Score,
topology::{K8sclient, Topology},
};
/// Score that will eventually create a Kubernetes Namespace.
///
/// NOTE(review): this is a work in progress — `create_interpret` always
/// hits a `todo!()` because cluster-scoped resources are not supported yet.
#[derive(Debug, Clone, Serialize)]
pub struct K8sNamespaceScore {
    /// Name of the namespace to create; `None` is not handled yet.
    pub name: Option<NonBlankString>,
}
impl<T: Topology + K8sclient> Score<T> for K8sNamespaceScore {
    fn create_interpret(&self) -> Box<dyn Interpret<T>> {
        let name = match &self.name {
            Some(name) => name,
            None => todo!(
                "Return NoOp interpret when no namespace specified or something that makes sense"
            ),
        };
        // Built only to validate the JSON shape; unused until cluster-scoped
        // resources are supported by K8sResourceInterpret.
        let _namespace: Namespace = serde_json::from_value(json!(
            {
                "apiVersion": "v1",
                "kind": "Namespace",
                "metadata": {
                    "name": name,
                },
            }
        ))
        .unwrap();
        todo!(
            "We currently only support namespaced ressources (see Scope = NamespaceResourceScope)"
        );
        // Box::new(K8sResourceInterpret {
        //     score: K8sResourceScore::single(namespace.clone()),
        // })
    }
    fn name(&self) -> String {
        "K8sNamespaceScore".to_string()
    }
}

View File

@ -1,25 +1,27 @@
use async_trait::async_trait;
use k8s_openapi::NamespaceResourceScope;
use kube::Resource;
use serde::de::DeserializeOwned;
use serde::{Serialize, de::DeserializeOwned};
use crate::{
data::{Id, Version},
interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
inventory::Inventory,
score::Score,
topology::HAClusterTopology,
topology::{K8sclient, Topology},
};
#[derive(Debug, Clone)]
#[derive(Debug, Clone, Serialize)]
pub struct K8sResourceScore<K: Resource + std::fmt::Debug> {
pub resource: Vec<K>,
pub namespace: Option<String>,
}
impl<K: Resource + std::fmt::Debug> K8sResourceScore<K> {
pub fn single(resource: K) -> Self {
pub fn single(resource: K, namespace: Option<String>) -> Self {
Self {
resource: vec![resource],
namespace,
}
}
}
@ -34,21 +36,18 @@ impl<
+ 'static
+ Send
+ Clone,
> Score for K8sResourceScore<K>
T: Topology,
> Score<T> for K8sResourceScore<K>
where
<K as kube::Resource>::DynamicType: Default,
{
fn create_interpret(&self) -> Box<dyn Interpret> {
fn create_interpret(&self) -> Box<dyn Interpret<T>> {
todo!()
}
fn name(&self) -> String {
"K8sResourceScore".to_string()
}
fn clone_box(&self) -> Box<dyn Score> {
Box::new(self.clone())
}
}
#[derive(Debug)]
@ -66,20 +65,21 @@ impl<
+ Default
+ Send
+ Sync,
> Interpret for K8sResourceInterpret<K>
T: Topology + K8sclient,
> Interpret<T> for K8sResourceInterpret<K>
where
<K as kube::Resource>::DynamicType: Default,
{
async fn execute(
&self,
_inventory: &Inventory,
topology: &HAClusterTopology,
topology: &T,
) -> Result<Outcome, InterpretError> {
topology
.oc_client()
.k8s_client()
.await
.expect("Environment should provide enough information to instanciate a client")
.apply_namespaced(&self.score.resource)
.apply_namespaced(&self.score.resource, self.score.namespace.as_deref())
.await?;
Ok(Outcome::success(

385
harmony/src/modules/lamp.rs Normal file
View File

@ -0,0 +1,385 @@
use convert_case::{Case, Casing};
use dockerfile_builder::instruction::{CMD, COPY, ENV, EXPOSE, FROM, RUN, WORKDIR};
use dockerfile_builder::{Dockerfile, instruction_builder::EnvBuilder};
use non_blank_string_rs::NonBlankString;
use serde_json::json;
use std::collections::HashMap;
use std::fs;
use std::path::{Path, PathBuf};
use std::str::FromStr;
use async_trait::async_trait;
use log::{debug, info};
use serde::Serialize;
use crate::config::{REGISTRY_PROJECT, REGISTRY_URL};
use crate::topology::HelmCommand;
use crate::{
data::{Id, Version},
interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
inventory::Inventory,
modules::k8s::deployment::K8sDeploymentScore,
score::Score,
topology::{K8sclient, Topology, Url},
};
use super::helm::chart::HelmChartScore;
#[derive(Debug, Clone, Serialize)]
pub struct LAMPScore {
    // Application name; used to derive image, Helm release, and secret names.
    pub name: String,
    // Public URL the application will be served under.
    pub domain: Url,
    // Build/deploy options (project root, SSL, DB sizing).
    pub config: LAMPConfig,
    // PHP version; only major.minor is used to pick the base image.
    pub php_version: Version,
}
#[derive(Debug, Clone, Serialize)]
pub struct LAMPConfig {
    // Directory containing the PHP application sources; used as the Docker
    // build context and receives the generated Dockerfile/php.ini.
    pub project_root: PathBuf,
    pub ssl_enabled: bool,
    // Optional MariaDB persistence size (Helm `primary.persistence.size`);
    // chart default is used when `None`.
    pub database_size: Option<String>,
}
impl Default for LAMPConfig {
fn default() -> Self {
LAMPConfig {
project_root: Path::new("./src").to_path_buf(),
ssl_enabled: true,
database_size: None,
}
}
}
impl<T: Topology + K8sclient + HelmCommand> Score<T> for LAMPScore {
    // The interpret currently deploys into a fixed "harmony-lamp" namespace.
    fn create_interpret(&self) -> Box<dyn Interpret<T>> {
        Box::new(LAMPInterpret {
            score: self.clone(),
            namespace: "harmony-lamp".to_string(),
        })
    }

    fn name(&self) -> String {
        "LampScore".to_string()
    }
}
#[derive(Debug)]
pub struct LAMPInterpret {
    // The score being interpreted (cloned in `create_interpret`).
    score: LAMPScore,
    // Target Kubernetes namespace; must be non-blank (see `get_namespace`).
    namespace: String,
}
#[async_trait]
impl<T: Topology + K8sclient + HelmCommand> Interpret<T> for LAMPInterpret {
    // Orchestrates the full LAMP rollout: build the Docker image, push it to
    // the registry, deploy MariaDB via Helm, then deploy the PHP/Apache
    // container.
    //
    // NOTE(review): still work-in-progress — execution unconditionally ends
    // in `todo!()` after the deployment, so it can never return `Ok`.
    async fn execute(
        &self,
        inventory: &Inventory,
        topology: &T,
    ) -> Result<Outcome, InterpretError> {
        let image_name = match self.build_docker_image() {
            Ok(name) => name,
            Err(e) => {
                return Err(InterpretError::new(format!(
                    "Could not build LAMP docker image {e}"
                )));
            }
        };
        info!("LAMP docker image built {image_name}");
        let remote_name = match self.push_docker_image(&image_name) {
            Ok(remote_name) => remote_name,
            Err(e) => {
                return Err(InterpretError::new(format!(
                    "Could not push docker image {e}"
                )));
            }
        };
        info!("LAMP docker image pushed to {remote_name}");
        info!("Deploying database");
        self.deploy_database(inventory, topology).await?;
        // The Bitnami chart exposes its root password in a secret named
        // "<release>-mariadb"; the release is "<score name>-database"
        // (see `deploy_database`).
        let base_name = self.score.name.to_case(Case::Kebab);
        let secret_name = format!("{}-database-mariadb", base_name);
        let deployment_score = K8sDeploymentScore {
            // NOTE(review): this uses the *score type* name ("LampScore" ->
            // "lamp-score"), while `secret_name` above derives from
            // `self.score.name` — confirm the inconsistency is intentional.
            name: <LAMPScore as Score<T>>::name(&self.score).to_case(Case::Kebab),
            image: remote_name,
            namespace: self.get_namespace().map(|nbs| nbs.to_string()),
            env_vars: json!([
                {
                    "name": "MYSQL_PASSWORD",
                    "valueFrom": {
                        "secretKeyRef": {
                            "name": secret_name,
                            "key": "mariadb-root-password"
                        }
                    }
                },
                {
                    "name": "MYSQL_HOST",
                    "value": secret_name
                },
            ]),
        };
        info!("Deploying score {deployment_score:#?}");
        deployment_score
            .create_interpret()
            .execute(inventory, topology)
            .await?;
        info!("LAMP deployment_score {deployment_score:?}");
        todo!("1. [x] Use HelmChartScore to deploy mariadb
2. [x] Use deploymentScore to deploy lamp docker container
3. for remote clusters, push the image to some registry (use nationtech's for demos? push to the cluster's registry?)");
    }

    fn get_name(&self) -> InterpretName {
        todo!()
    }

    fn get_version(&self) -> Version {
        todo!()
    }

    fn get_status(&self) -> InterpretStatus {
        todo!()
    }

    fn get_children(&self) -> Vec<Id> {
        todo!()
    }
}
impl LAMPInterpret {
    /// Deploy a Bitnami MariaDB instance via Helm as the database tier.
    ///
    /// Applies `config.database_size` as the `primary.persistence.size`
    /// chart override when provided; otherwise chart defaults apply.
    /// The release is named "<score name>-database".
    async fn deploy_database<T: Topology + K8sclient + HelmCommand>(
        &self,
        inventory: &Inventory,
        topology: &T,
    ) -> Result<Outcome, InterpretError> {
        let mut values_overrides = HashMap::new();
        if let Some(database_size) = self.score.config.database_size.clone() {
            values_overrides.insert(
                NonBlankString::from_str("primary.persistence.size").unwrap(),
                database_size,
            );
        }
        let score = HelmChartScore {
            namespace: self.get_namespace(),
            release_name: NonBlankString::from_str(&format!("{}-database", self.score.name))
                .unwrap(),
            chart_name: NonBlankString::from_str(
                "oci://registry-1.docker.io/bitnamicharts/mariadb",
            )
            .unwrap(),
            chart_version: None,
            values_overrides: Some(values_overrides),
            create_namespace: true,
            install_only: true,
            values_yaml: None,
        };
        score.create_interpret().execute(inventory, topology).await
    }

    /// Generate a PHP + Apache Dockerfile for `score`, write it to
    /// `<project_root>/Dockerfile`, and return that path.
    ///
    /// Side effect: also writes `<project_root>/docker-php.ini`, which the
    /// generated Dockerfile COPYs into the image.
    fn build_dockerfile(&self, score: &LAMPScore) -> Result<PathBuf, Box<dyn std::error::Error>> {
        let mut dockerfile = Dockerfile::new();

        // Use the PHP version from the score to determine the base image
        let php_version = score.php_version.to_string();
        // Only major.minor selects the image tag (e.g. "8.3" from "8.3.1").
        let php_major_minor = php_version
            .split('.')
            .take(2)
            .collect::<Vec<&str>>()
            .join(".");

        // Base image selection - using official PHP image with Apache
        dockerfile.push(FROM::from(format!("php:{}-apache", php_major_minor)));

        // Set environment variables for PHP configuration
        dockerfile.push(ENV::from("PHP_MEMORY_LIMIT=256M"));
        dockerfile.push(ENV::from("PHP_MAX_EXECUTION_TIME=30"));
        dockerfile.push(
            EnvBuilder::builder()
                .key("PHP_ERROR_REPORTING")
                .value("\"E_ERROR | E_WARNING | E_PARSE\"")
                .build()
                .unwrap(),
        );

        // Install necessary PHP extensions and dependencies
        dockerfile.push(RUN::from(
            "apt-get update && \
    apt-get install -y --no-install-recommends \
    libfreetype6-dev \
    libjpeg62-turbo-dev \
    libpng-dev \
    libzip-dev \
    unzip \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/*",
        ));
        dockerfile.push(RUN::from(
            "docker-php-ext-configure gd --with-freetype --with-jpeg && \
    docker-php-ext-install -j$(nproc) \
    gd \
    mysqli \
    pdo_mysql \
    zip \
    opcache",
        ));

        // Copy PHP configuration
        dockerfile.push(RUN::from("mkdir -p /usr/local/etc/php/conf.d/"));

        // Create and copy a custom PHP configuration
        // NOTE(review): ${VAR} substitution in php.ini values relies on the
        // ini parser expanding environment variables — confirm the base
        // image supports this.
        let php_config = r#"
memory_limit = ${PHP_MEMORY_LIMIT}
max_execution_time = ${PHP_MAX_EXECUTION_TIME}
error_reporting = ${PHP_ERROR_REPORTING}
display_errors = Off
log_errors = On
error_log = /dev/stderr
date.timezone = UTC
; Opcache configuration for production
opcache.enable=1
opcache.memory_consumption=128
opcache.interned_strings_buffer=8
opcache.max_accelerated_files=4000
opcache.revalidate_freq=2
opcache.fast_shutdown=1
"#;

        // Save this configuration to a temporary file within the project root
        let config_path = Path::new(&score.config.project_root).join("docker-php.ini");
        fs::write(&config_path, php_config)?;

        // Reference the file within the Docker context (where the build runs)
        dockerfile.push(COPY::from(
            "docker-php.ini /usr/local/etc/php/conf.d/docker-php.ini",
        ));

        // Security hardening
        dockerfile.push(RUN::from(
            "a2enmod headers && \
    a2enmod rewrite && \
    sed -i 's/ServerTokens OS/ServerTokens Prod/' /etc/apache2/conf-enabled/security.conf && \
    sed -i 's/ServerSignature On/ServerSignature Off/' /etc/apache2/conf-enabled/security.conf"
        ));

        // Set env vars
        dockerfile.push(RUN::from(
            "echo 'PassEnv MYSQL_PASSWORD' >> /etc/apache2/sites-available/000-default.conf \
    && echo 'PassEnv MYSQL_USER' >> /etc/apache2/sites-available/000-default.conf \
    && echo 'PassEnv MYSQL_HOST' >> /etc/apache2/sites-available/000-default.conf",
        ));

        // Create a dedicated user for running Apache
        dockerfile.push(RUN::from(
            "groupadd -g 1000 appuser && \
    useradd -u 1000 -g appuser -m -s /bin/bash appuser && \
    chown -R appuser:appuser /var/www/html",
        ));

        // Set the working directory
        dockerfile.push(WORKDIR::from("/var/www/html"));

        // Copy application code from the project root to the container
        // Note: In Dockerfile, the COPY context is relative to the build context
        // We'll handle the actual context in the build_docker_image method
        dockerfile.push(COPY::from(". /var/www/html"));

        // Fix permissions
        dockerfile.push(RUN::from("chown -R appuser:appuser /var/www/html"));

        // Expose Apache port
        dockerfile.push(EXPOSE::from("80/tcp"));

        // Set the default command
        dockerfile.push(CMD::from("apache2-foreground"));

        // Save the Dockerfile to disk in the project root
        let dockerfile_path = Path::new(&score.config.project_root).join("Dockerfile");
        fs::write(&dockerfile_path, dockerfile.to_string())?;

        Ok(dockerfile_path)
    }

    /// Map a non-success process `output` to an error containing `msg` plus
    /// the process stderr; `Ok(())` otherwise.
    fn check_output(
        &self,
        output: &std::process::Output,
        msg: &str,
    ) -> Result<(), Box<dyn std::error::Error>> {
        if !output.status.success() {
            return Err(format!("{msg}: {}", String::from_utf8_lossy(&output.stderr)).into());
        }
        Ok(())
    }

    /// Tag `image_name` for the configured registry
    /// (`REGISTRY_URL/REGISTRY_PROJECT/<image_name>`) and push it using the
    /// local `docker` CLI. Returns the fully qualified remote tag.
    fn push_docker_image(&self, image_name: &str) -> Result<String, Box<dyn std::error::Error>> {
        let full_tag = format!("{}/{}/{}", *REGISTRY_URL, *REGISTRY_PROJECT, &image_name);
        let output = std::process::Command::new("docker")
            .args(["tag", image_name, &full_tag])
            .output()?;
        self.check_output(&output, "Tagging docker image failed")?;
        debug!(
            "docker tag output {} {}",
            String::from_utf8_lossy(&output.stdout),
            String::from_utf8_lossy(&output.stderr)
        );
        let output = std::process::Command::new("docker")
            .args(["push", &full_tag])
            .output()?;
        self.check_output(&output, "Pushing docker image failed")?;
        debug!(
            "docker push output {} {}",
            String::from_utf8_lossy(&output.stdout),
            String::from_utf8_lossy(&output.stderr)
        );
        Ok(full_tag)
    }

    /// Build the LAMP Docker image with the local `docker` CLI, using the
    /// Dockerfile generated by `build_dockerfile` and `project_root` as the
    /// build context. Returns the local image name `<score name>-php-apache`.
    pub fn build_docker_image(&self) -> Result<String, Box<dyn std::error::Error>> {
        info!("Generating Dockerfile");
        let dockerfile = self.build_dockerfile(&self.score)?;
        info!(
            "Building Docker image with file {} from root {}",
            dockerfile.to_string_lossy(),
            self.score.config.project_root.to_string_lossy()
        );
        let image_name = format!("{}-php-apache", self.score.name);
        let project_root = &self.score.config.project_root;
        let output = std::process::Command::new("docker")
            .args([
                "build",
                "--file",
                dockerfile.to_str().unwrap(),
                "-t",
                &image_name,
                project_root.to_str().unwrap(),
            ])
            .output()?;
        if !output.status.success() {
            return Err(format!(
                "Failed to build Docker image: {}",
                String::from_utf8_lossy(&output.stderr)
            )
            .into());
        }
        Ok(image_name)
    }

    // `namespace` is set by `create_interpret` to a non-empty literal, so
    // this `unwrap` cannot fail in practice.
    fn get_namespace(&self) -> Option<NonBlankString> {
        Some(NonBlankString::from_str(&self.namespace).unwrap())
    }
}

View File

@ -1,15 +1,16 @@
use async_trait::async_trait;
use log::info;
use serde::Serialize;
use crate::{
data::{Id, Version},
interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
inventory::Inventory,
score::Score,
topology::{HAClusterTopology, LoadBalancerService},
topology::{LoadBalancer, LoadBalancerService, Topology},
};
#[derive(Debug, Clone)]
#[derive(Debug, Clone, Serialize)]
pub struct LoadBalancerScore {
pub public_services: Vec<LoadBalancerService>,
pub private_services: Vec<LoadBalancerService>,
@ -19,18 +20,14 @@ pub struct LoadBalancerScore {
// uuid?
}
impl Score for LoadBalancerScore {
fn create_interpret(&self) -> Box<dyn Interpret> {
impl<T: Topology + LoadBalancer> Score<T> for LoadBalancerScore {
fn create_interpret(&self) -> Box<dyn Interpret<T>> {
Box::new(LoadBalancerInterpret::new(self.clone()))
}
fn name(&self) -> String {
"LoadBalancerScore".to_string()
}
fn clone_box(&self) -> Box<dyn Score> {
Box::new(self.clone())
}
}
#[derive(Debug)]
@ -51,37 +48,32 @@ impl LoadBalancerInterpret {
}
#[async_trait]
impl Interpret for LoadBalancerInterpret {
impl<T: Topology + LoadBalancer> Interpret<T> for LoadBalancerInterpret {
async fn execute(
&self,
_inventory: &Inventory,
topology: &HAClusterTopology,
load_balancer: &T,
) -> Result<Outcome, InterpretError> {
info!(
"Making sure Load Balancer is initialized: {:?}",
topology.load_balancer.ensure_initialized().await?
load_balancer.ensure_initialized().await?
);
for service in self.score.public_services.iter() {
info!("Ensuring service exists {service:?}");
topology
.load_balancer
.ensure_service_exists(service)
.await?;
load_balancer.ensure_service_exists(service).await?;
}
for service in self.score.private_services.iter() {
info!("Ensuring private service exists {service:?}");
topology
.load_balancer
.ensure_service_exists(service)
.await?;
load_balancer.ensure_service_exists(service).await?;
}
info!("Applying load balancer configuration");
topology.load_balancer.commit_config().await?;
load_balancer.commit_config().await?;
info!("Making a full reload and restart of haproxy");
topology.load_balancer.reload_restart().await?;
load_balancer.reload_restart().await?;
Ok(Outcome::success(format!(
"Load balancer successfully configured {} services",
self.score.public_services.len() + self.score.private_services.len()

View File

@ -1,8 +1,11 @@
pub mod dhcp;
pub mod dns;
pub mod dummy;
pub mod helm;
pub mod http;
pub mod k3d;
pub mod k8s;
pub mod lamp;
pub mod load_balancer;
pub mod okd;
pub mod opnsense;

View File

@ -1,12 +1,14 @@
use serde::Serialize;
use crate::{
interpret::Interpret,
inventory::Inventory,
modules::dhcp::DhcpScore,
score::Score,
topology::{HAClusterTopology, HostBinding},
topology::{DhcpServer, HAClusterTopology, HostBinding, Topology},
};
#[derive(Debug, Clone)]
#[derive(Debug, Clone, Serialize)]
pub struct OKDBootstrapDhcpScore {
dhcp_score: DhcpScore,
}
@ -50,16 +52,12 @@ impl OKDBootstrapDhcpScore {
}
}
impl Score for OKDBootstrapDhcpScore {
fn create_interpret(&self) -> Box<dyn Interpret> {
impl<T: Topology + DhcpServer> Score<T> for OKDBootstrapDhcpScore {
fn create_interpret(&self) -> Box<dyn Interpret<T>> {
self.dhcp_score.create_interpret()
}
fn name(&self) -> String {
"OKDBootstrapDhcpScore".to_string()
}
fn clone_box(&self) -> Box<dyn Score> {
Box::new(self.clone())
}
}

View File

@ -1,16 +1,18 @@
use std::net::SocketAddr;
use serde::Serialize;
use crate::{
interpret::Interpret,
modules::load_balancer::LoadBalancerScore,
score::Score,
topology::{
BackendServer, HAClusterTopology, HealthCheck, HttpMethod, HttpStatusCode,
LoadBalancerService,
BackendServer, HAClusterTopology, HealthCheck, HttpMethod, HttpStatusCode, LoadBalancer,
LoadBalancerService, Topology,
},
};
#[derive(Debug, Clone)]
#[derive(Debug, Clone, Serialize)]
pub struct OKDBootstrapLoadBalancerScore {
load_balancer_score: LoadBalancerScore,
}
@ -69,16 +71,12 @@ impl OKDBootstrapLoadBalancerScore {
}
}
impl Score for OKDBootstrapLoadBalancerScore {
fn create_interpret(&self) -> Box<dyn Interpret> {
impl<T: Topology + LoadBalancer> Score<T> for OKDBootstrapLoadBalancerScore {
fn create_interpret(&self) -> Box<dyn Interpret<T>> {
self.load_balancer_score.create_interpret()
}
fn name(&self) -> String {
"OKDBootstrapLoadBalancerScore".to_string()
}
fn clone_box(&self) -> Box<dyn Score> {
Box::new(self.clone())
}
}

View File

@ -1,12 +1,14 @@
use serde::Serialize;
use crate::{
interpret::Interpret,
inventory::Inventory,
modules::dhcp::DhcpScore,
score::Score,
topology::{HAClusterTopology, HostBinding},
topology::{DhcpServer, HAClusterTopology, HostBinding, Topology},
};
#[derive(Debug, Clone)]
#[derive(Debug, Clone, Serialize)]
pub struct OKDDhcpScore {
dhcp_score: DhcpScore,
}
@ -60,16 +62,12 @@ impl OKDDhcpScore {
}
}
impl Score for OKDDhcpScore {
fn create_interpret(&self) -> Box<dyn Interpret> {
impl<T: Topology + DhcpServer> Score<T> for OKDDhcpScore {
fn create_interpret(&self) -> Box<dyn Interpret<T>> {
self.dhcp_score.create_interpret()
}
fn name(&self) -> String {
"OKDDhcpScore".to_string()
}
fn clone_box(&self) -> Box<dyn Score> {
Box::new(self.clone())
}
}

View File

@ -1,11 +1,13 @@
use serde::Serialize;
use crate::{
interpret::Interpret,
modules::dns::DnsScore,
score::Score,
topology::{DnsRecord, DnsRecordType, HAClusterTopology},
topology::{DnsRecord, DnsRecordType, DnsServer, HAClusterTopology, Topology},
};
#[derive(Debug, Clone)]
#[derive(Debug, Clone, Serialize)]
pub struct OKDDnsScore {
dns_score: DnsScore,
}
@ -40,16 +42,12 @@ impl OKDDnsScore {
}
}
impl Score for OKDDnsScore {
fn create_interpret(&self) -> Box<dyn Interpret> {
impl<T: Topology + DnsServer> Score<T> for OKDDnsScore {
fn create_interpret(&self) -> Box<dyn Interpret<T>> {
self.dns_score.create_interpret()
}
fn name(&self) -> String {
"OKDDnsScore".to_string()
}
fn clone_box(&self) -> Box<dyn Score> {
Box::new(self.clone())
}
}

View File

@ -1,16 +1,24 @@
use std::net::SocketAddr;
use serde::Serialize;
use crate::{
interpret::Interpret,
modules::load_balancer::LoadBalancerScore,
score::Score,
topology::{
BackendServer, HAClusterTopology, HealthCheck, HttpMethod, HttpStatusCode,
LoadBalancerService,
BackendServer, HAClusterTopology, HealthCheck, HttpMethod, HttpStatusCode, LoadBalancer,
LoadBalancerService, Topology,
},
};
#[derive(Debug, Clone)]
impl std::fmt::Display for OKDLoadBalancerScore {
fn fmt(&self, _f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
todo!()
}
}
#[derive(Debug, Clone, Serialize)]
pub struct OKDLoadBalancerScore {
load_balancer_score: LoadBalancerScore,
}
@ -80,16 +88,12 @@ impl OKDLoadBalancerScore {
}
}
impl Score for OKDLoadBalancerScore {
fn create_interpret(&self) -> Box<dyn Interpret> {
impl<T: Topology + LoadBalancer> Score<T> for OKDLoadBalancerScore {
fn create_interpret(&self) -> Box<dyn Interpret<T>> {
self.load_balancer_score.create_interpret()
}
fn name(&self) -> String {
"OKDLoadBalancerScore".to_string()
}
fn clone_box(&self) -> Box<dyn Score> {
Box::new(self.clone())
}
}

View File

@ -1,16 +1,16 @@
use crate::{data::Version, score::Score};
use crate::data::Version;
#[derive(Debug, Clone)]
pub struct OKDUpgradeScore {
current_version: Version,
target_version: Version,
_current_version: Version,
_target_version: Version,
}
impl OKDUpgradeScore {
pub fn new() -> Self {
Self {
current_version: Version::from("4.17.0-okd-scos.0").unwrap(),
target_version: Version::from("").unwrap(),
_current_version: Version::from("4.17.0-okd-scos.0").unwrap(),
_target_version: Version::from("").unwrap(),
}
}
}

View File

@ -1,6 +1,4 @@
mod shell;
mod upgrade;
pub use shell::*;
pub use upgrade::*;

View File

@ -1,6 +1,7 @@
use std::sync::Arc;
use async_trait::async_trait;
use serde::Serialize;
use tokio::sync::RwLock;
use crate::{
@ -8,17 +9,34 @@ use crate::{
interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
inventory::Inventory,
score::Score,
topology::HAClusterTopology,
topology::Topology,
};
#[derive(Debug, Clone)]
pub struct OPNsenseShellCommandScore {
// TODO I am pretty sure we should not hold a direct reference to the
// opnsense_config::Config here.
// This causes a problem with serialization but also could cause many more problems as this
// is mixing concerns of configuration (which is the Responsibility of Scores to define)
// and state/execution which is the responsibility of interprets via topologies to manage
//
// I feel like a better solution would be for this Score/Interpret to require
// Topology + OPNSenseShell trait bindings
pub opnsense: Arc<RwLock<opnsense_config::Config>>,
pub command: String,
}
impl Score for OPNsenseShellCommandScore {
fn create_interpret(&self) -> Box<dyn Interpret> {
impl Serialize for OPNsenseShellCommandScore {
fn serialize<S>(&self, _serializer: S) -> Result<S::Ok, S::Error>
where
S: serde::Serializer,
{
todo!("See comment about moving opnsense_config::Config outside the score")
}
}
impl<T: Topology> Score<T> for OPNsenseShellCommandScore {
fn create_interpret(&self) -> Box<dyn Interpret<T>> {
Box::new(OPNsenseShellInterpret {
status: InterpretStatus::QUEUED,
score: self.clone(),
@ -28,10 +46,6 @@ impl Score for OPNsenseShellCommandScore {
fn name(&self) -> String {
"OPNSenseShellCommandScore".to_string()
}
fn clone_box(&self) -> Box<dyn Score> {
Box::new(self.clone())
}
}
#[derive(Debug)]
@ -41,11 +55,11 @@ pub struct OPNsenseShellInterpret {
}
#[async_trait]
impl Interpret for OPNsenseShellInterpret {
impl<T: Topology> Interpret<T> for OPNsenseShellInterpret {
async fn execute(
&self,
_inventory: &Inventory,
_topology: &HAClusterTopology,
_topology: &T,
) -> Result<Outcome, InterpretError> {
let output = self
.score

View File

@ -1,10 +1,12 @@
use std::sync::Arc;
use serde::Serialize;
use tokio::sync::RwLock;
use crate::{
interpret::{Interpret, InterpretStatus},
score::Score,
topology::Topology,
};
use super::{OPNsenseShellCommandScore, OPNsenseShellInterpret};
@ -14,8 +16,17 @@ pub struct OPNSenseLaunchUpgrade {
pub opnsense: Arc<RwLock<opnsense_config::Config>>,
}
impl Score for OPNSenseLaunchUpgrade {
fn create_interpret(&self) -> Box<dyn Interpret> {
impl Serialize for OPNSenseLaunchUpgrade {
fn serialize<S>(&self, _serializer: S) -> Result<S::Ok, S::Error>
where
S: serde::Serializer,
{
todo!("See comment in OPNSenseShellCommandScore and apply the same idea here")
}
}
impl<T: Topology> Score<T> for OPNSenseLaunchUpgrade {
fn create_interpret(&self) -> Box<dyn Interpret<T>> {
let score = OPNsenseShellCommandScore {
opnsense: self.opnsense.clone(),
command: "/usr/local/opnsense/scripts/firmware/update.sh".to_string(),
@ -30,8 +41,4 @@ impl Score for OPNSenseLaunchUpgrade {
fn name(&self) -> String {
"OPNSenseLaunchUpgrade".to_string()
}
fn clone_box(&self) -> Box<dyn Score> {
Box::new(self.clone())
}
}

View File

@ -1,31 +1,28 @@
use async_trait::async_trait;
use derive_new::new;
use serde::Serialize;
use crate::{
data::{Id, Version},
interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
inventory::Inventory,
score::Score,
topology::{HAClusterTopology, Url},
topology::{Router, TftpServer, Topology, Url},
};
#[derive(Debug, new, Clone)]
#[derive(Debug, new, Clone, Serialize)]
pub struct TftpScore {
files_to_serve: Url,
}
impl Score for TftpScore {
fn create_interpret(&self) -> Box<dyn Interpret> {
impl<T: Topology + TftpServer + Router> Score<T> for TftpScore {
fn create_interpret(&self) -> Box<dyn Interpret<T>> {
Box::new(TftpInterpret::new(self.clone()))
}
fn name(&self) -> String {
"TftpScore".to_string()
}
fn clone_box(&self) -> Box<dyn Score> {
Box::new(self.clone())
}
}
#[derive(Debug, new, Clone)]
@ -34,18 +31,17 @@ pub struct TftpInterpret {
}
#[async_trait]
impl Interpret for TftpInterpret {
impl<T: Topology + TftpServer + Router> Interpret<T> for TftpInterpret {
async fn execute(
&self,
_inventory: &Inventory,
topology: &HAClusterTopology,
topology: &T,
) -> Result<Outcome, InterpretError> {
let tftp_server = &topology.tftp_server;
tftp_server.ensure_initialized().await?;
tftp_server.set_ip(topology.router.get_gateway()).await?;
tftp_server.serve_files(&self.score.files_to_serve).await?;
tftp_server.commit_config().await?;
tftp_server.reload_restart().await?;
topology.ensure_initialized().await?;
topology.set_ip(topology.get_gateway()).await?;
topology.serve_files(&self.score.files_to_serve).await?;
topology.commit_config().await?;
topology.reload_restart().await?;
Ok(Outcome::success(format!(
"TFTP Server running and serving files from {}",
self.score.files_to_serve

20
harmony_cli/Cargo.toml Normal file
View File

@ -0,0 +1,20 @@
[package]
name = "harmony_cli"
edition = "2024"
version.workspace = true
readme.workspace = true
license.workspace = true
[dependencies]
assert_cmd = "2.0.17"
clap = { version = "4.5.35", features = ["derive"] }
harmony = { path = "../harmony" }
harmony_tui = { path = "../harmony_tui", optional = true }
inquire.workspace = true
tokio.workspace = true
env_logger.workspace = true
[features]
default = ["tui"]
tui = ["dep:harmony_tui"]

313
harmony_cli/src/lib.rs Normal file
View File

@ -0,0 +1,313 @@
use clap::Parser;
use clap::builder::ArgPredicate;
use harmony;
use harmony::{score::Score, topology::Topology};
use inquire::Confirm;
#[cfg(feature = "tui")]
use harmony_tui;
// CLI arguments for running registered scores.
//
// NOTE: the `///` doc comments on the fields below double as clap help text
// (they are part of the program's runtime output), so they are intentionally
// left untouched; explanatory notes use `//` comments instead.
#[derive(Parser, Debug)]
#[command(version, about, long_about = None)]
pub struct Args {
    /// Run score(s) without prompt
    #[arg(short, long, default_value_t = false, conflicts_with = "interactive")]
    yes: bool,

    /// Filter query
    #[arg(short, long, conflicts_with = "interactive")]
    filter: Option<String>,

    /// Run interactive TUI or not
    #[arg(short, long, default_value_t = false)]
    interactive: bool,

    /// Run all or nth, defaults to all
    // `all` defaults to true, but flips to false automatically whenever
    // --number is passed (via `default_value_if`), so the two stay coherent
    // even though they conflict when given explicitly.
    #[arg(
        short,
        long,
        default_value_t = true,
        default_value_if("number", ArgPredicate::IsPresent, "false"),
        conflicts_with = "number",
        conflicts_with = "interactive"
    )]
    all: bool,

    /// Run nth matching, zero indexed
    #[arg(short, long, default_value_t = 0, conflicts_with = "interactive")]
    number: usize,

    /// list scores, will also be affected by run filter
    #[arg(short, long, default_value_t = false, conflicts_with = "interactive")]
    list: bool,
}
/// Select the scores to operate on from the maestro's registry.
///
/// Scores whose name contains `filter` (all of them when `filter` is `None`)
/// are cloned out; when `all` is false, only the `number`-th match is kept
/// (empty result when the index is out of range).
fn maestro_scores_filter<T: Topology>(
    maestro: &harmony::maestro::Maestro<T>,
    all: bool,
    filter: Option<String>,
    number: usize,
) -> Vec<Box<dyn Score<T>>> {
    let scores = maestro.scores();
    let scores_read = scores.read().expect("Should be able to read scores");

    // A `None` filter matches everything.
    let matches = |name: &str| filter.as_deref().map_or(true, |f| name.contains(f));
    let filtered: Vec<Box<dyn Score<T>>> = scores_read
        .iter()
        .filter(|s| matches(&s.name()))
        .map(|s| s.clone_box())
        .collect();

    if all {
        filtered
    } else {
        filtered
            .get(number)
            .map(|s| vec![s.clone_box()])
            .unwrap_or_default()
    }
}
// TODO: consider adding doctest for this function
/// Render the scores as "\n<index>: <name>" lines for display/prompting.
fn list_scores_with_index<T: Topology>(scores_vec: &Vec<Box<dyn Score<T>>>) -> String {
    scores_vec
        .iter()
        .enumerate()
        .map(|(index, score)| format!("\n{index}: {}", score.name()))
        .collect()
}
/// CLI entry point: filter the maestro's registered scores according to
/// `args_struct` (or `Args::parse()` when `None`), then list, confirm,
/// and/or run them.
///
/// # Errors
/// Returns an error when the interactive TUI is requested but the `tui`
/// feature is not compiled in, when no score matches the filter, or when a
/// score's interpretation fails.
pub async fn init<T: Topology + Send + Sync + 'static>(
    maestro: harmony::maestro::Maestro<T>,
    args_struct: Option<Args>,
) -> Result<(), Box<dyn std::error::Error>> {
    let args = match args_struct {
        Some(args) => args,
        None => Args::parse(),
    };

    #[cfg(feature = "tui")]
    if args.interactive {
        return harmony_tui::init(maestro).await;
    }
    #[cfg(not(feature = "tui"))]
    if args.interactive {
        return Err("Not compiled with interactive support".into());
    }

    // Ignore the result: the logger may already be initialized (e.g. when
    // `init` is called more than once in the same process, as in tests).
    let _ = env_logger::builder().try_init();

    let scores_vec = maestro_scores_filter(&maestro, args.all, args.filter, args.number);
    if scores_vec.is_empty() {
        return Err("No score found".into());
    }

    // if list option is specified, print filtered list and exit
    if args.list {
        println!("Available scores:");
        println!("{}", list_scores_with_index(&scores_vec));
        return Ok(());
    }

    // prompt user if --yes is not specified
    if !args.yes {
        let confirmation = Confirm::new(
            format!(
                "This will run the following scores: {}\n",
                list_scores_with_index(&scores_vec)
            )
            .as_str(),
        )
        .with_default(true)
        .prompt()
        .expect("Unexpected prompt error");
        if !confirmation {
            return Ok(());
        }
    }

    // Run filtered scores sequentially, aborting on the first failure.
    for s in scores_vec {
        println!("Running: {}", s.name());
        maestro.interpret(s).await?;
    }
    Ok(())
}
#[cfg(test)]
mod test {
    use harmony::{
        inventory::Inventory,
        maestro::Maestro,
        modules::dummy::{ErrorScore, PanicScore, SuccessScore},
        topology::HAClusterTopology,
    };

    // Build a maestro preloaded with the three dummy scores (one succeeding,
    // one failing, one panicking) used across these tests.
    fn init_test_maestro() -> Maestro<HAClusterTopology> {
        let inventory = Inventory::autoload();
        let topology = HAClusterTopology::autoload();
        let mut maestro = Maestro::new(inventory, topology);
        maestro.register_all(vec![
            Box::new(SuccessScore {}),
            Box::new(ErrorScore {}),
            Box::new(PanicScore {}),
        ]);
        maestro
    }

    // Running only the SuccessScore (by filter) should succeed end-to-end.
    #[tokio::test]
    async fn test_init_success_score() {
        let maestro = init_test_maestro();
        let res = crate::init(
            maestro,
            Some(crate::Args {
                yes: true,
                filter: Some("SuccessScore".to_owned()),
                interactive: false,
                all: true,
                number: 0,
                list: false,
            }),
        )
        .await;
        assert!(res.is_ok());
    }

    // Running only the ErrorScore should propagate the interpretation error.
    #[tokio::test]
    async fn test_init_error_score() {
        let maestro = init_test_maestro();
        let res = crate::init(
            maestro,
            Some(crate::Args {
                yes: true,
                filter: Some("ErrorScore".to_owned()),
                interactive: false,
                all: true,
                number: 0,
                list: false,
            }),
        )
        .await;
        assert!(res.is_err());
    }

    // Selecting the 0th score via --number (all=false) should run it.
    // NOTE(review): assumes the 0th registered score is SuccessScore.
    #[tokio::test]
    async fn test_init_number_score() {
        let maestro = init_test_maestro();
        let res = crate::init(
            maestro,
            Some(crate::Args {
                yes: true,
                filter: None,
                interactive: false,
                all: false,
                number: 0,
                list: false,
            }),
        )
        .await;
        assert!(res.is_ok());
    }

    // No filter + all=true returns every registered score.
    #[tokio::test]
    async fn test_filter_fn_all() {
        let maestro = init_test_maestro();
        let res = crate::maestro_scores_filter(&maestro, true, None, 0);
        assert!(res.len() == 3);
    }

    // Filter matching a single score returns exactly that score.
    #[tokio::test]
    async fn test_filter_fn_all_success() {
        let maestro = init_test_maestro();
        let res = crate::maestro_scores_filter(&maestro, true, Some("Success".to_owned()), 0);
        assert!(res.len() == 1);
        assert!(
            maestro
                .interpret(res.get(0).unwrap().clone_box())
                .await
                .is_ok()
        );
    }

    #[tokio::test]
    async fn test_filter_fn_all_error() {
        let maestro = init_test_maestro();
        let res = crate::maestro_scores_filter(&maestro, true, Some("Error".to_owned()), 0);
        assert!(res.len() == 1);
        assert!(
            maestro
                .interpret(res.get(0).unwrap().clone_box())
                .await
                .is_err()
        );
    }

    // "Score" matches all three names; registration order is preserved.
    #[tokio::test]
    async fn test_filter_fn_all_score() {
        let maestro = init_test_maestro();
        let res = crate::maestro_scores_filter(&maestro, true, Some("Score".to_owned()), 0);
        assert!(res.len() == 3);
        assert!(
            maestro
                .interpret(res.get(0).unwrap().clone_box())
                .await
                .is_ok()
        );
        assert!(
            maestro
                .interpret(res.get(1).unwrap().clone_box())
                .await
                .is_err()
        );
    }

    // all=false picks exactly one score by index.
    #[tokio::test]
    async fn test_filter_fn_number() {
        let maestro = init_test_maestro();
        let res = crate::maestro_scores_filter(&maestro, false, None, 0);
        assert!(res.len() == 1);
        assert!(
            maestro
                .interpret(res.get(0).unwrap().clone_box())
                .await
                .is_ok()
        );
    }

    // Out-of-range index yields an empty selection rather than a panic.
    #[tokio::test]
    async fn test_filter_fn_number_invalid() {
        let maestro = init_test_maestro();
        let res = crate::maestro_scores_filter(&maestro, false, None, 11);
        assert!(res.len() == 0);
    }
}

View File

@ -16,3 +16,5 @@ color-eyre = "0.6.3"
tokio-stream = "0.1.17"
tui-logger = "0.14.1"
log-panics = "2.1.0"
serde-value.workspace = true
serde_json = "1.0.140"

View File

@ -1,21 +1,20 @@
mod ratatui_utils;
mod widget;
use log::{debug, error, info};
use tokio::sync::mpsc;
use tokio_stream::StreamExt;
use tui_logger::{TuiWidgetEvent, TuiWidgetState};
use tui_logger::{TuiLoggerFile, TuiWidgetEvent, TuiWidgetState};
use widget::{help::HelpWidget, score::ScoreListWidget};
use std::{panic, sync::Arc, time::Duration};
use crossterm::event::{Event, EventStream, KeyCode, KeyEventKind};
use harmony::{maestro::Maestro, score::Score};
use harmony::{maestro::Maestro, score::Score, topology::Topology};
use ratatui::{
self, Frame,
layout::{Constraint, Layout, Position},
style::{Color, Style},
widgets::{Block, Borders, ListItem},
widgets::{Block, Borders},
};
pub mod tui {
@ -30,36 +29,56 @@ pub mod tui {
///
/// # Example
///
/// ```rust
/// use harmony;
/// use harmony_tui::init;
/// ```rust,no_run
/// use harmony::{
/// inventory::Inventory,
/// maestro::Maestro,
/// modules::dummy::{ErrorScore, PanicScore, SuccessScore},
/// topology::HAClusterTopology,
/// };
///
/// #[harmony::main]
/// pub async fn main(maestro: harmony::Maestro) {
/// maestro.register(DeploymentScore::new("nginx-test", "nginx"));
/// maestro.register(OKDLoadBalancerScore::new(&maestro.inventory, &maestro.topology));
/// // Register other scores as needed
/// #[tokio::main]
/// async fn main() {
/// let inventory = Inventory::autoload();
/// let topology = HAClusterTopology::autoload();
/// let mut maestro = Maestro::new(inventory, topology);
///
/// init(maestro).await.unwrap();
/// maestro.register_all(vec![
/// Box::new(SuccessScore {}),
/// Box::new(ErrorScore {}),
/// Box::new(PanicScore {}),
/// ]);
/// harmony_tui::init(maestro).await.unwrap();
/// }
/// ```
pub async fn init(maestro: Maestro) -> Result<(), Box<dyn std::error::Error>> {
pub async fn init<T: Topology + Send + Sync + 'static>(
maestro: Maestro<T>,
) -> Result<(), Box<dyn std::error::Error>> {
HarmonyTUI::new(maestro).init().await
}
pub struct HarmonyTUI {
score: ScoreListWidget,
pub struct HarmonyTUI<T: Topology> {
score: ScoreListWidget<T>,
should_quit: bool,
tui_state: TuiWidgetState,
}
#[derive(Debug)]
enum HarmonyTuiEvent {
LaunchScore(ScoreItem),
enum HarmonyTuiEvent<T: Topology> {
LaunchScore(Box<dyn Score<T>>),
}
impl HarmonyTUI {
pub fn new(maestro: Maestro) -> Self {
impl<T: Topology> std::fmt::Display for HarmonyTuiEvent<T> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let output = match self {
HarmonyTuiEvent::LaunchScore(score) => format!("LaunchScore({})", score.name()),
};
f.write_str(&output)
}
}
impl<T: Topology + Send + Sync + 'static> HarmonyTUI<T> {
pub fn new(maestro: Maestro<T>) -> Self {
let maestro = Arc::new(maestro);
let (_handle, sender) = Self::start_channel(maestro.clone());
let score = ScoreListWidget::new(Self::scores_list(&maestro), sender);
@ -72,20 +91,22 @@ impl HarmonyTUI {
}
fn start_channel(
maestro: Arc<Maestro>,
) -> (tokio::task::JoinHandle<()>, mpsc::Sender<HarmonyTuiEvent>) {
let (sender, mut receiver) = mpsc::channel::<HarmonyTuiEvent>(32);
maestro: Arc<Maestro<T>>,
) -> (
tokio::task::JoinHandle<()>,
mpsc::Sender<HarmonyTuiEvent<T>>,
) {
let (sender, mut receiver) = mpsc::channel::<HarmonyTuiEvent<T>>(32);
let handle = tokio::spawn(async move {
info!("Starting message channel receiver loop");
while let Some(event) = receiver.recv().await {
info!("Received event {event:#?}");
info!("Received event {event}");
match event {
HarmonyTuiEvent::LaunchScore(score_item) => {
let maestro = maestro.clone();
let joinhandle_result =
tokio::spawn(async move { maestro.interpret(score_item.0).await })
.await;
tokio::spawn(async move { maestro.interpret(score_item).await }).await;
match joinhandle_result {
Ok(interpretation_result) => match interpretation_result {
@ -111,7 +132,7 @@ impl HarmonyTUI {
// Set default level for unknown targets to Trace
tui_logger::set_default_level(log::LevelFilter::Info);
std::fs::create_dir_all("log")?;
tui_logger::set_log_file("log/harmony.log").unwrap();
tui_logger::set_log_file(TuiLoggerFile::new("log/harmony.log"));
color_eyre::install()?;
let mut terminal = ratatui::init();
@ -147,12 +168,13 @@ impl HarmonyTUI {
frame.render_widget(&help_block, help_area);
frame.render_widget(HelpWidget::new(), help_block.inner(help_area));
let [list_area, output_area] =
let [list_area, logger_area] =
Layout::horizontal([Constraint::Min(30), Constraint::Percentage(100)]).areas(app_area);
let block = Block::default().borders(Borders::RIGHT);
frame.render_widget(&block, list_area);
self.score.render(list_area, frame);
let tui_logger = tui_logger::TuiLoggerWidget::default()
.style_error(Style::default().fg(Color::Red))
.style_warn(Style::default().fg(Color::LightRed))
@ -160,16 +182,13 @@ impl HarmonyTUI {
.style_debug(Style::default().fg(Color::Gray))
.style_trace(Style::default().fg(Color::Gray))
.state(&self.tui_state);
frame.render_widget(tui_logger, output_area)
}
fn scores_list(maestro: &Maestro) -> Vec<ScoreItem> {
frame.render_widget(tui_logger, logger_area);
}
fn scores_list(maestro: &Maestro<T>) -> Vec<Box<dyn Score<T>>> {
let scores = maestro.scores();
let scores_read = scores.read().expect("Should be able to read scores");
scores_read
.iter()
.map(|s| ScoreItem(s.clone_box()))
.collect()
scores_read.iter().map(|s| s.clone_box()).collect()
}
async fn handle_event(&mut self, event: &Event) {
@ -189,18 +208,3 @@ impl HarmonyTUI {
}
}
}
#[derive(Debug)]
struct ScoreItem(Box<dyn Score>);
impl ScoreItem {
pub fn clone(&self) -> Self {
Self(self.0.clone_box())
}
}
impl Into<ListItem<'_>> for &ScoreItem {
fn into(self) -> ListItem<'static> {
ListItem::new(self.0.name())
}
}

View File

@ -1,22 +0,0 @@
use ratatui::layout::{Constraint, Flex, Layout, Rect};
/// Centers a [`Rect`] within another [`Rect`] using the provided [`Constraint`]s.
///
/// # Examples
///
/// ```rust
/// use ratatui::layout::{Constraint, Rect};
///
/// let area = Rect::new(0, 0, 100, 100);
/// let horizontal = Constraint::Percentage(20);
/// let vertical = Constraint::Percentage(30);
///
/// let centered = center(area, horizontal, vertical);
/// ```
pub(crate) fn center(area: Rect, horizontal: Constraint, vertical: Constraint) -> Rect {
let [area] = Layout::horizontal([horizontal])
.flex(Flex::Center)
.areas(area);
let [area] = Layout::vertical([vertical]).flex(Flex::Center).areas(area);
area
}

View File

@ -1,17 +1,17 @@
use std::sync::{Arc, RwLock};
use crate::HarmonyTuiEvent;
use crossterm::event::{Event, KeyCode, KeyEventKind};
use harmony::{score::Score, topology::Topology};
use log::{info, warn};
use ratatui::{
Frame,
layout::Rect,
style::{Style, Stylize},
widgets::{List, ListState, StatefulWidget, Widget},
widgets::{List, ListItem, ListState, StatefulWidget, Widget},
};
use tokio::sync::mpsc;
use crate::{HarmonyTuiEvent, ScoreItem};
#[derive(Debug)]
enum ExecutionState {
INITIATED,
@ -19,23 +19,34 @@ enum ExecutionState {
CANCELED,
}
#[derive(Debug)]
struct Execution {
struct Execution<T: Topology> {
state: ExecutionState,
score: ScoreItem,
score: Box<dyn Score<T>>,
}
#[derive(Debug)]
pub(crate) struct ScoreListWidget {
impl<T: Topology> std::fmt::Display for Execution<T> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.write_fmt(format_args!(
"Execution of {} status {:?}",
self.score.name(),
self.state
))
}
}
pub(crate) struct ScoreListWidget<T: Topology> {
list_state: Arc<RwLock<ListState>>,
scores: Vec<ScoreItem>,
execution: Option<Execution>,
execution_history: Vec<Execution>,
sender: mpsc::Sender<HarmonyTuiEvent>,
scores: Vec<Box<dyn Score<T>>>,
execution: Option<Execution<T>>,
execution_history: Vec<Execution<T>>,
sender: mpsc::Sender<HarmonyTuiEvent<T>>,
}
impl ScoreListWidget {
pub(crate) fn new(scores: Vec<ScoreItem>, sender: mpsc::Sender<HarmonyTuiEvent>) -> Self {
impl<T: Topology> ScoreListWidget<T> {
pub(crate) fn new(
scores: Vec<Box<dyn Score<T>>>,
sender: mpsc::Sender<HarmonyTuiEvent<T>>,
) -> Self {
let mut list_state = ListState::default();
list_state.select_first();
let list_state = Arc::new(RwLock::new(list_state));
@ -49,23 +60,27 @@ impl ScoreListWidget {
}
pub(crate) fn launch_execution(&mut self) {
let list_read = self.list_state.read().unwrap();
if let Some(index) = list_read.selected() {
let score = self
.scores
.get(index)
.expect("List state should always match with internal Vec");
if let Some(score) = self.get_selected_score() {
self.execution = Some(Execution {
state: ExecutionState::INITIATED,
score: score.clone(),
score: score.clone_box(),
});
info!("{:#?}\n\nConfirm Execution (Press y/n)", score.0);
info!("{}\n\nConfirm Execution (Press y/n)", score.name());
info!("{}", score.print_score_details());
} else {
warn!("No Score selected, nothing to launch");
}
}
pub(crate) fn get_selected_score(&self) -> Option<Box<dyn Score<T>>> {
let list_read = self.list_state.read().unwrap();
if let Some(index) = list_read.selected() {
self.scores.get(index).map(|s| s.clone_box())
} else {
None
}
}
pub(crate) fn scroll_down(&self) {
self.list_state.write().unwrap().scroll_down_by(1);
}
@ -92,9 +107,9 @@ impl ScoreListWidget {
match confirm {
true => {
execution.state = ExecutionState::RUNNING;
info!("Launch execution {:?}", execution);
info!("Launch execution {execution}");
self.sender
.send(HarmonyTuiEvent::LaunchScore(execution.score.clone()))
.send(HarmonyTuiEvent::LaunchScore(execution.score.clone_box()))
.await
.expect("Should be able to send message");
}
@ -123,16 +138,21 @@ impl ScoreListWidget {
}
}
impl Widget for &ScoreListWidget {
impl<T: Topology> Widget for &ScoreListWidget<T> {
fn render(self, area: ratatui::prelude::Rect, buf: &mut ratatui::prelude::Buffer)
where
Self: Sized,
{
let mut list_state = self.list_state.write().unwrap();
let list = List::new(&self.scores)
let scores_items: Vec<ListItem<'_>> = self.scores.iter().map(score_to_list_item).collect();
let list = List::new(scores_items)
.highlight_style(Style::new().bold().italic())
.highlight_symbol("🠊 ");
StatefulWidget::render(list, area, buf, &mut list_state)
}
}
fn score_to_list_item<'a, T: Topology>(score: &'a Box<dyn Score<T>>) -> ListItem<'a> {
ListItem::new(score.name())
}

View File

@ -4,3 +4,6 @@ edition = "2024"
version.workspace = true
readme.workspace = true
license.workspace = true
[dependencies]
serde = { version = "1.0.209", features = ["derive"] }

View File

@ -1,5 +1,7 @@
pub mod net {
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
use serde::Serialize;
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, Serialize)]
pub struct MacAddress(pub [u8; 6]);
impl MacAddress {

23
k3d/Cargo.toml Normal file
View File

@ -0,0 +1,23 @@
[package]
name = "k3d-rs"
edition = "2021"
version.workspace = true
readme.workspace = true
license.workspace = true
[dependencies]
log = { workspace = true }
async-trait = { workspace = true }
tokio = { workspace = true }
octocrab = "0.44.0"
regex = "1.11.1"
reqwest = { version = "0.12", features = ["stream"] }
url.workspace = true
sha2 = "0.10.8"
futures-util = "0.3.31"
kube.workspace = true
[dev-dependencies]
env_logger = { workspace = true }
httptest = "0.16.3"
pretty_assertions = "1.4.1"

View File

@ -0,0 +1,303 @@
use futures_util::StreamExt;
use log::{debug, info, warn};
use sha2::{Digest, Sha256};
use std::io::Read;
use std::path::PathBuf;
use tokio::fs;
use tokio::fs::File;
use tokio::io::AsyncWriteExt;
use url::Url;
const CHECKSUM_FAILED_MSG: &str = "Downloaded file failed checksum verification";

/// Represents an asset that can be downloaded from a URL with checksum verification.
///
/// This struct facilitates secure downloading of files from remote URLs by
/// verifying the integrity of the downloaded content using SHA-256 checksums.
/// It handles downloading the file, saving it to disk, and verifying the checksum matches
/// the expected value.
///
/// # Examples
///
/// ```compile_fail
/// # use url::Url;
/// # use std::path::PathBuf;
///
/// # async fn example() -> Result<(), String> {
/// let asset = DownloadableAsset {
///     url: Url::parse("https://example.com/file.zip").unwrap(),
///     file_name: "file.zip".to_string(),
///     checksum: "a1b2c3d4e5f6...".to_string(),
/// };
///
/// let download_dir = PathBuf::from("/tmp/downloads");
/// let file_path = asset.download_to_path(download_dir).await?;
/// # Ok(())
/// # }
/// ```
#[derive(Debug)]
pub(crate) struct DownloadableAsset {
    // Remote location the file is fetched from.
    pub(crate) url: Url,
    // File name used when saving into the target directory.
    pub(crate) file_name: String,
    // Expected SHA-256 digest of the file contents, as lowercase hex.
    pub(crate) checksum: String,
}
impl DownloadableAsset {
    /// Returns `true` when `file` exists and its SHA-256 digest equals `self.checksum`.
    ///
    /// Unreadable or missing files are reported as "not verified" (`false`) rather
    /// than an error, so callers can uniformly fall back to (re-)downloading.
    fn verify_checksum(&self, file: PathBuf) -> bool {
        if !file.exists() {
            warn!("File does not exist: {:?}", file);
            return false;
        }

        let mut file = match std::fs::File::open(&file) {
            Ok(file) => file,
            Err(e) => {
                warn!("Failed to open file for checksum verification: {:?}", e);
                return false;
            }
        };

        let mut hasher = Sha256::new();
        let mut buffer = [0; 1024 * 1024]; // 1MB buffer keeps syscall count low on large files

        loop {
            let bytes_read = match file.read(&mut buffer) {
                Ok(0) => break,
                Ok(n) => n,
                Err(e) => {
                    warn!("Error reading file for checksum: {:?}", e);
                    return false;
                }
            };
            hasher.update(&buffer[..bytes_read]);
        }

        let result = hasher.finalize();
        let calculated_hash = format!("{:x}", result);

        debug!("Expected checksum: {}", self.checksum);
        debug!("Calculated checksum: {}", calculated_hash);

        calculated_hash == self.checksum
    }

    /// Downloads the asset to the specified directory, verifying its checksum.
    ///
    /// This function will:
    /// 1. Create the target directory if it doesn't exist
    /// 2. Check if the file already exists with the correct checksum
    /// 3. If not, download the file from the URL
    /// 4. Verify the downloaded file's checksum matches the expected value
    ///
    /// # Arguments
    ///
    /// * `folder` - The directory path where the file should be saved
    ///
    /// # Returns
    ///
    /// * `Ok(PathBuf)` - The path to the downloaded file on success
    /// * `Err(String)` - A descriptive error message if the download or verification fails
    ///
    /// # Errors
    ///
    /// This function will return an error if:
    /// - The target directory cannot be created
    /// - The network request fails
    /// - The server responds with a non-success status code
    /// - Writing to disk fails
    /// - The checksum verification fails
    pub(crate) async fn download_to_path(&self, folder: PathBuf) -> Result<PathBuf, String> {
        if !folder.exists() {
            // Previously panicked via expect(); return Err as documented instead.
            fs::create_dir_all(&folder)
                .await
                .map_err(|e| format!("Failed to create download directory: {e}"))?;
        }

        let target_file_path = folder.join(&self.file_name);
        debug!("Downloading to path: {:?}", target_file_path);

        if self.verify_checksum(target_file_path.clone()) {
            debug!("File already exists with correct checksum, skipping download");
            return Ok(target_file_path);
        }

        debug!("Downloading from URL: {}", self.url);
        let client = reqwest::Client::new();
        let response = client
            .get(self.url.clone())
            .send()
            .await
            .map_err(|e| format!("Failed to download file: {e}"))?;

        if !response.status().is_success() {
            return Err(format!(
                "Failed to download file, status: {}",
                response.status()
            ));
        }

        let mut file = File::create(&target_file_path)
            .await
            .map_err(|e| format!("Failed to create target file: {e}"))?;

        // Stream the body to disk chunk by chunk so large binaries are never
        // buffered entirely in memory.
        let mut stream = response.bytes_stream();
        while let Some(chunk_result) = stream.next().await {
            let chunk = chunk_result.map_err(|e| format!("Error while downloading file: {e}"))?;
            file.write_all(&chunk)
                .await
                .map_err(|e| format!("Failed to write data to file: {e}"))?;
        }
        file.flush()
            .await
            .map_err(|e| format!("Failed to flush file: {e}"))?;
        // Close the handle before re-reading it for checksum verification.
        drop(file);

        if !self.verify_checksum(target_file_path.clone()) {
            return Err(CHECKSUM_FAILED_MSG.to_string());
        }

        info!(
            "File downloaded and verified successfully: {}",
            target_file_path.to_string_lossy()
        );
        Ok(target_file_path)
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use httptest::{
        matchers::{self, request},
        responders, Expectation, Server,
    };

    const BASE_TEST_PATH: &str = "/tmp/harmony-test-k3d-download";
    const TEST_CONTENT: &str = "This is a test file.";
    // SHA-256 of TEST_CONTENT, lowercase hex.
    const TEST_CONTENT_HASH: &str =
        "f29bc64a9d3732b4b9035125fdb3285f5b6455778edca72414671e0ca3b2e0de";

    /// Creates a unique download directory and a mock HTTP server for one test.
    fn setup_test() -> (PathBuf, Server) {
        let _ = env_logger::builder().try_init();

        // A millisecond timestamp alone can collide when tests start concurrently;
        // mix in a process-wide counter and use nanosecond resolution for uniqueness.
        static COUNTER: std::sync::atomic::AtomicUsize = std::sync::atomic::AtomicUsize::new(0);
        let seq = COUNTER.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
        let test_id = std::time::SystemTime::now()
            .duration_since(std::time::UNIX_EPOCH)
            .unwrap()
            .as_nanos();

        let download_path = format!("{}/test_{}_{}", BASE_TEST_PATH, test_id, seq);
        std::fs::create_dir_all(&download_path).unwrap();

        (PathBuf::from(download_path), Server::run())
    }

    #[tokio::test]
    async fn test_download_to_path_success() {
        let (folder, server) = setup_test();
        server.expect(
            Expectation::matching(request::method_path("GET", "/test.txt"))
                .respond_with(responders::status_code(200).body(TEST_CONTENT)),
        );

        let asset = DownloadableAsset {
            url: Url::parse(&server.url("/test.txt").to_string()).unwrap(),
            file_name: "test.txt".to_string(),
            checksum: TEST_CONTENT_HASH.to_string(),
        };

        let result = asset
            .download_to_path(folder.join("success"))
            .await
            .unwrap();

        let downloaded_content = std::fs::read_to_string(result).unwrap();
        assert_eq!(downloaded_content, TEST_CONTENT);
    }

    #[tokio::test]
    async fn test_download_to_path_already_exists() {
        let (folder, server) = setup_test();
        // The server must never be hit: a matching checksum skips the download.
        server.expect(
            Expectation::matching(matchers::any())
                .times(0)
                .respond_with(responders::status_code(200).body(TEST_CONTENT)),
        );

        let asset = DownloadableAsset {
            url: Url::parse(&server.url("/test.txt").to_string()).unwrap(),
            file_name: "test.txt".to_string(),
            checksum: TEST_CONTENT_HASH.to_string(),
        };

        let target_file_path = folder.join(&asset.file_name);
        std::fs::write(&target_file_path, TEST_CONTENT).unwrap();

        let result = asset.download_to_path(folder).await.unwrap();

        let content = std::fs::read_to_string(result).unwrap();
        assert_eq!(content, TEST_CONTENT);
    }

    #[tokio::test]
    async fn test_download_to_path_server_error() {
        let (folder, server) = setup_test();
        server.expect(
            Expectation::matching(matchers::any()).respond_with(responders::status_code(404)),
        );

        let asset = DownloadableAsset {
            url: Url::parse(&server.url("/test.txt").to_string()).unwrap(),
            file_name: "test.txt".to_string(),
            checksum: TEST_CONTENT_HASH.to_string(),
        };

        let result = asset.download_to_path(folder.join("error")).await;

        assert!(result.is_err());
        assert!(result.unwrap_err().contains("status: 404"));
    }

    #[tokio::test]
    async fn test_download_to_path_checksum_failure() {
        let (folder, server) = setup_test();
        let invalid_content = "This is NOT the expected content";
        server.expect(
            Expectation::matching(matchers::any())
                .respond_with(responders::status_code(200).body(invalid_content)),
        );

        let asset = DownloadableAsset {
            url: Url::parse(&server.url("/test.txt").to_string()).unwrap(),
            file_name: "test.txt".to_string(),
            checksum: TEST_CONTENT_HASH.to_string(),
        };

        let join_handle =
            tokio::spawn(async move { asset.download_to_path(folder.join("failure")).await });

        assert_eq!(
            join_handle.await.unwrap().err().unwrap(),
            CHECKSUM_FAILED_MSG
        );
    }

    #[tokio::test]
    async fn test_download_with_specific_path_matcher() {
        let (folder, server) = setup_test();
        server.expect(
            Expectation::matching(matchers::request::path("/specific/path.txt"))
                .respond_with(responders::status_code(200).body(TEST_CONTENT)),
        );

        let asset = DownloadableAsset {
            url: Url::parse(&server.url("/specific/path.txt").to_string()).unwrap(),
            file_name: "path.txt".to_string(),
            checksum: TEST_CONTENT_HASH.to_string(),
        };

        let result = asset.download_to_path(folder).await.unwrap();

        let downloaded_content = std::fs::read_to_string(result).unwrap();
        assert_eq!(downloaded_content, TEST_CONTENT);
    }
}

410
k3d/src/lib.rs Normal file
View File

@ -0,0 +1,410 @@
mod downloadable_asset;
use downloadable_asset::*;
use kube::Client;
use log::{debug, info, warn};
use std::path::PathBuf;
const K3D_BIN_FILE_NAME: &str = "k3d";

/// Manages a local k3d installation and, optionally, one named k3d cluster.
pub struct K3d {
    // Directory where the k3d binary is downloaded to and looked up from.
    base_dir: PathBuf,
    // Name of the cluster this instance manages; None means binary management only.
    cluster_name: Option<String>,
}
impl K3d {
    /// Creates a manager rooted at `base_dir` (where the k3d binary lives),
    /// optionally bound to the cluster named `cluster_name`.
    pub fn new(base_dir: PathBuf, cluster_name: Option<String>) -> Self {
        Self {
            base_dir,
            cluster_name,
        }
    }

    /// Resolves the release asset matching the current OS/architecture and pairs
    /// it with its SHA-256 checksum taken from the release's `checksums.txt`.
    ///
    /// # Errors
    ///
    /// Returns an error when the platform is unsupported, the expected assets are
    /// missing from the release, or fetching the checksums file fails.
    async fn get_binary_for_current_platform(
        &self,
        latest_release: octocrab::models::repos::Release,
    ) -> Result<DownloadableAsset, String> {
        let os = std::env::consts::OS;
        let arch = std::env::consts::ARCH;

        debug!("Detecting platform: OS={}, ARCH={}", os, arch);

        let binary_pattern = match (os, arch) {
            ("linux", "x86") => "k3d-linux-386",
            ("linux", "x86_64") => "k3d-linux-amd64",
            ("linux", "arm") => "k3d-linux-arm",
            ("linux", "aarch64") => "k3d-linux-arm64",
            ("windows", "x86_64") => "k3d-windows-amd64.exe",
            ("macos", "x86_64") => "k3d-darwin-amd64",
            ("macos", "aarch64") => "k3d-darwin-arm64",
            _ => return Err(format!("Unsupported platform: {}-{}", os, arch)),
        };

        debug!("Looking for binary matching pattern: {}", binary_pattern);

        let binary_asset = latest_release
            .assets
            .iter()
            .find(|asset| asset.name == binary_pattern)
            .ok_or_else(|| format!("No matching binary found for {}", binary_pattern))?;

        let binary_url = binary_asset.browser_download_url.clone();

        let checksums_asset = latest_release
            .assets
            .iter()
            .find(|asset| asset.name == "checksums.txt")
            .ok_or_else(|| "Checksums file not found in release assets".to_string())?;

        let checksums_url = checksums_asset.browser_download_url.clone();
        let body = reqwest::get(checksums_url)
            .await
            .map_err(|e| format!("Failed to fetch checksums.txt: {e}"))?
            .text()
            .await
            .map_err(|e| format!("Failed to read checksums.txt body: {e}"))?;
        debug!("checksums.txt body: {body}");

        // checksums.txt lines look like "<sha256-hex>  <asset-name>".
        let checksum = body
            .lines()
            .find_map(|line| {
                if line.ends_with(binary_pattern) {
                    Some(line.split_whitespace().next().unwrap_or("").to_string())
                } else {
                    None
                }
            })
            .ok_or_else(|| format!("Checksum not found for {}", binary_pattern))?;

        debug!("Found binary at {} with checksum {}", binary_url, checksum);

        Ok(DownloadableAsset {
            url: binary_url,
            file_name: K3D_BIN_FILE_NAME.to_string(),
            checksum,
        })
    }

    /// Downloads the latest k3d release binary for this platform into `base_dir`
    /// and returns the path of the installed binary.
    pub async fn download_latest_release(&self) -> Result<PathBuf, String> {
        let latest_release = self.get_latest_release_tag().await?;
        let release_binary = self.get_binary_for_current_platform(latest_release).await?;
        info!("Found K3d binary to install : {release_binary:#?}");
        release_binary.download_to_path(self.base_dir.clone()).await
    }

    // TODO : Make sure this will only find actual released versions, no prereleases or test
    // builds
    /// Fetches the latest k3d release metadata from the k3d-io/k3d GitHub repository.
    pub async fn get_latest_release_tag(&self) -> Result<octocrab::models::repos::Release, String> {
        let octo = octocrab::instance();
        let latest_release = octo
            .repos("k3d-io", "k3d")
            .releases()
            .get_latest()
            .await
            .map_err(|e| e.to_string())?;
        debug!("Got latest k3d release {latest_release:#?}");
        Ok(latest_release)
    }

    /// Checks if k3d binary exists and is executable
    ///
    /// Verifies that:
    /// 1. The k3d binary exists in the base directory
    /// 2. It has proper executable permissions (on Unix systems)
    /// 3. It responds correctly to a simple command (`k3d --version`)
    pub fn is_installed(&self) -> bool {
        let binary_path = self.get_k3d_binary_path();

        if !binary_path.exists() {
            debug!("K3d binary not found at {:?}", binary_path);
            return false;
        }

        if !self.ensure_binary_executable(&binary_path) {
            return false;
        }

        self.can_execute_binary_check(&binary_path)
    }

    /// Verifies if the specified cluster is already created
    ///
    /// Executes `k3d cluster list <cluster_name>` and checks for a successful response,
    /// indicating that the cluster exists and is registered with k3d.
    pub fn is_cluster_initialized(&self) -> bool {
        let cluster_name = match self.get_cluster_name() {
            Ok(name) => name,
            Err(_) => {
                debug!("Could not get cluster name, can't verify if cluster is initialized");
                return false;
            }
        };

        // Resolve the binary through the shared helper, consistent with is_installed().
        let binary_path = self.get_k3d_binary_path();
        if !binary_path.exists() {
            return false;
        }

        self.verify_cluster_exists(&binary_path, cluster_name)
    }

    // Returns the configured cluster name, or an error when this instance was
    // built without one.
    fn get_cluster_name(&self) -> Result<&String, String> {
        match &self.cluster_name {
            Some(name) => Ok(name),
            None => Err("No cluster name available".to_string()),
        }
    }

    /// Creates a new k3d cluster with the specified name
    ///
    /// This method:
    /// 1. Creates a new k3d cluster using `k3d cluster create <cluster_name>`
    /// 2. Waits for the cluster to initialize
    /// 3. Returns a configured Kubernetes client connected to the cluster
    ///
    /// # Returns
    /// - `Ok(Client)` - Successfully created cluster and connected client
    /// - `Err(String)` - Error message detailing what went wrong
    pub async fn initialize_cluster(&self) -> Result<Client, String> {
        let cluster_name = self
            .get_cluster_name()
            .map_err(|_| "Could not get cluster_name, cannot initialize".to_string())?;

        info!("Initializing k3d cluster '{}'", cluster_name);
        self.create_cluster(cluster_name)?;
        self.create_kubernetes_client().await
    }

    // Path where the k3d binary is expected inside base_dir.
    fn get_k3d_binary_path(&self) -> PathBuf {
        self.base_dir.join(K3D_BIN_FILE_NAME)
    }

    // Like get_k3d_binary_path, but errors when the binary is not present on disk.
    fn get_k3d_binary(&self) -> Result<PathBuf, String> {
        let path = self.get_k3d_binary_path();
        if !path.exists() {
            return Err(format!("K3d binary not found at {:?}", path));
        }
        Ok(path)
    }

    /// Ensures k3d is installed and the cluster is initialized
    ///
    /// This method provides a complete setup flow:
    /// 1. Checks if k3d is installed, downloads and installs it if needed
    /// 2. Verifies if the specified cluster exists, creates it if not
    /// 3. Returns a Kubernetes client connected to the cluster
    ///
    /// # Returns
    /// - `Ok(Client)` - Successfully ensured k3d and cluster are ready
    /// - `Err(String)` - Error message if any step failed
    pub async fn ensure_installed(&self) -> Result<Client, String> {
        if !self.is_installed() {
            info!("K3d is not installed, downloading latest release");
            self.download_latest_release()
                .await
                .map_err(|e| format!("Failed to download k3d: {}", e))?;

            if !self.is_installed() {
                return Err("Failed to install k3d properly".to_string());
            }
        }

        if !self.is_cluster_initialized() {
            info!("Cluster is not initialized, initializing now");
            return self.initialize_cluster().await;
        }

        // The cluster exists but may be stopped; make sure it is running.
        self.start_cluster().await?;

        info!("K3d and cluster are already properly set up");
        self.create_kubernetes_client().await
    }

    // Private helper methods
    #[cfg(not(target_os = "windows"))]
    fn ensure_binary_executable(&self, binary_path: &PathBuf) -> bool {
        use std::os::unix::fs::PermissionsExt;

        let mut perms = match std::fs::metadata(binary_path) {
            Ok(metadata) => metadata.permissions(),
            Err(e) => {
                debug!("Failed to get binary metadata: {}", e);
                return false;
            }
        };

        // rwxr-xr-x: owner can write, everyone can execute.
        perms.set_mode(0o755);
        if let Err(e) = std::fs::set_permissions(binary_path, perms) {
            debug!("Failed to set executable permissions on k3d binary: {}", e);
            return false;
        }

        true
    }

    #[cfg(target_os = "windows")]
    fn ensure_binary_executable(&self, _binary_path: &PathBuf) -> bool {
        // Windows doesn't use executable file permissions
        true
    }

    // Runs `k3d --version` to confirm the binary actually executes.
    fn can_execute_binary_check(&self, binary_path: &PathBuf) -> bool {
        match std::process::Command::new(binary_path)
            .arg("--version")
            .output()
        {
            Ok(output) => {
                if output.status.success() {
                    debug!("K3d binary is installed and working");
                    true
                } else {
                    debug!("K3d binary check failed: {:?}", output);
                    false
                }
            }
            Err(e) => {
                debug!("Failed to execute K3d binary: {}", e);
                false
            }
        }
    }

    // Runs `k3d cluster list <name> --no-headers`; a successful, non-empty
    // response means the cluster is registered with k3d.
    fn verify_cluster_exists(&self, binary_path: &PathBuf, cluster_name: &str) -> bool {
        match std::process::Command::new(binary_path)
            .args(["cluster", "list", cluster_name, "--no-headers"])
            .output()
        {
            Ok(output) => {
                if output.status.success() && !output.stdout.is_empty() {
                    debug!("Cluster '{}' is initialized", cluster_name);
                    true
                } else {
                    debug!("Cluster '{}' is not initialized", cluster_name);
                    false
                }
            }
            Err(e) => {
                debug!("Failed to check cluster initialization: {}", e);
                false
            }
        }
    }

    /// Runs the local k3d binary with the given arguments, logging its output.
    ///
    /// # Errors
    /// Returns an error when the binary is missing or the process cannot be spawned.
    pub fn run_k3d_command<I, S>(&self, args: I) -> Result<std::process::Output, String>
    where
        I: IntoIterator<Item = S>,
        S: AsRef<std::ffi::OsStr>,
    {
        let binary_path = self.get_k3d_binary()?;
        let output = std::process::Command::new(binary_path).args(args).output();
        match output {
            Ok(output) => {
                let stderr = String::from_utf8_lossy(&output.stderr);
                debug!("stderr : {}", stderr);
                let stdout = String::from_utf8_lossy(&output.stdout);
                debug!("stdout : {}", stdout);
                Ok(output)
            }
            Err(e) => Err(format!("Failed to execute k3d command: {}", e)),
        }
    }

    // Creates the named cluster via `k3d cluster create <name>`.
    fn create_cluster(&self, cluster_name: &str) -> Result<(), String> {
        let output = self.run_k3d_command(["cluster", "create", cluster_name])?;

        if !output.status.success() {
            let stderr = String::from_utf8_lossy(&output.stderr);
            return Err(format!("Failed to create cluster: {}", stderr));
        }

        info!("Successfully created k3d cluster '{}'", cluster_name);
        Ok(())
    }

    async fn create_kubernetes_client(&self) -> Result<Client, String> {
        warn!("TODO this method is way too dumb, it should make sure that the client is connected to the k3d cluster actually represented by this instance, not just any default client");
        Client::try_default()
            .await
            .map_err(|e| format!("Failed to create Kubernetes client: {}", e))
    }

    /// Returns a Kubernetes client for the managed cluster, or an error when the
    /// cluster has not been initialized yet.
    pub async fn get_client(&self) -> Result<Client, String> {
        match self.is_cluster_initialized() {
            true => self.create_kubernetes_client().await,
            false => Err("Cannot get client! Cluster not initialized yet".to_string()),
        }
    }

    // Starts an existing (possibly stopped) cluster via `k3d cluster start <name>`.
    async fn start_cluster(&self) -> Result<(), String> {
        let cluster_name = self.get_cluster_name()?;
        let output = self.run_k3d_command(["cluster", "start", cluster_name])?;

        if !output.status.success() {
            let stderr = String::from_utf8_lossy(&output.stderr);
            return Err(format!("Failed to start cluster: {}", stderr));
        }

        info!("Successfully started k3d cluster '{}'", cluster_name);
        Ok(())
    }
}
#[cfg(test)]
mod test {
    use regex::Regex;
    use std::path::PathBuf;

    use crate::{K3d, K3D_BIN_FILE_NAME};

    #[tokio::test]
    async fn k3d_latest_release_should_get_latest() {
        let dir = get_clean_test_directory();
        assert_eq!(dir.join(K3D_BIN_FILE_NAME).exists(), false);
        let k3d = K3d::new(dir.clone(), None);
        let latest_release = k3d.get_latest_release_tag().await.unwrap();
        // Release tags are expected to be plain semver, e.g. "v5.6.0".
        let tag_regex = Regex::new(r"^v\d+\.\d+\.\d+$").unwrap();
        assert!(tag_regex.is_match(&latest_release.tag_name));
        assert!(!latest_release.tag_name.is_empty());
    }

    #[tokio::test]
    async fn k3d_download_latest_release_should_get_latest_bin() {
        let dir = get_clean_test_directory();
        assert_eq!(dir.join(K3D_BIN_FILE_NAME).exists(), false);
        let k3d = K3d::new(dir.clone(), None);
        let bin_file_path = k3d.download_latest_release().await.unwrap();
        assert_eq!(bin_file_path, dir.join(K3D_BIN_FILE_NAME));
        assert_eq!(dir.join(K3D_BIN_FILE_NAME).exists(), true);
    }

    /// Returns a fresh, empty directory unique to this call.
    ///
    /// A single shared path raced when multiple tests ran concurrently (each
    /// wiped the other's files), so every call now gets its own subdirectory
    /// keyed by a nanosecond timestamp plus a process-wide counter.
    fn get_clean_test_directory() -> PathBuf {
        static COUNTER: std::sync::atomic::AtomicUsize = std::sync::atomic::AtomicUsize::new(0);
        let seq = COUNTER.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
        let nanos = std::time::SystemTime::now()
            .duration_since(std::time::UNIX_EPOCH)
            .expect("system clock is set before the UNIX epoch")
            .as_nanos();

        let dir = PathBuf::from(format!("/tmp/harmony-k3d-test-dir/{nanos}-{seq}"));
        if dir.exists() {
            if let Err(e) = std::fs::remove_dir_all(&dir) {
                panic!("Failed to clean up test directory: {}", e);
            }
        }
        if let Err(e) = std::fs::create_dir_all(&dir) {
            panic!("Failed to create test directory: {}", e);
        }
        dir
    }
}

View File

@ -4,13 +4,6 @@ use yaserde::MaybeString;
use super::opnsense::{NumberOption, Range, StaticMap};
// #[derive(Default, PartialEq, Debug, YaSerialize, YaDeserialize)]
// #[yaserde(rename = "dhcpd")]
// pub struct Dhcpd {
// #[yaserde(rename = "lan")]
// pub lan: DhcpInterface,
// }
#[derive(Default, PartialEq, Debug, YaSerialize, YaDeserialize)]
pub struct DhcpInterface {
pub enable: Option<MaybeString>,
@ -44,77 +37,3 @@ pub struct DhcpRange {
#[yaserde(rename = "to")]
pub to: String,
}
#[cfg(test)]
mod test {
use crate::xml_utils::to_xml_str;
use pretty_assertions::assert_eq;
#[test]
fn dhcpd_should_deserialize_serialize_identical() {
let dhcpd: Dhcpd =
yaserde::de::from_str(SERIALIZED_DHCPD).expect("Deserialize Dhcpd failed");
assert_eq!(
to_xml_str(&dhcpd).expect("Serialize Dhcpd failed"),
SERIALIZED_DHCPD
);
}
const SERIALIZED_DHCPD: &str = "<?xml version=\"1.0\"?>
<dhcpd>
<lan>
<enable>1</enable>
<gateway>192.168.20.1</gateway>
<domain>somedomain.yourlocal.mcd</domain>
<ddnsdomainalgorithm>hmac-md5</ddnsdomainalgorithm>
<numberoptions>
<item/>
</numberoptions>
<range>
<from>192.168.20.50</from>
<to>192.168.20.200</to>
</range>
<winsserver/>
<dnsserver>192.168.20.1</dnsserver>
<ntpserver/>
<staticmap>
<mac>55:55:55:55:55:1c</mac>
<ipaddr>192.168.20.160</ipaddr>
<hostname>somehost983</hostname>
<descr>someservire8</descr>
<winsserver/>
<dnsserver/>
<ntpserver/>
</staticmap>
<staticmap>
<mac>55:55:55:55:55:1c</mac>
<ipaddr>192.168.20.155</ipaddr>
<hostname>somehost893</hostname>
<winsserver/>
<dnsserver/>
<ntpserver/>
</staticmap>
<staticmap>
<mac>55:55:55:55:55:1c</mac>
<ipaddr>192.168.20.165</ipaddr>
<hostname>somehost893</hostname>
<descr/>
<winsserver/>
<dnsserver/>
<ntpserver/>
</staticmap>
<staticmap>
<mac>55:55:55:55:55:1c</mac>
<ipaddr>192.168.20.50</ipaddr>
<hostname>hostswitch2</hostname>
<descr>switch-2 (bottom)</descr>
<winsserver/>
<dnsserver/>
<ntpserver/>
</staticmap>
<pool/>
</lan>
</dhcpd>\n";
}

View File

@ -132,22 +132,18 @@ mod test {
<interfaces>
<paul>
<if></if>
<descr></descr>
<enable/>
</paul>
<anotherpaul>
<if></if>
<descr></descr>
<enable/>
</anotherpaul>
<thirdone>
<if></if>
<descr></descr>
<enable/>
</thirdone>
<andgofor4>
<if></if>
<descr></descr>
<enable/>
</andgofor4>
</interfaces>

Some files were not shown because too many files have changed in this diff Show More