diff --git a/Cargo.lock b/Cargo.lock index fb8f3a4..f9c279f 100644 --- a/Cargo.lock +++ b/Cargo.lock
@@ -815,6 +815,20 @@ dependencies = [ "url", ]
+[[package]] +name = "example-lamp" +version = "0.1.0" +dependencies = [ + "cidr", + "env_logger", + "harmony", + "harmony_macros", + "harmony_types", + "log", + "tokio", + "url", +] +
[[package]] name = "example-nanodc" version = "0.1.0"
@@ -1144,6 +1158,7 @@ dependencies = [ "rust-ipmi", "semver", "serde", + "serde-value", "serde_json", "serde_yaml", "tokio",
@@ -1181,6 +1196,9 @@ dependencies = [ [[package]] name = "harmony_types" version = "0.1.0" +dependencies = [ + "serde", +]
[[package]] name = "hashbrown"
diff --git a/Cargo.toml b/Cargo.toml index 8c7afdd..c36cd27 100644 --- a/Cargo.toml +++ b/Cargo.toml
@@ -30,6 +30,7 @@ url = "2.5.4" kube = "0.98.0" k8s-openapi = { version = "0.24.0", features = [ "v1_30" ] } serde_yaml = "0.9.34" +serde-value = "0.7.0" http = "1.2.0" [workspace.dependencies.uuid]
diff --git a/README.md b/README.md index 277356d..6fed6eb 100644 --- a/README.md +++ b/README.md
@@ -1,9 +1,13 @@
-### Watch the whole repo on every change
+# Harmony: Open Infrastructure Orchestration
-Due to the current setup being a mix of separate repositories with gitignore and rust workspace, a few options are required for cargo-watch to have the desired behavior :
+## Quick demo
-```sh -RUST_LOG=info cargo watch --ignore-nothing -w harmony -w private_repos/ -x 'run --bin nationtech' -```
+`cargo run -p example-tui`
-This will run the nationtech bin (likely `private_repos/nationtech/src/main.rs`) on any change in the harmony or private_repos folders.
+This will launch Harmony's minimalist terminal UI, which embeds a few demo scores.
+
+Usage instructions will be displayed at the bottom of the TUI.
+
+## Core architecture
+
+![Harmony Core Architecture](docs/diagrams/Harmony_Core_Architecture.drawio.svg)
diff --git a/adr/003-infrastructure-abstractions.md b/adr/003-infrastructure-abstractions.md index 3d01531..5785bd4 100644 --- a/adr/003-infrastructure-abstractions.md +++ b/adr/003-infrastructure-abstractions.md
@@ -1,12 +1,18 @@
-**Architecture Decision Record: Harmony Infrastructure Abstractions**
+## Architecture Decision Record: Core Harmony Infrastructure Abstractions
-**Status**: Proposed
+## Status
-**Context**: Harmony is an infrastructure orchestrator written in pure Rust, aiming to provide real portability of automation across different cloud providers and infrastructure setups. To achieve this, we need to define infrastructure abstractions that are provider-agnostic and flexible enough to accommodate various use cases.
+Proposed
-**Decision**: We will define our infrastructure abstractions using a domain-driven approach, focusing on the core logic of Harmony. These abstractions will only include the absolutely required elements for a specific resource, without referencing specific providers or implementations.
+## Context
-**Example: Database Abstraction**
+Harmony is an infrastructure orchestrator written in pure Rust, aiming to provide real portability of automation across different cloud providers and infrastructure setups. To achieve this, we need to define infrastructure abstractions that are provider-agnostic and flexible enough to accommodate various use cases.
+
+## Decision
+
+We will define our infrastructure abstractions using a domain-driven approach, focusing on the core logic of Harmony.
These abstractions will only include the absolutely required elements for a specific resource, without referencing specific providers or implementations.
+
+### Example: Database Abstraction
To deploy a database to any cloud provider, we define an abstraction that includes essential elements such as: ```rust
diff --git a/adr/005-interactive-project.md b/adr/005-interactive-project.md new file mode 100644 index 0000000..01e9794 --- /dev/null +++ b/adr/005-interactive-project.md
@@ -0,0 +1,80 @@
+# Architecture Decision Record: Interactive project setup for automated delivery pipeline of various codebases
+
+## Status
+
+Proposed
+
+## Context
+
+Many categories of developers are underserved by modern delivery tools; we will focus first on LAMP (Linux, Apache, MySQL, PHP) developers.
+
+Most of these projects are developed by a small team on a small budget, yet they are mission critical to their users.
+
+We believe that Harmony, with its end-to-end infrastructure orchestration approach, enables relatively easy integration for this category of projects into a modern delivery pipeline that is opinionated enough that the development team is not overwhelmed by choices, but also flexible enough to allow them to deploy their application according to their habits. This includes local development, managed dedicated servers, virtualized environments, manual dashboards like cPanel, cloud providers, etc.
+
+To enable this, we need to provide an easy way for developers to step onto the Harmony pipeline without disrupting their workflow.
+
+This ADR outlines the approach taken to go from a standalone LAMP project to a LAMP project using Harmony that can benefit from all the enterprise-grade features of our opinionated delivery pipeline, including:
+
+- Automated environment provisioning (local, staging, UAT, prod)
+- Infrastructure optimized for the delivery stage
+  - Production with automated backups
+- Automated domain names for early stages, configured domain name for production
+- SSL certificates
+- Secret management
+- SSO integration
+- IDP, IDS security
+- Monitoring, logging
+- Artifact registry
+- Automated deployment and rollback
+- Dependency management (databases, configuration, scripts)
+
+## Decision
+
+### Custom Rust DSL
+
+We decided to develop a Rust-based DSL. Even though this means people might be "afraid of Rust", we believe the numerous advantages are worth the risk.
+
+The main selection criteria are:
+
+- Robustness: the application/infrastructure definition should not be fragile to typos or versioning. Rust's dependency management (Cargo) and type safety are best in class for robustness.
+- Flexibility: writing the definition in a standard programming language empowers users to easily leverage the internals of Harmony and adapt the code to their needs.
+- Extensibility: once again, a standard programming language makes it easy to import one or several configurations, create reusable building blocks, and build upon the different components to take control of a complex multi-project deployment without going crazy because of a typo in a YAML definition that changed 4 years ago.
+
+## Consequences
+
+### Positive
+
+- Complete control over the syntax and semantics of the DSL, tailored specifically to our needs.
+- Potential for better performance optimizations as we can implement exactly what is required without additional abstractions.
+
+### Negative
+
+- Higher initial development cost due to building a new language from scratch.
+- Steeper learning curve for developers who need to use the DSL.
+- Lack of an existing community and ecosystem, which could slow down adoption.
+- Increased maintenance overhead as the DSL needs to be updated and supported internally.
+
+## Alternatives considered
+
+### Score spec
+
+We considered integrating with the score-spec project: https://github.com/score-spec/spec
+
+The idea was to benefit from an existing community and ecosystem. The motivations to consider Score were the following:
+
+- It is a CNCF project, which helps a lot with adoption and community building
+- It already supports important targets for us, including docker-compose and k8s
+- It provides a way to define the application's infrastructure at the correct level of abstraction for us to deploy it anywhere -- that is the goal of the score-spec project
+- Once we evolve, we can simply offer a Score-compatible provider that allows any project with a Score spec to be deployed on the Harmony stack
+- Score was built with enterprise use cases in mind: Humanitec platform engineering customers
+
+**Positive consequences**
+
+- The Score community is growing, and adopting Harmony will be very easy for them
+
+**Negative consequences**
+
+- Score is not that big yet and is mostly used by Humanitec's clients (presumably), which is a hard-to-penetrate environment
diff --git a/adr/006-secret-management.md b/adr/006-secret-management.md index 370d6db..c11b7e4 100644 --- a/adr/006-secret-management.md +++ b/adr/006-secret-management.md
@@ -5,6 +5,7 @@ Proposed
### TODO [#3](https://git.nationtech.io/NationTech/harmony/issues/3):
+
Before accepting this proposal we need to run a POC to validate this potential issue : **Keycloak Misuse**: Using Keycloak primarily as a secrets manager is inappropriate, as it's designed for identity and access management (IAM), not secrets management. This creates scalability and functionality limitations.
diff --git a/adr/007-default-runtime.md b/adr/007-default-runtime.md new file mode 100644 index 0000000..c1032d2 --- /dev/null +++ b/adr/007-default-runtime.md
@@ -0,0 +1,65 @@
+## Architecture Decision Record: Default Runtime for Managed Workloads
+
+### Status
+
+Proposed
+
+### Context
+
+Our infrastructure orchestrator manages workloads requiring a Kubernetes-compatible runtime environment.
+
+**Requirements**
+
+- Cross-platform (Linux, Windows, macOS)
+- Kubernetes compatibility
+- Lightweight, easy setup with minimal dependencies
+- Clean host teardown and minimal residue
+- Well-maintained and actively supported
+
+### Decision
+
+We select **k3d (k3s in Docker)** as our default runtime environment across all supported platforms (Linux, Windows, macOS).
+
+### Rationale
+
+- **Consistency Across Platforms:** One solution for all platforms simplifies development and documentation and reduces complexity.
+
+- **Simplified Setup and Teardown:** k3d runs Kubernetes clusters in Docker containers, allowing quick setup, teardown, and minimal host residue.
+
+- **Leveraging Existing Container Ecosystem:** Docker and other container runtimes are widely adopted, so most users already have them installed and are familiar with them.
+
+- **Kubernetes Compatibility:** k3s (within k3d) is fully Kubernetes-certified, ensuring compatibility with standard Kubernetes tools and manifests.
+
+- **Active Maintenance and Community:** k3d and k3s both have active communities and are well-maintained.
+
+### Consequences
+
+#### Positive
+
+- **Uniform User Experience:** Users have a consistent setup experience across all platforms.
+- **Reduced Support Overhead:** Standardizing runtime simplifies support, documentation, and troubleshooting. +- **Clean Isolation:** Containerization allows developers to easily clean up clusters without affecting host systems. +- **Facilitates Multi-Cluster Development:** Easy creation and management of multiple clusters concurrently. + +#### Negative + +- **Docker Dependency:** Requires Docker (or compatible runtime) on all platforms. +- **Potential Overhead:** Slight performance/resource overhead compared to native k3s. +- **Docker Licensing Considerations:** Enterprise licensing of Docker Desktop could introduce additional considerations. + +### Alternatives Considered + +- **Native k3s (Linux) / k3d (Windows/macOS):** Original proposal. Rejected for greater simplicity and consistency. +- **Minikube, MicroK8s, Kind:** Rejected due to complexity, resource usage, or narrower use-case focus. +- **Docker Compose, Podman Desktop:** Rejected due to lack of orchestration or current limited k3d compatibility. + +### Future Work + +- Evaluate Podman Desktop or other container runtimes to avoid Docker dependency. +- Continuously monitor k3d maturity and stability. +- Investigate WebAssembly (WASM) runtimes as emerging alternatives for containerized workloads. diff --git a/adr/008-score-display-formatting.md b/adr/008-score-display-formatting.md new file mode 100644 index 0000000..7bc0620 --- /dev/null +++ b/adr/008-score-display-formatting.md @@ -0,0 +1,62 @@ +## Architecture Decision Record: Data Representation and UI Rendering for Score Types + +**Status:** Proposed + +**TL;DR:** `Score` types will be serialized (using `serde`) for presentation in UIs. This decouples data definition from presentation, improving scalability and reducing complexity for developers defining `Score` types. New UI types only need to handle existing field types, and new `Score` types don’t require UI changes as long as they use existing field types. Adding a new field type *does* require updates to all UIs. + +**Key benefits:** Scalability, reduced complexity for `Score` authors, decoupling of data and presentation. + +**Key trade-off:** Adding new field types requires updating all UIs. + +--- + +**Context:** + +Harmony is a pure Rust infrastructure orchestrator focused on compile-time safety and providing a developer-friendly, Ansible-module-like experience for defining infrastructure configurations via "Scores". These Scores (e.g., `LAMPScore`) are Rust structs composed of specific, strongly-typed fields (e.g., `VersionField`, `UrlField`, `PathField`) which are validated at compile-time using macros (`Version!`, `Url!`, etc.). + +A key requirement is displaying the configuration defined in these Scores across various user interfaces (Web UI, TUI, potentially Mobile UI, etc.) in a consistent and type-safe manner. As the number of Score types is expected to grow significantly (hundreds or thousands), we need a scalable approach for rendering their data that avoids tightly coupling Score definitions to specific UI implementations. + +The primary challenge is preventing the need for every `Score` struct author to implement multiple display traits (e.g., `Display`, `WebDisplay`, `TuiDisplay`) for every potential UI target. This would create an N x M complexity problem (N Scores * M UI types) and place an unreasonable burden on Score developers, hindering scalability and maintainability. + +**Decision:** + +1. **Mandatory Serialization:** All `Score` structs *must* implement `serde::Serialize` and `serde::Deserialize`. 
They *will not* be required to implement `std::fmt::Display` or any custom UI-specific display traits (e.g., `WebDisplay`, `TuiDisplay`). +2. **Field-Level Rendering:** Responsibility for rendering data will reside within the UI components. Each UI (Web, TUI, etc.) will implement logic to display *individual field types* (e.g., `UrlField`, `VersionField`, `IpAddressField`, `SecretField`). +3. **Data Access via Serialization:** UIs will primarily interact with `Score` data through its serialized representation (e.g., JSON obtained via `serde_json`). This provides a standardized interface for UIs to consume the data structure agnostic of the specific `Score` type. Alternatively, UIs *could* potentially use reflection or specific visitor patterns on the `Score` struct itself, but serialization is the preferred decoupling mechanism. + +**Rationale:** + +1. **Decoupling Data from Presentation:** This decision cleanly separates the data definition (`Score` structs and their fields) from the presentation logic (UI rendering). `Score` authors can focus solely on defining the data and its structure, while UI developers focus on how to best present known data *types*. +2. **Scalability:** This approach scales significantly better than requiring display trait implementations on Scores: + * Adding a *new Score type* requires *no changes* to existing UI code, provided it uses existing field types. + * Adding a *new UI type* requires implementing rendering logic only for the defined set of *field types*, not for every individual `Score` type. This reduces the N x M complexity to N + M complexity (approximately). +3. **Simplicity for Score Authors:** Requiring only `serde::Serialize + Deserialize` (which can often be derived automatically with `#[derive(Serialize, Deserialize)]`) is a much lower burden than implementing custom rendering logic for multiple, potentially unknown, UI targets. +4. **Leverages Rust Ecosystem Standards:** `serde` is the de facto standard for serialization and deserialization in Rust. Relying on it aligns with common Rust practices and benefits from its robustness, performance, and extensive tooling. +5. **Consistency for UIs:** Serialization provides a consistent, structured format (like JSON) for UIs to consume data, regardless of the underlying `Score` struct's complexity or composition. +6. **Flexibility for UI Implementation:** UIs can choose the best way to render each field type based on their capabilities (e.g., a `UrlField` might be a clickable link in a Web UI, plain text in a TUI; a `SecretField` might be masked). + +**Consequences:** + +**Positive:** + +* Greatly improved scalability for adding new Score types and UI targets. +* Strong separation of concerns between data definition and presentation. +* Reduced implementation burden and complexity for Score authors. +* Consistent mechanism for UIs to access and interpret Score data. +* Aligns well with the Hexagonal Architecture (ADR-002) by treating UIs as adapters interacting with the application core via a defined port (the serialized data contract). + +**Negative:** + +* Adding a *new field type* (e.g., `EmailField`) requires updates to *all* existing UI implementations to support rendering it. +* UI components become dependent on the set of defined field types and need comprehensive logic to handle each one appropriately. +* Potential minor overhead of serialization/deserialization compared to direct function calls (though likely negligible for UI purposes). 
+* Requires careful design and management of the standard library of field types. + +**Alternatives Considered:** + +1. **`Score` Implements `std::fmt::Display`:** + * _Rejected:_ Too simplistic. Only suitable for basic text rendering, doesn't cater to structured UIs (Web, etc.), and doesn't allow type-specific rendering logic (e.g., masking secrets). Doesn't scale to multiple UI formats. +2. **`Score` Implements Multiple Custom Display Traits (`WebDisplay`, `TuiDisplay`, etc.):** + * _Rejected:_ Leads directly to the N x M complexity problem. Tightly couples Score definitions to specific UI implementations. Places an excessive burden on Score authors, hindering adoption and scalability. +3. **Generic Display Trait with Context (`Score` implements `DisplayWithContext`):** + * _Rejected:_ More flexible than multiple traits, but still requires Score authors to implement potentially complex rendering logic within the `Score` definition itself. The `Score` would still need awareness of different UI contexts, leading to undesirable coupling. Managing context types adds complexity. diff --git a/adr/core-abstractions/main_context_prompt.md b/adr/core-abstractions/main_context_prompt.md new file mode 100644 index 0000000..4b1e54e --- /dev/null +++ b/adr/core-abstractions/main_context_prompt.md @@ -0,0 +1,360 @@ + +# Here is the current condenses architecture sample for Harmony's core abstractions + +```rust +use std::process::Command; + +pub trait Capability {} + +pub trait CommandCapability: Capability { + fn execute_command(&self, command: &str, args: &[&str]) -> Result; +} + +pub trait KubernetesCapability: Capability { + fn apply_manifest(&self, manifest: &str) -> Result<(), String>; + fn get_resource(&self, resource_type: &str, name: &str) -> Result; +} + +pub trait Topology { + fn name(&self) -> &str; +} + +pub trait Score { + fn compile(&self) -> Result>, String>; + fn name(&self) -> &str; +} + +pub struct LinuxHostTopology { + name: String, + host: String, +} + +impl Capability for LinuxHostTopology {} + +impl LinuxHostTopology { + pub fn new(name: String, host: String) -> Self { + Self { name, host } + } +} + +impl Topology for LinuxHostTopology { + fn name(&self) -> &str { + &self.name + } +} + +impl CommandCapability for LinuxHostTopology { + fn execute_command(&self, command: &str, args: &[&str]) -> Result { + println!("Executing on {}: {} {:?}", self.host, command, args); + // In a real implementation, this would SSH to the host and execute the command + let output = Command::new(command) + .args(args) + .output() + .map_err(|e| e.to_string())?; + + if output.status.success() { + Ok(String::from_utf8_lossy(&output.stdout).to_string()) + } else { + Err(String::from_utf8_lossy(&output.stderr).to_string()) + } + } +} + +pub struct K3DTopology { + name: String, + linux_host: LinuxHostTopology, + cluster_name: String, +} + +impl Capability for K3DTopology {} + +impl K3DTopology { + pub fn new(name: String, linux_host: LinuxHostTopology, cluster_name: String) -> Self { + Self { + name, + linux_host, + cluster_name, + } + } +} + +impl Topology for K3DTopology { + fn name(&self) -> &str { + &self.name + } +} + +impl CommandCapability for K3DTopology { + fn execute_command(&self, command: &str, args: &[&str]) -> Result { + self.linux_host.execute_command(command, args) + } +} + +impl KubernetesCapability for K3DTopology { + fn apply_manifest(&self, manifest: &str) -> Result<(), String> { + println!("Applying manifest to K3D cluster '{}'", self.cluster_name); + // Write manifest to a temporary 
file + let temp_file = format!("/tmp/manifest-harmony-temp.yaml"); + + // Use the linux_host directly to avoid capability trait bounds + self.linux_host + .execute_command("bash", &["-c", &format!("cat > {}", temp_file)])?; + + // Apply with kubectl + self.linux_host.execute_command("kubectl", &[ + "--context", + &format!("k3d-{}", self.cluster_name), + "apply", + "-f", + &temp_file, + ])?; + + Ok(()) + } + + fn get_resource(&self, resource_type: &str, name: &str) -> Result { + println!( + "Getting resource {}/{} from K3D cluster '{}'", + resource_type, name, self.cluster_name + ); + self.linux_host.execute_command("kubectl", &[ + "--context", + &format!("k3d-{}", self.cluster_name), + "get", + resource_type, + name, + "-o", + "yaml", + ]) + } +} + +pub struct CommandScore { + name: String, + command: String, + args: Vec, +} + +impl CommandScore { + pub fn new(name: String, command: String, args: Vec) -> Self { + Self { + name, + command, + args, + } + } +} + +pub trait Interpret { + fn execute(&self, topology: &T) -> Result; +} + +struct CommandInterpret; + +impl Interpret for CommandInterpret +where + T: Topology + CommandCapability, +{ + fn execute(&self, topology: &T) -> Result { + todo!() + } +} + +impl Score for CommandScore +where + T: Topology + CommandCapability, +{ + fn compile(&self) -> Result>, String> { + Ok(Box::new(CommandInterpret {})) + } + + fn name(&self) -> &str { + &self.name + } +} + + +#[derive(Clone)] +pub struct K8sResourceScore { + name: String, + manifest: String, +} + +impl K8sResourceScore { + pub fn new(name: String, manifest: String) -> Self { + Self { name, manifest } + } +} + +struct K8sResourceInterpret { + score: K8sResourceScore, +} + +impl Interpret for K8sResourceInterpret { + fn execute(&self, topology: &T) -> Result { + todo!() + } +} + +impl Score for K8sResourceScore +where + T: Topology + KubernetesCapability, +{ + fn compile(&self) -> Result + 'static)>, String> { + Ok(Box::new(K8sResourceInterpret { + score: self.clone(), + })) + } + + fn name(&self) -> &str { + &self.name + } +} + +pub struct Maestro { + topology: T, + scores: Vec>>, +} + + +impl Maestro { + pub fn new(topology: T) -> Self { + Self { + topology, + scores: Vec::new(), + } + } + + pub fn register_score(&mut self, score: S) + where + S: Score + 'static, + { + println!( + "Registering score '{}' for topology '{}'", + score.name(), + self.topology.name() + ); + self.scores.push(Box::new(score)); + } + + pub fn orchestrate(&self) -> Result<(), String> { + println!("Orchestrating topology '{}'", self.topology.name()); + for score in &self.scores { + let interpret = score.compile()?; + interpret.execute(&self.topology)?; + } + Ok(()) + } +} + +fn main() { + let linux_host = LinuxHostTopology::new("dev-machine".to_string(), "localhost".to_string()); + + let mut linux_maestro = Maestro::new(linux_host); + + linux_maestro.register_score(CommandScore::new( + "check-disk".to_string(), + "df".to_string(), + vec!["-h".to_string()], + )); + linux_maestro.orchestrate().unwrap(); + + // This would fail to compile if we tried to register a K8sResourceScore + // because LinuxHostTopology doesn't implement KubernetesCapability + //linux_maestro.register_score(K8sResourceScore::new( + // "...".to_string(), + // "...".to_string(), + //)); + + // Create a K3D topology which has both Command and Kubernetes capabilities + let k3d_host = LinuxHostTopology::new("k3d-host".to_string(), "localhost".to_string()); + + let k3d_topology = K3DTopology::new( + "dev-cluster".to_string(), + k3d_host, + 
"devcluster".to_string(), + ); + + // Create a maestro for the K3D topology + let mut k3d_maestro = Maestro::new(k3d_topology); + + // We can register both command scores and kubernetes scores + k3d_maestro.register_score(CommandScore::new( + "check-nodes".to_string(), + "kubectl".to_string(), + vec!["get".to_string(), "nodes".to_string()], + )); + + k3d_maestro.register_score(K8sResourceScore::new( + "deploy-nginx".to_string(), + r#" + apiVersion: apps/v1 + kind: Deployment + metadata: + name: nginx + spec: + replicas: 1 + selector: + matchLabels: + app: nginx + template: + metadata: + labels: + app: nginx + spec: + containers: + - name: nginx + image: nginx:latest + ports: + - containerPort: 80 + "# + .to_string(), + )); + + // Orchestrate both topologies + linux_maestro.orchestrate().unwrap(); + k3d_maestro.orchestrate().unwrap(); +} +``` + + +## Technical take + +The key insight is that we might not need a complex TypeMap or runtime capability checking. Instead, we should leverage Rust's trait system to express capability requirements directly in the type system. + +By clarifying the problem and focusing on type-level solutions rather than runtime checks, we can likely arrive at a simpler, more robust design that leverages the strengths of Rust's type system. + +## Philosophical Shifts + +1. **From Runtime to Compile-Time**: Move capability checking from runtime to compile-time. + +2. **From Objects to Functions**: Think of scores less as objects and more as functions that transform topologies. + +3. **From Homogeneous to Heterogeneous API**: Embrace different API paths for different capability combinations rather than trying to force everything through a single interface. + +4. **From Complex to Simple**: Focus on making common cases simple, even if it means less abstraction for uncommon cases. + +## High level concepts + +The high level concepts so far has evolved towards this definition. + +Topology -> Has -> Capabilities +Score -> Defines -> Work to be done / desired state +Interpret -> Requires -> Capabilities to execute a Score +Maestro -> Enforces -> Compatibility (through the type system at compile time) + +## Why Harmony + +The compile time safety is paramount here. Harmony's main goal is to make the entire software delivery pipeline robust. Current IaC tools are very hard to work with, require complex setups to test and debug real code. + +Leveraging Rust's compiler allows us to shift left a lot of the complexities and frustration that comes with using tools like Ansible that is Yaml based and quickly becomes brittle at scale. Or Terraform, when running a `terraform plan` makes you think everything is correct only to fail horribly when confidently launching `terraform apply` and leaving you with tens or hundreds of resources to clean manually. + +Of course, this requires a significant effort to get to the point where we have actually implemented all the logic. + +But using Rust and a Type Driven Design approach, we believe we are providing a much more robust foundation for our customer's and user's deployments anywhere. + +Also, having the full power of a mature programming language like Rust enables organizations and the community to customize their deployment any way they want, build upon it in a reliable way that has been evolved and proven over decades of enterprise dependency management, API definitions, etc. 
+ +=== + +Given all this c diff --git a/adr/core-abstractions/topology/Cargo.toml b/adr/core-abstractions/topology/Cargo.toml new file mode 100644 index 0000000..96740a9 --- /dev/null +++ b/adr/core-abstractions/topology/Cargo.toml @@ -0,0 +1,10 @@ +[package] +name = "example-topology" +edition = "2024" +version.workspace = true +readme.workspace = true +license.workspace = true +publish = false + +[dependencies] +rand.workspace = true diff --git a/adr/core-abstractions/topology/src/main.rs b/adr/core-abstractions/topology/src/main.rs new file mode 100644 index 0000000..8fc305d --- /dev/null +++ b/adr/core-abstractions/topology/src/main.rs @@ -0,0 +1,232 @@ +// Basic traits from your example +trait Topology {} + +trait Score: Clone + std::fmt::Debug { + fn get_interpret(&self) -> Box>; + fn name(&self) -> String; +} + +trait Interpret { + fn execute(&self); +} + +struct Maestro { + topology: T +} + +impl Maestro { + pub fn new(topology: T) -> Self { + Maestro { topology } + } + + pub fn register_score(&self, score: S) { + println!("Registering score: {}", score.name()); + } + + pub fn execute_score(&self, score: S) { + println!("Executing score: {}", score.name()); + score.get_interpret::().execute(); + } +} + +// Capability traits - these are used to enforce requirements +trait CommandExecution { + fn execute_command(&self, command: &[String]) -> Result; +} + +trait FileSystem { + fn read_file(&self, path: &str) -> Result; + fn write_file(&self, path: &str, content: &str) -> Result<(), String>; +} + +// A concrete topology implementation +#[derive(Clone, Debug)] +struct LinuxHostTopology { + hostname: String, +} + +impl Topology for LinuxHostTopology {} + +// Implement the capabilities for LinuxHostTopology +impl CommandExecution for LinuxHostTopology { + fn execute_command(&self, command: &[String]) -> Result { + println!("Executing command on {}: {:?}", self.hostname, command); + // In a real implementation, this would use std::process::Command + Ok(format!("Command executed successfully on {}", self.hostname)) + } +} + +impl FileSystem for LinuxHostTopology { + fn read_file(&self, path: &str) -> Result { + println!("Reading file {} on {}", path, self.hostname); + Ok(format!("Content of {} on {}", path, self.hostname)) + } + + fn write_file(&self, path: &str, content: &str) -> Result<(), String> { + println!("Writing to file {} on {}: {}", path, self.hostname, content); + Ok(()) + } +} + +// Another topology that doesn't support command execution +#[derive(Clone, Debug)] +struct BareMetalTopology { + device_id: String, +} + +impl Topology for BareMetalTopology {} + +impl FileSystem for BareMetalTopology { + fn read_file(&self, path: &str) -> Result { + println!("Reading file {} on device {}", path, self.device_id); + Ok(format!("Content of {} on device {}", path, self.device_id)) + } + + fn write_file(&self, path: &str, content: &str) -> Result<(), String> { + println!("Writing to file {} on device {}: {}", path, self.device_id, content); + Ok(()) + } +} + +// CommandScore implementation +#[derive(Clone, Debug)] +struct CommandScore { + name: String, + args: Vec, +} + +impl CommandScore { + pub fn new(name: String, args: Vec) -> Self { + CommandScore { name, args } + } +} + +impl Score for CommandScore { + fn get_interpret(&self) -> Box> { + // This is the key part: we constrain T to implement CommandExecution + // If T doesn't implement CommandExecution, this will fail to compile + Box::new(CommandInterpret::::new(self.clone())) + } + + fn name(&self) -> String { + self.name.clone() + 
} +} + +// CommandInterpret implementation +struct CommandInterpret { + score: CommandScore, + _marker: std::marker::PhantomData, +} + +impl CommandInterpret { + pub fn new(score: CommandScore) -> Self { + CommandInterpret { + score, + _marker: std::marker::PhantomData, + } + } +} + +impl Interpret for CommandInterpret { + fn execute(&self) { + println!("Command interpret is executing: {:?}", self.score.args); + // In a real implementation, you would call the topology's execute_command method + // topology.execute_command(&self.score.args); + } +} + +// FileScore implementation - a different type of score that requires FileSystem capability +#[derive(Clone, Debug)] +struct FileScore { + name: String, + path: String, + content: Option, +} + +impl FileScore { + pub fn new_read(name: String, path: String) -> Self { + FileScore { name, path, content: None } + } + + pub fn new_write(name: String, path: String, content: String) -> Self { + FileScore { name, path, content: Some(content) } + } +} + +impl Score for FileScore { + fn get_interpret(&self) -> Box> { + // This constrains T to implement FileSystem + Box::new(FileInterpret::::new(self.clone())) + } + + fn name(&self) -> String { + self.name.clone() + } +} + +// FileInterpret implementation +struct FileInterpret { + score: FileScore, + _marker: std::marker::PhantomData, +} + +impl FileInterpret { + pub fn new(score: FileScore) -> Self { + FileInterpret { + score, + _marker: std::marker::PhantomData, + } + } +} + +impl Interpret for FileInterpret { + fn execute(&self) { + match &self.score.content { + Some(content) => { + println!("File interpret is writing to {}: {}", self.score.path, content); + // In a real implementation: topology.write_file(&self.score.path, content); + }, + None => { + println!("File interpret is reading from {}", self.score.path); + // In a real implementation: let content = topology.read_file(&self.score.path); + } + } + } +} + +fn main() { + // Create our topologies + let linux = LinuxHostTopology { hostname: "server1.example.com".to_string() }; + let bare_metal = BareMetalTopology { device_id: "device001".to_string() }; + + // Create our maestros + let linux_maestro = Maestro::new(linux); + let bare_metal_maestro = Maestro::new(bare_metal); + + // Create scores + let command_score = CommandScore::new( + "List Files".to_string(), + vec!["ls".to_string(), "-la".to_string()] + ); + + let file_read_score = FileScore::new_read( + "Read Config".to_string(), + "/etc/config.json".to_string() + ); + + // This will work because LinuxHostTopology implements CommandExecution + linux_maestro.execute_score(command_score.clone()); + + // This will work because LinuxHostTopology implements FileSystem + linux_maestro.execute_score(file_read_score.clone()); + + // This will work because BareMetalTopology implements FileSystem + bare_metal_maestro.execute_score(file_read_score); + + // This would NOT compile because BareMetalTopology doesn't implement CommandExecution: + // bare_metal_maestro.execute_score(command_score); + // The error would occur at compile time, ensuring type safety + + println!("All scores executed successfully!"); +} diff --git a/adr/core-abstractions/topology/src/main_claude37_2.rs b/adr/core-abstractions/topology/src/main_claude37_2.rs new file mode 100644 index 0000000..d1b7896 --- /dev/null +++ b/adr/core-abstractions/topology/src/main_claude37_2.rs @@ -0,0 +1,314 @@ +mod main_gemini25pro; +use std::process::Command; + +pub trait Capability {} + +pub trait CommandCapability: Capability { + fn 
execute_command(&self, command: &str, args: &[&str]) -> Result; +} + +pub trait KubernetesCapability: Capability { + fn apply_manifest(&self, manifest: &str) -> Result<(), String>; + fn get_resource(&self, resource_type: &str, name: &str) -> Result; +} + +pub trait Topology { + fn name(&self) -> &str; +} + +pub trait Score { + fn compile(&self) -> Result>, String>; + fn name(&self) -> &str; +} + +pub struct LinuxHostTopology { + name: String, + host: String, +} + +impl Capability for LinuxHostTopology {} + +impl LinuxHostTopology { + pub fn new(name: String, host: String) -> Self { + Self { name, host } + } +} + +impl Topology for LinuxHostTopology { + fn name(&self) -> &str { + &self.name + } +} + +impl CommandCapability for LinuxHostTopology { + fn execute_command(&self, command: &str, args: &[&str]) -> Result { + println!("Executing on {}: {} {:?}", self.host, command, args); + // In a real implementation, this would SSH to the host and execute the command + let output = Command::new(command) + .args(args) + .output() + .map_err(|e| e.to_string())?; + + if output.status.success() { + Ok(String::from_utf8_lossy(&output.stdout).to_string()) + } else { + Err(String::from_utf8_lossy(&output.stderr).to_string()) + } + } +} + +pub struct K3DTopology { + name: String, + linux_host: LinuxHostTopology, + cluster_name: String, +} + +impl Capability for K3DTopology {} + +impl K3DTopology { + pub fn new(name: String, linux_host: LinuxHostTopology, cluster_name: String) -> Self { + Self { + name, + linux_host, + cluster_name, + } + } +} + +impl Topology for K3DTopology { + fn name(&self) -> &str { + &self.name + } +} + +impl CommandCapability for K3DTopology { + fn execute_command(&self, command: &str, args: &[&str]) -> Result { + self.linux_host.execute_command(command, args) + } +} + +impl KubernetesCapability for K3DTopology { + fn apply_manifest(&self, manifest: &str) -> Result<(), String> { + println!("Applying manifest to K3D cluster '{}'", self.cluster_name); + // Write manifest to a temporary file + let temp_file = format!("/tmp/manifest-harmony-temp.yaml"); + + // Use the linux_host directly to avoid capability trait bounds + self.linux_host + .execute_command("bash", &["-c", &format!("cat > {}", temp_file)])?; + + // Apply with kubectl + self.linux_host.execute_command("kubectl", &[ + "--context", + &format!("k3d-{}", self.cluster_name), + "apply", + "-f", + &temp_file, + ])?; + + Ok(()) + } + + fn get_resource(&self, resource_type: &str, name: &str) -> Result { + println!( + "Getting resource {}/{} from K3D cluster '{}'", + resource_type, name, self.cluster_name + ); + self.linux_host.execute_command("kubectl", &[ + "--context", + &format!("k3d-{}", self.cluster_name), + "get", + resource_type, + name, + "-o", + "yaml", + ]) + } +} + +pub struct CommandScore { + name: String, + command: String, + args: Vec, +} + +impl CommandScore { + pub fn new(name: String, command: String, args: Vec) -> Self { + Self { + name, + command, + args, + } + } +} + +pub trait Interpret { + fn execute(&self, topology: &T) -> Result; +} + +struct CommandInterpret; + +impl Interpret for CommandInterpret +where + T: Topology + CommandCapability, +{ + fn execute(&self, topology: &T) -> Result { + todo!() + } +} + +impl Score for CommandScore +where + T: Topology + CommandCapability, +{ + fn compile(&self) -> Result>, String> { + Ok(Box::new(CommandInterpret {})) + } + + fn name(&self) -> &str { + &self.name + } +} + + +#[derive(Clone)] +pub struct K8sResourceScore { + name: String, + manifest: String, +} + +impl 
K8sResourceScore { + pub fn new(name: String, manifest: String) -> Self { + Self { name, manifest } + } +} + +struct K8sResourceInterpret { + score: K8sResourceScore, +} + +impl Interpret for K8sResourceInterpret { + fn execute(&self, topology: &T) -> Result { + todo!() + } +} + +impl Score for K8sResourceScore +where + T: Topology + KubernetesCapability, +{ + fn compile(&self) -> Result + 'static)>, String> { + Ok(Box::new(K8sResourceInterpret { + score: self.clone(), + })) + } + + fn name(&self) -> &str { + &self.name + } +} + +pub struct Maestro { + topology: T, + scores: Vec>>, +} + + +impl Maestro { + pub fn new(topology: T) -> Self { + Self { + topology, + scores: Vec::new(), + } + } + + pub fn register_score(&mut self, score: S) + where + S: Score + 'static, + { + println!( + "Registering score '{}' for topology '{}'", + score.name(), + self.topology.name() + ); + self.scores.push(Box::new(score)); + } + + pub fn orchestrate(&self) -> Result<(), String> { + println!("Orchestrating topology '{}'", self.topology.name()); + for score in &self.scores { + let interpret = score.compile()?; + interpret.execute(&self.topology)?; + } + Ok(()) + } +} + +fn main() { + let linux_host = LinuxHostTopology::new("dev-machine".to_string(), "localhost".to_string()); + + let mut linux_maestro = Maestro::new(linux_host); + + linux_maestro.register_score(CommandScore::new( + "check-disk".to_string(), + "df".to_string(), + vec!["-h".to_string()], + )); + linux_maestro.orchestrate().unwrap(); + + // This would fail to compile if we tried to register a K8sResourceScore + // because LinuxHostTopology doesn't implement KubernetesCapability + //linux_maestro.register_score(K8sResourceScore::new( + // "...".to_string(), + // "...".to_string(), + //)); + + // Create a K3D topology which has both Command and Kubernetes capabilities + let k3d_host = LinuxHostTopology::new("k3d-host".to_string(), "localhost".to_string()); + + let k3d_topology = K3DTopology::new( + "dev-cluster".to_string(), + k3d_host, + "devcluster".to_string(), + ); + + // Create a maestro for the K3D topology + let mut k3d_maestro = Maestro::new(k3d_topology); + + // We can register both command scores and kubernetes scores + k3d_maestro.register_score(CommandScore::new( + "check-nodes".to_string(), + "kubectl".to_string(), + vec!["get".to_string(), "nodes".to_string()], + )); + + k3d_maestro.register_score(K8sResourceScore::new( + "deploy-nginx".to_string(), + r#" + apiVersion: apps/v1 + kind: Deployment + metadata: + name: nginx + spec: + replicas: 1 + selector: + matchLabels: + app: nginx + template: + metadata: + labels: + app: nginx + spec: + containers: + - name: nginx + image: nginx:latest + ports: + - containerPort: 80 + "# + .to_string(), + )); + + // Orchestrate both topologies + linux_maestro.orchestrate().unwrap(); + k3d_maestro.orchestrate().unwrap(); +} diff --git a/adr/core-abstractions/topology/src/main_claudev1.rs b/adr/core-abstractions/topology/src/main_claudev1.rs new file mode 100644 index 0000000..480fa2c --- /dev/null +++ b/adr/core-abstractions/topology/src/main_claudev1.rs @@ -0,0 +1,323 @@ +use std::marker::PhantomData; +use std::process::Command; + +// ===== Capability Traits ===== + +/// Base trait for all capabilities +pub trait Capability {} + +/// Capability for executing shell commands on a host +pub trait CommandCapability: Capability { + fn execute_command(&self, command: &str, args: &[&str]) -> Result; +} + +/// Capability for interacting with a Kubernetes cluster +pub trait KubernetesCapability: Capability { 
+ fn apply_manifest(&self, manifest: &str) -> Result<(), String>; + fn get_resource(&self, resource_type: &str, name: &str) -> Result; +} + +// ===== Topology Traits ===== + +/// Base trait for all topologies +pub trait Topology { + // Base topology methods that don't depend on capabilities + fn name(&self) -> &str; +} + +// ===== Score Traits ===== + +/// Generic Score trait with an associated Capability type +pub trait Score { + fn apply(&self, topology: &T) -> Result<(), String>; + fn name(&self) -> &str; +} + +// ===== Concrete Topologies ===== + +/// A topology representing a Linux host +pub struct LinuxHostTopology { + name: String, + host: String, +} + +impl LinuxHostTopology { + pub fn new(name: String, host: String) -> Self { + Self { name, host } + } +} + +impl Topology for LinuxHostTopology { + fn name(&self) -> &str { + &self.name + } +} + +impl CommandCapability for LinuxHostTopology { + fn execute_command(&self, command: &str, args: &[&str]) -> Result { + println!("Executing on {}: {} {:?}", self.host, command, args); + // In a real implementation, this would SSH to the host and execute the command + let output = Command::new(command) + .args(args) + .output() + .map_err(|e| e.to_string())?; + + if output.status.success() { + Ok(String::from_utf8_lossy(&output.stdout).to_string()) + } else { + Err(String::from_utf8_lossy(&output.stderr).to_string()) + } + } +} + +/// A topology representing a K3D Kubernetes cluster +pub struct K3DTopology { + name: String, + linux_host: LinuxHostTopology, + cluster_name: String, +} + +impl K3DTopology { + pub fn new(name: String, linux_host: LinuxHostTopology, cluster_name: String) -> Self { + Self { + name, + linux_host, + cluster_name, + } + } +} + +impl Topology for K3DTopology { + fn name(&self) -> &str { + &self.name + } +} + +impl CommandCapability for K3DTopology { + fn execute_command(&self, command: &str, args: &[&str]) -> Result { + // Delegate to the underlying Linux host + self.linux_host.execute_command(command, args) + } +} + +impl KubernetesCapability for K3DTopology { + fn apply_manifest(&self, manifest: &str) -> Result<(), String> { + println!("Applying manifest to K3D cluster '{}'", self.cluster_name); + // Write manifest to a temporary file + let temp_file = format!("/tmp/manifest-{}.yaml", rand::random::()); + self.execute_command("bash", &["-c", &format!("cat > {}", temp_file)])?; + + // Apply with kubectl + self.execute_command( + "kubectl", + &["--context", &format!("k3d-{}", self.cluster_name), "apply", "-f", &temp_file] + )?; + + Ok(()) + } + + fn get_resource(&self, resource_type: &str, name: &str) -> Result { + println!("Getting resource {}/{} from K3D cluster '{}'", resource_type, name, self.cluster_name); + self.execute_command( + "kubectl", + &[ + "--context", + &format!("k3d-{}", self.cluster_name), + "get", + resource_type, + name, + "-o", + "yaml", + ] + ) + } +} + +// ===== Concrete Scores ===== + +/// A score that executes commands on a topology +pub struct CommandScore { + name: String, + command: String, + args: Vec, +} + +impl CommandScore { + pub fn new(name: String, command: String, args: Vec) -> Self { + Self { name, command, args } + } +} + +impl Score for CommandScore +where + T: Topology + CommandCapability +{ + fn apply(&self, topology: &T) -> Result<(), String> { + println!("Applying CommandScore '{}' to topology '{}'", self.name, topology.name()); + let args_refs: Vec<&str> = self.args.iter().map(|s| s.as_str()).collect(); + topology.execute_command(&self.command, &args_refs)?; + Ok(()) + } + + 
fn name(&self) -> &str { + &self.name + } +} + +/// A score that applies Kubernetes resources to a topology +pub struct K8sResourceScore { + name: String, + manifest: String, +} + +impl K8sResourceScore { + pub fn new(name: String, manifest: String) -> Self { + Self { name, manifest } + } +} + +impl Score for K8sResourceScore +where + T: Topology + KubernetesCapability +{ + fn apply(&self, topology: &T) -> Result<(), String> { + println!("Applying K8sResourceScore '{}' to topology '{}'", self.name, topology.name()); + topology.apply_manifest(&self.manifest) + } + + fn name(&self) -> &str { + &self.name + } +} + +// ===== Maestro Orchestrator ===== + +/// Type-safe orchestrator that enforces capability requirements at compile time +pub struct Maestro { + topology: T, + scores: Vec>>, +} + +/// A trait object wrapper that hides the specific Score type but preserves its +/// capability requirements +trait ScoreWrapper { + fn apply(&self, topology: &T) -> Result<(), String>; + fn name(&self) -> &str; +} + +/// Implementation of ScoreWrapper for any Score that works with topology T +impl ScoreWrapper for S +where + T: Topology, + S: Score + 'static +{ + fn apply(&self, topology: &T) -> Result<(), String> { + >::apply(self, topology) + } + + fn name(&self) -> &str { + >::name(self) + } +} + +impl Maestro { + pub fn new(topology: T) -> Self { + Self { + topology, + scores: Vec::new(), + } + } + + /// Register a score that is compatible with this topology's capabilities + pub fn register_score(&mut self, score: S) + where + S: Score + 'static + { + println!("Registering score '{}' for topology '{}'", score.name(), self.topology.name()); + self.scores.push(Box::new(score)); + } + + /// Apply all registered scores to the topology + pub fn orchestrate(&self) -> Result<(), String> { + println!("Orchestrating topology '{}'", self.topology.name()); + for score in &self.scores { + score.apply(&self.topology)?; + } + Ok(()) + } +} + +// ===== Example Usage ===== + +fn main() { + // Create a Linux host topology + let linux_host = LinuxHostTopology::new( + "dev-machine".to_string(), + "localhost".to_string() + ); + + // Create a maestro for the Linux host + let mut linux_maestro = Maestro::new(linux_host); + + // Register a command score that works with any topology having CommandCapability + linux_maestro.register_score(CommandScore::new( + "check-disk".to_string(), + "df".to_string(), + vec!["-h".to_string()] + )); + + // This would fail to compile if we tried to register a K8sResourceScore + // because LinuxHostTopology doesn't implement KubernetesCapability + // linux_maestro.register_score(K8sResourceScore::new(...)); + + // Create a K3D topology which has both Command and Kubernetes capabilities + let k3d_host = LinuxHostTopology::new( + "k3d-host".to_string(), + "localhost".to_string() + ); + + let k3d_topology = K3DTopology::new( + "dev-cluster".to_string(), + k3d_host, + "devcluster".to_string() + ); + + // Create a maestro for the K3D topology + let mut k3d_maestro = Maestro::new(k3d_topology); + + // We can register both command scores and kubernetes scores + k3d_maestro.register_score(CommandScore::new( + "check-nodes".to_string(), + "kubectl".to_string(), + vec!["get".to_string(), "nodes".to_string()] + )); + + k3d_maestro.register_score(K8sResourceScore::new( + "deploy-nginx".to_string(), + r#" + apiVersion: apps/v1 + kind: Deployment + metadata: + name: nginx + spec: + replicas: 1 + selector: + matchLabels: + app: nginx + template: + metadata: + labels: + app: nginx + spec: + containers: + - 
name: nginx + image: nginx:latest + ports: + - containerPort: 80 + "#.to_string() + )); + + // Orchestrate both topologies + linux_maestro.orchestrate().unwrap(); + k3d_maestro.orchestrate().unwrap(); +} diff --git a/adr/core-abstractions/topology/src/main_gemini25pro.rs b/adr/core-abstractions/topology/src/main_gemini25pro.rs new file mode 100644 index 0000000..d173d83 --- /dev/null +++ b/adr/core-abstractions/topology/src/main_gemini25pro.rs @@ -0,0 +1,369 @@ +// Import necessary items (though for this example, few are needed beyond std) +use std::fmt; + +// --- Error Handling --- +// A simple error type for demonstration purposes. In a real app, use `thiserror` or `anyhow`. +#[derive(Debug)] +enum OrchestrationError { + CommandFailed(String), + KubeClientError(String), + TopologySetupFailed(String), +} + +impl fmt::Display for OrchestrationError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + OrchestrationError::CommandFailed(e) => write!(f, "Command execution failed: {}", e), + OrchestrationError::KubeClientError(e) => write!(f, "Kubernetes client error: {}", e), + OrchestrationError::TopologySetupFailed(e) => write!(f, "Topology setup failed: {}", e), + } + } +} + +impl std::error::Error for OrchestrationError {} + +// Define a common Result type +type Result = std::result::Result>; + +// --- 1. Capability Specification (as Traits) --- + +/// Capability trait representing the ability to run Linux commands. +/// This follows the "Parse, Don't Validate" idea implicitly - if you have an object +/// implementing this, you know you *can* run commands, no need to check later. +trait LinuxOperations { + fn run_command(&self, command: &str) -> Result; +} + +/// A mock Kubernetes client trait for demonstration. +trait KubeClient { + fn apply_manifest(&self, manifest: &str) -> Result<()>; + fn get_pods(&self, namespace: &str) -> Result>; +} + +/// Mock implementation of a KubeClient. +struct MockKubeClient { + cluster_name: String, +} + +impl KubeClient for MockKubeClient { + fn apply_manifest(&self, manifest: &str) -> Result<()> { + println!( + "[{}] Applying Kubernetes manifest:\n---\n{}\n---", + self.cluster_name, manifest + ); + // Simulate success or failure + if manifest.contains("invalid") { + Err(Box::new(OrchestrationError::KubeClientError( + "Invalid manifest content".into(), + ))) + } else { + Ok(()) + } + } + fn get_pods(&self, namespace: &str) -> Result> { + println!( + "[{}] Getting pods in namespace '{}'", + self.cluster_name, namespace + ); + Ok(vec![ + format!("pod-a-12345-{}-{}", namespace, self.cluster_name), + format!("pod-b-67890-{}-{}", namespace, self.cluster_name), + ]) + } +} + +/// Capability trait representing access to a Kubernetes cluster. +/// This follows Rust Embedded WG's "Zero-Cost Abstractions" - the trait itself +/// adds no runtime overhead, only compile-time structure. +trait KubernetesCluster { + // Provides access to a Kubernetes client instance. + // Using `impl Trait` in return position for flexibility. + fn get_kube_client(&self) -> Result; +} + +// --- 2. Topology Implementations --- +// Topologies implement the capabilities they provide. + +/// Represents a basic Linux host. +#[derive(Debug, Clone)] +struct LinuxHostTopology { + hostname: String, + // In a real scenario: SSH connection details, etc. 
+} + +impl LinuxHostTopology { + fn new(hostname: &str) -> Self { + println!("Initializing LinuxHostTopology for {}", hostname); + Self { + hostname: hostname.to_string(), + } + } +} + +// LinuxHostTopology provides LinuxOperations capability. +impl LinuxOperations for LinuxHostTopology { + fn run_command(&self, command: &str) -> Result { + println!("[{}] Running command: '{}'", self.hostname, command); + // Simulate command execution (e.g., via SSH) + if command.starts_with("fail") { + Err(Box::new(OrchestrationError::CommandFailed(format!( + "Command '{}' failed", + command + )))) + } else { + Ok(format!("Output of '{}' on {}", command, self.hostname)) + } + } +} + +/// Represents a K3D (Kubernetes in Docker) cluster running on a host. +#[derive(Debug, Clone)] +struct K3DTopology { + cluster_name: String, + host_os: String, // Example: might implicitly run commands on the underlying host + // In a real scenario: Kubeconfig path, Docker client, etc. +} + +impl K3DTopology { + fn new(cluster_name: &str) -> Self { + println!("Initializing K3DTopology for cluster {}", cluster_name); + Self { + cluster_name: cluster_name.to_string(), + host_os: "Linux".to_string(), // Assume k3d runs on Linux for this example + } + } +} + +// K3DTopology provides KubernetesCluster capability. +impl KubernetesCluster for K3DTopology { + fn get_kube_client(&self) -> Result { + println!("[{}] Creating mock Kubernetes client", self.cluster_name); + // In a real scenario, this would initialize a client using kubeconfig etc. + Ok(MockKubeClient { + cluster_name: self.cluster_name.clone(), + }) + } +} + +// K3DTopology *also* provides LinuxOperations (e.g., for running commands inside nodes or on the host managing k3d). +impl LinuxOperations for K3DTopology { + fn run_command(&self, command: &str) -> Result { + println!( + "[{} on {} host] Running command: '{}'", + self.cluster_name, self.host_os, command + ); + // Simulate command execution (maybe `docker exec` or similar) + if command.starts_with("fail") { + Err(Box::new(OrchestrationError::CommandFailed(format!( + "Command '{}' failed within k3d context", + command + )))) + } else { + Ok(format!( + "Output of '{}' within k3d cluster {}", + command, self.cluster_name + )) + } + } +} + +// --- 3. Score Implementations --- +// Scores require capabilities via trait bounds on their execution logic. + +/// Base trait for identifying scores. Could be empty or hold metadata. +trait Score { + fn name(&self) -> &'static str; + // We don't put execute here, as its signature depends on required capabilities. +} + +/// A score that runs a shell command on a Linux host. +#[derive(Debug)] +struct CommandScore { + command: String, +} + +impl Score for CommandScore { + fn name(&self) -> &'static str { + "CommandScore" + } +} + +impl CommandScore { + fn new(command: &str) -> Self { + Self { + command: command.to_string(), + } + } + + /// Execute method is generic over T, but requires T implements LinuxOperations. + /// This follows the "Scores as Polymorphic Functions" idea. + fn execute(&self, topology: &T) -> Result<()> { + println!("Executing Score: {}", Score::name(self)); + let output = topology.run_command(&self.command)?; + println!("Command Score Output: {}", output); + Ok(()) + } +} + +/// A score that applies a Kubernetes resource manifest. 
+#[derive(Debug)] +struct K8sResourceScore { + manifest_path: String, // Path or content +} + +impl Score for K8sResourceScore { + fn name(&self) -> &'static str { + "K8sResourceScore" + } +} + +impl K8sResourceScore { + fn new(manifest_path: &str) -> Self { + Self { + manifest_path: manifest_path.to_string(), + } + } + + /// Execute method requires T implements KubernetesCluster. + fn execute(&self, topology: &T) -> Result<()> { + println!("Executing Score: {}", Score::name(self)); + let client = topology.get_kube_client()?; + let manifest_content = format!( + "apiVersion: v1\nkind: Pod\nmetadata:\n name: my-pod-from-{}", + self.manifest_path + ); // Simulate reading file + client.apply_manifest(&manifest_content)?; + println!( + "K8s Resource Score applied manifest: {}", + self.manifest_path + ); + Ok(()) + } +} + +// --- 4. Maestro (The Orchestrator) --- + +// This version of Maestro uses a helper trait (`ScoreRunner`) to enable +// storing heterogeneous scores while preserving compile-time checks. + +/// A helper trait to erase the specific capability requirements *after* +/// the compiler has verified them, allowing storage in a Vec. +/// The verification happens in the blanket impls below. +trait ScoreRunner { + // T is the concrete Topology type + fn run(&self, topology: &T) -> Result<()>; + fn name(&self) -> &'static str; +} + +// Blanket implementation: A CommandScore can be run on any Topology T +// *if and only if* T implements LinuxOperations. +// The compiler checks this bound when `add_score` is called. +impl ScoreRunner for CommandScore { + fn run(&self, topology: &T) -> Result<()> { + self.execute(topology) // Call the capability-specific execute method + } + fn name(&self) -> &'static str { + Score::name(self) + } +} + +// Blanket implementation: A K8sResourceScore can be run on any Topology T +// *if and only if* T implements KubernetesCluster. +impl ScoreRunner for K8sResourceScore { + fn run(&self, topology: &T) -> Result<()> { + self.execute(topology) // Call the capability-specific execute method + } + fn name(&self) -> &'static str { + Score::name(self) + } +} + +/// The Maestro orchestrator, strongly typed to a specific Topology `T`. +struct Maestro { + topology: T, + // Stores type-erased runners, but addition is type-safe. + scores: Vec>>, +} + +impl Maestro { + /// Creates a new Maestro instance bound to a specific topology. + fn new(topology: T) -> Self { + println!("Maestro initialized."); + Maestro { + topology, + scores: Vec::new(), + } + } + + /// Adds a score to the Maestro. + /// **Compile-time check happens here!** + /// The `S: ScoreRunner` bound ensures that the score `S` provides an + /// implementation of `ScoreRunner` *for the specific topology type `T`*. + /// The blanket impls above ensure this is only possible if `T` has the + /// required capabilities for `S`. + /// This directly follows the "Theoretical Example: The Compiler as an Ally". + fn add_score(&mut self, score: S) + where + S: Score + ScoreRunner + 'static, // S must be runnable on *this* T + { + println!("Registering score: {}", Score::name(&score)); + self.scores.push(Box::new(score)); + } + + /// Runs all registered scores sequentially on the topology. 
+    fn run_all(&self) -> Vec<Result<()>> {
        println!("\n--- Running all scores ---");
        self.scores
            .iter()
            .map(|score_runner| {
                println!("---");
                let result = score_runner.run(&self.topology);
                match &result {
                    Ok(_) => println!("Score '{}' completed successfully.", score_runner.name()),
                    Err(e) => eprintln!("Score '{}' failed: {}", score_runner.name(), e),
                }
                result
            })
            .collect()
    }
}

// --- 5. Example Usage ---

fn main() {
    println!("=== Scenario 1: Linux Host Topology ===");
    let linux_host = LinuxHostTopology::new("server1.example.com");
    let mut maestro_linux = Maestro::new(linux_host);

    // Add scores compatible with LinuxHostTopology (which has LinuxOperations)
    maestro_linux.add_score(CommandScore::new("uname -a"));
    maestro_linux.add_score(CommandScore::new("ls -l /tmp"));

    // *** Compile-time Error Example ***
    // Try adding a score that requires the KubernetesCluster capability.
    // This line WILL NOT COMPILE because LinuxHostTopology does not implement KubernetesCluster,
    // therefore K8sResourceScore does not implement ScoreRunner<LinuxHostTopology>.
    // maestro_linux.add_score(K8sResourceScore::new("my-app.yaml"));
    // Uncomment the line above to see the compiler error! The error message will
    // likely point to the `ScoreRunner<LinuxHostTopology>` bound not being satisfied
    // for `K8sResourceScore`.

    let results_linux = maestro_linux.run_all();
    println!("\nLinux Host Results: {:?}", results_linux);

    println!("\n=== Scenario 2: K3D Topology ===");
    let k3d_cluster = K3DTopology::new("dev-cluster");
    let mut maestro_k3d = Maestro::new(k3d_cluster);

    // Add scores compatible with K3DTopology (which has LinuxOperations AND KubernetesCluster)
    maestro_k3d.add_score(CommandScore::new("pwd")); // Uses LinuxOperations
    maestro_k3d.add_score(K8sResourceScore::new("nginx-deployment.yaml")); // Uses KubernetesCluster
    maestro_k3d.add_score(K8sResourceScore::new("invalid-service.yaml")); // Test error case
    maestro_k3d.add_score(CommandScore::new("fail please")); // Test error case

    let results_k3d = maestro_k3d.run_all();
    println!("\nK3D Cluster Results: {:?}", results_k3d);

    println!("\n=== Compile-Time Safety Demonstrated ===");
    println!("(Check the commented-out line in the code for the compile error example)");
}
diff --git a/adr/core-abstractions/topology/src/main_geminifail.rs b/adr/core-abstractions/topology/src/main_geminifail.rs new file mode 100644 index 0000000..938d976 --- /dev/null +++ b/adr/core-abstractions/topology/src/main_geminifail.rs @@ -0,0 +1,492 @@
+use std::any::Any;
use std::fmt::Debug;
use std::process::Command;

pub trait Capability {}

pub trait CommandCapability: Capability {
    fn execute_command(&self, command: &str, args: &Vec<String>) -> Result<String, String>;
}

pub trait KubernetesCapability: Capability {
    fn apply_manifest(&self, manifest: &str) -> Result<(), String>;
    fn get_resource(&self, resource_type: &str, name: &str) -> Result<String, String>;
}

pub trait Topology {
    fn name(&self) -> &str;
}

pub trait Interpret<T: Topology> {
    fn execute(&self, topology: &T) -> Result<String, String>;
}

// --- Score Definition Structs (Concrete) ---
// CommandScore struct remains the same
#[derive(Debug, Clone)] // Added Debug/Clone for easier handling
pub struct CommandScore {
    name: String,
    command: String,
    args: Vec<String>,
}

impl CommandScore {
    pub fn new(name: String, command: String, args: Vec<String>) -> Self {
        Self { name, command, args }
    }
}

// K8sResourceScore struct remains the same
#[derive(Debug, Clone)]
pub struct K8sResourceScore {
    name: String,
    manifest:
String, +} + +impl K8sResourceScore { + pub fn new(name: String, manifest: String) -> Self { + Self { name, manifest } + } +} + + +// --- Metadata / Base Score Trait (Non-Generic) --- +// Trait for common info and enabling downcasting later if needed +pub trait ScoreDefinition: Debug + Send + Sync { + fn name(&self) -> &str; + // Method to allow downcasting + fn as_any(&self) -> &dyn Any; + // Optional: Could add methods for description, parameters etc. + // fn description(&self) -> &str; + + // Optional but potentially useful: A way to clone the definition + fn box_clone(&self) -> Box; +} + +// Implement Clone for Box +impl Clone for Box { + fn clone(&self) -> Self { + self.box_clone() + } +} + +// Implement ScoreDefinition for your concrete score types +impl ScoreDefinition for CommandScore { + fn name(&self) -> &str { + &self.name + } + fn as_any(&self) -> &dyn Any { + self + } + fn box_clone(&self) -> Box { + Box::new(self.clone()) + } +} + +impl ScoreDefinition for K8sResourceScore { + fn name(&self) -> &str { + &self.name + } + fn as_any(&self) -> &dyn Any { + self + } + fn box_clone(&self) -> Box { + Box::new(self.clone()) + } +} + + +// --- Score Compatibility Trait (Generic over T) --- +// This remains largely the same, ensuring compile-time checks +pub trait Score: ScoreDefinition { + // No need for name() here, it's in ScoreDefinition + fn compile(&self) -> Result>, String>; +} + +// --- Implementations of Score (Crucial Link) --- + +// CommandScore implements Score for any T with CommandCapability +impl Score for CommandScore +where + T: Topology + CommandCapability + 'static, // Added 'static bound often needed for Box + // Self: ScoreDefinition // This bound is implicit now +{ + fn compile(&self) -> Result>, String> { + // Pass necessary data from self to CommandInterpret + Ok(Box::new(CommandInterpret { + command: self.command.clone(), + args: self.args.clone(), + })) + } +} + +// K8sResourceScore implements Score for any T with KubernetesCapability +impl Score for K8sResourceScore +where + T: Topology + KubernetesCapability + 'static, + // Self: ScoreDefinition +{ + fn compile(&self) -> Result>, String> { + Ok(Box::new(K8sResourceInterpret { + manifest: self.manifest.clone(), // Pass needed data + })) + } +} + + +// --- Interpret Implementations --- +// Need to hold the actual data now + +struct CommandInterpret { + command: String, + args: Vec, // Or owned Strings if lifetime is tricky +} + +impl<'a, T> Interpret for CommandInterpret +where + T: Topology + CommandCapability, +{ + fn execute(&self, topology: &T) -> Result { + // Now uses data stored in self + topology.execute_command(&self.command, &self.args) + } +} + +struct K8sResourceInterpret { + manifest: String, +} + +impl Interpret for K8sResourceInterpret { + fn execute(&self, topology: &T) -> Result { + topology.apply_manifest(&self.manifest)?; + // apply_manifest returns Result<(), String>, adapt if needed + Ok(format!("Applied manifest for {}", topology.name())) // Example success message + } +} + +// --- Maestro --- +// Maestro remains almost identical, leveraging the Score bound +pub struct Maestro { + topology: T, + // Stores Score trait objects, ensuring compatibility + scores: Vec>>, +} + +impl Maestro { // Often need T: 'static here + pub fn new(topology: T) -> Self { + Self { + topology, + scores: Vec::new(), + } + } + + // This method signature is key - it takes a concrete S + // and the compiler checks if S implements Score + pub fn register_score(&mut self, score: S) -> Result<(), String> + where + S: 
Score + ScoreDefinition + Clone + 'static, // Ensure S is a Score for *this* T + // We might need S: Clone if we want to store Box::new(score) + // Alternatively, accept Box and try to downcast/wrap + { + println!( + "Registering score '{}' for topology '{}'", + score.name(), + self.topology.name() + ); + // The compiler has already guaranteed that S implements Score + // We need to box it as dyn Score + self.scores.push(Box::new(score)); + Ok(()) + } + + // Alternative registration if you have Box + pub fn register_score_definition(&mut self, score_def: Box) -> Result<(), String> + where + T: Topology + CommandCapability + KubernetesCapability + 'static, // Example: list all needed caps here, or use generics + downcasting + { + println!( + "Attempting to register score '{}' for topology '{}'", + score_def.name(), + self.topology.name() + ); + + // Downcast to check concrete type and then check compatibility + if let Some(cs) = score_def.as_any().downcast_ref::() { + // Check if T satisfies CommandScore's requirements (CommandCapability) + // This check is somewhat manual or needs restructuring if we avoid listing all caps + // A simpler way is to just try to create the Box> + let boxed_score: Box> = Box::new(cs.clone()); // This relies on the blanket impls + self.scores.push(boxed_score); + Ok(()) + } else if let Some(ks) = score_def.as_any().downcast_ref::() { + // Check if T satisfies K8sResourceScore's requirements (KubernetesCapability) + let boxed_score: Box> = Box::new(ks.clone()); + self.scores.push(boxed_score); + Ok(()) + } else { + Err(format!("Score '{}' is of an unknown type or incompatible", score_def.name())) + } + // This downcasting approach in Maestro slightly undermines the full compile-time + // check unless designed carefully. The generic `register_score>` is safer. + } + + + pub fn orchestrate(&self) -> Result<(), String> { + println!("Orchestrating topology '{}'", self.topology.name()); + for score in &self.scores { + println!("Compiling score '{}'", score.name()); // Use name() from ScoreDefinition + let interpret = score.compile()?; + println!("Executing score '{}'", score.name()); + interpret.execute(&self.topology)?; + } + Ok(()) + } +} + +// --- TUI Example --- +struct ScoreItem { + // Holds the definition/metadata, NOT the Score trait object + definition: Box, +} + +struct HarmonyTui { + // List of available score *definitions* + available_scores: Vec, + // Example: Maybe maps topology names to Maestros + // maestros: HashMap>, // Storing Maestros generically is another challenge! +} + +impl HarmonyTui { + fn new() -> Self { + HarmonyTui { available_scores: vec![] } + } + + fn add_available_score(&mut self, score_def: Box) { + self.available_scores.push(ScoreItem { definition: score_def }); + } + + fn display_scores(&self) { + println!("Available Scores:"); + for (i, item) in self.available_scores.iter().enumerate() { + println!("{}: {}", i, item.definition.name()); + } + } + + fn execute_score(&self, score: ScoreItem) { + score.definition. 
+ + } + + // Example: Function to add a selected score to a specific Maestro + // This function would need access to the Maestros and handle the types + fn add_selected_score_to_maestro( + &self, + score_index: usize, + maestro: &mut Maestro + ) -> Result<(), String> + where + T: Topology + CommandCapability + KubernetesCapability + 'static, // Adjust bounds as needed + { + let score_item = self.available_scores.get(score_index) + .ok_or("Invalid score index")?; + + // We have Box, need to add to Maestro + // Easiest is to downcast and call the generic register_score + + if let Some(cs) = score_item.definition.as_any().downcast_ref::() { + // Compiler checks if CommandScore: Score via register_score's bound + maestro.register_score(cs.clone())?; + Ok(()) + } else if let Some(ks) = score_item.definition.as_any().downcast_ref::() { + // Compiler checks if K8sResourceScore: Score via register_score's bound + maestro.register_score(ks.clone())?; + Ok(()) + } else { + Err(format!("Cannot add score '{}': Unknown type or check Maestro compatibility", score_item.definition.name())) + } + } +} + +pub struct K3DTopology { + name: String, + linux_host: LinuxHostTopology, + cluster_name: String, +} + +impl Capability for K3DTopology {} + +impl K3DTopology { + pub fn new(name: String, linux_host: LinuxHostTopology, cluster_name: String) -> Self { + Self { + name, + linux_host, + cluster_name, + } + } +} + +impl Topology for K3DTopology { + fn name(&self) -> &str { + &self.name + } +} + +impl CommandCapability for K3DTopology { + fn execute_command(&self, command: &str, args: &Vec) -> Result { + self.linux_host.execute_command(command, args) + } +} + +impl KubernetesCapability for K3DTopology { + fn apply_manifest(&self, manifest: &str) -> Result<(), String> { + println!("Applying manifest to K3D cluster '{}'", self.cluster_name); + // Write manifest to a temporary file + let temp_file = format!("/tmp/manifest-harmony-temp.yaml"); + + // Use the linux_host directly to avoid capability trait bounds + self.linux_host + .execute_command("bash", &Vec::from(["-c".to_string(), format!("cat > {}", temp_file)]))?; + + // Apply with kubectl + self.linux_host.execute_command("kubectl", &Vec::from([ + "--context".to_string(), + format!("k3d-{}", self.cluster_name), + "apply".to_string(), + "-f".to_string(), + temp_file.to_string(), + ]))?; + + Ok(()) + } + + fn get_resource(&self, resource_type: &str, name: &str) -> Result { + println!( + "Getting resource {}/{} from K3D cluster '{}'", + resource_type, name, self.cluster_name + ); + self.linux_host.execute_command("kubectl", &Vec::from([ + "--context".to_string(), + format!("k3d-{}", self.cluster_name), + "get".to_string(), + resource_type.to_string(), + name.to_string(), + "-o".to_string(), + "yaml".to_string(), + ])) + } +} + + +pub struct LinuxHostTopology { + name: String, + host: String, +} +impl Capability for LinuxHostTopology {} + +impl LinuxHostTopology { + pub fn new(name: String, host: String) -> Self { + Self { name, host } + } +} + +impl Topology for LinuxHostTopology { + fn name(&self) -> &str { + &self.name + } +} + +impl CommandCapability for LinuxHostTopology { + fn execute_command(&self, command: &str, args: &Vec) -> Result { + println!("Executing on {}: {} {:?}", self.host, command, args); + // In a real implementation, this would SSH to the host and execute the command + let output = Command::new(command) + .args(args) + .output() + .map_err(|e| e.to_string())?; + + if output.status.success() { + 
Ok(String::from_utf8_lossy(&output.stdout).to_string()) + } else { + Err(String::from_utf8_lossy(&output.stderr).to_string()) + } + } +} + + + +// --- Main Function Adapated --- +fn main() { + // --- Linux Host --- + let linux_host = LinuxHostTopology::new("dev-machine".to_string(), "localhost".to_string()); + let mut linux_maestro = Maestro::new(linux_host); + + let df_score = CommandScore::new( + "check-disk".to_string(), + "df".to_string(), + vec!["-h".to_string()], + ); + + // Registration uses the generic method, compiler checks CommandScore: Score + linux_maestro.register_score(df_score.clone()).unwrap(); // clone needed if df_score used later + + // --- K3D Host --- + let k3d_host = LinuxHostTopology::new("k3d-host".to_string(), "localhost".to_string()); + let k3d_topology = K3DTopology::new( + "dev-cluster".to_string(), + k3d_host, + "devcluster".to_string(), + ); + let mut k3d_maestro = Maestro::new(k3d_topology); + + let nodes_score = CommandScore::new( + "check-nodes".to_string(), + "kubectl".to_string(), + vec!["get".to_string(), "nodes".to_string()], + ); + let nginx_score = K8sResourceScore::new( + "deploy-nginx".to_string(), + // ... manifest string ... + r#"..."#.to_string(), + ); + + // Compiler checks CommandScore: Score + k3d_maestro.register_score(nodes_score.clone()).unwrap(); + // Compiler checks K8sResourceScore: Score + k3d_maestro.register_score(nginx_score.clone()).unwrap(); + + + // --- TUI Example Usage --- + let mut tui = HarmonyTui::new(); + // Add score *definitions* to the TUI + tui.add_available_score(Box::new(df_score)); + tui.add_available_score(Box::new(nodes_score)); + tui.add_available_score(Box::new(nginx_score)); + + tui.display_scores(); + + // Simulate user selecting score 0 (check-disk) and adding to linux_maestro + match tui.add_selected_score_to_maestro(0, &mut linux_maestro) { + Ok(_) => println!("Successfully registered check-disk to linux_maestro via TUI selection"), + Err(e) => println!("Failed: {}", e), // Should succeed + } + + // Simulate user selecting score 2 (deploy-nginx) and adding to linux_maestro + match tui.add_selected_score_to_maestro(2, &mut linux_maestro) { + Ok(_) => println!("Successfully registered deploy-nginx to linux_maestro via TUI selection"), // Should fail! + Err(e) => println!("Correctly failed to add deploy-nginx to linux_maestro: {}", e), + // The failure happens inside add_selected_score_to_maestro because the + // maestro.register_score(ks.clone()) call fails the trait bound check + // K8sResourceScore: Score is false. 
+ } + + // Simulate user selecting score 2 (deploy-nginx) and adding to k3d_maestro + match tui.add_selected_score_to_maestro(2, &mut k3d_maestro) { + Ok(_) => println!("Successfully registered deploy-nginx to k3d_maestro via TUI selection"), // Should succeed + Err(e) => println!("Failed: {}", e), + } + + // --- Orchestration --- + println!("\n--- Orchestrating Linux Maestro ---"); + linux_maestro.orchestrate().unwrap(); + println!("\n--- Orchestrating K3D Maestro ---"); + k3d_maestro.orchestrate().unwrap(); +} diff --git a/adr/core-abstractions/topology/src/main_right.rs b/adr/core-abstractions/topology/src/main_right.rs new file mode 100644 index 0000000..baa6c6c --- /dev/null +++ b/adr/core-abstractions/topology/src/main_right.rs @@ -0,0 +1,129 @@ +use std::marker::PhantomData; + +// Capability Trait Hierarchy +pub trait Capability {} + +// Specific Capability Traits +pub trait ShellAccess: Capability {} +pub trait ContainerRuntime: Capability {} +pub trait KubernetesAccess: Capability {} +pub trait FileSystemAccess: Capability {} + +// Topology Trait - Defines the core interface for infrastructure topologies +pub trait Topology { + type Capabilities: Capability; + + fn name(&self) -> &str; +} + +// Score Trait - Defines the core interface for infrastructure transformation +pub trait Score { + type RequiredCapabilities: Capability; + type OutputTopology: Topology; + + fn apply(&self, topology: T) -> Result; +} + +// Linux Host Topology +pub struct LinuxHostTopology; + +impl Topology for LinuxHostTopology { + type Capabilities = dyn ShellAccess + FileSystemAccess; + + fn name(&self) -> &str { + "Linux Host" + } +} + +impl ShellAccess for LinuxHostTopology {} +impl FileSystemAccess for LinuxHostTopology {} + +// K3D Topology +pub struct K3DTopology; + +impl Topology for K3DTopology { + type Capabilities = dyn ContainerRuntime + KubernetesAccess + ShellAccess; + + fn name(&self) -> &str { + "K3D Kubernetes Cluster" + } +} + +impl ContainerRuntime for K3DTopology {} +impl KubernetesAccess for K3DTopology {} +impl ShellAccess for K3DTopology {} + +// Command Score - A score that requires shell access +pub struct CommandScore { + command: String, +} + +impl Score for CommandScore { + type RequiredCapabilities = dyn ShellAccess; + type OutputTopology = LinuxHostTopology; + + fn apply(&self, _topology: T) -> Result + where + T: ShellAccess + { + // Simulate command execution + println!("Executing command: {}", self.command); + Ok(LinuxHostTopology) + } +} + +// Kubernetes Resource Score +pub struct K8sResourceScore { + resource_definition: String, +} + +impl Score for K8sResourceScore { + type RequiredCapabilities = dyn KubernetesAccess; + type OutputTopology = K3DTopology; + + fn apply(&self, _topology: T) -> Result + where + T: dyn KubernetesAccess + { + // Simulate Kubernetes resource application + println!("Applying K8s resource: {}", self.resource_definition); + Ok(K3DTopology) + } +} + +// Maestro - The orchestration coordinator +pub struct Maestro; + +impl Maestro { + // Type-safe score application + pub fn apply_score(topology: T, score: S) -> Result + where + T: Topology, + S: Score, + T: S::RequiredCapabilities + { + score.apply(topology) + } +} + +fn main() { + // Example usage demonstrating type-driven design + let linux_host = LinuxHostTopology; + let k3d_cluster = K3DTopology; + + // Command score on Linux host + let command_score = CommandScore { + command: "echo 'Hello, World!'".to_string(), + }; + + let result = Maestro::apply_score(linux_host, command_score) + 
.expect("Command score application failed"); + + // K8s resource score on K3D cluster + let k8s_score = K8sResourceScore { + resource_definition: "apiVersion: v1\nkind: Pod\n...".to_string(), + }; + + let k8s_result = Maestro::apply_score(k3d_cluster, k8s_score) + .expect("K8s resource score application failed"); +} diff --git a/adr/core-abstractions/topology/src/main_v1.rs b/adr/core-abstractions/topology/src/main_v1.rs new file mode 100644 index 0000000..3ae3b11 --- /dev/null +++ b/adr/core-abstractions/topology/src/main_v1.rs @@ -0,0 +1,155 @@ +mod main_right; +mod main_claude; +// Capability Traits + +trait Capability {} + +trait LinuxOperations: Capability { + fn execute_command(&self, command: &str) -> Result; +} + +trait KubernetesOperations: Capability { + fn create_resource(&self, resource: &str) -> Result; + fn delete_resource(&self, resource: &str) -> Result; +} + +// Topology Implementations + +struct LinuxHostTopology; + +impl LinuxOperations for LinuxHostTopology { + fn execute_command(&self, command: &str) -> Result { + // Implementation for executing commands on a Linux host + Ok(format!("Executed command: {}", command)) + } +} + +impl Capability for LinuxHostTopology {} + +struct K3DTopology; + +impl KubernetesOperations for K3DTopology { + fn create_resource(&self, resource: &str) -> Result { + // Implementation for creating Kubernetes resources in K3D + Ok(format!("Created resource: {}", resource)) + } + + fn delete_resource(&self, resource: &str) -> Result { + // Implementation for deleting Kubernetes resources in K3D + Ok(format!("Deleted resource: {}", resource)) + } +} + +impl Capability for K3DTopology {} + +// Score Implementations + +struct K8sResourceScore { + resource: String, +} + +impl Score for K8sResourceScore +where + T: KubernetesOperations, +{ + fn execute(&self, topology: &T) -> Result { + topology.create_resource(&self.resource) + } +} + +struct CommandScore { + command: String, +} + +impl Score for CommandScore +where + T: LinuxOperations + 'static, +{ + fn execute(&self, topology: &T) -> Result { + topology.execute_command(&self.command) + } +} + +// Score Trait + +trait Score +where + T: Capability + 'static, +{ + fn execute(&self, topology: &T) -> Result; +} + +// Maestro Implementation + +struct Maestro { + scores: Vec>>>, +} + +impl Maestro { + fn new() -> Self { + Maestro { scores: Vec::new() } + } + + fn register_score(&mut self, score: Box) + where + T: Score> + 'static, + { + self.scores.push(Box::new(score)); + } + + fn execute_scores(&self, topology: &T) -> Result, String> + where + T: Capability + 'static, + { + let mut results = Vec::new(); + for score in &self.scores { + if let Some(score) = score.as_any().downcast_ref::>>() { + results.push(score.execute(topology)?); + } + } + Ok(results) + } +} + +// Helper trait for downcasting + +trait AsAny { + fn as_any(&self) -> &dyn std::any::Any; +} + +impl AsAny for T { + fn as_any(&self) -> &dyn std::any::Any { + self + } +} + +// Main Function + +fn main() { + let mut maestro = Maestro::new(); + + let k8s_score = K8sResourceScore { + resource: "deployment.yaml".to_string(), + }; + maestro.register_score(k8s_score); + + let command_score = CommandScore { + command: "ls -l".to_string(), + }; + maestro.register_score(command_score); + + let linux_topology = LinuxHostTopology; + let k3d_topology = K3DTopology; + + let linux_results = maestro.execute_scores(&linux_topology).unwrap(); + println!("Linux Topology Results:"); + for result in linux_results { + println!("{}", result); + } + + let 
k3d_results = maestro.execute_scores(&k3d_topology).unwrap(); + println!("K3D Topology Results:"); + for result in k3d_results { + println!("{}", result); + } +} diff --git a/adr/core-abstractions/topology2/Cargo.toml b/adr/core-abstractions/topology2/Cargo.toml new file mode 100644 index 0000000..ef57a6d --- /dev/null +++ b/adr/core-abstractions/topology2/Cargo.toml @@ -0,0 +1,9 @@ +[package] +name = "example-topology2" +edition = "2024" +version.workspace = true +readme.workspace = true +license.workspace = true +publish = false + +[dependencies] diff --git a/adr/core-abstractions/topology2/src/main.rs b/adr/core-abstractions/topology2/src/main.rs new file mode 100644 index 0000000..313afa1 --- /dev/null +++ b/adr/core-abstractions/topology2/src/main.rs @@ -0,0 +1,183 @@ +// Clean capability-based design using type parameters + +trait Capability {} + +trait K8sCapability: Capability { + fn deploy_k8s_resource(&self, resource_yaml: &str); + fn execute_kubectl(&self, command: &str) -> String; +} + +trait LinuxCapability: Capability { + fn execute_command(&self, command: &str, args: &[&str]); + fn download_file(&self, url: &str, destination: &str) -> Result<(), String>; +} + +trait LoadBalancerCapability: Capability { + fn configure_load_balancer(&self, services: &[&str], port: u16); + fn get_load_balancer_status(&self) -> String; +} + +// Score trait with capability type parameter +trait Score { + fn execute(&self, capability: &C) -> String; +} + +// Topology implementations with marker trait +trait Topology {} + +struct K3DTopology {} +impl Topology for K3DTopology {} +impl Capability for K3DTopology {} +impl K8sCapability for K3DTopology { + fn deploy_k8s_resource(&self, resource_yaml: &str) { + todo!() + } + + fn execute_kubectl(&self, command: &str) -> String { + todo!() + } + // Implementation... +} + +struct LinuxTopology {} +impl Topology for LinuxTopology {} +impl Capability for LinuxTopology {} +impl LinuxCapability for LinuxTopology { + fn execute_command(&self, command: &str, args: &[&str]) { + todo!() + } + + fn download_file(&self, url: &str, destination: &str) -> Result<(), String> { + todo!() + } + // Implementation... +} + +struct OKDHaClusterTopology {} +impl Topology for OKDHaClusterTopology {} +impl Capability for OKDHaClusterTopology {} +impl K8sCapability for OKDHaClusterTopology { + fn deploy_k8s_resource(&self, resource_yaml: &str) { + todo!() + } + + fn execute_kubectl(&self, command: &str) -> String { + todo!() + } + // Implementation... +} +impl LinuxCapability for OKDHaClusterTopology { + fn execute_command(&self, command: &str, args: &[&str]) { + todo!() + } + + fn download_file(&self, url: &str, destination: &str) -> Result<(), String> { + todo!() + } + // Implementation... +} +impl LoadBalancerCapability for OKDHaClusterTopology { + fn configure_load_balancer(&self, services: &[&str], port: u16) { + todo!() + } + + fn get_load_balancer_status(&self) -> String { + todo!() + } + // Implementation... +} + +// Score implementations +struct LAMPScore {} +impl Score for LAMPScore { + fn execute(&self, capability: &dyn K8sCapability) -> String { + todo!() + // Implementation... + } +} + +struct BinaryScore {} +impl Score for BinaryScore { + fn execute(&self, capability: &dyn LinuxCapability) -> String { + todo!() + // Implementation... + } +} + +struct LoadBalancerScore {} +impl Score for LoadBalancerScore { + fn execute(&self, capability: &dyn LoadBalancerCapability) -> String { + todo!() + // Implementation... 
+ } +} + +// Generic Maestro +struct Maestro { + topology: T, + scores: Vec String>>, +} + +impl Maestro { + fn new(topology: T) -> Self { + Self { + topology, + scores: Vec::new(), + } + } + + fn interpret_all(&mut self) -> Vec { + self.scores.iter_mut() + .map(|score| score(&self.topology)) + .collect() + } +} + +// Capability-specific extensions +impl Maestro { + fn register_k8s_score + 'static>(&mut self, score: S) { + let score_box = Box::new(move |topology: &T| { + score.execute(topology as &dyn K8sCapability) + }); + self.scores.push(score_box); + } +} + +impl Maestro { + fn register_linux_score + 'static>(&mut self, score: S) { + let score_box = Box::new(move |topology: &T| { + score.execute(topology as &dyn LinuxCapability) + }); + self.scores.push(score_box); + } +} + +impl Maestro { + fn register_lb_score + 'static>(&mut self, score: S) { + let score_box = Box::new(move |topology: &T| { + score.execute(topology as &dyn LoadBalancerCapability) + }); + self.scores.push(score_box); + } +} + +fn main() { + // Example usage + let k3d = K3DTopology {}; + let mut k3d_maestro = Maestro::new(k3d); + + // These will compile because K3D implements K8sCapability + k3d_maestro.register_k8s_score(LAMPScore {}); + + // This would not compile because K3D doesn't implement LoadBalancerCapability + // k3d_maestro.register_lb_score(LoadBalancerScore {}); + + let linux = LinuxTopology {}; + let mut linux_maestro = Maestro::new(linux); + + // This will compile because Linux implements LinuxCapability + linux_maestro.register_linux_score(BinaryScore {}); + + // This would not compile because Linux doesn't implement K8sCapability + // linux_maestro.register_k8s_score(LAMPScore {}); +} diff --git a/adr/core-abstractions/topology2/src/main_capabilities.rs b/adr/core-abstractions/topology2/src/main_capabilities.rs new file mode 100644 index 0000000..499069a --- /dev/null +++ b/adr/core-abstractions/topology2/src/main_capabilities.rs @@ -0,0 +1,324 @@ +fn main() { + // Create various topologies + let okd_topology = OKDHaClusterTopology::new(); + let k3d_topology = K3DTopology::new(); + let linux_topology = LinuxTopology::new(); + + // Create scores + let lamp_score = LAMPScore::new("MySQL 8.0", "PHP 8.1", "Apache 2.4"); + let binary_score = BinaryScore::new("https://example.com/binary", vec!["--arg1", "--arg2"]); + let load_balancer_score = LoadBalancerScore::new(vec!["service1", "service2"], 80); + + // Example 1: Running LAMP stack on OKD + println!("\n=== Deploying LAMP stack on OKD cluster ==="); + lamp_score.execute(&okd_topology); + + // Example 2: Running LAMP stack on K3D + println!("\n=== Deploying LAMP stack on K3D cluster ==="); + lamp_score.execute(&k3d_topology); + + // Example 3: Running binary on Linux host + println!("\n=== Running binary on Linux host ==="); + binary_score.execute(&linux_topology); + + // Example 4: Running binary on OKD (which can also run Linux commands) + println!("\n=== Running binary on OKD host ==="); + binary_score.execute(&okd_topology); + + // Example 5: Load balancer configuration on OKD + println!("\n=== Configuring load balancer on OKD ==="); + load_balancer_score.execute(&okd_topology); + + // The following would not compile: + // load_balancer_score.execute(&k3d_topology); // K3D doesn't implement LoadBalancerCapability + // lamp_score.execute(&linux_topology); // Linux doesn't implement K8sCapability +} + +// Base Topology trait +trait Topology { + fn name(&self) -> &str; +} + +// Define capabilities +trait K8sCapability { + fn deploy_k8s_resource(&self, 
resource_yaml: &str); + fn execute_kubectl(&self, command: &str) -> String; +} + +trait OKDCapability: K8sCapability { + fn execute_oc(&self, command: &str) -> String; +} + +trait LinuxCapability { + fn execute_command(&self, command: &str, args: &[&str]) -> String; + fn download_file(&self, url: &str, destination: &str) -> Result<(), String>; +} + +trait LoadBalancerCapability { + fn configure_load_balancer(&self, services: &[&str], port: u16); + fn get_load_balancer_status(&self) -> String; +} + +trait FirewallCapability { + fn open_port(&self, port: u16, protocol: &str); + fn close_port(&self, port: u16, protocol: &str); +} + +trait RouterCapability { + fn configure_route(&self, service: &str, hostname: &str); +} + +// Topology implementations +struct OKDHaClusterTopology { + cluster_name: String, +} + +impl OKDHaClusterTopology { + fn new() -> Self { + Self { + cluster_name: "okd-ha-cluster".to_string(), + } + } +} + +impl Topology for OKDHaClusterTopology { + fn name(&self) -> &str { + &self.cluster_name + } +} + +impl K8sCapability for OKDHaClusterTopology { + fn deploy_k8s_resource(&self, resource_yaml: &str) { + println!("Deploying K8s resource on OKD cluster: {}", resource_yaml); + } + + fn execute_kubectl(&self, command: &str) -> String { + println!("Executing kubectl command on OKD cluster: {}", command); + "kubectl command output".to_string() + } +} + +impl OKDCapability for OKDHaClusterTopology { + fn execute_oc(&self, command: &str) -> String { + println!("Executing oc command on OKD cluster: {}", command); + "oc command output".to_string() + } +} + +impl LinuxCapability for OKDHaClusterTopology { + fn execute_command(&self, command: &str, args: &[&str]) -> String { + println!( + "Executing command '{}' with args {:?} on OKD node", + command, args + ); + todo!() + } + + fn download_file(&self, url: &str, destination: &str) -> Result<(), String> { + println!( + "Downloading file from {} to {} on OKD node", + url, destination + ); + Ok(()) + } +} + +impl LoadBalancerCapability for OKDHaClusterTopology { + fn configure_load_balancer(&self, services: &[&str], port: u16) { + println!( + "Configuring load balancer for services {:?} on port {} in OKD", + services, port + ); + } + + fn get_load_balancer_status(&self) -> String { + "OKD Load Balancer: HEALTHY".to_string() + } +} + +impl FirewallCapability for OKDHaClusterTopology { + fn open_port(&self, port: u16, protocol: &str) { + println!( + "Opening port {} with protocol {} on OKD firewall", + port, protocol + ); + } + + fn close_port(&self, port: u16, protocol: &str) { + println!( + "Closing port {} with protocol {} on OKD firewall", + port, protocol + ); + } +} + +impl RouterCapability for OKDHaClusterTopology { + fn configure_route(&self, service: &str, hostname: &str) { + println!( + "Configuring route for service {} with hostname {} on OKD", + service, hostname + ); + } +} + +struct K3DTopology { + cluster_name: String, +} + +impl K3DTopology { + fn new() -> Self { + Self { + cluster_name: "k3d-local".to_string(), + } + } +} + +impl Topology for K3DTopology { + fn name(&self) -> &str { + &self.cluster_name + } +} + +impl K8sCapability for K3DTopology { + fn deploy_k8s_resource(&self, resource_yaml: &str) { + println!("Deploying K8s resource on K3D cluster: {}", resource_yaml); + } + + fn execute_kubectl(&self, command: &str) -> String { + println!("Executing kubectl command on K3D cluster: {}", command); + "kubectl command output from K3D".to_string() + } +} + +struct LinuxTopology { + hostname: String, +} + +impl 
LinuxTopology { + fn new() -> Self { + Self { + hostname: "linux-host".to_string(), + } + } +} + +impl Topology for LinuxTopology { + fn name(&self) -> &str { + &self.hostname + } +} + +impl LinuxCapability for LinuxTopology { + fn execute_command(&self, command: &str, args: &[&str]) -> String { + println!( + "Executing command '{}' with args {:?} on Linux host", + command, args + ); + todo!() + } + + fn download_file(&self, url: &str, destination: &str) -> Result<(), String> { + println!( + "Downloading file from {} to {} on Linux host", + url, destination + ); + Ok(()) + } +} + +// Score implementations +struct LAMPScore { + mysql_version: String, + php_version: String, + apache_version: String, +} + +impl LAMPScore { + fn new(mysql_version: &str, php_version: &str, apache_version: &str) -> Self { + Self { + mysql_version: mysql_version.to_string(), + php_version: php_version.to_string(), + apache_version: apache_version.to_string(), + } + } + + fn execute(&self, topology: &T) { + // Deploy MySQL + topology.deploy_k8s_resource("mysql-deployment.yaml"); + + // Deploy PHP + topology.deploy_k8s_resource("php-deployment.yaml"); + + // Deploy Apache + topology.deploy_k8s_resource("apache-deployment.yaml"); + + // Create service + topology.deploy_k8s_resource("lamp-service.yaml"); + + // Check deployment + let status = topology.execute_kubectl("get pods -l app=lamp"); + println!("LAMP deployment status: {}", status); + } +} + +struct BinaryScore { + url: String, + args: Vec, +} + +impl BinaryScore { + fn new(url: &str, args: Vec<&str>) -> Self { + Self { + url: url.to_string(), + args: args.iter().map(|s| s.to_string()).collect(), + } + } + + fn execute(&self, topology: &T) { + let destination = "/tmp/binary"; + + match topology.download_file(&self.url, destination) { + Ok(_) => { + println!("Binary downloaded successfully"); + + // Convert args to slice of &str + let args: Vec<&str> = self.args.iter().map(|s| s.as_str()).collect(); + + // Execute the binary + topology.execute_command(destination, &args); + println!("Binary execution completed"); + } + Err(e) => { + println!("Failed to download binary: {}", e); + } + } + } +} + +struct LoadBalancerScore { + services: Vec, + port: u16, +} + +impl LoadBalancerScore { + fn new(services: Vec<&str>, port: u16) -> Self { + Self { + services: services.iter().map(|s| s.to_string()).collect(), + port, + } + } + + fn execute(&self, topology: &T) { + println!("Configuring load balancer for services"); + + // Convert services to slice of &str + let services: Vec<&str> = self.services.iter().map(|s| s.as_str()).collect(); + + // Configure load balancer + topology.configure_load_balancer(&services, self.port); + + // Check status + let status = topology.get_load_balancer_status(); + println!("Load balancer status: {}", status); + } +} diff --git a/adr/core-abstractions/topology2/src/main_v1.rs b/adr/core-abstractions/topology2/src/main_v1.rs new file mode 100644 index 0000000..0771470 --- /dev/null +++ b/adr/core-abstractions/topology2/src/main_v1.rs @@ -0,0 +1,34 @@ +fn main() {} + +trait Topology {} + +struct DummyTopology {} + +impl Topology for DummyTopology {} + +impl Topology for LampTopology {} + +struct LampTopology {} + +struct Maestro { + topology: Box, +} + +trait Score { + type Topology: Topology; + fn execute(&self, topology: &Self::Topology); +} + +struct K8sScore {} +impl Score for K8sScore { + type Topology = LampTopology; + fn execute(&self, topology: &Box) { + todo!() + } +} + +impl Maestro { + pub fn execute(&self, score: Box>) { + 
score.execute(&self.topology); + } +} diff --git a/adr/core-abstractions/topology2/src/main_v2.rs b/adr/core-abstractions/topology2/src/main_v2.rs new file mode 100644 index 0000000..865d0dd --- /dev/null +++ b/adr/core-abstractions/topology2/src/main_v2.rs @@ -0,0 +1,76 @@ +fn main() { + // Example usage + let lamp_topology = LampTopology {}; + let k8s_score = K8sScore {}; + let docker_topology = DockerTopology{}; + + // Type-safe execution + let maestro = Maestro::new(Box::new(docker_topology)); + maestro.execute(&k8s_score); // This will work + + // This would fail at compile time if we tried: + // let dummy_topology = DummyTopology {}; + // let maestro = Maestro::new(Box::new(dummy_topology)); + // maestro.execute(&k8s_score); // Error: expected LampTopology, found DummyTopology +} + +// Base trait for all topologies +trait Topology { + // Common topology methods could go here + fn topology_type(&self) -> &str; +} + +struct DummyTopology {} +impl Topology for DummyTopology { + fn topology_type(&self) -> &str { "Dummy" } +} + +struct LampTopology {} +impl Topology for LampTopology { + fn topology_type(&self) -> &str { "LAMP" } +} + +struct DockerTopology {} + +impl Topology for DockerTopology { + fn topology_type(&self) -> &str { + todo!("DockerTopology") + } +} + +// The Score trait with an associated type for the required topology +trait Score { + type RequiredTopology: Topology + ?Sized; + fn execute(&self, topology: &Self::RequiredTopology); + fn score_type(&self) -> &str; +} + +// A score that requires LampTopology +struct K8sScore {} +impl Score for K8sScore { + type RequiredTopology = DockerTopology; + + fn execute(&self, topology: &Self::RequiredTopology) { + println!("Executing K8sScore on {} topology", topology.topology_type()); + // Implementation details... 
+ } + + fn score_type(&self) -> &str { "K8s" } +} + +// A generic maestro that can work with any topology type +struct Maestro { + topology: Box, +} + +impl Maestro { + pub fn new(topology: Box) -> Self { + Maestro { topology } + } + + // Execute a score that requires this specific topology type + pub fn execute>(&self, score: &S) { + println!("Maestro executing {} score", score.score_type()); + score.execute(&*self.topology); + } +} diff --git a/adr/core-abstractions/topology2/src/main_v4.rs b/adr/core-abstractions/topology2/src/main_v4.rs new file mode 100644 index 0000000..8f8d004 --- /dev/null +++ b/adr/core-abstractions/topology2/src/main_v4.rs @@ -0,0 +1,360 @@ +fn main() { + // Create topologies + let okd_topology = OKDHaClusterTopology::new(); + let k3d_topology = K3DTopology::new(); + let linux_topology = LinuxTopology::new(); + + // Create scores - boxing them as trait objects for dynamic dispatch + let scores: Vec> = vec![ + Box::new(LAMPScore::new("MySQL 8.0", "PHP 8.1", "Apache 2.4")), + Box::new(BinaryScore::new("https://example.com/binary", vec!["--arg1", "--arg2"])), + Box::new(LoadBalancerScore::new(vec!["service1", "service2"], 80)), + ]; + + // Running scores on OKD topology (which has all capabilities) + println!("\n=== Running all scores on OKD HA Cluster ==="); + for score in &scores { + match score.execute(&okd_topology) { + Ok(result) => println!("Score executed successfully: {}", result), + Err(e) => println!("Failed to execute score: {}", e), + } + } + + // Running scores on K3D topology (only has K8s capability) + println!("\n=== Running scores on K3D Cluster ==="); + for score in &scores { + match score.execute(&k3d_topology) { + Ok(result) => println!("Score executed successfully: {}", result), + Err(e) => println!("Failed to execute score: {}", e), + } + } + + // Running scores on Linux topology (only has Linux capability) + println!("\n=== Running scores on Linux Host ==="); + for score in &scores { + match score.execute(&linux_topology) { + Ok(result) => println!("Score executed successfully: {}", result), + Err(e) => println!("Failed to execute score: {}", e), + } + } +} + +// Base Topology trait +trait Topology: Any { + fn name(&self) -> &str; + + // This method allows us to get type information at runtime + fn as_any(&self) -> &dyn Any; +} + +// Use Any trait for runtime type checking +use std::any::Any; + +// Define capabilities +trait K8sCapability { + fn deploy_k8s_resource(&self, resource_yaml: &str); + fn execute_kubectl(&self, command: &str) -> String; +} + +trait OKDCapability: K8sCapability { + fn execute_oc(&self, command: &str) -> String; +} + +trait LinuxCapability { + fn execute_command(&self, command: &str, args: &[&str]); + fn download_file(&self, url: &str, destination: &str) -> Result<(), String>; +} + +trait LoadBalancerCapability { + fn configure_load_balancer(&self, services: &[&str], port: u16); + fn get_load_balancer_status(&self) -> String; +} + +// Base Score trait with dynamic dispatch +trait Score { + // Generic execute method that takes any topology + fn execute(&self, topology: &dyn Topology) -> Result; + + // Optional method to get score type for better error messages + fn score_type(&self) -> &str; +} + +// Topology implementations +struct OKDHaClusterTopology { + cluster_name: String, +} + +impl OKDHaClusterTopology { + fn new() -> Self { + Self { cluster_name: "okd-ha-cluster".to_string() } + } +} + +impl Topology for OKDHaClusterTopology { + fn name(&self) -> &str { + &self.cluster_name + } + + fn as_any(&self) -> &dyn Any 
{ + self + } +} + +impl K8sCapability for OKDHaClusterTopology { + fn deploy_k8s_resource(&self, resource_yaml: &str) { + println!("Deploying K8s resource on OKD cluster: {}", resource_yaml); + } + + fn execute_kubectl(&self, command: &str) -> String { + println!("Executing kubectl command on OKD cluster: {}", command); + "kubectl command output".to_string() + } +} + +impl OKDCapability for OKDHaClusterTopology { + fn execute_oc(&self, command: &str) -> String { + println!("Executing oc command on OKD cluster: {}", command); + "oc command output".to_string() + } +} + +impl LinuxCapability for OKDHaClusterTopology { + fn execute_command(&self, command: &str, args: &[&str]) { + println!("Executing command '{}' with args {:?} on OKD node", command, args); + } + + fn download_file(&self, url: &str, destination: &str) -> Result<(), String> { + println!("Downloading file from {} to {} on OKD node", url, destination); + Ok(()) + } +} + +impl LoadBalancerCapability for OKDHaClusterTopology { + fn configure_load_balancer(&self, services: &[&str], port: u16) { + println!("Configuring load balancer for services {:?} on port {} in OKD", services, port); + } + + fn get_load_balancer_status(&self) -> String { + "OKD Load Balancer: HEALTHY".to_string() + } +} + +struct K3DTopology { + cluster_name: String, +} + +impl K3DTopology { + fn new() -> Self { + Self { cluster_name: "k3d-local".to_string() } + } +} + +impl Topology for K3DTopology { + fn name(&self) -> &str { + &self.cluster_name + } + + fn as_any(&self) -> &dyn Any { + self + } +} + +impl K8sCapability for K3DTopology { + fn deploy_k8s_resource(&self, resource_yaml: &str) { + println!("Deploying K8s resource on K3D cluster: {}", resource_yaml); + } + + fn execute_kubectl(&self, command: &str) -> String { + println!("Executing kubectl command on K3D cluster: {}", command); + "kubectl command output from K3D".to_string() + } +} + +struct LinuxTopology { + hostname: String, +} + +impl LinuxTopology { + fn new() -> Self { + Self { hostname: "linux-host".to_string() } + } +} + +impl Topology for LinuxTopology { + fn name(&self) -> &str { + &self.hostname + } + + fn as_any(&self) -> &dyn Any { + self + } +} + +impl LinuxCapability for LinuxTopology { + fn execute_command(&self, command: &str, args: &[&str]) { + println!("Executing command '{}' with args {:?} on Linux host", command, args); + } + + fn download_file(&self, url: &str, destination: &str) -> Result<(), String> { + println!("Downloading file from {} to {} on Linux host", url, destination); + Ok(()) + } +} + +// Score implementations using dynamic capability checks +struct LAMPScore { + mysql_version: String, + php_version: String, + apache_version: String, +} + +impl LAMPScore { + fn new(mysql_version: &str, php_version: &str, apache_version: &str) -> Self { + Self { + mysql_version: mysql_version.to_string(), + php_version: php_version.to_string(), + apache_version: apache_version.to_string(), + } + } + + // Helper method for typesafe execution + fn execute_with_k8s(&self, topology: &dyn K8sCapability) -> String { + println!("Deploying LAMP stack with MySQL {}, PHP {}, Apache {}", + self.mysql_version, self.php_version, self.apache_version); + + // Deploy MySQL + topology.deploy_k8s_resource("mysql-deployment.yaml"); + + // Deploy PHP + topology.deploy_k8s_resource("php-deployment.yaml"); + + // Deploy Apache + topology.deploy_k8s_resource("apache-deployment.yaml"); + + // Create service + topology.deploy_k8s_resource("lamp-service.yaml"); + + // Check deployment + let status = 
topology.execute_kubectl("get pods -l app=lamp"); + format!("LAMP deployment status: {}", status) + } +} + +impl Score for LAMPScore { + fn execute(&self, topology: &dyn Topology) -> Result { + // Try to downcast to K8sCapability + if let Some(k8s_topology) = topology.as_any().downcast_ref::() { + Ok(self.execute_with_k8s(k8s_topology)) + } else if let Some(k8s_topology) = topology.as_any().downcast_ref::() { + Ok(self.execute_with_k8s(k8s_topology)) + } else { + Err(format!("LAMPScore requires K8sCapability but topology {} doesn't provide it", + topology.name())) + } + } + + fn score_type(&self) -> &str { + "LAMP" + } +} + +struct BinaryScore { + url: String, + args: Vec, +} + +impl BinaryScore { + fn new(url: &str, args: Vec<&str>) -> Self { + Self { + url: url.to_string(), + args: args.iter().map(|s| s.to_string()).collect(), + } + } + + // Helper method for typesafe execution + fn execute_with_linux(&self, topology: &dyn LinuxCapability) -> Result { + let destination = "/tmp/binary"; + + // Download the binary + println!("Preparing to run binary from {}", self.url); + + match topology.download_file(&self.url, destination) { + Ok(_) => { + println!("Binary downloaded successfully"); + + // Convert args to slice of &str + let args: Vec<&str> = self.args.iter().map(|s| s.as_str()).collect(); + + // Execute the binary + topology.execute_command(destination, &args); + Ok("Binary execution completed successfully".to_string()) + }, + Err(e) => { + Err(format!("Failed to download binary: {}", e)) + } + } + } +} + +impl Score for BinaryScore { + fn execute(&self, topology: &dyn Topology) -> Result { + // Try to downcast to LinuxCapability + if let Some(linux_topology) = topology.as_any().downcast_ref::() { + self.execute_with_linux(linux_topology) + } else if let Some(linux_topology) = topology.as_any().downcast_ref::() { + self.execute_with_linux(linux_topology) + } else { + Err(format!("BinaryScore requires LinuxCapability but topology {} doesn't provide it", + topology.name())) + } + } + + fn score_type(&self) -> &str { + "Binary" + } +} + +struct LoadBalancerScore { + services: Vec, + port: u16, +} + +impl LoadBalancerScore { + fn new(services: Vec<&str>, port: u16) -> Self { + Self { + services: services.iter().map(|s| s.to_string()).collect(), + port, + } + } + + // Helper method for typesafe execution + fn execute_with_lb(&self, topology: &dyn LoadBalancerCapability) -> String { + println!("Configuring load balancer for services"); + + // Convert services to slice of &str + let services: Vec<&str> = self.services.iter().map(|s| s.as_str()).collect(); + + // Configure load balancer + topology.configure_load_balancer(&services, self.port); + + // Check status + let status = topology.get_load_balancer_status(); + format!("Load balancer configured successfully. 
Status: {}", status) + } +} + +impl Score for LoadBalancerScore { + fn execute(&self, topology: &dyn Topology) -> Result { + // Only OKDHaClusterTopology implements LoadBalancerCapability + if let Some(lb_topology) = topology.as_any().downcast_ref::() { + Ok(self.execute_with_lb(lb_topology)) + } else { + Err(format!("LoadBalancerScore requires LoadBalancerCapability but topology {} doesn't provide it", + topology.name())) + } + } + + fn score_type(&self) -> &str { + "LoadBalancer" + } +} diff --git a/docs/diagrams/Harmony_Core_Architecture.drawio.svg b/docs/diagrams/Harmony_Core_Architecture.drawio.svg new file mode 100644 index 0000000..ba6e9fc --- /dev/null +++ b/docs/diagrams/Harmony_Core_Architecture.drawio.svg @@ -0,0 +1,4 @@ + + + +
create_interpret
Score<T>
A Score defines a desirable state.

It can then be read by an Interpret that will apply the Score's desired state to the Topology.
Interpret<T>
An Interpret<T> knows how to apply the desired state from a Score onto a Topology T.

The Interpret declares the Capability trait bounds it requires to do its job.

Think of the musical metaphor: an Interpret reads a trumpet score and requires a trumpet to play it.

In Harmony, an Interpret reads a DNS Score and requires a Topology that provides the DNS Capability to apply it.
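The sketch below illustrates this relationship with hypothetical names (`DnsServer`, `DnsRecordScore`, `DnsInterpret`); it is not the actual Harmony API, only the shape of the idea:

```rust
// Hypothetical capability: a Topology that can serve DNS records.
trait DnsServer {
    fn register_a_record(&self, name: &str, ip: &str) -> Result<(), String>;
}

// A Score only describes a desired state; it does nothing by itself.
struct DnsRecordScore {
    name: String,
    ip: String,
}

// The Interpret knows how to apply that state, and declares the
// capability it needs as a trait bound on the Topology type.
struct DnsInterpret;

impl DnsInterpret {
    fn execute<T: DnsServer>(&self, score: &DnsRecordScore, topology: &T) -> Result<(), String> {
        topology.register_a_record(&score.name, &score.ip)
    }
}
```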
Maestro<T>
A Maestro owns a list of registered Scores and a Topology.

The Maestro can execute Scores<T> on a Topology T when that Topology provides all the capabilities required by the Score<T>.

This compatibility is verified at compile time thanks to the <T: Topology> bound: the program will not compile if a Score requires a Capability that is not provided by the Topology T compiled into the Maestro<T>.
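Building on the same hypothetical sketch, the compile-time guarantee comes down to a trait bound on the method that accepts the Score; a Topology type without the capability simply cannot call it:

```rust
// Reuses the hypothetical DnsServer / DnsRecordScore / DnsInterpret names above.
struct Maestro<T> {
    topology: T,
}

impl<T> Maestro<T> {
    fn new(topology: T) -> Self {
        Self { topology }
    }

    // Only compiles for a Maestro<T> whose T provides the DnsServer capability.
    fn apply_dns_score(&self, score: &DnsRecordScore) -> Result<(), String>
    where
        T: DnsServer,
    {
        DnsInterpret.execute(score, &self.topology)
    }
}

// For a topology type that does not implement DnsServer,
// `maestro.apply_dns_score(&score)` is rejected at compile time.
```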
Topology
A Topology is a group of resources that provides specific Capabilities.

For example, an OPNSenseFirewall Topology will provide many core Capabilities, including: DNS Server, Router, Firewall, HTTP Server, Unix Shell, OPNSenseShell, etc.

But it will not provide incompatible capabilities such as CephStorage, ContainerRuntime, Kubernetes, BrocadeSwitch, etc.
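A minimal sketch, again with hypothetical trait and type names, of a firewall-style topology opting into some capabilities and not others:

```rust
// Hypothetical capability traits, each expressing a standard.
trait DnsServer {
    fn register_a_record(&self, name: &str, ip: &str) -> Result<(), String>;
}
trait PacketRouter {
    fn add_route(&self, destination: &str, gateway: &str) -> Result<(), String>;
}
trait KubernetesCluster {
    fn apply_manifest(&self, yaml: &str) -> Result<(), String>;
}

// A firewall-style topology opts into the capabilities it can actually provide.
struct OpnSenseFirewallTopology {
    hostname: String,
}

impl DnsServer for OpnSenseFirewallTopology {
    fn register_a_record(&self, name: &str, ip: &str) -> Result<(), String> {
        println!("{}: registering A record {} -> {}", self.hostname, name, ip);
        Ok(())
    }
}

impl PacketRouter for OpnSenseFirewallTopology {
    fn add_route(&self, destination: &str, gateway: &str) -> Result<(), String> {
        println!("{}: routing {} via {}", self.hostname, destination, gateway);
        Ok(())
    }
}

// Deliberately no `impl KubernetesCluster for OpnSenseFirewallTopology`:
// any Score whose Interpret is bounded on that capability cannot be
// compiled against this topology.
```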
Inventory
The Inventory contains the hardware components. Everything from a physical address to a MacAddress may be defined in an Inventory.

This allows bare metal topologies to operate Scores that manage bare metal resources such as physical hosts, switches, etc. Typically, in a hyperconverged cluster, Inventory components are treated like cattle: the Topology attains the desired state by exploiting the Inventory components. If an inventory component fails (hard drive, server, GPU, etc.), the Maestro can react to ensure all Scores are still applied properly, refresh them if necessary, or raise alerts if the desired state cannot be restored automatically.
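Purely as an illustration (the types below are made up and do not mirror Harmony's actual `PhysicalHost` model), an inventory of interchangeable hosts might be queried like this when deciding whether the desired state can still be met:

```rust
// Illustrative only: a bare-metal inventory treated as cattle.
#[derive(Debug)]
struct HostEntry {
    mac_address: String,
    healthy: bool,
}

#[derive(Debug, Default)]
struct Inventory {
    hosts: Vec<HostEntry>,
}

impl Inventory {
    /// Hosts still available to satisfy the Topology's desired state.
    fn healthy_hosts(&self) -> impl Iterator<Item = &HostEntry> + '_ {
        self.hosts.iter().filter(|h| h.healthy)
    }
}

fn main() {
    let inventory = Inventory {
        hosts: vec![
            HostEntry { mac_address: "00:00:00:00:00:01".into(), healthy: true },
            HostEntry { mac_address: "00:00:00:00:00:02".into(), healthy: false },
        ],
    };
    // A Maestro could periodically compare the desired state against the
    // healthy hosts and re-apply Scores or raise an alert when one drops out.
    println!("{} healthy host(s) remaining", inventory.healthy_hosts().count());
}
```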
A Capability is a concept that is not implemented in code, but is central to the way we define Scores, Interprets and Topologies.

Capabilities are extremely diverse, but generally are an expression of a standard such as DNS Server, Packet Router, LoadBalancer, PXE Server, PXE Host, K8sCluster, etc.
\ No newline at end of file diff --git a/examples/lamp/Cargo.toml b/examples/lamp/Cargo.toml new file mode 100644 index 0000000..1bdcf68 --- /dev/null +++ b/examples/lamp/Cargo.toml @@ -0,0 +1,18 @@ +[package] +name = "example-lamp" +edition = "2024" +version.workspace = true +readme.workspace = true +license.workspace = true +publish = false + +[dependencies] +harmony = { path = "../../harmony" } +#harmony_tui = { path = "../../harmony_tui" } +harmony_types = { path = "../../harmony_types" } +cidr = { workspace = true } +tokio = { workspace = true } +harmony_macros = { path = "../../harmony_macros" } +log = { workspace = true } +env_logger = { workspace = true } +url = { workspace = true } diff --git a/examples/lamp/php/index.php b/examples/lamp/php/index.php new file mode 100644 index 0000000..6cf1a50 --- /dev/null +++ b/examples/lamp/php/index.php @@ -0,0 +1,3 @@ + diff --git a/examples/lamp/src/main.rs b/examples/lamp/src/main.rs new file mode 100644 index 0000000..7277aa5 --- /dev/null +++ b/examples/lamp/src/main.rs @@ -0,0 +1,24 @@ +use harmony::{ + data::Version, + maestro::Maestro, + modules::lamp::{LAMPConfig, LAMPScore}, + topology::{HAClusterTopology, Url}, +}; + +#[tokio::main] +async fn main() { + let lamp_stack = LAMPScore { + name: "harmony-lamp-demo".to_string(), + domain: Url::Url(url::Url::parse("https://lampdemo.harmony.nationtech.io").unwrap()), + php_version: Version::from("8.4.4").unwrap(), + config: LAMPConfig { + project_root: "./php".into(), + ..Default::default() + }, + }; + + Maestro::::load_from_env() + .interpret(Box::new(lamp_stack)) + .await + .unwrap(); +} diff --git a/examples/nanodc/src/main.rs b/examples/nanodc/src/main.rs index 3a683d0..8aad09a 100644 --- a/examples/nanodc/src/main.rs +++ b/examples/nanodc/src/main.rs @@ -1,7 +1,10 @@ use harmony::{ inventory::Inventory, maestro::Maestro, - modules::{dummy::{ErrorScore, PanicScore, SuccessScore}, k8s::deployment::K8sDeploymentScore}, + modules::{ + dummy::{ErrorScore, PanicScore, SuccessScore}, + k8s::deployment::K8sDeploymentScore, + }, topology::HAClusterTopology, }; @@ -12,6 +15,12 @@ async fn main() { let mut maestro = Maestro::new(inventory, topology); maestro.register_all(vec![ + // ADD scores : + // 1. OPNSense setup scores + // 2. Bootstrap node setup + // 3. Control plane setup + // 4. Workers setup + // 5. 
Various tools and apps setup Box::new(SuccessScore {}), Box::new(ErrorScore {}), Box::new(PanicScore {}), diff --git a/examples/opnsense/src/main.rs b/examples/opnsense/src/main.rs index 6315390..ddf781d 100644 --- a/examples/opnsense/src/main.rs +++ b/examples/opnsense/src/main.rs @@ -12,7 +12,7 @@ use harmony::{ modules::{ dummy::{ErrorScore, PanicScore, SuccessScore}, http::HttpScore, - okd::{dhcp::OKDDhcpScore, dns::OKDDnsScore}, + okd::{dhcp::OKDDhcpScore, dns::OKDDnsScore, load_balancer::OKDLoadBalancerScore}, opnsense::OPNsenseShellCommandScore, tftp::TftpScore, }, @@ -78,8 +78,7 @@ async fn main() { let dhcp_score = OKDDhcpScore::new(&topology, &inventory); let dns_score = OKDDnsScore::new(&topology); - let load_balancer_score = - harmony::modules::okd::load_balancer::OKDLoadBalancerScore::new(&topology); + let load_balancer_score = OKDLoadBalancerScore::new(&topology); let tftp_score = TftpScore::new(Url::LocalFolder("./data/watchguard/tftpboot".to_string())); let http_score = HttpScore::new(Url::LocalFolder( diff --git a/examples/tui/src/main.rs b/examples/tui/src/main.rs index 3a683d0..05a768b 100644 --- a/examples/tui/src/main.rs +++ b/examples/tui/src/main.rs @@ -1,7 +1,7 @@ use harmony::{ inventory::Inventory, maestro::Maestro, - modules::{dummy::{ErrorScore, PanicScore, SuccessScore}, k8s::deployment::K8sDeploymentScore}, + modules::dummy::{ErrorScore, PanicScore, SuccessScore}, topology::HAClusterTopology, }; diff --git a/harmony/Cargo.toml b/harmony/Cargo.toml index f084af0..c5348d9 100644 --- a/harmony/Cargo.toml +++ b/harmony/Cargo.toml @@ -29,3 +29,4 @@ kube = { workspace = true } k8s-openapi = { workspace = true } serde_yaml = { workspace = true } http = { workspace = true } +serde-value = { workspace = true } diff --git a/harmony/src/domain/hardware/mod.rs b/harmony/src/domain/hardware/mod.rs index 47d7a33..8e24768 100644 --- a/harmony/src/domain/hardware/mod.rs +++ b/harmony/src/domain/hardware/mod.rs @@ -2,6 +2,8 @@ use std::sync::Arc; use derive_new::new; use harmony_types::net::MacAddress; +use serde::{Serialize, Serializer, ser::SerializeStruct}; +use serde_value::Value; pub type HostGroup = Vec; pub type SwitchGroup = Vec; @@ -75,10 +77,7 @@ impl PhysicalHost { } pub fn label(mut self, name: String, value: String) -> Self { - self.labels.push(Label { - _name: name, - _value: value, - }); + self.labels.push(Label { name, value }); self } @@ -88,7 +87,49 @@ impl PhysicalHost { } } -#[derive(new)] +// Custom Serialize implementation for PhysicalHost +impl Serialize for PhysicalHost { + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + // Determine the number of fields + let mut num_fields = 5; // category, network, storage, labels, management + if self.memory_size.is_some() { + num_fields += 1; + } + if self.cpu_count.is_some() { + num_fields += 1; + } + + // Create a serialization structure + let mut state = serializer.serialize_struct("PhysicalHost", num_fields)?; + + // Serialize the standard fields + state.serialize_field("category", &self.category)?; + state.serialize_field("network", &self.network)?; + state.serialize_field("storage", &self.storage)?; + state.serialize_field("labels", &self.labels)?; + + // Serialize optional fields + if let Some(memory) = self.memory_size { + state.serialize_field("memory_size", &memory)?; + } + if let Some(cpu) = self.cpu_count { + state.serialize_field("cpu_count", &cpu)?; + } + + let mgmt_data = self.management.serialize_management(); + // pub management: Arc, + + // Handle management interface - 
either as a field or flattened + state.serialize_field("management", &mgmt_data)?; + + state.end() + } +} + +#[derive(new, Serialize)] pub struct ManualManagementInterface; impl ManagementInterface for ManualManagementInterface { @@ -101,7 +142,7 @@ impl ManagementInterface for ManualManagementInterface { } } -pub trait ManagementInterface: Send + Sync { +pub trait ManagementInterface: Send + Sync + SerializableManagement { fn boot_to_pxe(&self); fn get_supported_protocol_names(&self) -> String; } @@ -115,21 +156,49 @@ impl std::fmt::Debug for dyn ManagementInterface { } } -#[derive(Debug, Clone)] +// Define a trait for serializing management interfaces +pub trait SerializableManagement { + fn serialize_management(&self) -> Value; +} + +// Provide a blanket implementation for all types that implement both ManagementInterface and Serialize +impl SerializableManagement for T +where + T: ManagementInterface + Serialize, +{ + fn serialize_management(&self) -> Value { + serde_value::to_value(self).expect("ManagementInterface should serialize successfully") + } +} + +#[derive(Debug, Clone, Serialize)] pub enum HostCategory { Server, Firewall, Switch, } -#[derive(Debug, new, Clone)] +#[derive(Debug, new, Clone, Serialize)] pub struct NetworkInterface { pub name: Option, pub mac_address: MacAddress, pub speed: Option, } -#[derive(Debug, new, Clone)] +#[cfg(test)] +use harmony_macros::mac_address; +#[cfg(test)] +impl NetworkInterface { + pub fn dummy() -> Self { + Self { + name: Some(String::new()), + mac_address: mac_address!("00:00:00:00:00:00"), + speed: Some(0), + } + } +} + +#[derive(Debug, new, Clone, Serialize)] pub enum StorageConnectionType { Sata3g, Sata6g, @@ -137,13 +206,13 @@ pub enum StorageConnectionType { Sas12g, PCIE, } -#[derive(Debug, Clone)] +#[derive(Debug, Clone, Serialize)] pub enum StorageKind { SSD, NVME, HDD, } -#[derive(Debug, new, Clone)] +#[derive(Debug, new, Clone, Serialize)] pub struct Storage { pub connection: StorageConnectionType, pub kind: StorageKind, @@ -151,20 +220,33 @@ pub struct Storage { pub serial: String, } -#[derive(Debug, Clone)] +#[cfg(test)] +impl Storage { + pub fn dummy() -> Self { + Self { + connection: StorageConnectionType::Sata3g, + kind: StorageKind::SSD, + size: 0, + serial: String::new(), + } + } +} + +#[derive(Debug, Clone, Serialize)] pub struct Switch { _interface: Vec, _management_interface: NetworkInterface, } -#[derive(Debug, new, Clone)] +#[derive(Debug, new, Clone, Serialize)] pub struct Label { - _name: String, - _value: String, + pub name: String, + pub value: String, } + pub type Address = String; -#[derive(new, Debug)] +#[derive(new, Debug, Serialize)] pub struct Location { pub address: Address, pub name: String, @@ -178,3 +260,158 @@ impl Location { } } } + +#[cfg(test)] +mod tests { + use super::*; + use serde::{Deserialize, Serialize}; + use std::sync::Arc; + + // Mock implementation of ManagementInterface + #[derive(Debug, Clone, Serialize, Deserialize)] + struct MockHPIlo { + ip: String, + username: String, + password: String, + firmware_version: String, + } + + impl ManagementInterface for MockHPIlo { + fn boot_to_pxe(&self) {} + + fn get_supported_protocol_names(&self) -> String { + String::new() + } + } + + // Another mock implementation + #[derive(Debug, Clone, Serialize, Deserialize)] + struct MockDellIdrac { + hostname: String, + port: u16, + api_token: String, + } + + impl ManagementInterface for MockDellIdrac { + fn boot_to_pxe(&self) {} + + fn get_supported_protocol_names(&self) -> String { + String::new() + } + } + 
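The blanket `SerializableManagement` implementation above is what lets a `dyn ManagementInterface` trait object be embedded in `PhysicalHost`'s custom `Serialize` impl even though `Serialize` itself is not object safe. A minimal, self-contained sketch of the same pattern, assuming only `serde` and `serde_value` (the `MockIlo` type and `main` driver are illustrative, not part of the crate):

```rust
use std::sync::Arc;

use serde::Serialize;
use serde_value::Value;

// Object-safe helper trait: exposes the serialized form as a `serde_value::Value`
// instead of requiring the non-object-safe `Serialize` bound on the trait object.
trait SerializableManagement {
    fn serialize_management(&self) -> Value;
}

// Blanket impl: every management type that is also `Serialize` gets the helper for free.
impl<T> SerializableManagement for T
where
    T: ManagementInterface + Serialize,
{
    fn serialize_management(&self) -> Value {
        serde_value::to_value(self).expect("management type should serialize")
    }
}

trait ManagementInterface: Send + Sync + SerializableManagement {}

#[derive(Serialize)]
struct MockIlo {
    ip: String,
}
impl ManagementInterface for MockIlo {}

fn main() {
    let mgmt: Arc<dyn ManagementInterface> = Arc::new(MockIlo { ip: "10.0.0.9".into() });
    // The trait object yields a `Value` that a custom `Serialize` impl can forward
    // with `serialize_field`, which is exactly what `PhysicalHost::serialize` does.
    let value = mgmt.serialize_management();
    println!("{value:?}");
}
```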
+ #[test] + fn test_serialize_physical_host_with_hp_ilo() { + // Create a PhysicalHost with HP iLO management + let host = PhysicalHost { + category: HostCategory::Server, + network: vec![NetworkInterface::dummy()], + management: Arc::new(MockHPIlo { + ip: "192.168.1.100".to_string(), + username: "admin".to_string(), + password: "password123".to_string(), + firmware_version: "2.5.0".to_string(), + }), + storage: vec![Storage::dummy()], + labels: vec![Label::new("datacenter".to_string(), "us-east".to_string())], + memory_size: Some(64_000_000), + cpu_count: Some(16), + }; + + // Serialize to JSON + let json = serde_json::to_string(&host).expect("Failed to serialize host"); + + // Check that the serialized JSON contains the HP iLO details + assert!(json.contains("192.168.1.100")); + assert!(json.contains("admin")); + assert!(json.contains("password123")); + assert!(json.contains("firmware_version")); + assert!(json.contains("2.5.0")); + + // Parse back to verify structure (not the exact management interface) + let parsed: serde_json::Value = serde_json::from_str(&json).expect("Failed to parse JSON"); + + // Verify basic structure + assert_eq!(parsed["cpu_count"], 16); + assert_eq!(parsed["memory_size"], 64_000_000); + assert_eq!(parsed["network"][0]["name"], ""); + } + + #[test] + fn test_serialize_physical_host_with_dell_idrac() { + // Create a PhysicalHost with Dell iDRAC management + let host = PhysicalHost { + category: HostCategory::Server, + network: vec![NetworkInterface::dummy()], + management: Arc::new(MockDellIdrac { + hostname: "idrac-server01".to_string(), + port: 443, + api_token: "abcdef123456".to_string(), + }), + storage: vec![Storage::dummy()], + labels: vec![Label::new("env".to_string(), "production".to_string())], + memory_size: Some(128_000_000), + cpu_count: Some(32), + }; + + // Serialize to JSON + let json = serde_json::to_string(&host).expect("Failed to serialize host"); + + // Check that the serialized JSON contains the Dell iDRAC details + assert!(json.contains("idrac-server01")); + assert!(json.contains("443")); + assert!(json.contains("abcdef123456")); + + // Parse back to verify structure + let parsed: serde_json::Value = serde_json::from_str(&json).expect("Failed to parse JSON"); + + // Verify basic structure + assert_eq!(parsed["cpu_count"], 32); + assert_eq!(parsed["memory_size"], 128_000_000); + assert_eq!(parsed["storage"][0]["path"], serde_json::Value::Null); + } + + #[test] + fn test_different_management_implementations_produce_valid_json() { + // Create hosts with different management implementations + let host1 = PhysicalHost { + category: HostCategory::Server, + network: vec![], + management: Arc::new(MockHPIlo { + ip: "10.0.0.1".to_string(), + username: "root".to_string(), + password: "secret".to_string(), + firmware_version: "3.0.0".to_string(), + }), + storage: vec![], + labels: vec![], + memory_size: None, + cpu_count: None, + }; + + let host2 = PhysicalHost { + category: HostCategory::Server, + network: vec![], + management: Arc::new(MockDellIdrac { + hostname: "server02-idrac".to_string(), + port: 8443, + api_token: "token123".to_string(), + }), + storage: vec![], + labels: vec![], + memory_size: None, + cpu_count: None, + }; + + // Both should serialize successfully + let json1 = serde_json::to_string(&host1).expect("Failed to serialize host1"); + let json2 = serde_json::to_string(&host2).expect("Failed to serialize host2"); + + // Both JSONs should be valid and parseable + let _: serde_json::Value = serde_json::from_str(&json1).expect("Invalid 
JSON for host1"); + let _: serde_json::Value = serde_json::from_str(&json2).expect("Invalid JSON for host2"); + + // The JSONs should be different because they contain different management interfaces + assert_ne!(json1, json2); + } +} diff --git a/harmony/src/domain/interpret/mod.rs b/harmony/src/domain/interpret/mod.rs index 731d663..9cec988 100644 --- a/harmony/src/domain/interpret/mod.rs +++ b/harmony/src/domain/interpret/mod.rs @@ -7,7 +7,7 @@ use super::{ data::{Id, Version}, executors::ExecutorError, inventory::Inventory, - topology::HAClusterTopology, + topology::Topology, }; pub enum InterpretName { @@ -37,12 +37,9 @@ impl std::fmt::Display for InterpretName { } #[async_trait] -pub trait Interpret: std::fmt::Debug + Send { - async fn execute( - &self, - inventory: &Inventory, - topology: &HAClusterTopology, - ) -> Result; +pub trait Interpret: std::fmt::Debug + Send { + async fn execute(&self, inventory: &Inventory, topology: &T) + -> Result; fn get_name(&self) -> InterpretName; fn get_version(&self) -> Version; fn get_status(&self) -> InterpretStatus; diff --git a/harmony/src/domain/maestro/mod.rs b/harmony/src/domain/maestro/mod.rs index ed28173..256c759 100644 --- a/harmony/src/domain/maestro/mod.rs +++ b/harmony/src/domain/maestro/mod.rs @@ -3,22 +3,22 @@ use std::sync::{Arc, RwLock}; use log::info; use super::{ - interpret::{Interpret, InterpretError, Outcome}, + interpret::{InterpretError, Outcome}, inventory::Inventory, score::Score, - topology::HAClusterTopology, + topology::Topology, }; -type ScoreVec = Vec>; +type ScoreVec = Vec>>; -pub struct Maestro { +pub struct Maestro { inventory: Inventory, - topology: HAClusterTopology, - scores: Arc>, + topology: T, + scores: Arc>>, } -impl Maestro { - pub fn new(inventory: Inventory, topology: HAClusterTopology) -> Self { +impl Maestro { + pub fn new(inventory: Inventory, topology: T) -> Self { Self { inventory, topology, @@ -26,16 +26,37 @@ impl Maestro { } } + // Load the inventory and inventory from environment. + // This function is able to discover the context that it is running in, such as k8s clusters, aws cloud, linux host, etc. 
+ // When the HARMONY_TOPOLOGY environment variable is not set, it will default to install k3s + // locally (lazily, if not installed yet, when the first execution occurs) and use that as a topology + // So, by default, the inventory is a single host that the binary is running on, and the + // topology is a single node k3s + // + // By default : + // - Linux => k3s + // - macos, windows => docker compose + // + // To run more complex cases like OKDHACluster, either provide the default target in the + // harmony infrastructure as code or as an environment variable + pub fn load_from_env() -> Self { + // Load env var HARMONY_TOPOLOGY + match std::env::var("HARMONY_TOPOLOGY") { + Ok(_) => todo!(), + Err(_) => todo!(), + } + } + pub fn start(&mut self) { info!("Starting Maestro"); } - pub fn register_all(&mut self, mut scores: ScoreVec) { + pub fn register_all(&mut self, mut scores: ScoreVec) { let mut score_mut = self.scores.write().expect("Should acquire lock"); score_mut.append(&mut scores); } - pub async fn interpret(&self, score: Box) -> Result { + pub async fn interpret(&self, score: Box>) -> Result { info!("Running score {score:?}"); let interpret = score.create_interpret(); info!("Launching interpret {interpret:?}"); @@ -44,7 +65,7 @@ impl Maestro { result } - pub fn scores(&self) -> Arc> { + pub fn scores(&self) -> Arc>> { self.scores.clone() } } diff --git a/harmony/src/domain/score.rs b/harmony/src/domain/score.rs index 7b2c790..dbe7aa5 100644 --- a/harmony/src/domain/score.rs +++ b/harmony/src/domain/score.rs @@ -1,7 +1,41 @@ -use super::interpret::Interpret; +use serde::Serialize; +use serde_value::Value; -pub trait Score: std::fmt::Debug + Send + Sync { - fn create_interpret(&self) -> Box; +use super::{interpret::Interpret, topology::Topology}; + +pub trait Score: + std::fmt::Debug + Send + Sync + CloneBoxScore + SerializeScore +{ + fn create_interpret(&self) -> Box>; fn name(&self) -> String; - fn clone_box(&self) -> Box; +} + +pub trait SerializeScore { + fn serialize(&self) -> Value; +} + +impl<'de, S, T> SerializeScore for S +where + T: Topology, + S: Score + Serialize, +{ + fn serialize(&self) -> Value { + // TODO not sure if this is the right place to handle the error or it should bubble + // up? 
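+        // (Bubbling it up would mean having SerializeScore::serialize return a
+        // Result instead of a Value, which every caller would then need to handle.)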
+ serde_value::to_value(&self).expect("Score should serialize successfully") + } +} + +pub trait CloneBoxScore { + fn clone_box(&self) -> Box>; +} + +impl CloneBoxScore for S +where + T: Topology, + S: Score + Clone + 'static, +{ + fn clone_box(&self) -> Box> { + Box::new(self.clone()) + } } diff --git a/harmony/src/domain/topology/ha_cluster.rs b/harmony/src/domain/topology/ha_cluster.rs index ba7c063..0e9230b 100644 --- a/harmony/src/domain/topology/ha_cluster.rs +++ b/harmony/src/domain/topology/ha_cluster.rs @@ -15,9 +15,11 @@ use super::IpAddress; use super::LoadBalancer; use super::LoadBalancerService; use super::LogicalHost; +use super::OcK8sclient; use super::Router; use super::TftpServer; +use super::Topology; use super::Url; use super::openshift::OpenshiftClient; use std::sync::Arc; @@ -38,11 +40,20 @@ pub struct HAClusterTopology { pub switch: Vec, } -impl HAClusterTopology { - pub async fn oc_client(&self) -> Result, kube::Error> { +impl Topology for HAClusterTopology { + fn name(&self) -> &str { + todo!() + } +} + +#[async_trait] +impl OcK8sclient for HAClusterTopology { + async fn oc_client(&self) -> Result, kube::Error> { Ok(Arc::new(OpenshiftClient::try_default().await?)) } +} +impl HAClusterTopology { pub fn autoload() -> Self { let dummy_infra = Arc::new(DummyInfra {}); let dummy_host = LogicalHost { @@ -67,6 +78,143 @@ impl HAClusterTopology { } } +#[async_trait] +impl DnsServer for HAClusterTopology { + async fn register_dhcp_leases(&self, register: bool) -> Result<(), ExecutorError> { + self.dns_server.register_dhcp_leases(register).await + } + async fn register_hosts(&self, hosts: Vec) -> Result<(), ExecutorError> { + self.dns_server.register_hosts(hosts).await + } + fn remove_record(&self, name: &str, record_type: DnsRecordType) -> Result<(), ExecutorError> { + self.dns_server.remove_record(name, record_type) + } + async fn list_records(&self) -> Vec { + self.dns_server.list_records().await + } + fn get_ip(&self) -> IpAddress { + self.dns_server.get_ip() + } + fn get_host(&self) -> LogicalHost { + self.dns_server.get_host() + } + async fn commit_config(&self) -> Result<(), ExecutorError> { + self.dns_server.commit_config().await + } +} + +#[async_trait] +impl LoadBalancer for HAClusterTopology { + fn get_ip(&self) -> IpAddress { + self.load_balancer.get_ip() + } + fn get_host(&self) -> LogicalHost { + self.load_balancer.get_host() + } + async fn add_service(&self, service: &LoadBalancerService) -> Result<(), ExecutorError> { + self.load_balancer.add_service(service).await + } + async fn remove_service(&self, service: &LoadBalancerService) -> Result<(), ExecutorError> { + self.load_balancer.remove_service(service).await + } + async fn list_services(&self) -> Vec { + self.load_balancer.list_services().await + } + async fn ensure_initialized(&self) -> Result<(), ExecutorError> { + self.load_balancer.ensure_initialized().await + } + async fn commit_config(&self) -> Result<(), ExecutorError> { + self.load_balancer.commit_config().await + } + async fn reload_restart(&self) -> Result<(), ExecutorError> { + self.load_balancer.reload_restart().await + } +} + +#[async_trait] +impl DhcpServer for HAClusterTopology { + async fn add_static_mapping(&self, entry: &DHCPStaticEntry) -> Result<(), ExecutorError> { + self.dhcp_server.add_static_mapping(entry).await + } + async fn remove_static_mapping(&self, mac: &MacAddress) -> Result<(), ExecutorError> { + self.dhcp_server.remove_static_mapping(mac).await + } + async fn list_static_mappings(&self) -> Vec<(MacAddress, IpAddress)> { + 
self.dhcp_server.list_static_mappings().await + } + async fn set_next_server(&self, ip: IpAddress) -> Result<(), ExecutorError> { + self.dhcp_server.set_next_server(ip).await + } + async fn set_boot_filename(&self, boot_filename: &str) -> Result<(), ExecutorError> { + self.dhcp_server.set_boot_filename(boot_filename).await + } + fn get_ip(&self) -> IpAddress { + self.dhcp_server.get_ip() + } + fn get_host(&self) -> LogicalHost { + self.dhcp_server.get_host() + } + async fn commit_config(&self) -> Result<(), ExecutorError> { + self.dhcp_server.commit_config().await + } +} + +#[async_trait] +impl TftpServer for HAClusterTopology { + async fn serve_files(&self, url: &Url) -> Result<(), ExecutorError> { + self.tftp_server.serve_files(url).await + } + fn get_ip(&self) -> IpAddress { + self.tftp_server.get_ip() + } + + async fn set_ip(&self, ip: IpAddress) -> Result<(), ExecutorError> { + self.tftp_server.set_ip(ip).await + } + async fn ensure_initialized(&self) -> Result<(), ExecutorError> { + self.tftp_server.ensure_initialized().await + } + async fn commit_config(&self) -> Result<(), ExecutorError> { + self.tftp_server.commit_config().await + } + async fn reload_restart(&self) -> Result<(), ExecutorError> { + self.tftp_server.reload_restart().await + } +} + +impl Router for HAClusterTopology { + fn get_gateway(&self) -> super::IpAddress { + self.router.get_gateway() + } + fn get_cidr(&self) -> cidr::Ipv4Cidr { + self.router.get_cidr() + } + fn get_host(&self) -> LogicalHost { + self.router.get_host() + } +} + +#[async_trait] +impl HttpServer for HAClusterTopology { + async fn serve_files(&self, url: &Url) -> Result<(), ExecutorError> { + self.http_server.serve_files(url).await + } + + fn get_ip(&self) -> IpAddress { + unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA) + } + async fn ensure_initialized(&self) -> Result<(), ExecutorError> { + unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA) + } + async fn commit_config(&self) -> Result<(), ExecutorError> { + unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA) + } + async fn reload_restart(&self) -> Result<(), ExecutorError> { + unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA) + } +} + +#[derive(Debug)] struct DummyInfra; const UNIMPLEMENTED_DUMMY_INFRA: &str = "This is a dummy infrastructure, no operation is supported"; @@ -210,11 +358,7 @@ impl DnsServer for DummyInfra { async fn register_hosts(&self, _hosts: Vec) -> Result<(), ExecutorError> { unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA) } - fn remove_record( - &mut self, - _name: &str, - _record_type: DnsRecordType, - ) -> Result<(), ExecutorError> { + fn remove_record(&self, _name: &str, _record_type: DnsRecordType) -> Result<(), ExecutorError> { unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA) } async fn list_records(&self) -> Vec { diff --git a/harmony/src/domain/topology/host_binding.rs b/harmony/src/domain/topology/host_binding.rs index c01869a..371f159 100644 --- a/harmony/src/domain/topology/host_binding.rs +++ b/harmony/src/domain/topology/host_binding.rs @@ -1,4 +1,5 @@ use derive_new::new; +use serde::Serialize; use crate::hardware::PhysicalHost; @@ -8,7 +9,7 @@ use super::LogicalHost; /// /// This is the only construct that directly maps a logical host to a physical host. /// It serves as a bridge between the logical cluster structure and the physical infrastructure. 
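Since `HAClusterTopology` now forwards each capability trait to the matching field, callers that need only one capability can stay generic over the topology. A minimal sketch of such a caller, written as it would sit inside the harmony crate (`run_dhcp` is an illustrative helper, not an existing function):

```rust
use crate::{
    interpret::{Interpret, InterpretError, Outcome},
    inventory::Inventory,
    modules::dhcp::DhcpScore,
    score::Score,
    topology::{DhcpServer, Topology},
};

// Any topology that can act as a DHCP server is accepted; HAClusterTopology
// qualifies because it forwards every DhcpServer call to its dhcp_server field.
async fn run_dhcp<T: Topology + DhcpServer>(
    score: &DhcpScore,
    inventory: &Inventory,
    topology: &T,
) -> Result<Outcome, InterpretError> {
    score.create_interpret().execute(inventory, topology).await
}
```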
-#[derive(Debug, new, Clone)] +#[derive(Debug, new, Clone, Serialize)] pub struct HostBinding { /// Reference to the LogicalHost pub logical_host: LogicalHost, diff --git a/harmony/src/domain/topology/load_balancer.rs b/harmony/src/domain/topology/load_balancer.rs index 4ebcba8..afb9092 100644 --- a/harmony/src/domain/topology/load_balancer.rs +++ b/harmony/src/domain/topology/load_balancer.rs @@ -2,6 +2,7 @@ use std::{net::SocketAddr, str::FromStr}; use async_trait::async_trait; use log::debug; +use serde::Serialize; use super::{IpAddress, LogicalHost}; use crate::executors::ExecutorError; @@ -36,20 +37,20 @@ impl std::fmt::Debug for dyn LoadBalancer { f.write_fmt(format_args!("LoadBalancer {}", self.get_ip())) } } -#[derive(Debug, PartialEq, Clone)] +#[derive(Debug, PartialEq, Clone, Serialize)] pub struct LoadBalancerService { pub backend_servers: Vec, pub listening_port: SocketAddr, pub health_check: Option, } -#[derive(Debug, PartialEq, Clone)] +#[derive(Debug, PartialEq, Clone, Serialize)] pub struct BackendServer { pub address: String, pub port: u16, } -#[derive(Debug, Clone, PartialEq)] +#[derive(Debug, Clone, PartialEq, Serialize)] pub enum HttpMethod { GET, POST, @@ -91,14 +92,14 @@ impl std::fmt::Display for HttpMethod { } } -#[derive(Debug, Clone, PartialEq)] +#[derive(Debug, Clone, PartialEq, Serialize)] pub enum HttpStatusCode { Success2xx, UserError4xx, ServerError5xx, } -#[derive(Debug, Clone, PartialEq)] +#[derive(Debug, Clone, PartialEq, Serialize)] pub enum HealthCheck { HTTP(String, HttpMethod, HttpStatusCode), TCP(Option), diff --git a/harmony/src/domain/topology/mod.rs b/harmony/src/domain/topology/mod.rs index 81dd36e..fa0b7fe 100644 --- a/harmony/src/domain/topology/mod.rs +++ b/harmony/src/domain/topology/mod.rs @@ -12,23 +12,40 @@ mod network; pub use host_binding::*; pub use http::*; pub use network::*; +use serde::Serialize; pub use tftp::*; -use std::{net::IpAddr, sync::Arc}; +use std::net::IpAddr; + +pub trait Topology { + fn name(&self) -> &str; +} pub type IpAddress = IpAddr; #[derive(Debug, Clone)] pub enum Url { LocalFolder(String), - Remote(url::Url), + Url(url::Url), +} + +impl Serialize for Url { + fn serialize(&self, serializer: S) -> Result + where + S: serde::Serializer, + { + match self { + Url::LocalFolder(path) => serializer.serialize_str(path), + Url::Url(url) => serializer.serialize_str(&url.as_str()), + } + } } impl std::fmt::Display for Url { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { Url::LocalFolder(path) => write!(f, "{}", path), - Url::Remote(url) => write!(f, "{}", url), + Url::Url(url) => write!(f, "{}", url), } } } @@ -42,7 +59,7 @@ impl std::fmt::Display for Url { /// - A control plane node /// /// This abstraction focuses on the logical role and services, independent of the physical hardware. -#[derive(Debug, Clone)] +#[derive(Debug, Clone, Serialize)] pub struct LogicalHost { /// The IP address of this logical host. 
pub ip: IpAddress, @@ -124,3 +141,23 @@ fn increment_ip(ip: IpAddress, increment: u32) -> Option { } } } + +#[cfg(test)] +mod tests { + use super::*; + use serde_json; + + #[test] + fn test_serialize_local_folder() { + let url = Url::LocalFolder("path/to/folder".to_string()); + let serialized = serde_json::to_string(&url).unwrap(); + assert_eq!(serialized, "\"path/to/folder\""); + } + + #[test] + fn test_serialize_url() { + let url = Url::Url(url::Url::parse("https://example.com").unwrap()); + let serialized = serde_json::to_string(&url).unwrap(); + assert_eq!(serialized, "\"https://example.com/\""); + } +} diff --git a/harmony/src/domain/topology/network.rs b/harmony/src/domain/topology/network.rs index f45c87d..13d1902 100644 --- a/harmony/src/domain/topology/network.rs +++ b/harmony/src/domain/topology/network.rs @@ -1,11 +1,12 @@ -use std::{net::Ipv4Addr, str::FromStr}; +use std::{net::Ipv4Addr, str::FromStr, sync::Arc}; use async_trait::async_trait; use harmony_types::net::MacAddress; +use serde::Serialize; use crate::executors::ExecutorError; -use super::{IpAddress, LogicalHost}; +use super::{IpAddress, LogicalHost, openshift::OpenshiftClient}; #[derive(Debug)] pub struct DHCPStaticEntry { @@ -40,9 +41,13 @@ impl std::fmt::Debug for dyn Firewall { pub struct NetworkDomain { pub name: String, } +#[async_trait] +pub trait OcK8sclient: Send + Sync + std::fmt::Debug { + async fn oc_client(&self) -> Result, kube::Error>; +} #[async_trait] -pub trait DhcpServer: Send + Sync { +pub trait DhcpServer: Send + Sync + std::fmt::Debug { async fn add_static_mapping(&self, entry: &DHCPStaticEntry) -> Result<(), ExecutorError>; async fn remove_static_mapping(&self, mac: &MacAddress) -> Result<(), ExecutorError>; async fn list_static_mappings(&self) -> Vec<(MacAddress, IpAddress)>; @@ -53,21 +58,11 @@ pub trait DhcpServer: Send + Sync { async fn commit_config(&self) -> Result<(), ExecutorError>; } -impl std::fmt::Debug for dyn DhcpServer { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - f.write_fmt(format_args!("DhcpServer {}", self.get_ip())) - } -} - #[async_trait] pub trait DnsServer: Send + Sync { async fn register_dhcp_leases(&self, register: bool) -> Result<(), ExecutorError>; async fn register_hosts(&self, hosts: Vec) -> Result<(), ExecutorError>; - fn remove_record( - &mut self, - name: &str, - record_type: DnsRecordType, - ) -> Result<(), ExecutorError>; + fn remove_record(&self, name: &str, record_type: DnsRecordType) -> Result<(), ExecutorError>; async fn list_records(&self) -> Vec; fn get_ip(&self) -> IpAddress; fn get_host(&self) -> LogicalHost; @@ -118,7 +113,7 @@ pub enum Action { Deny, } -#[derive(Clone, Debug, PartialEq, Eq)] +#[derive(Clone, Debug, PartialEq, Eq, Serialize)] pub enum DnsRecordType { A, AAAA, @@ -139,7 +134,7 @@ impl std::fmt::Display for DnsRecordType { } } -#[derive(Clone, Debug, PartialEq, Eq)] +#[derive(Clone, Debug, PartialEq, Eq, Serialize)] pub struct DnsRecord { pub host: String, pub domain: String, @@ -255,7 +250,7 @@ mod test { } fn remove_record( - &mut self, + &self, _name: &str, _record_type: DnsRecordType, ) -> Result<(), ExecutorError> { diff --git a/harmony/src/infra/hp_ilo/mod.rs b/harmony/src/infra/hp_ilo/mod.rs index 051f175..ff0e313 100644 --- a/harmony/src/infra/hp_ilo/mod.rs +++ b/harmony/src/infra/hp_ilo/mod.rs @@ -3,8 +3,9 @@ use crate::topology::IpAddress; use derive_new::new; use harmony_types::net::MacAddress; use log::info; +use serde::Serialize; -#[derive(new)] +#[derive(new, Serialize)] pub struct HPIlo { 
ip_address: Option, mac_address: Option, diff --git a/harmony/src/infra/intel_amt/mod.rs b/harmony/src/infra/intel_amt/mod.rs index 088afd5..7251729 100644 --- a/harmony/src/infra/intel_amt/mod.rs +++ b/harmony/src/infra/intel_amt/mod.rs @@ -2,8 +2,9 @@ use crate::hardware::ManagementInterface; use derive_new::new; use harmony_types::net::MacAddress; use log::info; +use serde::Serialize; -#[derive(new)] +#[derive(new, Serialize)] pub struct IntelAmtManagement { mac_address: MacAddress, } diff --git a/harmony/src/infra/opnsense/dns.rs b/harmony/src/infra/opnsense/dns.rs index 765cdb0..56f8136 100644 --- a/harmony/src/infra/opnsense/dns.rs +++ b/harmony/src/infra/opnsense/dns.rs @@ -30,7 +30,7 @@ impl DnsServer for OPNSenseFirewall { } fn remove_record( - &mut self, + &self, _name: &str, _record_type: crate::topology::DnsRecordType, ) -> Result<(), ExecutorError> { diff --git a/harmony/src/infra/opnsense/http.rs b/harmony/src/infra/opnsense/http.rs index 3c33bba..a06fe5b 100644 --- a/harmony/src/infra/opnsense/http.rs +++ b/harmony/src/infra/opnsense/http.rs @@ -22,7 +22,7 @@ impl HttpServer for OPNSenseFirewall { .await .map_err(|e| ExecutorError::UnexpectedError(e.to_string()))?; } - Url::Remote(_url) => todo!(), + Url::Url(_url) => todo!(), } Ok(()) } diff --git a/harmony/src/infra/opnsense/management.rs b/harmony/src/infra/opnsense/management.rs index db2bef4..a8dce18 100644 --- a/harmony/src/infra/opnsense/management.rs +++ b/harmony/src/infra/opnsense/management.rs @@ -1,7 +1,8 @@ use crate::hardware::ManagementInterface; use derive_new::new; +use serde::Serialize; -#[derive(new)] +#[derive(new, Serialize)] pub struct OPNSenseManagementInterface {} impl ManagementInterface for OPNSenseManagementInterface { diff --git a/harmony/src/infra/opnsense/tftp.rs b/harmony/src/infra/opnsense/tftp.rs index 3f1156e..6978150 100644 --- a/harmony/src/infra/opnsense/tftp.rs +++ b/harmony/src/infra/opnsense/tftp.rs @@ -22,7 +22,7 @@ impl TftpServer for OPNSenseFirewall { .await .map_err(|e| ExecutorError::UnexpectedError(e.to_string()))?; } - Url::Remote(url) => todo!("This url is not supported yet {url}"), + Url::Url(url) => todo!("This url is not supported yet {url}"), } Ok(()) } diff --git a/harmony/src/modules/dhcp.rs b/harmony/src/modules/dhcp.rs index 604d624..6ef0c3d 100644 --- a/harmony/src/modules/dhcp.rs +++ b/harmony/src/modules/dhcp.rs @@ -1,37 +1,32 @@ -use std::sync::Arc; - use async_trait::async_trait; use derive_new::new; use log::info; +use serde::Serialize; use crate::{ domain::{data::Version, interpret::InterpretStatus}, interpret::{Interpret, InterpretError, InterpretName, Outcome}, inventory::Inventory, - topology::{DHCPStaticEntry, HAClusterTopology, HostBinding, IpAddress}, + topology::{DHCPStaticEntry, DhcpServer, HostBinding, IpAddress, Topology}, }; use crate::domain::score::Score; -#[derive(Debug, new, Clone)] +#[derive(Debug, new, Clone, Serialize)] pub struct DhcpScore { pub host_binding: Vec, pub next_server: Option, pub boot_filename: Option, } -impl Score for DhcpScore { - fn create_interpret(&self) -> Box { +impl Score for DhcpScore { + fn create_interpret(&self) -> Box> { Box::new(DhcpInterpret::new(self.clone())) } fn name(&self) -> String { "DhcpScore".to_string() } - - fn clone_box(&self) -> Box { - Box::new(self.clone()) - } } // https://docs.opnsense.org/manual/dhcp.html#advanced-settings @@ -52,10 +47,10 @@ impl DhcpInterpret { status: InterpretStatus::QUEUED, } } - async fn add_static_entries( + async fn add_static_entries( &self, _inventory: &Inventory, - 
topology: &HAClusterTopology, + dhcp_server: &D, ) -> Result { let dhcp_entries: Vec = self .score @@ -78,7 +73,6 @@ impl DhcpInterpret { .collect(); info!("DHCPStaticEntry : {:?}", dhcp_entries); - let dhcp_server = Arc::new(topology.dhcp_server.clone()); info!("DHCP server : {:?}", dhcp_server); let number_new_entries = dhcp_entries.len(); @@ -96,14 +90,13 @@ impl DhcpInterpret { )) } - async fn set_pxe_options( + async fn set_pxe_options( &self, _inventory: &Inventory, - topology: &HAClusterTopology, + dhcp_server: &D, ) -> Result { let next_server_outcome = match self.score.next_server { Some(next_server) => { - let dhcp_server = Arc::new(topology.dhcp_server.clone()); dhcp_server.set_next_server(next_server).await?; Outcome::new( InterpretStatus::SUCCESS, @@ -115,7 +108,6 @@ impl DhcpInterpret { let boot_filename_outcome = match &self.score.boot_filename { Some(boot_filename) => { - let dhcp_server = Arc::new(topology.dhcp_server.clone()); dhcp_server.set_boot_filename(&boot_filename).await?; Outcome::new( InterpretStatus::SUCCESS, @@ -142,7 +134,7 @@ impl DhcpInterpret { } #[async_trait] -impl Interpret for DhcpInterpret { +impl Interpret for DhcpInterpret { fn get_name(&self) -> InterpretName { InterpretName::OPNSenseDHCP } @@ -162,15 +154,15 @@ impl Interpret for DhcpInterpret { async fn execute( &self, inventory: &Inventory, - topology: &HAClusterTopology, + topology: &T, ) -> Result { - info!("Executing {} on inventory {inventory:?}", self.get_name()); + info!("Executing DhcpInterpret on inventory {inventory:?}"); self.set_pxe_options(inventory, topology).await?; self.add_static_entries(inventory, topology).await?; - topology.dhcp_server.commit_config().await?; + topology.commit_config().await?; Ok(Outcome::new( InterpretStatus::SUCCESS, diff --git a/harmony/src/modules/dns.rs b/harmony/src/modules/dns.rs index 79c8870..e52e2f9 100644 --- a/harmony/src/modules/dns.rs +++ b/harmony/src/modules/dns.rs @@ -1,33 +1,30 @@ use async_trait::async_trait; use derive_new::new; use log::info; +use serde::Serialize; use crate::{ data::Version, interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome}, inventory::Inventory, score::Score, - topology::{DnsRecord, HAClusterTopology}, + topology::{DnsRecord, DnsServer, Topology}, }; -#[derive(Debug, new, Clone)] +#[derive(Debug, new, Clone, Serialize)] pub struct DnsScore { dns_entries: Vec, register_dhcp_leases: Option, } -impl Score for DnsScore { - fn create_interpret(&self) -> Box { +impl Score for DnsScore { + fn create_interpret(&self) -> Box> { Box::new(DnsInterpret::new(self.clone())) } fn name(&self) -> String { "DnsScore".to_string() } - - fn clone_box(&self) -> Box { - Box::new(self.clone()) - } } // https://docs.opnsense.org/manual/dhcp.html#advanced-settings @@ -48,12 +45,11 @@ impl DnsInterpret { status: InterpretStatus::QUEUED, } } - async fn serve_dhcp_entries( + async fn serve_dhcp_entries( &self, _inventory: &Inventory, - topology: &HAClusterTopology, + dns: &T, ) -> Result { - let dns = topology.dns_server.clone(); if let Some(register) = self.score.register_dhcp_leases { dns.register_dhcp_leases(register).await?; } @@ -64,15 +60,12 @@ impl DnsInterpret { )) } - async fn ensure_hosts_registered( + async fn ensure_hosts_registered( &self, - topology: &HAClusterTopology, + dns_server: &D, ) -> Result { let entries = &self.score.dns_entries; - topology - .dns_server - .ensure_hosts_registered(entries.clone()) - .await?; + dns_server.ensure_hosts_registered(entries.clone()).await?; Ok(Outcome::new( 
InterpretStatus::SUCCESS, @@ -85,7 +78,7 @@ impl DnsInterpret { } #[async_trait] -impl Interpret for DnsInterpret { +impl Interpret for DnsInterpret { fn get_name(&self) -> InterpretName { InterpretName::OPNSenseDns } @@ -105,14 +98,17 @@ impl Interpret for DnsInterpret { async fn execute( &self, inventory: &Inventory, - topology: &HAClusterTopology, + topology: &T, ) -> Result { - info!("Executing {} on inventory {inventory:?}", self.get_name()); + info!( + "Executing {} on inventory {inventory:?}", + >::get_name(self) + ); self.serve_dhcp_entries(inventory, topology).await?; - self.ensure_hosts_registered(&topology).await?; + self.ensure_hosts_registered(topology).await?; - topology.dns_server.commit_config().await?; + topology.commit_config().await?; Ok(Outcome::new( InterpretStatus::SUCCESS, diff --git a/harmony/src/modules/dummy.rs b/harmony/src/modules/dummy.rs index 0d2c327..2e63797 100644 --- a/harmony/src/modules/dummy.rs +++ b/harmony/src/modules/dummy.rs @@ -1,20 +1,21 @@ use async_trait::async_trait; +use serde::Serialize; use crate::{ data::Version, interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome}, inventory::Inventory, score::Score, - topology::HAClusterTopology, + topology::Topology, }; /// Score that always errors. This is only useful for development/testing purposes. It does nothing /// except returning Err(InterpretError) when interpreted. -#[derive(Debug, Clone)] +#[derive(Debug, Clone, Serialize)] pub struct ErrorScore; -impl Score for ErrorScore { - fn create_interpret(&self) -> Box { +impl Score for ErrorScore { + fn create_interpret(&self) -> Box> { Box::new(DummyInterpret { result: Err(InterpretError::new("Error Score default error".to_string())), status: InterpretStatus::QUEUED, @@ -24,19 +25,15 @@ impl Score for ErrorScore { fn name(&self) -> String { "ErrorScore".to_string() } - - fn clone_box(&self) -> Box { - Box::new(self.clone()) - } } /// Score that always succeeds. This is only useful for development/testing purposes. It does nothing /// except returning Ok(Outcome::success) when interpreted. -#[derive(Debug, Clone)] +#[derive(Debug, Clone, Serialize)] pub struct SuccessScore; -impl Score for SuccessScore { - fn create_interpret(&self) -> Box { +impl Score for SuccessScore { + fn create_interpret(&self) -> Box> { Box::new(DummyInterpret { result: Ok(Outcome::success("SuccessScore default success".to_string())), status: InterpretStatus::QUEUED, @@ -46,10 +43,6 @@ impl Score for SuccessScore { fn name(&self) -> String { "SuccessScore".to_string() } - - fn clone_box(&self) -> Box { - Box::new(self.clone()) - } } /// An interpret that only returns the result it is given when built. It does nothing else. Only @@ -61,7 +54,7 @@ struct DummyInterpret { } #[async_trait] -impl Interpret for DummyInterpret { +impl Interpret for DummyInterpret { fn get_name(&self) -> InterpretName { InterpretName::Dummy } @@ -81,7 +74,7 @@ impl Interpret for DummyInterpret { async fn execute( &self, _inventory: &Inventory, - _topology: &HAClusterTopology, + _topology: &T, ) -> Result { self.result.clone() } @@ -89,21 +82,17 @@ impl Interpret for DummyInterpret { /// Score that always panics. This is only useful for development/testing purposes. It does nothing /// except panic! 
with an error message when interpreted -#[derive(Debug, Clone)] +#[derive(Debug, Clone, Serialize)] pub struct PanicScore; -impl Score for PanicScore { - fn create_interpret(&self) -> Box { +impl Score for PanicScore { + fn create_interpret(&self) -> Box> { Box::new(PanicInterpret {}) } fn name(&self) -> String { "PanicScore".to_string() } - - fn clone_box(&self) -> Box { - Box::new(self.clone()) - } } /// An interpret that always panics when executed. Useful for development/testing purposes. @@ -111,7 +100,7 @@ impl Score for PanicScore { struct PanicInterpret; #[async_trait] -impl Interpret for PanicInterpret { +impl Interpret for PanicInterpret { fn get_name(&self) -> InterpretName { InterpretName::Panic } @@ -131,7 +120,7 @@ impl Interpret for PanicInterpret { async fn execute( &self, _inventory: &Inventory, - _topology: &HAClusterTopology, + _topology: &T, ) -> Result { panic!("Panic interpret always panics when executed") } diff --git a/harmony/src/modules/http.rs b/harmony/src/modules/http.rs index 51eed3b..c98ff8f 100644 --- a/harmony/src/modules/http.rs +++ b/harmony/src/modules/http.rs @@ -1,31 +1,28 @@ use async_trait::async_trait; use derive_new::new; +use serde::Serialize; use crate::{ data::{Id, Version}, interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome}, inventory::Inventory, score::Score, - topology::{HAClusterTopology, Url}, + topology::{HttpServer, Topology, Url}, }; -#[derive(Debug, new, Clone)] +#[derive(Debug, new, Clone, Serialize)] pub struct HttpScore { files_to_serve: Url, } -impl Score for HttpScore { - fn create_interpret(&self) -> Box { +impl Score for HttpScore { + fn create_interpret(&self) -> Box> { Box::new(HttpInterpret::new(self.clone())) } fn name(&self) -> String { "HttpScore".to_string() } - - fn clone_box(&self) -> Box { - Box::new(self.clone()) - } } #[derive(Debug, new, Clone)] @@ -34,13 +31,12 @@ pub struct HttpInterpret { } #[async_trait] -impl Interpret for HttpInterpret { +impl Interpret for HttpInterpret { async fn execute( &self, _inventory: &Inventory, - topology: &HAClusterTopology, + http_server: &T, ) -> Result { - let http_server = &topology.http_server; http_server.ensure_initialized().await?; // http_server.set_ip(topology.router.get_gateway()).await?; http_server.serve_files(&self.score.files_to_serve).await?; diff --git a/harmony/src/modules/k8s/deployment.rs b/harmony/src/modules/k8s/deployment.rs index de93e3a..cd2ad90 100644 --- a/harmony/src/modules/k8s/deployment.rs +++ b/harmony/src/modules/k8s/deployment.rs @@ -1,18 +1,23 @@ use k8s_openapi::api::apps::v1::Deployment; +use serde::Serialize; use serde_json::json; -use crate::{interpret::Interpret, score::Score}; +use crate::{ + interpret::Interpret, + score::Score, + topology::{OcK8sclient, Topology}, +}; use super::resource::{K8sResourceInterpret, K8sResourceScore}; -#[derive(Debug, Clone)] +#[derive(Debug, Clone, Serialize)] pub struct K8sDeploymentScore { pub name: String, pub image: String, } -impl Score for K8sDeploymentScore { - fn create_interpret(&self) -> Box { +impl Score for K8sDeploymentScore { + fn create_interpret(&self) -> Box> { let deployment: Deployment = serde_json::from_value(json!( { "metadata": { @@ -51,8 +56,4 @@ impl Score for K8sDeploymentScore { fn name(&self) -> String { "K8sDeploymentScore".to_string() } - - fn clone_box(&self) -> Box { - Box::new(self.clone()) - } } diff --git a/harmony/src/modules/k8s/resource.rs b/harmony/src/modules/k8s/resource.rs index cf45be8..505c4a4 100644 --- a/harmony/src/modules/k8s/resource.rs 
+++ b/harmony/src/modules/k8s/resource.rs @@ -1,17 +1,17 @@ use async_trait::async_trait; use k8s_openapi::NamespaceResourceScope; use kube::Resource; -use serde::de::DeserializeOwned; +use serde::{Serialize, de::DeserializeOwned}; use crate::{ data::{Id, Version}, interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome}, inventory::Inventory, score::Score, - topology::HAClusterTopology, + topology::{OcK8sclient, Topology}, }; -#[derive(Debug, Clone)] +#[derive(Debug, Clone, Serialize)] pub struct K8sResourceScore { pub resource: Vec, } @@ -34,21 +34,18 @@ impl< + 'static + Send + Clone, -> Score for K8sResourceScore + T: Topology, +> Score for K8sResourceScore where ::DynamicType: Default, { - fn create_interpret(&self) -> Box { + fn create_interpret(&self) -> Box> { todo!() } fn name(&self) -> String { "K8sResourceScore".to_string() } - - fn clone_box(&self) -> Box { - Box::new(self.clone()) - } } #[derive(Debug)] @@ -66,14 +63,15 @@ impl< + Default + Send + Sync, -> Interpret for K8sResourceInterpret + T: Topology + OcK8sclient, +> Interpret for K8sResourceInterpret where ::DynamicType: Default, { async fn execute( &self, _inventory: &Inventory, - topology: &HAClusterTopology, + topology: &T, ) -> Result { topology .oc_client() diff --git a/harmony/src/modules/lamp.rs b/harmony/src/modules/lamp.rs new file mode 100644 index 0000000..ef7227c --- /dev/null +++ b/harmony/src/modules/lamp.rs @@ -0,0 +1,87 @@ +use std::path::{Path, PathBuf}; + +use async_trait::async_trait; +use serde::Serialize; + +use crate::{ + data::{Id, Version}, + interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome}, + inventory::Inventory, + modules::k8s::deployment::K8sDeploymentScore, + score::Score, + topology::{OcK8sclient, Topology, Url}, +}; + +#[derive(Debug, Clone, Serialize)] +pub struct LAMPScore { + pub name: String, + pub domain: Url, + pub config: LAMPConfig, + pub php_version: Version, +} + +#[derive(Debug, Clone, Serialize)] +pub struct LAMPConfig { + pub project_root: PathBuf, + pub ssl_enabled: bool, +} + +impl Default for LAMPConfig { + fn default() -> Self { + LAMPConfig { + project_root: Path::new("./src").to_path_buf(), + ssl_enabled: true, + } + } +} + +impl Score for LAMPScore { + fn create_interpret(&self) -> Box> { + todo!() + } + + fn name(&self) -> String { + "LampScore".to_string() + } +} + +#[derive(Debug)] +pub struct LAMPInterpret { + score: LAMPScore, +} + +#[async_trait] +impl Interpret for LAMPInterpret { + async fn execute( + &self, + inventory: &Inventory, + topology: &T, + ) -> Result { + let deployment_score = K8sDeploymentScore { + name: >::name(&self.score), + image: "local_image".to_string(), + }; + + deployment_score + .create_interpret() + .execute(inventory, topology) + .await?; + todo!() + } + + fn get_name(&self) -> InterpretName { + todo!() + } + + fn get_version(&self) -> Version { + todo!() + } + + fn get_status(&self) -> InterpretStatus { + todo!() + } + + fn get_children(&self) -> Vec { + todo!() + } +} diff --git a/harmony/src/modules/load_balancer.rs b/harmony/src/modules/load_balancer.rs index 75318ca..cd78f84 100644 --- a/harmony/src/modules/load_balancer.rs +++ b/harmony/src/modules/load_balancer.rs @@ -1,15 +1,16 @@ use async_trait::async_trait; use log::info; +use serde::Serialize; use crate::{ data::{Id, Version}, interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome}, inventory::Inventory, score::Score, - topology::{HAClusterTopology, LoadBalancerService}, + topology::{LoadBalancer, 
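For reference, a hedged sketch of filling in the new `LAMPScore`, written against the types above as if inside the harmony crate. The project name, domain, and path are made up for illustration, and `php_version` is taken as a parameter because `Version` construction is not shown in this patch:

```rust
use std::path::Path;

use crate::{
    data::Version,
    modules::lamp::{LAMPConfig, LAMPScore},
    topology::Url,
};

// `php_version` is passed in because constructing a `Version` is out of scope here.
fn demo_lamp_score(php_version: Version) -> LAMPScore {
    LAMPScore {
        name: "demo-lamp".to_string(),
        // Hypothetical domain, purely for illustration.
        domain: Url::Url(url::Url::parse("https://demo-lamp.example.test").unwrap()),
        config: LAMPConfig {
            project_root: Path::new("./app").to_path_buf(),
            // Keep the remaining defaults (ssl_enabled: true).
            ..LAMPConfig::default()
        },
        php_version,
    }
}
```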
LoadBalancerService, Topology}, }; -#[derive(Debug, Clone)] +#[derive(Debug, Clone, Serialize)] pub struct LoadBalancerScore { pub public_services: Vec, pub private_services: Vec, @@ -19,18 +20,14 @@ pub struct LoadBalancerScore { // uuid? } -impl Score for LoadBalancerScore { - fn create_interpret(&self) -> Box { +impl Score for LoadBalancerScore { + fn create_interpret(&self) -> Box> { Box::new(LoadBalancerInterpret::new(self.clone())) } fn name(&self) -> String { "LoadBalancerScore".to_string() } - - fn clone_box(&self) -> Box { - Box::new(self.clone()) - } } #[derive(Debug)] @@ -51,37 +48,32 @@ impl LoadBalancerInterpret { } #[async_trait] -impl Interpret for LoadBalancerInterpret { +impl Interpret for LoadBalancerInterpret { async fn execute( &self, _inventory: &Inventory, - topology: &HAClusterTopology, + load_balancer: &T, ) -> Result { info!( "Making sure Load Balancer is initialized: {:?}", - topology.load_balancer.ensure_initialized().await? + load_balancer.ensure_initialized().await? ); for service in self.score.public_services.iter() { info!("Ensuring service exists {service:?}"); - topology - .load_balancer - .ensure_service_exists(service) - .await?; + + load_balancer.ensure_service_exists(service).await?; } for service in self.score.private_services.iter() { info!("Ensuring private service exists {service:?}"); - topology - .load_balancer - .ensure_service_exists(service) - .await?; + load_balancer.ensure_service_exists(service).await?; } info!("Applying load balancer configuration"); - topology.load_balancer.commit_config().await?; + load_balancer.commit_config().await?; info!("Making a full reload and restart of haproxy"); - topology.load_balancer.reload_restart().await?; + load_balancer.reload_restart().await?; Ok(Outcome::success(format!( "Load balancer successfully configured {} services", self.score.public_services.len() + self.score.private_services.len() diff --git a/harmony/src/modules/mod.rs b/harmony/src/modules/mod.rs index c181375..8456867 100644 --- a/harmony/src/modules/mod.rs +++ b/harmony/src/modules/mod.rs @@ -3,6 +3,7 @@ pub mod dns; pub mod dummy; pub mod http; pub mod k8s; +pub mod lamp; pub mod load_balancer; pub mod okd; pub mod opnsense; diff --git a/harmony/src/modules/okd/bootstrap_dhcp.rs b/harmony/src/modules/okd/bootstrap_dhcp.rs index 4f5c6ee..2e3dd6f 100644 --- a/harmony/src/modules/okd/bootstrap_dhcp.rs +++ b/harmony/src/modules/okd/bootstrap_dhcp.rs @@ -1,12 +1,14 @@ +use serde::Serialize; + use crate::{ interpret::Interpret, inventory::Inventory, modules::dhcp::DhcpScore, score::Score, - topology::{HAClusterTopology, HostBinding}, + topology::{DhcpServer, HAClusterTopology, HostBinding, Topology}, }; -#[derive(Debug, Clone)] +#[derive(Debug, Clone, Serialize)] pub struct OKDBootstrapDhcpScore { dhcp_score: DhcpScore, } @@ -46,16 +48,12 @@ impl OKDBootstrapDhcpScore { } } -impl Score for OKDBootstrapDhcpScore { - fn create_interpret(&self) -> Box { +impl Score for OKDBootstrapDhcpScore { + fn create_interpret(&self) -> Box> { self.dhcp_score.create_interpret() } fn name(&self) -> String { "OKDBootstrapDhcpScore".to_string() } - - fn clone_box(&self) -> Box { - Box::new(self.clone()) - } } diff --git a/harmony/src/modules/okd/bootstrap_load_balancer.rs b/harmony/src/modules/okd/bootstrap_load_balancer.rs index 4c29026..d6cd2f3 100644 --- a/harmony/src/modules/okd/bootstrap_load_balancer.rs +++ b/harmony/src/modules/okd/bootstrap_load_balancer.rs @@ -1,16 +1,18 @@ use std::net::SocketAddr; +use serde::Serialize; + use crate::{ 
interpret::Interpret, modules::load_balancer::LoadBalancerScore, score::Score, topology::{ - BackendServer, HAClusterTopology, HealthCheck, HttpMethod, HttpStatusCode, - LoadBalancerService, + BackendServer, HAClusterTopology, HealthCheck, HttpMethod, HttpStatusCode, LoadBalancer, + LoadBalancerService, Topology, }, }; -#[derive(Debug, Clone)] +#[derive(Debug, Clone, Serialize)] pub struct OKDBootstrapLoadBalancerScore { load_balancer_score: LoadBalancerScore, } @@ -69,16 +71,12 @@ impl OKDBootstrapLoadBalancerScore { } } -impl Score for OKDBootstrapLoadBalancerScore { - fn create_interpret(&self) -> Box { +impl Score for OKDBootstrapLoadBalancerScore { + fn create_interpret(&self) -> Box> { self.load_balancer_score.create_interpret() } fn name(&self) -> String { "OKDBootstrapLoadBalancerScore".to_string() } - - fn clone_box(&self) -> Box { - Box::new(self.clone()) - } } diff --git a/harmony/src/modules/okd/dhcp.rs b/harmony/src/modules/okd/dhcp.rs index c976f8d..a060b31 100644 --- a/harmony/src/modules/okd/dhcp.rs +++ b/harmony/src/modules/okd/dhcp.rs @@ -1,12 +1,14 @@ +use serde::Serialize; + use crate::{ interpret::Interpret, inventory::Inventory, modules::dhcp::DhcpScore, score::Score, - topology::{HAClusterTopology, HostBinding}, + topology::{DhcpServer, HAClusterTopology, HostBinding, Topology}, }; -#[derive(Debug, Clone)] +#[derive(Debug, Clone, Serialize)] pub struct OKDDhcpScore { dhcp_score: DhcpScore, } @@ -38,16 +40,12 @@ impl OKDDhcpScore { } } -impl Score for OKDDhcpScore { - fn create_interpret(&self) -> Box { +impl Score for OKDDhcpScore { + fn create_interpret(&self) -> Box> { self.dhcp_score.create_interpret() } fn name(&self) -> String { "OKDDhcpScore".to_string() } - - fn clone_box(&self) -> Box { - Box::new(self.clone()) - } } diff --git a/harmony/src/modules/okd/dns.rs b/harmony/src/modules/okd/dns.rs index f4e7f2c..6f99cb8 100644 --- a/harmony/src/modules/okd/dns.rs +++ b/harmony/src/modules/okd/dns.rs @@ -1,11 +1,13 @@ +use serde::Serialize; + use crate::{ interpret::Interpret, modules::dns::DnsScore, score::Score, - topology::{DnsRecord, DnsRecordType, HAClusterTopology}, + topology::{DnsRecord, DnsRecordType, DnsServer, HAClusterTopology, Topology}, }; -#[derive(Debug, Clone)] +#[derive(Debug, Clone, Serialize)] pub struct OKDDnsScore { dns_score: DnsScore, } @@ -40,16 +42,12 @@ impl OKDDnsScore { } } -impl Score for OKDDnsScore { - fn create_interpret(&self) -> Box { +impl Score for OKDDnsScore { + fn create_interpret(&self) -> Box> { self.dns_score.create_interpret() } fn name(&self) -> String { "OKDDnsScore".to_string() } - - fn clone_box(&self) -> Box { - Box::new(self.clone()) - } } diff --git a/harmony/src/modules/okd/load_balancer.rs b/harmony/src/modules/okd/load_balancer.rs index 38a5d04..0345d46 100644 --- a/harmony/src/modules/okd/load_balancer.rs +++ b/harmony/src/modules/okd/load_balancer.rs @@ -1,16 +1,24 @@ use std::net::SocketAddr; +use serde::Serialize; + use crate::{ interpret::Interpret, modules::load_balancer::LoadBalancerScore, score::Score, topology::{ - BackendServer, HAClusterTopology, HealthCheck, HttpMethod, HttpStatusCode, - LoadBalancerService, + BackendServer, HAClusterTopology, HealthCheck, HttpMethod, HttpStatusCode, LoadBalancer, + LoadBalancerService, Topology, }, }; -#[derive(Debug, Clone)] +impl std::fmt::Display for OKDLoadBalancerScore { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + todo!() + } +} + +#[derive(Debug, Clone, Serialize)] pub struct OKDLoadBalancerScore { load_balancer_score: 
LoadBalancerScore, } @@ -80,16 +88,12 @@ impl OKDLoadBalancerScore { } } -impl Score for OKDLoadBalancerScore { - fn create_interpret(&self) -> Box { +impl Score for OKDLoadBalancerScore { + fn create_interpret(&self) -> Box> { self.load_balancer_score.create_interpret() } fn name(&self) -> String { "OKDLoadBalancerScore".to_string() } - - fn clone_box(&self) -> Box { - Box::new(self.clone()) - } } diff --git a/harmony/src/modules/okd/upgrade.rs b/harmony/src/modules/okd/upgrade.rs index a4fc6b1..a215c00 100644 --- a/harmony/src/modules/okd/upgrade.rs +++ b/harmony/src/modules/okd/upgrade.rs @@ -1,4 +1,4 @@ -use crate::{data::Version, score::Score}; +use crate::data::Version; #[derive(Debug, Clone)] pub struct OKDUpgradeScore { diff --git a/harmony/src/modules/opnsense/mod.rs b/harmony/src/modules/opnsense/mod.rs index 763195d..28b52cf 100644 --- a/harmony/src/modules/opnsense/mod.rs +++ b/harmony/src/modules/opnsense/mod.rs @@ -1,6 +1,4 @@ - mod shell; mod upgrade; pub use shell::*; pub use upgrade::*; - diff --git a/harmony/src/modules/opnsense/shell.rs b/harmony/src/modules/opnsense/shell.rs index 00fd131..a35a43c 100644 --- a/harmony/src/modules/opnsense/shell.rs +++ b/harmony/src/modules/opnsense/shell.rs @@ -1,6 +1,7 @@ use std::sync::Arc; use async_trait::async_trait; +use serde::Serialize; use tokio::sync::RwLock; use crate::{ @@ -8,17 +9,34 @@ use crate::{ interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome}, inventory::Inventory, score::Score, - topology::HAClusterTopology, + topology::Topology, }; #[derive(Debug, Clone)] pub struct OPNsenseShellCommandScore { + // TODO I am pretty sure we should not hold a direct reference to the + // opnsense_config::Config here. + // This causes a problem with serialization but also could cause many more problems as this + // is mixing concerns of configuration (which is the Responsibility of Scores to define) + // and state/execution which is the responsibility of interprets via topologies to manage + // + // I feel like a better solution would be for this Score/Interpret to require + // Topology + OPNSenseShell trait bindings pub opnsense: Arc>, pub command: String, } -impl Score for OPNsenseShellCommandScore { - fn create_interpret(&self) -> Box { +impl Serialize for OPNsenseShellCommandScore { + fn serialize(&self, serializer: S) -> Result + where + S: serde::Serializer, + { + todo!("See comment about moving opnsense_config::Config outside the score") + } +} + +impl Score for OPNsenseShellCommandScore { + fn create_interpret(&self) -> Box> { Box::new(OPNsenseShellInterpret { status: InterpretStatus::QUEUED, score: self.clone(), @@ -28,10 +46,6 @@ impl Score for OPNsenseShellCommandScore { fn name(&self) -> String { "OPNSenseShellCommandScore".to_string() } - - fn clone_box(&self) -> Box { - Box::new(self.clone()) - } } #[derive(Debug)] @@ -41,11 +55,11 @@ pub struct OPNsenseShellInterpret { } #[async_trait] -impl Interpret for OPNsenseShellInterpret { +impl Interpret for OPNsenseShellInterpret { async fn execute( &self, _inventory: &Inventory, - _topology: &HAClusterTopology, + _topology: &T, ) -> Result { let output = self .score diff --git a/harmony/src/modules/opnsense/upgrade.rs b/harmony/src/modules/opnsense/upgrade.rs index 6b0637d..45adf12 100644 --- a/harmony/src/modules/opnsense/upgrade.rs +++ b/harmony/src/modules/opnsense/upgrade.rs @@ -1,10 +1,12 @@ use std::sync::Arc; +use serde::Serialize; use tokio::sync::RwLock; use crate::{ interpret::{Interpret, InterpretStatus}, score::Score, + 
    topology::Topology,
 };

 use super::{OPNsenseShellCommandScore, OPNsenseShellInterpret};
@@ -14,8 +16,17 @@ pub struct OPNSenseLaunchUpgrade {
     pub opnsense: Arc>,
 }

-impl Score for OPNSenseLaunchUpgrade {
-    fn create_interpret(&self) -> Box {
+impl Serialize for OPNSenseLaunchUpgrade {
+    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
+    where
+        S: serde::Serializer,
+    {
+        todo!("See comment in OPNSenseShellCommandScore and apply the same idea here")
+    }
+}
+
+impl Score for OPNSenseLaunchUpgrade {
+    fn create_interpret(&self) -> Box> {
         let score = OPNsenseShellCommandScore {
             opnsense: self.opnsense.clone(),
             command: "/usr/local/opnsense/scripts/firmware/update.sh".to_string(),
@@ -30,8 +41,4 @@
     fn name(&self) -> String {
         "OPNSenseLaunchUpgrade".to_string()
     }
-
-    fn clone_box(&self) -> Box {
-        Box::new(self.clone())
-    }
 }
diff --git a/harmony/src/modules/tftp.rs b/harmony/src/modules/tftp.rs
index a7c2167..357e480 100644
--- a/harmony/src/modules/tftp.rs
+++ b/harmony/src/modules/tftp.rs
@@ -1,31 +1,28 @@
 use async_trait::async_trait;
 use derive_new::new;
+use serde::Serialize;

 use crate::{
     data::{Id, Version},
     interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
     inventory::Inventory,
     score::Score,
-    topology::{HAClusterTopology, Url},
+    topology::{Router, TftpServer, Topology, Url},
 };

-#[derive(Debug, new, Clone)]
+#[derive(Debug, new, Clone, Serialize)]
 pub struct TftpScore {
     files_to_serve: Url,
 }

-impl Score for TftpScore {
-    fn create_interpret(&self) -> Box {
+impl Score for TftpScore {
+    fn create_interpret(&self) -> Box> {
         Box::new(TftpInterpret::new(self.clone()))
     }

     fn name(&self) -> String {
         "TftpScore".to_string()
     }
-
-    fn clone_box(&self) -> Box {
-        Box::new(self.clone())
-    }
 }

 #[derive(Debug, new, Clone)]
@@ -34,18 +31,17 @@ pub struct TftpInterpret {
 }

 #[async_trait]
-impl Interpret for TftpInterpret {
+impl Interpret for TftpInterpret {
     async fn execute(
         &self,
         _inventory: &Inventory,
-        topology: &HAClusterTopology,
+        topology: &T,
     ) -> Result {
-        let tftp_server = &topology.tftp_server;
-        tftp_server.ensure_initialized().await?;
-        tftp_server.set_ip(topology.router.get_gateway()).await?;
-        tftp_server.serve_files(&self.score.files_to_serve).await?;
-        tftp_server.commit_config().await?;
-        tftp_server.reload_restart().await?;
+        topology.ensure_initialized().await?;
+        topology.set_ip(topology.get_gateway()).await?;
+        topology.serve_files(&self.score.files_to_serve).await?;
+        topology.commit_config().await?;
+        topology.reload_restart().await?;
         Ok(Outcome::success(format!(
             "TFTP Server running and serving files from {}",
             self.score.files_to_serve
diff --git a/harmony_tui/src/lib.rs b/harmony_tui/src/lib.rs
index 7ee0301..11208f0 100644
--- a/harmony_tui/src/lib.rs
+++ b/harmony_tui/src/lib.rs
@@ -1,4 +1,3 @@
-mod ratatui_utils;
 mod widget;

 use log::{debug, error, info};
@@ -10,12 +9,12 @@ use widget::{help::HelpWidget, score::ScoreListWidget};
 use std::{panic, sync::Arc, time::Duration};

 use crossterm::event::{Event, EventStream, KeyCode, KeyEventKind};
-use harmony::{maestro::Maestro, score::Score};
+use harmony::{maestro::Maestro, score::Score, topology::Topology};
 use ratatui::{
     self, Frame,
     layout::{Constraint, Layout, Position},
     style::{Color, Style},
-    widgets::{Block, Borders, ListItem},
+    widgets::{Block, Borders},
 };

 pub mod tui {
@@ -30,36 +29,47 @@ pub mod tui {
 ///
 /// # Example
 ///
-/// ```rust
-/// use harmony;
-/// use harmony_tui::init;
-///
-/// #[harmony::main]
-/// pub async fn main(maestro: harmony::Maestro) {
-///     maestro.register(DeploymentScore::new("nginx-test", "nginx"));
-///     maestro.register(OKDLoadBalancerScore::new(&maestro.inventory, &maestro.topology));
-///     // Register other scores as needed
-///
-///     init(maestro).await.unwrap();
+/// ```rust,no_run
+/// use harmony::{
+///     inventory::Inventory,
+///     maestro::Maestro,
+///     modules::dummy::{ErrorScore, PanicScore, SuccessScore},
+///     topology::HAClusterTopology,
+/// };
+///
+/// #[tokio::main]
+/// async fn main() {
+///     let inventory = Inventory::autoload();
+///     let topology = HAClusterTopology::autoload();
+///     let mut maestro = Maestro::new(inventory, topology);
+///
+///     maestro.register_all(vec![
+///         Box::new(SuccessScore {}),
+///         Box::new(ErrorScore {}),
+///         Box::new(PanicScore {}),
+///     ]);
+///     harmony_tui::init(maestro).await.unwrap();
 /// }
 /// ```
-pub async fn init(maestro: Maestro) -> Result<(), Box> {
+pub async fn init(
+    maestro: Maestro,
+) -> Result<(), Box> {
     HarmonyTUI::new(maestro).init().await
 }

-pub struct HarmonyTUI {
-    score: ScoreListWidget,
+pub struct HarmonyTUI {
+    score: ScoreListWidget,
     should_quit: bool,
     tui_state: TuiWidgetState,
 }

 #[derive(Debug)]
-enum HarmonyTuiEvent {
-    LaunchScore(ScoreItem),
+enum HarmonyTuiEvent {
+    LaunchScore(Box>),
 }

-impl HarmonyTUI {
-    pub fn new(maestro: Maestro) -> Self {
+impl HarmonyTUI {
+    pub fn new(maestro: Maestro) -> Self {
         let maestro = Arc::new(maestro);
         let (_handle, sender) = Self::start_channel(maestro.clone());
         let score = ScoreListWidget::new(Self::scores_list(&maestro), sender);
@@ -72,9 +82,12 @@ impl HarmonyTUI {
     }

     fn start_channel(
-        maestro: Arc,
-    ) -> (tokio::task::JoinHandle<()>, mpsc::Sender) {
-        let (sender, mut receiver) = mpsc::channel::(32);
+        maestro: Arc>,
+    ) -> (
+        tokio::task::JoinHandle<()>,
+        mpsc::Sender>,
+    ) {
+        let (sender, mut receiver) = mpsc::channel::>(32);
         let handle = tokio::spawn(async move {
             info!("Starting message channel receiver loop");
             while let Some(event) = receiver.recv().await {
@@ -84,8 +97,7 @@ impl HarmonyTUI {

                         let maestro = maestro.clone();
                         let joinhandle_result =
-                            tokio::spawn(async move { maestro.interpret(score_item.0).await })
-                                .await;
+                            tokio::spawn(async move { maestro.interpret(score_item).await }).await;

                         match joinhandle_result {
                             Ok(interpretation_result) => match interpretation_result {
@@ -163,13 +175,10 @@ impl HarmonyTUI {
         frame.render_widget(tui_logger, output_area)
     }

-    fn scores_list(maestro: &Maestro) -> Vec {
+    fn scores_list(maestro: &Maestro) -> Vec>> {
         let scores = maestro.scores();
         let scores_read = scores.read().expect("Should be able to read scores");
-        scores_read
-            .iter()
-            .map(|s| ScoreItem(s.clone_box()))
-            .collect()
+        scores_read.iter().map(|s| s.clone_box()).collect()
     }

     async fn handle_event(&mut self, event: &Event) {
@@ -189,18 +198,3 @@ impl HarmonyTUI {
         }
     }
 }
-
-#[derive(Debug)]
-struct ScoreItem(Box);
-
-impl ScoreItem {
-    pub fn clone(&self) -> Self {
-        Self(self.0.clone_box())
-    }
-}
-
-impl Into> for &ScoreItem {
-    fn into(self) -> ListItem<'static> {
-        ListItem::new(self.0.name())
-    }
-}
diff --git a/harmony_tui/src/ratatui_utils.rs b/harmony_tui/src/ratatui_utils.rs
deleted file mode 100644
index 84b8659..0000000
--- a/harmony_tui/src/ratatui_utils.rs
+++ /dev/null
@@ -1,22 +0,0 @@
-use ratatui::layout::{Constraint, Flex, Layout, Rect};
-
-/// Centers a [`Rect`] within another [`Rect`] using the provided [`Constraint`]s.
-///
-/// # Examples
-///
-/// ```rust
-/// use ratatui::layout::{Constraint, Rect};
-///
-/// let area = Rect::new(0, 0, 100, 100);
-/// let horizontal = Constraint::Percentage(20);
-/// let vertical = Constraint::Percentage(30);
-///
-/// let centered = center(area, horizontal, vertical);
-/// ```
-pub(crate) fn center(area: Rect, horizontal: Constraint, vertical: Constraint) -> Rect {
-    let [area] = Layout::horizontal([horizontal])
-        .flex(Flex::Center)
-        .areas(area);
-    let [area] = Layout::vertical([vertical]).flex(Flex::Center).areas(area);
-    area
-}
diff --git a/harmony_tui/src/widget/score.rs b/harmony_tui/src/widget/score.rs
index af992f7..b0d2c27 100644
--- a/harmony_tui/src/widget/score.rs
+++ b/harmony_tui/src/widget/score.rs
@@ -1,16 +1,17 @@
 use std::sync::{Arc, RwLock};

 use crossterm::event::{Event, KeyCode, KeyEventKind};
+use harmony::{score::Score, topology::Topology};
 use log::{info, warn};
 use ratatui::{
     Frame,
     layout::Rect,
     style::{Style, Stylize},
-    widgets::{List, ListState, StatefulWidget, Widget},
+    widgets::{List, ListItem, ListState, StatefulWidget, Widget},
 };
 use tokio::sync::mpsc;

-use crate::{HarmonyTuiEvent, ScoreItem};
+use crate::HarmonyTuiEvent;

 #[derive(Debug)]
 enum ExecutionState {
@@ -20,22 +21,25 @@ enum ExecutionState {
 }

 #[derive(Debug)]
-struct Execution {
+struct Execution {
     state: ExecutionState,
-    score: ScoreItem,
+    score: Box>,
 }

 #[derive(Debug)]
-pub(crate) struct ScoreListWidget {
+pub(crate) struct ScoreListWidget {
     list_state: Arc>,
-    scores: Vec,
-    execution: Option,
-    execution_history: Vec,
-    sender: mpsc::Sender,
+    scores: Vec>>,
+    execution: Option>,
+    execution_history: Vec>,
+    sender: mpsc::Sender>,
 }

-impl ScoreListWidget {
-    pub(crate) fn new(scores: Vec, sender: mpsc::Sender) -> Self {
+impl ScoreListWidget {
+    pub(crate) fn new(
+        scores: Vec>>,
+        sender: mpsc::Sender>,
+    ) -> Self {
         let mut list_state = ListState::default();
         list_state.select_first();
         let list_state = Arc::new(RwLock::new(list_state));
@@ -58,9 +62,9 @@ impl ScoreListWidget {

             self.execution = Some(Execution {
                 state: ExecutionState::INITIATED,
-                score: score.clone(),
+                score: score.clone_box(),
             });
-            info!("{:#?}\n\nConfirm Execution (Press y/n)", score.0);
+            info!("{:#?}\n\nConfirm Execution (Press y/n)", score);
         } else {
             warn!("No Score selected, nothing to launch");
         }
@@ -94,7 +98,7 @@ impl ScoreListWidget {
             execution.state = ExecutionState::RUNNING;
             info!("Launch execution {:?}", execution);
             self.sender
-                .send(HarmonyTuiEvent::LaunchScore(execution.score.clone()))
+                .send(HarmonyTuiEvent::LaunchScore(execution.score.clone_box()))
                 .await
                 .expect("Should be able to send message");
         }
@@ -123,16 +127,21 @@ impl ScoreListWidget {
     }
 }

-impl Widget for &ScoreListWidget {
+impl Widget for &ScoreListWidget {
     fn render(self, area: ratatui::prelude::Rect, buf: &mut ratatui::prelude::Buffer)
     where
         Self: Sized,
     {
         let mut list_state = self.list_state.write().unwrap();

-        let list = List::new(&self.scores)
+        let scores_items: Vec> = self.scores.iter().map(score_to_list_item).collect();
+        let list = List::new(scores_items)
             .highlight_style(Style::new().bold().italic())
             .highlight_symbol("🠊 ");
         StatefulWidget::render(list, area, buf, &mut list_state)
     }
 }
+
+fn score_to_list_item<'a, T: Topology>(score: &'a Box>) -> ListItem<'a> {
+    ListItem::new(score.name())
+}
diff --git a/harmony_types/Cargo.toml b/harmony_types/Cargo.toml
index 9b2f97a..0b8c068 100644
--- a/harmony_types/Cargo.toml
+++ b/harmony_types/Cargo.toml
@@ -4,3 +4,6 @@ edition = "2024"
 version.workspace = true
 readme.workspace = true
 license.workspace = true
+
+[dependencies]
+serde = { version = "1.0.209", features = ["derive"] }
diff --git a/harmony_types/src/lib.rs b/harmony_types/src/lib.rs
index 71dbbaf..9f4930d 100644
--- a/harmony_types/src/lib.rs
+++ b/harmony_types/src/lib.rs
@@ -1,5 +1,7 @@
 pub mod net {
-    #[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
+    use serde::Serialize;
+
+    #[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, Serialize)]
     pub struct MacAddress(pub [u8; 6]);

     impl MacAddress {
diff --git a/opnsense-config-xml/src/data/dhcpd.rs b/opnsense-config-xml/src/data/dhcpd.rs
index 6e694b5..5b06610 100644
--- a/opnsense-config-xml/src/data/dhcpd.rs
+++ b/opnsense-config-xml/src/data/dhcpd.rs
@@ -4,13 +4,6 @@ use yaserde::MaybeString;

 use super::opnsense::{NumberOption, Range, StaticMap};

-// #[derive(Default, PartialEq, Debug, YaSerialize, YaDeserialize)]
-// #[yaserde(rename = "dhcpd")]
-// pub struct Dhcpd {
-//     #[yaserde(rename = "lan")]
-//     pub lan: DhcpInterface,
-// }
-
 #[derive(Default, PartialEq, Debug, YaSerialize, YaDeserialize)]
 pub struct DhcpInterface {
     pub enable: Option,
@@ -42,77 +35,3 @@ pub struct DhcpRange {
     #[yaserde(rename = "to")]
     pub to: String,
 }
-
-#[cfg(test)]
-mod test {
-    use crate::xml_utils::to_xml_str;
-
-    use pretty_assertions::assert_eq;
-
-    #[test]
-    fn dhcpd_should_deserialize_serialize_identical() {
-        let dhcpd: Dhcpd =
-            yaserde::de::from_str(SERIALIZED_DHCPD).expect("Deserialize Dhcpd failed");
-
-        assert_eq!(
-            to_xml_str(&dhcpd).expect("Serialize Dhcpd failed"),
-            SERIALIZED_DHCPD
-        );
-    }
-
-    const SERIALIZED_DHCPD: &str = "
-
-
-      1
-      192.168.20.1
-      somedomain.yourlocal.mcd
-      hmac-md5
-
-
-
-
-        192.168.20.50
-        192.168.20.200
-
-
-      192.168.20.1
-
-
-        55:55:55:55:55:1c
-        192.168.20.160
-        somehost983
-        someservire8
-
-
-
-
-
-        55:55:55:55:55:1c
-        192.168.20.155
-        somehost893
-
-
-
-
-
-        55:55:55:55:55:1c
-        192.168.20.165
-        somehost893
-
-
-
-
-
-
-        55:55:55:55:55:1c
-        192.168.20.50
-        hostswitch2
-        switch-2 (bottom)
-
-
-
-
-
-
-\n";
-}
diff --git a/opnsense-config-xml/src/data/interfaces.rs b/opnsense-config-xml/src/data/interfaces.rs
index 4e518c7..e0a84d3 100644
--- a/opnsense-config-xml/src/data/interfaces.rs
+++ b/opnsense-config-xml/src/data/interfaces.rs
@@ -132,22 +132,18 @@ mod test {
-
-
-
-
diff --git a/opnsense-config/src/config/config.rs b/opnsense-config/src/config/config.rs
index 3f71d55..10dab61 100644
--- a/opnsense-config/src/config/config.rs
+++ b/opnsense-config/src/config/config.rs
@@ -11,6 +11,7 @@ use crate::{
 use log::{debug, info, trace, warn};
 use opnsense_config_xml::OPNsense;
 use russh::client;
+use serde::Serialize;

 use super::{ConfigManager, OPNsenseShell};

@@ -21,6 +22,15 @@ pub struct Config {
     shell: Arc,
 }

+impl Serialize for Config {
+    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
+    where
+        S: serde::Serializer,
+    {
+        todo!()
+    }
+}
+
 impl Config {
     pub async fn new(
         repository: Arc,
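// --- Editor's sketch (not part of the patch) ---------------------------------
// The new `Serialize` impls introduced above are stubbed with `todo!()`. A
// minimal way to make them total without serializing live resources (the SSH
// shell or config repository) is to emit a small static descriptor instead.
// The type and field names below (`ConfigDescriptor`, `description`) are
// illustrative assumptions, not part of the crate's API.
use serde::ser::SerializeStruct;

struct ConfigDescriptor;

impl serde::Serialize for ConfigDescriptor {
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: serde::Serializer,
    {
        // Serialize a fixed summary rather than the runtime handles.
        let mut state = serializer.serialize_struct("Config", 1)?;
        state.serialize_field("description", "OPNsense configuration handle")?;
        state.end()
    }
}
// ------------------------------------------------------------------------------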