// harmony/adr/003-abstractions/topology/src/main_gemini25pro.rs
// Import necessary items (though for this example, few are needed beyond std)
use std::fmt;

// --- Error Handling ---

// A simple error type for demonstration purposes. In a real app, use `thiserror` or `anyhow`.
#[derive(Debug)]
enum OrchestrationError {
    CommandFailed(String),
    KubeClientError(String),
    TopologySetupFailed(String),
}

impl fmt::Display for OrchestrationError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            OrchestrationError::CommandFailed(e) => write!(f, "Command execution failed: {}", e),
            OrchestrationError::KubeClientError(e) => write!(f, "Kubernetes client error: {}", e),
            OrchestrationError::TopologySetupFailed(e) => write!(f, "Topology setup failed: {}", e),
        }
    }
}

impl std::error::Error for OrchestrationError {}
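// As the note above suggests, a derive-based error type is usually preferable in
// real code. A rough sketch of the same enum using the `thiserror` crate (this
// assumes `thiserror` is added as a dependency, which this example does not do):
//
//     use thiserror::Error;
//
//     #[derive(Debug, Error)]
//     enum OrchestrationError {
//         #[error("Command execution failed: {0}")]
//         CommandFailed(String),
//         #[error("Kubernetes client error: {0}")]
//         KubeClientError(String),
//         #[error("Topology setup failed: {0}")]
//         TopologySetupFailed(String),
//     }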
// Define a common Result type
type Result<T> = std::result::Result<T, Box<dyn std::error::Error>>;

// --- 1. Capability Specification (as Traits) ---

/// Capability trait representing the ability to run Linux commands.
/// This follows the "Parse, Don't Validate" idea implicitly - if you have an object
/// implementing this, you know you *can* run commands, no need to check later.
trait LinuxOperations {
    fn run_command(&self, command: &str) -> Result<String>;
}

/// A mock Kubernetes client trait for demonstration.
trait KubeClient {
    fn apply_manifest(&self, manifest: &str) -> Result<()>;
    fn get_pods(&self, namespace: &str) -> Result<Vec<String>>;
}

/// Mock implementation of a KubeClient.
struct MockKubeClient {
    cluster_name: String,
}

impl KubeClient for MockKubeClient {
    fn apply_manifest(&self, manifest: &str) -> Result<()> {
        println!(
            "[{}] Applying Kubernetes manifest:\n---\n{}\n---",
            self.cluster_name, manifest
        );
        // Simulate success or failure
        if manifest.contains("invalid") {
            Err(Box::new(OrchestrationError::KubeClientError(
                "Invalid manifest content".into(),
            )))
        } else {
            Ok(())
        }
    }

    fn get_pods(&self, namespace: &str) -> Result<Vec<String>> {
        println!(
            "[{}] Getting pods in namespace '{}'",
            self.cluster_name, namespace
        );
        Ok(vec![
            format!("pod-a-12345-{}-{}", namespace, self.cluster_name),
            format!("pod-b-67890-{}-{}", namespace, self.cluster_name),
        ])
    }
}

/// Capability trait representing access to a Kubernetes cluster.
/// This follows Rust Embedded WG's "Zero-Cost Abstractions" - the trait itself
/// adds no runtime overhead, only compile-time structure.
trait KubernetesCluster {
    // Provides access to a Kubernetes client instance.
    // Using `impl Trait` in return position for flexibility.
    fn get_kube_client(&self) -> Result<impl KubeClient>;
}
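// Note: `impl Trait` in trait return position requires Rust 1.75 or later. On older
// toolchains, a roughly equivalent signature (at the cost of a heap allocation and
// dynamic dispatch) would return a boxed trait object instead:
//
//     fn get_kube_client(&self) -> Result<Box<dyn KubeClient>>;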
// --- 2. Topology Implementations ---
// Topologies implement the capabilities they provide.

/// Represents a basic Linux host.
#[derive(Debug, Clone)]
struct LinuxHostTopology {
    hostname: String,
    // In a real scenario: SSH connection details, etc.
}

impl LinuxHostTopology {
    fn new(hostname: &str) -> Self {
        println!("Initializing LinuxHostTopology for {}", hostname);
        Self {
            hostname: hostname.to_string(),
        }
    }
}

// LinuxHostTopology provides LinuxOperations capability.
impl LinuxOperations for LinuxHostTopology {
    fn run_command(&self, command: &str) -> Result<String> {
        println!("[{}] Running command: '{}'", self.hostname, command);
        // Simulate command execution (e.g., via SSH)
        if command.starts_with("fail") {
            Err(Box::new(OrchestrationError::CommandFailed(format!(
                "Command '{}' failed",
                command
            ))))
        } else {
            Ok(format!("Output of '{}' on {}", command, self.hostname))
        }
    }
}

/// Represents a K3D (Kubernetes in Docker) cluster running on a host.
#[derive(Debug, Clone)]
struct K3DTopology {
    cluster_name: String,
    host_os: String, // Example: might implicitly run commands on the underlying host
    // In a real scenario: Kubeconfig path, Docker client, etc.
}

impl K3DTopology {
    fn new(cluster_name: &str) -> Self {
        println!("Initializing K3DTopology for cluster {}", cluster_name);
        Self {
            cluster_name: cluster_name.to_string(),
            host_os: "Linux".to_string(), // Assume k3d runs on Linux for this example
        }
    }
}

// K3DTopology provides KubernetesCluster capability.
impl KubernetesCluster for K3DTopology {
    fn get_kube_client(&self) -> Result<impl KubeClient> {
        println!("[{}] Creating mock Kubernetes client", self.cluster_name);
        // In a real scenario, this would initialize a client using kubeconfig etc.
        Ok(MockKubeClient {
            cluster_name: self.cluster_name.clone(),
        })
    }
}

// K3DTopology *also* provides LinuxOperations (e.g., for running commands inside nodes or on the host managing k3d).
impl LinuxOperations for K3DTopology {
    fn run_command(&self, command: &str) -> Result<String> {
        println!(
            "[{} on {} host] Running command: '{}'",
            self.cluster_name, self.host_os, command
        );
        // Simulate command execution (maybe `docker exec` or similar)
        if command.starts_with("fail") {
            Err(Box::new(OrchestrationError::CommandFailed(format!(
                "Command '{}' failed within k3d context",
                command
            ))))
        } else {
            Ok(format!(
                "Output of '{}' within k3d cluster {}",
                command, self.cluster_name
            ))
        }
    }
}

// --- 3. Score Implementations ---
// Scores require capabilities via trait bounds on their execution logic.

/// Base trait for identifying scores. Could be empty or hold metadata.
trait Score {
    fn name(&self) -> &'static str;
    // We don't put execute here, as its signature depends on required capabilities.
}
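// For contrast, a single dynamic signature on `Score` such as
//
//     fn execute(&self, topology: &dyn std::any::Any) -> Result<()>;
//
// would push the capability checks to runtime (downcasting and error handling).
// The capability-bounded `execute` methods below keep those requirements in the
// type system, so incompatible pairings simply do not compile.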
/// A score that runs a shell command on a Linux host.
#[derive(Debug)]
struct CommandScore {
    command: String,
}

impl Score for CommandScore {
    fn name(&self) -> &'static str {
        "CommandScore"
    }
}

impl CommandScore {
    fn new(command: &str) -> Self {
        Self {
            command: command.to_string(),
        }
    }

    /// Execute method is generic over T, but requires T implements LinuxOperations.
    /// This follows the "Scores as Polymorphic Functions" idea.
    fn execute<T: LinuxOperations + ?Sized>(&self, topology: &T) -> Result<()> {
        println!("Executing Score: {}", Score::name(self));
        let output = topology.run_command(&self.command)?;
        println!("Command Score Output: {}", output);
        Ok(())
    }
}
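// For illustration, `execute` can also be called directly, without a Maestro, on any
// topology that provides LinuxOperations (hypothetical usage, not exercised in `main`):
//
//     let host = LinuxHostTopology::new("example-host");
//     CommandScore::new("uptime").execute(&host)?;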
/// A score that applies a Kubernetes resource manifest.
#[derive(Debug)]
struct K8sResourceScore {
    manifest_path: String, // Path or content
}

impl Score for K8sResourceScore {
    fn name(&self) -> &'static str {
        "K8sResourceScore"
    }
}

impl K8sResourceScore {
    fn new(manifest_path: &str) -> Self {
        Self {
            manifest_path: manifest_path.to_string(),
        }
    }

    /// Execute method requires T implements KubernetesCluster.
    fn execute<T: KubernetesCluster + ?Sized>(&self, topology: &T) -> Result<()> {
        println!("Executing Score: {}", Score::name(self));
        let client = topology.get_kube_client()?;
        let manifest_content = format!(
            "apiVersion: v1\nkind: Pod\nmetadata:\n name: my-pod-from-{}",
            self.manifest_path
        ); // Simulate reading file
        client.apply_manifest(&manifest_content)?;
        println!(
            "K8s Resource Score applied manifest: {}",
            self.manifest_path
        );
        Ok(())
    }
}

// --- 4. Maestro (The Orchestrator) ---
// This version of Maestro uses a helper trait (`ScoreRunner`) to enable
// storing heterogeneous scores while preserving compile-time checks.

/// A helper trait to erase the specific capability requirements *after*
/// the compiler has verified them, allowing storage in a Vec.
/// The verification happens in the blanket impls below.
trait ScoreRunner<T> {
    // T is the concrete Topology type
    fn run(&self, topology: &T) -> Result<()>;
    fn name(&self) -> &'static str;
}

// Blanket implementation: A CommandScore can be run on any Topology T
// *if and only if* T implements LinuxOperations.
// The compiler checks this bound when `add_score` is called.
impl<T: LinuxOperations> ScoreRunner<T> for CommandScore {
    fn run(&self, topology: &T) -> Result<()> {
        self.execute(topology) // Call the capability-specific execute method
    }

    fn name(&self) -> &'static str {
        Score::name(self)
    }
}

// Blanket implementation: A K8sResourceScore can be run on any Topology T
// *if and only if* T implements KubernetesCluster.
impl<T: KubernetesCluster> ScoreRunner<T> for K8sResourceScore {
    fn run(&self, topology: &T) -> Result<()> {
        self.execute(topology) // Call the capability-specific execute method
    }

    fn name(&self) -> &'static str {
        Score::name(self)
    }
}
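// Extending the system follows the same recipe: a new score needs only its own
// capability-bounded `execute` plus one blanket `ScoreRunner` impl. As an
// illustrative, hypothetical example (not used by `main`), a score that lists pods
// through the KubeClient capability:
#[allow(dead_code)]
#[derive(Debug)]
struct PodListScore {
    namespace: String,
}

#[allow(dead_code)]
impl PodListScore {
    fn new(namespace: &str) -> Self {
        Self {
            namespace: namespace.to_string(),
        }
    }

    /// Requires the KubernetesCluster capability, just like K8sResourceScore.
    fn execute<T: KubernetesCluster + ?Sized>(&self, topology: &T) -> Result<()> {
        let client = topology.get_kube_client()?;
        let pods = client.get_pods(&self.namespace)?;
        println!("Pod List Score found pods: {:?}", pods);
        Ok(())
    }
}

impl Score for PodListScore {
    fn name(&self) -> &'static str {
        "PodListScore"
    }
}

// A PodListScore can be run on any Topology T that implements KubernetesCluster.
impl<T: KubernetesCluster> ScoreRunner<T> for PodListScore {
    fn run(&self, topology: &T) -> Result<()> {
        self.execute(topology)
    }

    fn name(&self) -> &'static str {
        Score::name(self)
    }
}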
/// The Maestro orchestrator, strongly typed to a specific Topology `T`.
struct Maestro<T> {
    topology: T,
    // Stores type-erased runners, but addition is type-safe.
    scores: Vec<Box<dyn ScoreRunner<T>>>,
}

impl<T> Maestro<T> {
    /// Creates a new Maestro instance bound to a specific topology.
    fn new(topology: T) -> Self {
        println!("Maestro initialized.");
        Maestro {
            topology,
            scores: Vec::new(),
        }
    }

    /// Adds a score to the Maestro.
    /// **Compile-time check happens here!**
    /// The `S: ScoreRunner<T>` bound ensures that the score `S` provides an
    /// implementation of `ScoreRunner` *for the specific topology type `T`*.
    /// The blanket impls above ensure this is only possible if `T` has the
    /// required capabilities for `S`.
    /// This directly follows the "Theoretical Example: The Compiler as an Ally".
    fn add_score<S>(&mut self, score: S)
    where
        S: Score + ScoreRunner<T> + 'static, // S must be runnable on *this* T
    {
        println!("Registering score: {}", Score::name(&score));
        self.scores.push(Box::new(score));
    }

    /// Runs all registered scores sequentially on the topology.
    fn run_all(&self) -> Vec<Result<()>> {
        println!("\n--- Running all scores ---");
        self.scores
            .iter()
            .map(|score_runner| {
                println!("---");
                let result = score_runner.run(&self.topology);
                match &result {
                    Ok(_) => println!("Score '{}' completed successfully.", score_runner.name()),
                    Err(e) => eprintln!("Score '{}' failed: {}", score_runner.name(), e),
                }
                result
            })
            .collect()
    }
}
// --- 5. Example Usage ---
fn main() {
    println!("=== Scenario 1: Linux Host Topology ===");
    let linux_host = LinuxHostTopology::new("server1.example.com");
    let mut maestro_linux = Maestro::new(linux_host);

    // Add scores compatible with LinuxHostTopology (which has LinuxOperations)
    maestro_linux.add_score(CommandScore::new("uname -a"));
    maestro_linux.add_score(CommandScore::new("ls -l /tmp"));

    // *** Compile-time Error Example ***
    // Try adding a score that requires KubernetesCluster capability.
    // This line WILL NOT COMPILE because LinuxHostTopology does not implement KubernetesCluster,
    // therefore K8sResourceScore does not implement ScoreRunner<LinuxHostTopology>.
    // maestro_linux.add_score(K8sResourceScore::new("my-app.yaml"));
    // Uncomment the line above to see the compiler error! The error message will
    // likely point to the `ScoreRunner<LinuxHostTopology>` bound not being satisfied
    // for `K8sResourceScore`.

    let results_linux = maestro_linux.run_all();
    println!("\nLinux Host Results: {:?}", results_linux);

    println!("\n=== Scenario 2: K3D Topology ===");
    let k3d_cluster = K3DTopology::new("dev-cluster");
    let mut maestro_k3d = Maestro::new(k3d_cluster);

    // Add scores compatible with K3DTopology (which has LinuxOperations AND KubernetesCluster)
    maestro_k3d.add_score(CommandScore::new("pwd")); // Uses LinuxOperations
    maestro_k3d.add_score(K8sResourceScore::new("nginx-deployment.yaml")); // Uses KubernetesCluster
    maestro_k3d.add_score(K8sResourceScore::new("invalid-service.yaml")); // Test error case
    maestro_k3d.add_score(CommandScore::new("fail please")); // Test error case

    let results_k3d = maestro_k3d.run_all();
    println!("\nK3D Cluster Results: {:?}", results_k3d);

    println!("\n=== Compile-Time Safety Demonstrated ===");
    println!("(Check the commented-out line in the code for the compile error example)");
}
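// A minimal test sketch exercising the same wiring through the standard test harness
// (`cargo test`): both capability paths succeed on K3DTopology, and a failing
// command on LinuxHostTopology surfaces as an Err.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn k3d_topology_accepts_both_score_kinds() {
        let mut maestro = Maestro::new(K3DTopology::new("test-cluster"));
        maestro.add_score(CommandScore::new("echo hello")); // needs LinuxOperations
        maestro.add_score(K8sResourceScore::new("app.yaml")); // needs KubernetesCluster
        let results = maestro.run_all();
        assert!(results.iter().all(|r| r.is_ok()));
    }

    #[test]
    fn failing_command_is_reported_as_err() {
        let mut maestro = Maestro::new(LinuxHostTopology::new("test-host"));
        maestro.add_score(CommandScore::new("fail now"));
        let results = maestro.run_all();
        assert!(results[0].is_err());
    }
}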