feat: Application module architecture and placeholder features

With this architecture, we have an extensible application module: we can easily define new features and add them to application scores.

All of this is driven by the ApplicationInterpret, which understands features and makes sure they are "installed".

The drawback of this design is that we now have three different places to launch scores within Harmony: Maestro, Topology and Interpret. This is an architectural smell and I am not sure how to deal with it at the moment.

However, all these places where execution is performed make sense semantically: an ApplicationInterpret must understand ApplicationFeatures and can very well be responsible for them. The same goes for a Topology, which provides features itself by composition (e.g. K8sAnywhereTopology implements TenantManager), so it is natural for that implementation to know how to install itself.
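
A minimal sketch of the relationship described above, built from the trait shapes this commit introduces. The real `execute` body is still `todo!()`, so the installation loop below is an assumption about how the Interpret will eventually drive features, not what this commit implements:

```rust
use async_trait::async_trait;

use crate::topology::Topology; // Harmony's Topology trait

// Trait shape as introduced in this commit (error type reduced to String).
#[async_trait]
pub trait ApplicationFeature<T: Topology>: std::fmt::Debug + Send + Sync {
    async fn ensure_installed(&self, topology: &T) -> Result<(), String>;
    async fn is_installed(&self) -> Result<bool, String>;
    async fn uninstall(&self) -> Result<(), String>;
}

pub struct ApplicationInterpret<T: Topology + std::fmt::Debug> {
    features: Vec<Box<dyn ApplicationFeature<T>>>,
}

impl<T: Topology + std::fmt::Debug> ApplicationInterpret<T> {
    // Hypothetical installation loop: check each feature and install it on
    // the topology if it is not already present.
    async fn install_features(&self, topology: &T) -> Result<(), String> {
        for feature in &self.features {
            if !feature.is_installed().await? {
                feature.ensure_installed(topology).await?;
            }
        }
        Ok(())
    }
}
```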
Jean-Gabriel Gill-Couture 2025-06-30 15:40:18 -04:00
parent 923de4506e
commit 34abe3af24
7 changed files with 111 additions and 74 deletions

View File

@ -8,11 +8,19 @@ use kube::{
use log::{debug, error, trace};
use serde::de::DeserializeOwned;
#[derive(new)]
#[derive(new, Clone)]
pub struct K8sClient {
client: Client,
}
impl std::fmt::Debug for K8sClient {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
// This is a poor man's debug implementation for now as kube::Client does not provide much
// useful information
f.write_fmt(format_args!("K8sClient {{ kube client using default namespace {} }}", self.client.default_namespace()))
}
}
impl K8sClient {
pub async fn try_default() -> Result<Self, Error> {
Ok(Self {

View File

@ -3,6 +3,7 @@ use std::{process::Command, sync::Arc};
use async_trait::async_trait;
use inquire::Confirm;
use log::{debug, info, warn};
use serde::Serialize;
use tokio::sync::OnceCell;
use crate::{
@ -20,22 +21,24 @@ use super::{
tenant::{TenantConfig, TenantManager, k8s::K8sTenantManager},
};
#[derive(Clone, Debug)]
struct K8sState {
client: Arc<K8sClient>,
_source: K8sSource,
message: String,
}
#[derive(Debug)]
#[derive(Debug, Clone)]
enum K8sSource {
LocalK3d,
Kubeconfig,
}
#[derive(Clone, Debug)]
pub struct K8sAnywhereTopology {
k8s_state: OnceCell<Option<K8sState>>,
tenant_manager: OnceCell<K8sTenantManager>,
config: K8sAnywhereConfig,
k8s_state: Arc<OnceCell<Option<K8sState>>>,
tenant_manager: Arc<OnceCell<K8sTenantManager>>,
config: Arc<K8sAnywhereConfig>,
}
#[async_trait]
@ -55,20 +58,28 @@ impl K8sclient for K8sAnywhereTopology {
}
}
impl Serialize for K8sAnywhereTopology {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: serde::Serializer {
todo!()
}
}
impl K8sAnywhereTopology {
pub fn from_env() -> Self {
Self {
k8s_state: OnceCell::new(),
tenant_manager: OnceCell::new(),
config: K8sAnywhereConfig::from_env(),
k8s_state: Arc::new(OnceCell::new()),
tenant_manager: Arc::new(OnceCell::new()),
config: Arc::new(K8sAnywhereConfig::from_env()),
}
}
pub fn with_config(config: K8sAnywhereConfig) -> Self {
Self {
k8s_state: OnceCell::new(),
tenant_manager: OnceCell::new(),
config,
k8s_state: Arc::new(OnceCell::new()),
tenant_manager: Arc::new(OnceCell::new()),
config: Arc::new(config),
}
}
@ -200,6 +211,7 @@ impl K8sAnywhereTopology {
}
}
#[derive(Clone, Debug)]
pub struct K8sAnywhereConfig {
/// The path of the KUBECONFIG file that Harmony should use to interact with the Kubernetes
/// cluster

View File

@ -22,7 +22,7 @@ use serde_json::json;
use super::{TenantConfig, TenantManager};
#[derive(new)]
#[derive(new, Clone, Debug)]
pub struct K8sTenantManager {
k8s_client: Arc<K8sClient>,
}

View File

@ -1,6 +1,7 @@
use async_trait::async_trait;
use log::info;
use crate::modules::application::{Application, ApplicationFeature};
use crate::{modules::application::{Application, ApplicationFeature}, topology::{K8sclient, Topology}};
#[derive(Debug)]
pub struct PublicEndpoint {
@ -22,9 +23,11 @@ impl Default for PublicEndpoint {
}
}
/// For now we only support K8s ingress, but we will support more stuff at some point
#[async_trait]
impl ApplicationFeature for PublicEndpoint {
async fn ensure_installed(&self) -> Result<(), String> {
impl <T: Topology + K8sclient + 'static> ApplicationFeature<T> for PublicEndpoint {
async fn ensure_installed(&self, _topology: &T) -> Result<(), String> {
info!("Making sure public endpoint is installed for port {}", self.application_port);
todo!()
}
@ -36,40 +39,3 @@ impl ApplicationFeature for PublicEndpoint {
todo!()
}
}
// Design options here:
//
// 1. Forget about the ApplicationFeature trait. The Features are just other scores that are passed on to
// the ApplicationInterpret as children (and maybe we rename children to dependencies?)
//
// 2. Go forward with the ApplicationFeature trait. There are important question marks here:
// - What about installation lifecycle management? This was defined as being handled by a
// Topology. The thing here is that I am not sure whether application features belong at the
// Topology level or not. Functionally they are pretty similar: a Topology provides software
// infrastructure features that Scores will then install themselves on. Most of the time those very
// features are installed using Scores with lower level dependencies. For example:
//
// AlertingFeature depends on T: Topology + AlertSender
// AlertSender is implemented as KubePrometheus which depends on T: Topology + HelmCommand
// HelmCommand relies on T: Topology + K8sClient
//
// With that said, would it work with `features: Vec<Box<dyn Score<T>>>` instead of `features:
// Vec<Box<dyn ApplicationFeature>>`?
//
// Let's unpack this:
//
// RustWebappScore<T: Topology> {
//     features: Vec<Box<dyn Score<T>>>,
// }
//
// This brings in a significant problem: RustWebappScore becomes generic, which is a problem for
// the Clone and Serialize bounds.
//
// But that can be fixed easily, I think?
//
// RustWebappScore<T: Topology + Clone + Serialize> {
//     features: Vec<Box<dyn Score<T>>>,
// }
//
// Oh right, not quite, because it is `dyn`.
//
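
An aside on the `dyn` problem these removed notes end on: `Clone for Box<dyn ApplicationFeature<T>>` is left as `todo!()` later in this commit, and the usual workaround is a `clone_box` supertrait. A sketch of that pattern, assuming every concrete feature is itself `Clone` (this is not what the commit does):

```rust
use crate::topology::Topology;

// Object-safe cloning hook; the blanket impl below covers any concrete
// feature that is Clone, so implementors get it for free.
pub trait FeatureClone<T: Topology> {
    fn clone_box(&self) -> Box<dyn ApplicationFeature<T>>;
}

impl<T: Topology, F> FeatureClone<T> for F
where
    F: ApplicationFeature<T> + Clone + 'static,
{
    fn clone_box(&self) -> Box<dyn ApplicationFeature<T>> {
        Box::new(self.clone())
    }
}

// This requires making FeatureClone a supertrait, e.g.:
// pub trait ApplicationFeature<T: Topology>: FeatureClone<T> + std::fmt::Debug + Send + Sync { ... }
impl<T: Topology> Clone for Box<dyn ApplicationFeature<T>> {
    fn clone(&self) -> Self {
        self.clone_box()
    }
}
```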

View File

@ -1,6 +1,9 @@
mod endpoint;
use async_trait::async_trait;
pub use endpoint::*;
use log::info;
use crate::topology::{HelmCommand, Topology};
use super::ApplicationFeature;
@ -8,29 +11,72 @@ use super::ApplicationFeature;
pub struct SoftwareQualityChecks {}
#[async_trait]
impl ApplicationFeature for SoftwareQualityChecks {
async fn ensure_installed(&self) -> Result<(), String> {
impl<T: Topology + 'static> ApplicationFeature<T> for SoftwareQualityChecks {
// Either allow ApplicationFeature to self-install, which means passing the Topology and Inventory
// here. This would be very easy for the ApplicationInterpret to do reliably.
// However, I feel like this would be better as a list of Scores (or some sort of
// executable) to be orchestrated by the maestro. We will soon have to manage more complex
// lifecycles, dependencies, parallelism, etc.
//
// Or change the ApplicationFeature trait into a Score trait.
//
// For now I'll go with the first option.
async fn ensure_installed(&self, _topology: &T) -> Result<(), String> {
info!("Ensuring SoftwareQualityChecks are installed for application");
todo!()
}
async fn is_installed(&self) -> Result<bool, String> {
todo!()
}
async fn uninstall(&self) -> Result<(), String> {
todo!()
}
}
/// ContinuousDelivery in Harmony provides this functionality:
///
/// - **Package** the application
/// - **Push** to an artifact registry
/// - **Deploy** to a testing environment
/// - **Deploy** to a production environment
///
/// It is intended to be used as an application feature passed down to an ApplicationInterpret. For
/// example:
///
/// ```rust
/// let app = RustApplicationScore {
/// name: "My Rust App".to_string(),
/// features: vec![ContinuousDelivery::default()],
/// };
/// ```
///
/// *Note:*
///
/// By default, the Harmony Opinionated Pipeline is built using these technologies:
///
/// - Gitea Actions (executes pipeline steps)
/// - Docker to build an OCI container image
/// - Helm chart to package Kubernetes resources
/// - Harbor as artifact registry
/// - ArgoCD to install/upgrade/rollback/inspect k8s resources
/// - Kubernetes for runtime orchestration
#[derive(Debug, Default)]
pub struct ContinuousDelivery {}
#[async_trait]
impl ApplicationFeature for ContinuousDelivery {
async fn ensure_installed(&self) -> Result<(), String> {
impl <T: Topology + 'static> ApplicationFeature<T> for ContinuousDelivery {
async fn ensure_installed(&self, _topology: &T) -> Result<(), String> {
info!("Installing ContinuousDelivery feature");
todo!()
}
async fn is_installed(&self) -> Result<bool, String> {
todo!()
}
async fn uninstall(&self) -> Result<(), String> {
todo!()
}
@ -40,10 +86,12 @@ impl ApplicationFeature for ContinuousDelivery {
pub struct Monitoring {}
#[async_trait]
impl ApplicationFeature for Monitoring {
async fn ensure_installed(&self) -> Result<(), String> {
todo!()
impl <T: Topology + HelmCommand + 'static> ApplicationFeature<T> for Monitoring {
async fn ensure_installed(&self, _topology: &T) -> Result<(), String> {
info!("Ensuring monitoring is available for application");
todo!("create and execute k8s prometheus score, depends on Will's work")
}
async fn is_installed(&self) -> Result<bool, String> {
todo!()
}

View File

@ -1,5 +1,5 @@
mod rust;
pub mod features;
mod rust;
pub use rust::*;
use async_trait::async_trait;
@ -32,13 +32,17 @@ impl<T: Topology> Score<T> for GoApplicationScore {
}
#[derive(Debug)]
pub struct ApplicationInterpret {
features: Vec<Box<dyn ApplicationFeature>>,
pub struct ApplicationInterpret<T: Topology + std::fmt::Debug> {
features: Vec<Box<dyn ApplicationFeature<T>>>,
}
#[async_trait]
impl<T: Topology> Interpret<T> for ApplicationInterpret {
async fn execute(&self, _inventory: &Inventory, _topology: &T) -> Result<Outcome, InterpretError> {
impl<T: Topology + std::fmt::Debug> Interpret<T> for ApplicationInterpret<T> {
async fn execute(
&self,
_inventory: &Inventory,
_topology: &T,
) -> Result<Outcome, InterpretError> {
todo!()
}
@ -64,13 +68,13 @@ trait Application {}
/// An ApplicationFeature provided by harmony, such as Backups, Monitoring, MultisiteAvailability,
/// ContinuousIntegration, ContinuousDelivery
#[async_trait]
pub trait ApplicationFeature: std::fmt::Debug + Send + Sync {
async fn ensure_installed(&self) -> Result<(), String>;
pub trait ApplicationFeature<T: Topology>: std::fmt::Debug + Send + Sync {
async fn ensure_installed(&self, topology: &T) -> Result<(), String>;
async fn is_installed(&self) -> Result<bool, String>;
async fn uninstall(&self) -> Result<(), String>;
}
impl Serialize for Box<dyn ApplicationFeature> {
impl<T: Topology> Serialize for Box<dyn ApplicationFeature<T>> {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: serde::Serializer,
@ -79,7 +83,7 @@ impl Serialize for Box<dyn ApplicationFeature> {
}
}
impl Clone for Box<dyn ApplicationFeature> {
impl<T: Topology> Clone for Box<dyn ApplicationFeature<T>> {
fn clone(&self) -> Self {
todo!()
}
@ -89,8 +93,8 @@ impl Clone for Box<dyn ApplicationFeature> {
pub struct BackupFeature;
#[async_trait]
impl ApplicationFeature for BackupFeature {
async fn ensure_installed(&self) -> Result<(), String> {
impl <T: Topology > ApplicationFeature<T> for BackupFeature {
async fn ensure_installed(&self, _topology: &T) -> Result<(), String> {
todo!()
}

View File

@ -8,13 +8,13 @@ use crate::{
use super::{ApplicationFeature, ApplicationInterpret};
#[derive(Debug, Serialize, Clone)]
pub struct RustWebappScore {
pub struct RustWebappScore<T: Topology + Clone + Serialize> {
pub name: String,
pub domain: Url,
pub features: Vec<Box<dyn ApplicationFeature>>,
pub features: Vec<Box<dyn ApplicationFeature<T>>>,
}
impl<T: Topology> Score<T> for RustWebappScore {
impl<T: Topology + std::fmt::Debug + Clone + Serialize + 'static> Score<T> for RustWebappScore<T> {
fn create_interpret(&self) -> Box<dyn crate::interpret::Interpret<T>> {
Box::new(ApplicationInterpret { features: todo!() })
}
@ -23,4 +23,3 @@ impl<T: Topology> Score<T> for RustWebappScore {
format!("{}-RustWebapp", self.name)
}
}