use std::{process::Command, sync::Arc};

use async_trait::async_trait;
use inquire::Confirm;
use log::{debug, info, warn};
use serde::Serialize;
use tokio::sync::OnceCell;

use crate::{
    executors::ExecutorError,
    interpret::{InterpretError, Outcome},
    inventory::Inventory,
    maestro::Maestro,
    modules::k3d::K3DInstallationScore,
    topology::LocalhostTopology,
};

use super::{
    DeploymentTarget, HelmCommand, K8sclient, MultiTargetTopology, Topology,
    k8s::K8sClient,
    tenant::{TenantConfig, TenantManager, k8s::K8sTenantManager},
};

#[derive(Clone, Debug)]
struct K8sState {
    client: Arc<K8sClient>,
    _source: K8sSource,
    message: String,
}

#[derive(Debug, Clone)]
enum K8sSource {
    LocalK3d,
    Kubeconfig,
}

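/// Topology that obtains a Kubernetes cluster from "anywhere": an explicitly provided
/// kubeconfig, the system kubeconfig, or a locally installed k3d cluster, depending on
/// [`K8sAnywhereConfig`].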
#[derive(Clone, Debug)]
pub struct K8sAnywhereTopology {
    k8s_state: Arc<OnceCell<Option<K8sState>>>,
    tenant_manager: Arc<OnceCell<K8sTenantManager>>,
    config: Arc<K8sAnywhereConfig>,
}

#[async_trait]
impl K8sclient for K8sAnywhereTopology {
    async fn k8s_client(&self) -> Result<Arc<K8sClient>, String> {
        let state = match self.k8s_state.get() {
            Some(state) => state,
            None => return Err("K8s state not initialized yet".to_string()),
        };

        let state = match state {
            Some(state) => state,
            None => return Err("K8s client initialized but empty".to_string()),
        };

        Ok(state.client.clone())
    }
}

impl Serialize for K8sAnywhereTopology {
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: serde::Serializer,
    {
        todo!()
    }
}

impl K8sAnywhereTopology {
    pub fn from_env() -> Self {
        Self {
            k8s_state: Arc::new(OnceCell::new()),
            tenant_manager: Arc::new(OnceCell::new()),
            config: Arc::new(K8sAnywhereConfig::from_env()),
        }
    }

    pub fn with_config(config: K8sAnywhereConfig) -> Self {
        Self {
            k8s_state: Arc::new(OnceCell::new()),
            tenant_manager: Arc::new(OnceCell::new()),
            config: Arc::new(config),
        }
    }
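
    // Construction sketch (hypothetical caller code, not part of this module): builds the
    // topology from an explicit `K8sAnywhereConfig` instead of reading the environment.
    // The field values below are purely illustrative.
    //
    //     let topology = K8sAnywhereTopology::with_config(K8sAnywhereConfig {
    //         kubeconfig: Some("/path/to/kubeconfig".to_string()),
    //         use_system_kubeconfig: false,
    //         autoinstall: false,
    //         use_local_k3d: false,
    //         harmony_profile: "dev".to_string(),
    //     });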

    fn is_helm_available(&self) -> Result<(), String> {
        let version_result = Command::new("helm")
            .arg("version")
            .output()
            .map_err(|e| format!("Failed to execute 'helm version': {}", e))?;

        if !version_result.status.success() {
            return Err("Failed to run 'helm version'".to_string());
        }

        // Print the version output
        let version_output = String::from_utf8_lossy(&version_result.stdout);
        println!("Helm version: {}", version_output.trim());

        Ok(())
    }

    async fn try_load_system_kubeconfig(&self) -> Option<K8sClient> {
        todo!("Use kube-rs default behavior to load system kubeconfig");
    }

    async fn try_load_kubeconfig(&self, path: &str) -> Option<K8sClient> {
        K8sClient::from_kubeconfig(path).await
    }

    fn get_k3d_installation_score(&self) -> K3DInstallationScore {
        K3DInstallationScore::default()
    }

    async fn try_install_k3d(&self) -> Result<(), InterpretError> {
        let maestro = Maestro::initialize(Inventory::autoload(), LocalhostTopology::new()).await?;
        let k3d_score = self.get_k3d_installation_score();
        maestro.interpret(Box::new(k3d_score)).await?;
        Ok(())
    }

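    /// Resolves a Kubernetes client, in order:
    /// 1. `use_local_k3d` short-circuits to the local k3d path below.
    /// 2. Otherwise an explicit `kubeconfig` is loaded, or an error is returned.
    /// 3. Otherwise `use_system_kubeconfig` is consulted (still `todo!`).
    /// 4. If nothing was found and `autoinstall` is off, the user is prompted;
    ///    declining returns `Ok(None)`.
    /// 5. Finally, a local k3d cluster is installed and a client is built from it.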
    async fn try_get_or_install_k8s_client(&self) -> Result<Option<K8sState>, InterpretError> {
        let k8s_anywhere_config = &self.config;

        // TODO: this deserves some refactoring, it is becoming a bit hard to follow.
        // Be careful when making modifications here.
        if k8s_anywhere_config.use_local_k3d {
            info!("Using local k3d cluster because use_local_k3d is set to true");
        } else {
            if let Some(kubeconfig) = &k8s_anywhere_config.kubeconfig {
                debug!("Loading kubeconfig {kubeconfig}");
                match self.try_load_kubeconfig(kubeconfig).await {
                    Some(client) => {
                        return Ok(Some(K8sState {
                            client: Arc::new(client),
                            _source: K8sSource::Kubeconfig,
                            message: format!("Loaded k8s client from kubeconfig {kubeconfig}"),
                        }));
                    }
                    None => {
                        return Err(InterpretError::new(format!(
                            "Failed to load kubeconfig from {kubeconfig}"
                        )));
                    }
                }
            }

            if k8s_anywhere_config.use_system_kubeconfig {
                debug!("Loading system kubeconfig");
                match self.try_load_system_kubeconfig().await {
                    Some(_client) => todo!(),
                    None => todo!(),
                }
            }

            info!("No Kubernetes configuration found");
        }

        if !k8s_anywhere_config.autoinstall {
            debug!("Autoinstall confirmation prompt");
            let confirmation = Confirm::new(
                "Harmony autoinstall is not enabled, do you wish to launch the automatic installation?",
            )
            .with_default(false)
            .prompt()
            .expect("Unexpected prompt error");
            debug!("Autoinstall confirmation {confirmation}");

            if !confirmation {
                warn!(
                    "Installation cancelled, K8sAnywhere could not initialize a valid Kubernetes client"
                );
                return Ok(None);
            }
        }

        info!("Starting K8sAnywhere installation");
        self.try_install_k3d().await?;
        let k3d_score = self.get_k3d_installation_score();
        // I feel like having to rely on the k3d_rs crate here is a smell.
        // I think we should have a way to interact more deeply with scores/interpret. Maybe the
        // K3DInstallationScore should expose a method to get_client? Not too sure what would be a
        // good implementation due to the stateful nature of the k3d setup, which is why I went
        // with this solution for now.
        let k3d = k3d_rs::K3d::new(k3d_score.installation_path, Some(k3d_score.cluster_name));
        let state = match k3d.get_client().await {
            Ok(client) => K8sState {
                client: Arc::new(K8sClient::new(client)),
                _source: K8sSource::LocalK3d,
                message: "Successfully installed K3D cluster and acquired client".to_string(),
            },
            Err(_) => todo!(),
        };

        Ok(Some(state))
    }

    async fn ensure_k8s_tenant_manager(&self) -> Result<(), String> {
        if self.tenant_manager.get().is_some() {
            return Ok(());
        }

        self.tenant_manager
            .get_or_try_init(async || -> Result<K8sTenantManager, String> {
                let k8s_client = self.k8s_client().await?;
                Ok(K8sTenantManager::new(k8s_client))
            })
            .await?;

        Ok(())
    }

    fn get_k8s_tenant_manager(&self) -> Result<&K8sTenantManager, ExecutorError> {
        match self.tenant_manager.get() {
            Some(t) => Ok(t),
            None => Err(ExecutorError::UnexpectedError(
                "K8sTenantManager not available".to_string(),
            )),
        }
    }
}

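/// Configuration for [`K8sAnywhereTopology`]. Typically loaded from environment
/// variables (see `from_env` below), or constructed explicitly and passed to
/// `K8sAnywhereTopology::with_config`.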
#[derive(Clone, Debug)]
pub struct K8sAnywhereConfig {
    /// The path of the KUBECONFIG file that Harmony should use to interact with the Kubernetes
    /// cluster
    ///
    /// Default: None
    pub kubeconfig: Option<String>,

    /// Whether to use the system KUBECONFIG, either the environment variable or the file in the
    /// default or configured location
    ///
    /// Default: false
    pub use_system_kubeconfig: bool,

    /// Whether to automatically install a Kubernetes cluster
    ///
    /// When enabled, autoinstall will set up a K3D cluster on the localhost. https://k3d.io/stable/
    ///
    /// Default: false
    pub autoinstall: bool,

    /// Whether to use a local k3d cluster.
    ///
    /// Takes precedence over the other options; useful to avoid messing up a remote cluster by
    /// mistake
    ///
    /// Default: true
    pub use_local_k3d: bool,

    /// The Harmony profile (e.g. "dev", "staging", "production") used to resolve the deployment
    /// target when not running against a local k3d cluster
    ///
    /// Default: "dev"
    pub harmony_profile: String,
}

impl K8sAnywhereConfig {
    fn from_env() -> Self {
        Self {
            kubeconfig: std::env::var("KUBECONFIG").ok(),
            use_system_kubeconfig: std::env::var("HARMONY_USE_SYSTEM_KUBECONFIG")
                .map_or_else(|_| false, |v| v.parse().ok().unwrap_or(false)),
            autoinstall: std::env::var("HARMONY_AUTOINSTALL")
                .map_or_else(|_| false, |v| v.parse().ok().unwrap_or(false)),
            // TODO harmony_profile should be managed at a more core level than this
            harmony_profile: std::env::var("HARMONY_PROFILE").map_or_else(
                |_| "dev".to_string(),
                |v| v.parse().ok().unwrap_or("dev".to_string()),
            ),
            use_local_k3d: std::env::var("HARMONY_USE_LOCAL_K3D")
                .map_or_else(|_| true, |v| v.parse().ok().unwrap_or(true)),
        }
    }
}
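
// Environment sketch for `K8sAnywhereConfig::from_env` (variable names are taken from the
// implementation above; the values shown are illustrative only):
//
//     KUBECONFIG=/path/to/kubeconfig       -> kubeconfig = Some("/path/to/kubeconfig")
//     HARMONY_USE_SYSTEM_KUBECONFIG=true   -> use_system_kubeconfig = true
//     HARMONY_AUTOINSTALL=true             -> autoinstall = true
//     HARMONY_USE_LOCAL_K3D=false          -> use_local_k3d = false
//     HARMONY_PROFILE=staging              -> harmony_profile = "staging"
//
// Unset or unparsable values fall back to the defaults documented on the struct.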

#[async_trait]
impl Topology for K8sAnywhereTopology {
    fn name(&self) -> &str {
        "K8sAnywhereTopology"
    }

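    /// Ensures a usable cluster is available: initializes (or installs) the Kubernetes
    /// client, sets up the tenant manager, then verifies that `helm` can be executed.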
    async fn ensure_ready(&self) -> Result<Outcome, InterpretError> {
        let k8s_state = self
            .k8s_state
            .get_or_try_init(|| self.try_get_or_install_k8s_client())
            .await?;

        let k8s_state: &K8sState = k8s_state.as_ref().ok_or(InterpretError::new(
            "No K8s client could be found or installed".to_string(),
        ))?;

        self.ensure_k8s_tenant_manager()
            .await
            .map_err(InterpretError::new)?;

        match self.is_helm_available() {
            Ok(()) => Ok(Outcome::success(format!(
                "{} + helm available",
                k8s_state.message.clone()
            ))),
            Err(e) => Err(InterpretError::new(format!("helm unavailable: {}", e))),
        }
    }
}

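// Target resolution implemented below: `use_local_k3d = true` always maps to
// `DeploymentTarget::LocalDev`; otherwise `harmony_profile` selects Staging or
// Production, and any other profile value is currently unhandled (`todo!`).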
impl MultiTargetTopology for K8sAnywhereTopology {
    fn current_target(&self) -> DeploymentTarget {
        if self.config.use_local_k3d {
            return DeploymentTarget::LocalDev;
        }

        match self.config.harmony_profile.to_lowercase().as_str() {
            "staging" => DeploymentTarget::Staging,
            "production" => DeploymentTarget::Production,
            _ => todo!("HARMONY_PROFILE must be set when use_local_k3d is not set"),
        }
    }
}

impl HelmCommand for K8sAnywhereTopology {}

#[async_trait]
impl TenantManager for K8sAnywhereTopology {
    async fn provision_tenant(&self, config: &TenantConfig) -> Result<(), ExecutorError> {
        self.get_k8s_tenant_manager()?
            .provision_tenant(config)
            .await
    }

    async fn get_tenant_config(&self) -> Option<TenantConfig> {
        self.get_k8s_tenant_manager()
            .ok()?
            .get_tenant_config()
            .await
    }
}