Compare commits: secrets-pr...feat/multi

1 commit

| Author | SHA1 | Date |
|---|---|---|
|  | ec794f076e |  |

1 Cargo.lock (generated)
@@ -3124,7 +3124,6 @@ dependencies = [
"fxhash",
"newline-converter",
"once_cell",
"tempfile",
"unicode-segmentation",
"unicode-width 0.1.14",
]
15 Cargo.toml
@@ -14,8 +14,7 @@ members = [
    "harmony_composer",
    "harmony_inventory_agent",
    "harmony_secret_derive",
    "harmony_secret",
    "adr/agent_discovery/mdns",
    "harmony_secret", "adr/agent_discovery/mdns",
]

[workspace.package]
@@ -51,7 +50,7 @@ k8s-openapi = { version = "0.25", features = ["v1_30"] }
serde_yaml = "0.9"
serde-value = "0.7"
http = "1.2"
inquire = { version = "0.7", features = ["editor"] }
inquire = "0.7"
convert_case = "0.8"
chrono = "0.4"
similar = "2"
@@ -67,11 +66,5 @@ thiserror = "2.0.14"
serde = { version = "1.0.209", features = ["derive", "rc"] }
serde_json = "1.0.127"
askama = "0.14"
sqlx = { version = "0.8", features = ["runtime-tokio", "sqlite"] }
reqwest = { version = "0.12", features = [
    "blocking",
    "stream",
    "rustls-tls",
    "http2",
    "json",
], default-features = false }
sqlx = { version = "0.8", features = ["runtime-tokio", "sqlite" ] }
reqwest = { version = "0.12", features = ["blocking", "stream", "rustls-tls", "http2", "json"], default-features = false }
@@ -27,6 +27,7 @@ async fn main() {
    };
    let application = Arc::new(RustWebapp {
        name: "example-monitoring".to_string(),
        domain: Url::Url(url::Url::parse("https://rustapp.harmony.example.com").unwrap()),
        project_root: PathBuf::from("./examples/rust/webapp"),
        framework: Some(RustWebFramework::Leptos),
        service_port: 3000,
@@ -17,6 +17,7 @@ use harmony_types::net::Url;
async fn main() {
    let application = Arc::new(RustWebapp {
        name: "test-rhob-monitoring".to_string(),
        domain: Url::Url(url::Url::parse("htps://some-fake-url").unwrap()),
        project_root: PathBuf::from("./webapp"), // Relative from 'harmony-path' param
        framework: Some(RustWebFramework::Leptos),
        service_port: 3000,
@@ -19,6 +19,7 @@ use harmony_macros::hurl;
async fn main() {
    let application = Arc::new(RustWebapp {
        name: "harmony-example-rust-webapp".to_string(),
        domain: hurl!("https://rustapp.harmony.example.com"),
        project_root: PathBuf::from("./webapp"),
        framework: Some(RustWebFramework::Leptos),
        service_port: 3000,
@@ -1,21 +1,23 @@
use std::{path::PathBuf, sync::Arc};

use harmony::{
    inventory::Inventory,
    modules::{
        application::{
            ApplicationScore, RustWebFramework, RustWebapp,
            features::{ContinuousDelivery, Monitoring, rhob_monitoring::RHOBMonitoring},
            features::{ContinuousDelivery, Monitoring},
        },
        monitoring::alert_channel::discord_alert_channel::DiscordWebhook,
    },
    topology::K8sAnywhereTopology,
};
use harmony_macros::hurl;
use std::{path::PathBuf, sync::Arc};
use harmony_types::net::Url;

#[tokio::main]
async fn main() {
    let application = Arc::new(RustWebapp {
        name: "harmony-example-tryrust".to_string(),
        domain: Url::Url(url::Url::parse("https://tryrust.harmony.example.com").unwrap()),
        project_root: PathBuf::from("./tryrust.org"),
        framework: Some(RustWebFramework::Leptos),
        service_port: 8080,
@@ -23,7 +25,7 @@ async fn main() {

    let discord_receiver = DiscordWebhook {
        name: "test-discord".to_string(),
        url: hurl!("https://discord.doesnt.exist.com"),
        url: Url::Url(url::Url::parse("https://discord.doesnt.exist.com").unwrap()),
    };

    let app = ApplicationScore {
@@ -31,7 +33,7 @@ async fn main() {
            Box::new(ContinuousDelivery {
                application: application.clone(),
            }),
            Box::new(RHOBMonitoring {
            Box::new(Monitoring {
                application: application.clone(),
                alert_receiver: vec![Box::new(discord_receiver)],
            }),
@@ -10,11 +10,7 @@ testing = []

[dependencies]
hex = "0.4"
reqwest = { version = "0.11", features = [
    "blocking",
    "json",
    "rustls-tls",
], default-features = false }
reqwest = { version = "0.11", features = ["blocking", "json", "rustls-tls"], default-features = false }
russh = "0.45.0"
rust-ipmi = "0.1.1"
semver = "1.0.23"
@@ -1,7 +0,0 @@
use crate::topology::PreparationError;
use async_trait::async_trait;

#[async_trait]
pub trait Ingress {
    async fn get_domain(&self, service: &str) -> Result<String, PreparationError>;
}
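Editor's note: for readers following the `Ingress` capability that this hunk adds or removes, here is a minimal, hedged sketch of what a topology-side implementation can look like. `StaticDomainTopology` and its fixed base domain are hypothetical and not part of this diff; only the trait and `PreparationError` come from the code above.

```rust
use async_trait::async_trait;

use crate::topology::{PreparationError, ingress::Ingress};

/// Hypothetical topology, for illustration only.
struct StaticDomainTopology {
    base_domain: String,
}

#[async_trait]
impl Ingress for StaticDomainTopology {
    async fn get_domain(&self, service: &str) -> Result<String, PreparationError> {
        // Every service is simply exposed under one fixed base domain.
        Ok(format!("{service}.{}", self.base_domain))
    }
}
```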
@@ -1,7 +1,6 @@
use std::{process::Command, sync::Arc};

use async_trait::async_trait;
use kube::api::GroupVersionKind;
use log::{debug, info, warn};
use serde::Serialize;
use tokio::sync::OnceCell;
@@ -23,7 +22,6 @@ use crate::{
        },
    },
    score::Score,
    topology::ingress::Ingress,
};

use super::{
@@ -200,26 +198,6 @@ impl K8sAnywhereTopology {
        }
    }

    async fn openshift_ingress_operator_available(&self) -> Result<(), PreparationError> {
        let client = self.k8s_client().await?;
        let gvk = GroupVersionKind {
            group: "operator.openshift.io".into(),
            version: "v1".into(),
            kind: "IngressController".into(),
        };
        let ic = client
            .get_resource_json_value("default", Some("openshift-ingress-operator"), &gvk)
            .await?;
        let ready_replicas = ic.data["status"]["availableReplicas"].as_i64().unwrap_or(0);
        if ready_replicas >= 1 {
            return Ok(());
        } else {
            return Err(PreparationError::new(
                "openshift-ingress-operator not available".to_string(),
            ));
        }
    }

    fn is_helm_available(&self) -> Result<(), String> {
        let version_result = Command::new("helm")
            .arg("version")
@@ -372,8 +350,6 @@ impl K8sAnywhereTopology {
        if let Some(Some(k8s_state)) = self.k8s_state.get() {
            match k8s_state.source {
                K8sSource::LocalK3d => {
                    warn!("Installing observability operator is not supported on LocalK3d source");
                    return Ok(PreparationOutcome::Noop);
                    debug!("installing cluster observability operator");
                    todo!();
                    let op_score =
@@ -552,7 +528,7 @@ impl MultiTargetTopology for K8sAnywhereTopology {
        match self.config.harmony_profile.to_lowercase().as_str() {
            "staging" => DeploymentTarget::Staging,
            "production" => DeploymentTarget::Production,
            _ => todo!("HARMONY_PROFILE must be set when use_local_k3d is false"),
            _ => todo!("HARMONY_PROFILE must be set when use_local_k3d is not set"),
        }
    }
}
@@ -574,45 +550,3 @@ impl TenantManager for K8sAnywhereTopology {
            .await
    }
}

#[async_trait]
impl Ingress for K8sAnywhereTopology {
    //TODO this is specifically for openshift/okd which violates the k8sanywhere idea
    async fn get_domain(&self, service: &str) -> Result<String, PreparationError> {
        let client = self.k8s_client().await?;

        if let Some(Some(k8s_state)) = self.k8s_state.get() {
            match k8s_state.source {
                K8sSource::LocalK3d => Ok(format!("{service}.local.k3d")),
                K8sSource::Kubeconfig => {
                    self.openshift_ingress_operator_available().await?;

                    let gvk = GroupVersionKind {
                        group: "operator.openshift.io".into(),
                        version: "v1".into(),
                        kind: "IngressController".into(),
                    };
                    let ic = client
                        .get_resource_json_value(
                            "default",
                            Some("openshift-ingress-operator"),
                            &gvk,
                        )
                        .await
                        .map_err(|_| {
                            PreparationError::new("Failed to fetch IngressController".to_string())
                        })?;

                    match ic.data["status"]["domain"].as_str() {
                        Some(domain) => Ok(format!("{service}.{domain}")),
                        None => Err(PreparationError::new("Could not find domain".to_string())),
                    }
                }
            }
        } else {
            Err(PreparationError::new(
                "Cannot get domain: unable to detect K8s state".to_string(),
            ))
        }
    }
}
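Editor's note: a small illustrative sketch, not part of this diff, of how a score or feature consumes this capability through a trait bound instead of knowing the concrete topology (this is how the Ingress-aware side of this diff wires `ContinuousDelivery` and `ArgoInterpret`). The function name is hypothetical.

```rust
use crate::topology::ingress::Ingress;

/// Illustrative only: resolve the public hostname for a service via the Ingress capability.
async fn public_hostname<T: Ingress>(topology: &T, service: &str) -> Result<String, String> {
    // LocalK3d resolves to "<service>.local.k3d"; on OpenShift the suffix comes from
    // the IngressController's status.domain, as shown in the impl above.
    topology.get_domain(service).await.map_err(|e| e.to_string())
}
```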
@@ -1,5 +1,4 @@
mod ha_cluster;
pub mod ingress;
use harmony_types::net::IpAddress;
mod host_binding;
mod http;
@@ -14,9 +14,7 @@ use crate::{
        features::{ArgoApplication, ArgoHelmScore},
    },
    score::Score,
    topology::{
        DeploymentTarget, HelmCommand, K8sclient, MultiTargetTopology, Topology, ingress::Ingress,
    },
    topology::{DeploymentTarget, HelmCommand, K8sclient, MultiTargetTopology, Topology},
};

/// ContinuousDelivery in Harmony provides this functionality :
@@ -52,6 +50,55 @@ pub struct ContinuousDelivery<A: OCICompliant + HelmPackage> {
|
||||
}
|
||||
|
||||
impl<A: OCICompliant + HelmPackage> ContinuousDelivery<A> {
|
||||
pub async fn deploy<T>(&self, topology: &T, helm_chart: String, image: String) -> Result<(), String>
|
||||
where
|
||||
T: Topology + HelmCommand + MultiTargetTopology + K8sclient + 'static,
|
||||
{
|
||||
// TODO: this is a temporary hack for demo purposes, the deployment target should be driven
|
||||
// by the topology only and we should not have to know how to perform tasks like this for
|
||||
// which the topology should be responsible.
|
||||
//
|
||||
// That said, this will require some careful architectural decisions, since the concept of
|
||||
// deployment targets / profiles is probably a layer of complexity that we won't be
|
||||
// completely able to avoid
|
||||
//
|
||||
// I'll try something for now that must be thought through after : att a deployment_profile
|
||||
// function to the topology trait that returns a profile, then anybody who needs it can
|
||||
// access it. This forces every Topology to understand the concept of targets though... So
|
||||
// instead I'll create a new Capability which is MultiTargetTopology and we'll see how it
|
||||
// goes. It still does not feel right though.
|
||||
//
|
||||
// https://git.nationtech.io/NationTech/harmony/issues/106
|
||||
match topology.current_target() {
|
||||
DeploymentTarget::LocalDev => {
|
||||
info!("Deploying {} locally...", self.application.name());
|
||||
self.deploy_to_local_k3d(self.application.name(), helm_chart, image)
|
||||
.await?;
|
||||
}
|
||||
target => {
|
||||
info!("Deploying {} to target {target:?}", self.application.name());
|
||||
|
||||
let score = ArgoHelmScore {
|
||||
namespace: format!("{}", self.application.name()),
|
||||
openshift: true,
|
||||
argo_apps: vec![ArgoApplication::from(CDApplicationConfig {
|
||||
// helm pull oci://hub.nationtech.io/harmony/harmony-example-rust-webapp-chart --version 0.1.0
|
||||
version: Version::from("0.1.0").unwrap(),
|
||||
helm_chart_repo_url: "hub.nationtech.io/harmony".to_string(),
|
||||
helm_chart_name: format!("{}-chart", self.application.name()),
|
||||
values_overrides: None,
|
||||
name: format!("{}", self.application.name()),
|
||||
namespace: format!("{}", self.application.name()),
|
||||
})],
|
||||
};
|
||||
score
|
||||
.interpret(&Inventory::empty(), topology)
|
||||
.await
|
||||
.unwrap();
|
||||
}
|
||||
};
|
||||
Ok(())
|
||||
}
|
||||
async fn deploy_to_local_k3d(
|
||||
&self,
|
||||
app_name: String,
|
||||
@@ -138,74 +185,24 @@ impl<A: OCICompliant + HelmPackage> ContinuousDelivery<A> {
|
||||
#[async_trait]
|
||||
impl<
|
||||
A: OCICompliant + HelmPackage + Clone + 'static,
|
||||
T: Topology + HelmCommand + MultiTargetTopology + K8sclient + Ingress + 'static,
|
||||
T: Topology + HelmCommand + MultiTargetTopology + K8sclient + 'static,
|
||||
> ApplicationFeature<T> for ContinuousDelivery<A>
|
||||
{
|
||||
async fn ensure_installed(&self, topology: &T) -> Result<(), String> {
|
||||
let image = self.application.image_name();
|
||||
let domain = topology
|
||||
.get_domain(&self.application.name())
|
||||
.await
|
||||
.map_err(|e| e.to_string())?;
|
||||
|
||||
// TODO Write CI/CD workflow files
|
||||
// we can autotedect the CI type using the remote url (default to github action for github
|
||||
// url, etc..)
|
||||
// Or ask for it when unknown
|
||||
|
||||
let helm_chart = self
|
||||
.application
|
||||
.build_push_helm_package(&image, &domain)
|
||||
.await?;
|
||||
let helm_chart = self.application.build_push_helm_package(&image).await?;
|
||||
|
||||
// TODO: Make building image configurable/skippable if image already exists (prompt)")
|
||||
// https://git.nationtech.io/NationTech/harmony/issues/104
|
||||
let image = self.application.build_push_oci_image().await?;
|
||||
|
||||
// TODO: this is a temporary hack for demo purposes, the deployment target should be driven
|
||||
// by the topology only and we should not have to know how to perform tasks like this for
|
||||
// which the topology should be responsible.
|
||||
//
|
||||
// That said, this will require some careful architectural decisions, since the concept of
|
||||
// deployment targets / profiles is probably a layer of complexity that we won't be
|
||||
// completely able to avoid
|
||||
//
|
||||
// I'll try something for now that must be thought through after : att a deployment_profile
|
||||
// function to the topology trait that returns a profile, then anybody who needs it can
|
||||
// access it. This forces every Topology to understand the concept of targets though... So
|
||||
// instead I'll create a new Capability which is MultiTargetTopology and we'll see how it
|
||||
// goes. It still does not feel right though.
|
||||
//
|
||||
// https://git.nationtech.io/NationTech/harmony/issues/106
|
||||
match topology.current_target() {
|
||||
DeploymentTarget::LocalDev => {
|
||||
info!("Deploying {} locally...", self.application.name());
|
||||
self.deploy_to_local_k3d(self.application.name(), helm_chart, image)
|
||||
.await?;
|
||||
}
|
||||
target => {
|
||||
info!("Deploying {} to target {target:?}", self.application.name());
|
||||
|
||||
let score = ArgoHelmScore {
|
||||
namespace: format!("{}", self.application.name()),
|
||||
openshift: true,
|
||||
argo_apps: vec![ArgoApplication::from(CDApplicationConfig {
|
||||
// helm pull oci://hub.nationtech.io/harmony/harmony-example-rust-webapp-chart --version 0.1.0
|
||||
version: Version::from("0.1.0").unwrap(),
|
||||
helm_chart_repo_url: "hub.nationtech.io/harmony".to_string(),
|
||||
helm_chart_name: format!("{}-chart", self.application.name()),
|
||||
values_overrides: None,
|
||||
name: format!("{}", self.application.name()),
|
||||
namespace: format!("{}", self.application.name()),
|
||||
})],
|
||||
};
|
||||
score
|
||||
.interpret(&Inventory::empty(), topology)
|
||||
.await
|
||||
.unwrap();
|
||||
}
|
||||
};
|
||||
Ok(())
|
||||
self.deploy(topology, helm_chart, image).await
|
||||
}
|
||||
fn name(&self) -> String {
|
||||
"ContinuousDelivery".to_string()
|
||||
|
||||
@@ -13,8 +13,7 @@ use crate::{
|
||||
modules::helm::chart::{HelmChartScore, HelmRepository},
|
||||
score::Score,
|
||||
topology::{
|
||||
HelmCommand, K8sclient, PreparationError, PreparationOutcome, Topology, ingress::Ingress,
|
||||
k8s::K8sClient,
|
||||
HelmCommand, K8sclient, PreparationError, PreparationOutcome, Topology, k8s::K8sClient,
|
||||
},
|
||||
};
|
||||
use harmony_types::id::Id;
|
||||
@@ -28,7 +27,7 @@ pub struct ArgoHelmScore {
|
||||
pub argo_apps: Vec<ArgoApplication>,
|
||||
}
|
||||
|
||||
impl<T: Topology + HelmCommand + K8sclient + Ingress> Score<T> for ArgoHelmScore {
|
||||
impl<T: Topology + HelmCommand + K8sclient> Score<T> for ArgoHelmScore {
|
||||
fn create_interpret(&self) -> Box<dyn crate::interpret::Interpret<T>> {
|
||||
Box::new(ArgoInterpret {
|
||||
score: self.clone(),
|
||||
@@ -48,14 +47,17 @@ pub struct ArgoInterpret {
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl<T: Topology + K8sclient + HelmCommand + Ingress> Interpret<T> for ArgoInterpret {
|
||||
impl<T: Topology + K8sclient + HelmCommand> Interpret<T> for ArgoInterpret {
|
||||
async fn execute(
|
||||
&self,
|
||||
inventory: &Inventory,
|
||||
topology: &T,
|
||||
) -> Result<Outcome, InterpretError> {
|
||||
let k8s_client = topology.k8s_client().await?;
|
||||
let domain = topology.get_domain("argo").await?;
|
||||
let domain = self
|
||||
.get_host_domain(k8s_client.clone(), self.score.openshift)
|
||||
.await?;
|
||||
let domain = format!("argo.{domain}");
|
||||
let helm_score =
|
||||
argo_helm_chart_score(&self.score.namespace, self.score.openshift, &domain);
|
||||
|
||||
|
||||
@@ -1,5 +1,6 @@
mod endpoint;
pub mod rhob_monitoring;
mod multisite;
pub use endpoint::*;

mod monitoring;
@@ -1,8 +1,10 @@
|
||||
use std::sync::Arc;
|
||||
|
||||
use crate::modules::application::{Application, ApplicationFeature};
|
||||
use crate::modules::monitoring::application_monitoring::application_monitoring_score::ApplicationMonitoringScore;
|
||||
use crate::modules::monitoring::kube_prometheus::crd::crd_alertmanager_config::CRDPrometheus;
|
||||
|
||||
use crate::topology::MultiTargetTopology;
|
||||
use crate::topology::ingress::Ingress;
|
||||
use crate::{
|
||||
inventory::Inventory,
|
||||
modules::monitoring::{
|
||||
@@ -17,12 +19,8 @@ use crate::{
|
||||
};
|
||||
use async_trait::async_trait;
|
||||
use base64::{Engine as _, engine::general_purpose};
|
||||
use harmony_secret::SecretManager;
|
||||
use harmony_secret_derive::Secret;
|
||||
use harmony_types::net::Url;
|
||||
use log::{debug, info};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::sync::Arc;
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct Monitoring {
|
||||
@@ -38,9 +36,8 @@ impl<
|
||||
+ TenantManager
|
||||
+ K8sclient
|
||||
+ MultiTargetTopology
|
||||
+ PrometheusApplicationMonitoring<CRDPrometheus>
|
||||
+ Ingress
|
||||
+ std::fmt::Debug,
|
||||
+ std::fmt::Debug
|
||||
+ PrometheusApplicationMonitoring<CRDPrometheus>,
|
||||
> ApplicationFeature<T> for Monitoring
|
||||
{
|
||||
async fn ensure_installed(&self, topology: &T) -> Result<(), String> {
|
||||
@@ -50,7 +47,6 @@ impl<
|
||||
.await
|
||||
.map(|ns| ns.name.clone())
|
||||
.unwrap_or_else(|| self.application.name());
|
||||
let domain = topology.get_domain("ntfy").await.unwrap();
|
||||
|
||||
let mut alerting_score = ApplicationMonitoringScore {
|
||||
sender: CRDPrometheus {
|
||||
@@ -62,17 +58,19 @@ impl<
|
||||
};
|
||||
let ntfy = NtfyScore {
|
||||
namespace: namespace.clone(),
|
||||
host: domain,
|
||||
host: "ntfy.harmonydemo.apps.ncd0.harmony.mcd".to_string(),
|
||||
};
|
||||
ntfy.interpret(&Inventory::empty(), topology)
|
||||
.await
|
||||
.map_err(|e| e.to_string())?;
|
||||
|
||||
let config = SecretManager::get_or_prompt::<NtfyAuth>().await.unwrap();
|
||||
|
||||
let ntfy_default_auth_username = "harmony";
|
||||
let ntfy_default_auth_password = "harmony";
|
||||
let ntfy_default_auth_header = format!(
|
||||
"Basic {}",
|
||||
general_purpose::STANDARD.encode(format!("{}:{}", config.username, config.password))
|
||||
general_purpose::STANDARD.encode(format!(
|
||||
"{ntfy_default_auth_username}:{ntfy_default_auth_password}"
|
||||
))
|
||||
);
|
||||
|
||||
debug!("ntfy_default_auth_header: {ntfy_default_auth_header}");
|
||||
@@ -102,17 +100,9 @@ impl<
|
||||
.interpret(&Inventory::empty(), topology)
|
||||
.await
|
||||
.map_err(|e| e.to_string())?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn name(&self) -> String {
|
||||
"Monitoring".to_string()
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Secret, Serialize, Deserialize, Clone, Debug)]
|
||||
struct NtfyAuth {
|
||||
username: String,
|
||||
password: String,
|
||||
}
|
||||
|
||||
49 harmony/src/modules/application/features/multisite.rs (new file)
@@ -0,0 +1,49 @@
use std::sync::Arc;

use crate::modules::application::{Application, ApplicationFeature, StatelessApplication};
use crate::modules::monitoring::application_monitoring::application_monitoring_score::ApplicationMonitoringScore;
use crate::modules::monitoring::kube_prometheus::crd::crd_alertmanager_config::CRDPrometheus;

use crate::topology::{K8sAnywhereTopology, MultiTargetTopology};
use crate::{
    inventory::Inventory,
    modules::monitoring::{
        alert_channel::webhook_receiver::WebhookReceiver, ntfy::ntfy::NtfyScore,
    },
    score::Score,
    topology::{HelmCommand, K8sclient, Topology, tenant::TenantManager},
};
use crate::{
    modules::prometheus::prometheus::PrometheusApplicationMonitoring,
    topology::oberservability::monitoring::AlertReceiver,
};
use async_trait::async_trait;
use base64::{Engine as _, engine::general_purpose};
use harmony_types::net::Url;
use log::{debug, info};

trait DebugTopology: Topology + std::fmt::Debug {}

#[derive(Debug, Clone)]
pub struct Multisite {
    app: Arc<dyn StatelessApplication>,
    secondary_site: Arc<K8sAnywhereTopology>,
}

#[async_trait]
impl<T: Topology> ApplicationFeature<T> for Multisite {
    async fn ensure_installed(&self, topology: &T) -> Result<(), String> {
        todo!(
            "
            - Find a way to get pvs for this application
            - find the pv csi volumes uuid
            - run rbd mirror image enable --pool mirrored-pool csi-vol-<UUID_PV> snapshot
            - enjoy
            "
        )
    }
    fn name(&self) -> String {
        "Multisite".to_string()
    }
}
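Editor's note: the `todo!` above lists the intended steps for stateless multisite replication. As an illustrative sketch only (not part of this diff), the last step could be driven from Rust by shelling out to the `rbd` CLI named in the comment; the helper name, pool argument, and the assumption that the CSI volume UUIDs have already been discovered are all hypothetical.

```rust
use std::process::Command;

/// Hypothetical helper: enable snapshot-based RBD mirroring for each CSI volume
/// backing this application's PVs, following the command given in the todo above.
fn enable_rbd_mirroring(pool: &str, csi_volume_uuids: &[String]) -> Result<(), String> {
    for uuid in csi_volume_uuids {
        let image = format!("csi-vol-{uuid}");
        let output = Command::new("rbd")
            .args(["mirror", "image", "enable", "--pool", pool, &image, "snapshot"])
            .output()
            .map_err(|e| format!("failed to run rbd for {image}: {e}"))?;
        if !output.status.success() {
            return Err(format!(
                "rbd mirror image enable failed for {image}: {}",
                String::from_utf8_lossy(&output.stderr)
            ));
        }
    }
    Ok(())
}
```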
@@ -6,7 +6,6 @@ use crate::modules::monitoring::application_monitoring::rhobs_application_monito
|
||||
|
||||
use crate::modules::monitoring::kube_prometheus::crd::rhob_alertmanager_config::RHOBObservability;
|
||||
use crate::topology::MultiTargetTopology;
|
||||
use crate::topology::ingress::Ingress;
|
||||
use crate::{
|
||||
inventory::Inventory,
|
||||
modules::monitoring::{
|
||||
@@ -38,7 +37,6 @@ impl<
|
||||
+ TenantManager
|
||||
+ K8sclient
|
||||
+ MultiTargetTopology
|
||||
+ Ingress
|
||||
+ std::fmt::Debug
|
||||
+ PrometheusApplicationMonitoring<RHOBObservability>,
|
||||
> ApplicationFeature<T> for RHOBMonitoring
|
||||
@@ -61,10 +59,7 @@ impl<
|
||||
};
|
||||
let ntfy = NtfyScore {
|
||||
namespace: namespace.clone(),
|
||||
host: topology
|
||||
.get_domain("ntfy")
|
||||
.await
|
||||
.map_err(|e| format!("Could not get domain {e}"))?,
|
||||
host: "ntfy.harmonydemo.apps.ncd0.harmony.mcd".to_string(),
|
||||
};
|
||||
ntfy.interpret(&Inventory::empty(), topology)
|
||||
.await
|
||||
|
||||
@@ -2,6 +2,10 @@ mod feature;
pub mod features;
pub mod oci;
mod rust;
mod stateless;
mod stateful;
pub use stateless::*;
pub use stateful::*;
use std::sync::Arc;

pub use feature::*;
@@ -1,6 +1,7 @@
use super::Application;
use async_trait::async_trait;

use super::Application;

#[async_trait]
pub trait OCICompliant: Application {
    async fn build_push_oci_image(&self) -> Result<String, String>; // TODO consider using oci-spec and friends crates here
@@ -16,10 +17,5 @@ pub trait HelmPackage: Application {
    ///
    /// # Arguments
    /// * `image_url` - The full URL of the OCI container image to be used in the Deployment.
    /// * `domain` - The domain where the application is hosted.
    async fn build_push_helm_package(
        &self,
        image_url: &str,
        domain: &str,
    ) -> Result<String, String>;
    async fn build_push_helm_package(&self, image_url: &str) -> Result<String, String>;
}
@@ -1,4 +1,5 @@
|
||||
use std::fs::{self};
|
||||
use std::fs::{self, File};
|
||||
use std::io::Read;
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::process;
|
||||
use std::sync::Arc;
|
||||
@@ -12,11 +13,12 @@ use dockerfile_builder::instruction_builder::CopyBuilder;
|
||||
use futures_util::StreamExt;
|
||||
use log::{debug, info, log_enabled};
|
||||
use serde::Serialize;
|
||||
use tar::{Builder, Header};
|
||||
use tar::{Archive, Builder, Header};
|
||||
use walkdir::WalkDir;
|
||||
|
||||
use crate::config::{REGISTRY_PROJECT, REGISTRY_URL};
|
||||
use crate::{score::Score, topology::Topology};
|
||||
use harmony_types::net::Url;
|
||||
|
||||
use super::{Application, ApplicationFeature, ApplicationInterpret, HelmPackage, OCICompliant};
|
||||
|
||||
@@ -56,6 +58,7 @@ pub enum RustWebFramework {
#[derive(Debug, Clone, Serialize)]
pub struct RustWebapp {
    pub name: String,
    pub domain: Url,
    /// The path to the root of the Rust project to be containerized.
    pub project_root: PathBuf,
    pub service_port: u32,
@@ -70,17 +73,12 @@ impl Application for RustWebapp {
|
||||
|
||||
#[async_trait]
|
||||
impl HelmPackage for RustWebapp {
|
||||
async fn build_push_helm_package(
|
||||
&self,
|
||||
image_url: &str,
|
||||
domain: &str,
|
||||
) -> Result<String, String> {
|
||||
async fn build_push_helm_package(&self, image_url: &str) -> Result<String, String> {
|
||||
info!("Starting Helm chart build and push for '{}'", self.name);
|
||||
|
||||
// 1. Create the Helm chart files on disk.
|
||||
let chart_dir = self
|
||||
.create_helm_chart_files(image_url, domain)
|
||||
.await
|
||||
.create_helm_chart_files(image_url)
|
||||
.map_err(|e| format!("Failed to create Helm chart files: {}", e))?;
|
||||
info!("Successfully created Helm chart files in {:?}", chart_dir);
|
||||
|
||||
@@ -208,7 +206,7 @@ impl RustWebapp {
        }
    }

    ///normalizes timestamp and ignores files that will bust the docker cach
    ///normalizes timestamp and ignores files that will bust the docker cache
    async fn create_deterministic_tar(
        &self,
        project_root: &std::path::Path,
@@ -222,7 +220,6 @@ impl RustWebapp {
            ".git",
            ".github",
            ".harmony_generated",
            "harmony",
            "node_modules",
        ];
        let mut entries: Vec<_> = WalkDir::new(project_root)
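Editor's note: `create_deterministic_tar` exists so that the build-context archive has identical bytes across runs, keeping Docker's layer cache warm. A minimal sketch of the metadata normalization this implies, using the `tar` crate already imported in this file; the helper name and signature are illustrative, not the actual implementation in the diff.

```rust
use tar::{Builder, Header};

/// Illustrative only: append a file to a tar archive with normalized metadata
/// (fixed timestamp, ownership, and mode) so the archive is reproducible.
fn append_normalized(
    builder: &mut Builder<Vec<u8>>,
    path: &str,
    data: &[u8],
) -> std::io::Result<()> {
    let mut header = Header::new_gnu();
    header.set_size(data.len() as u64);
    header.set_mode(0o644);
    // The usual sources of non-determinism: mtime and uid/gid.
    header.set_mtime(0);
    header.set_uid(0);
    header.set_gid(0);
    header.set_cksum();
    builder.append_data(&mut header, path, data)
}
```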
@@ -268,6 +265,8 @@ impl RustWebapp {
|
||||
|
||||
let docker = Docker::connect_with_socket_defaults().unwrap();
|
||||
|
||||
// let push_options = PushImageOptionsBuilder::new().tag(tag);
|
||||
|
||||
let mut push_image_stream = docker.push_image(
|
||||
image_tag,
|
||||
Some(PushImageOptionsBuilder::new().build()),
|
||||
@@ -275,8 +274,6 @@ impl RustWebapp {
|
||||
);
|
||||
|
||||
while let Some(msg) = push_image_stream.next().await {
|
||||
// let msg = msg?;
|
||||
// TODO this fails silently, for some reason bollard cannot push to hub.nationtech.io
|
||||
debug!("Message: {msg:?}");
|
||||
}
|
||||
|
||||
@@ -411,10 +408,9 @@ impl RustWebapp {
|
||||
}
|
||||
|
||||
/// Creates all necessary files for a basic Helm chart.
|
||||
async fn create_helm_chart_files(
|
||||
fn create_helm_chart_files(
|
||||
&self,
|
||||
image_url: &str,
|
||||
domain: &str,
|
||||
) -> Result<PathBuf, Box<dyn std::error::Error>> {
|
||||
let chart_name = format!("{}-chart", self.name);
|
||||
let chart_dir = self
|
||||
@@ -464,15 +460,21 @@ ingress:
|
||||
enabled: true
|
||||
# Annotations for cert-manager to handle SSL.
|
||||
annotations:
|
||||
cert-manager.io/cluster-issuer: "letsencrypt-prod"
|
||||
# Add other annotations like nginx ingress class if needed
|
||||
# kubernetes.io/ingress.class: nginx
|
||||
hosts:
|
||||
- host: {}
|
||||
- host: chart-example.local
|
||||
paths:
|
||||
- path: /
|
||||
pathType: ImplementationSpecific
|
||||
tls:
|
||||
- secretName: {}-tls
|
||||
hosts:
|
||||
- chart-example.local
|
||||
|
||||
"#,
|
||||
chart_name, image_repo, image_tag, self.service_port, domain,
|
||||
chart_name, image_repo, image_tag, self.service_port, self.name
|
||||
);
|
||||
fs::write(chart_dir.join("values.yaml"), values_yaml)?;
|
||||
|
||||
|
||||
6 harmony/src/modules/application/stateful.rs (new file)
@@ -0,0 +1,6 @@
use crate::modules::application::Application;

/// A StatefulApplication is an application bundle that writes persistent data.
///
/// This will enable backup features, stateful multisite replication, etc.
pub trait StatefulApplication: Application {}
26 harmony/src/modules/application/stateless.rs (new file)
@@ -0,0 +1,26 @@
use crate::modules::application::{Application, features::ContinuousDeliveryApplication};

/// Marker trait for stateless application that can be deployed anywhere without worrying about
/// data.
///
/// This includes Applications fitting these categories :
///
/// - Application with all files built into the docker image and never written to, can be mounted
///   read-only
/// - Application writing to hard drive on ephemeral volume that can be lost at anytime and does
///   not require any replication/backup logic to operate
///     - Not supported : an application that writes state to a volume that must be shared or kept
///       to maintain a quorum across various instances
/// - Application connecting to a database/datastore accessible from anywhere such as
///     - Public bucket endpoint
///     - Publicly accessible
/// - Application connecting to a private database external to this application, accessible from the
///   deployment target
///     - Ensuring the private database is reachable is out of scope of this trait (for now)
///
/// The entire application definition **must not** require any persistent volume or include a
/// deployment component depending on persistent data such as a transitive PostgreSQL helm chart.
///
/// Typically, applications that can be autoscaled without additional complexity fit the
/// StatelessApplication requirements.
pub trait StatelessApplication: Application + ContinuousDeliveryApplication {}
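Editor's note: a short illustrative sketch, not part of this diff, of how code can rely on this marker trait. Bounding on `StatelessApplication` is what lets a feature such as `Multisite` above assume there is no persistent data to migrate; the function below and its body are hypothetical.

```rust
use std::sync::Arc;

use crate::modules::application::StatelessApplication;

/// Illustrative only: a feature that wants the "safe to replicate anywhere"
/// guarantee can simply take the marker trait as its bound.
fn replicate_anywhere(app: Arc<dyn StatelessApplication>) {
    // No data migration logic needed here: by contract the app carries no persistent state.
    println!("replicating {} to the secondary site", app.name());
}
```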
@@ -153,10 +153,6 @@ impl<T: Topology + HelmCommand> Interpret<T> for HelmChartInterpret {
|
||||
let yaml_path: Option<&Path> = match self.score.values_yaml.as_ref() {
|
||||
Some(yaml_str) => {
|
||||
tf = temp_file::with_contents(yaml_str.as_bytes());
|
||||
debug!(
|
||||
"values yaml string for chart {} :\n {yaml_str}",
|
||||
self.score.chart_name
|
||||
);
|
||||
Some(tf.path())
|
||||
}
|
||||
None => None,
|
||||
|
||||
@@ -40,7 +40,6 @@ pub struct K8sIngressScore {
|
||||
pub path: Option<IngressPath>,
|
||||
pub path_type: Option<PathType>,
|
||||
pub namespace: Option<fqdn::FQDN>,
|
||||
pub ingress_class_name: Option<String>,
|
||||
}
|
||||
|
||||
impl<T: Topology + K8sclient> Score<T> for K8sIngressScore {
|
||||
@@ -55,18 +54,12 @@ impl<T: Topology + K8sclient> Score<T> for K8sIngressScore {
|
||||
None => PathType::Prefix,
|
||||
};
|
||||
|
||||
let ingress_class = match self.ingress_class_name.clone() {
|
||||
Some(ingress_class_name) => ingress_class_name,
|
||||
None => format!("\"default\""),
|
||||
};
|
||||
|
||||
let ingress = json!(
|
||||
{
|
||||
"metadata": {
|
||||
"name": self.name.to_string(),
|
||||
},
|
||||
"spec": {
|
||||
"ingressClassName": ingress_class.as_str(),
|
||||
"rules": [
|
||||
{ "host": self.host.to_string(),
|
||||
"http": {
|
||||
|
||||
@@ -147,7 +147,6 @@ impl<T: Topology + K8sclient + HelmCommand> Interpret<T> for LAMPInterpret {
|
||||
port: 8080,
|
||||
path: Some(ingress_path),
|
||||
path_type: None,
|
||||
ingress_class_name: None,
|
||||
namespace: self
|
||||
.get_namespace()
|
||||
.map(|nbs| fqdn!(nbs.to_string().as_str())),
|
||||
|
||||
@@ -4,9 +4,7 @@ use kube::CustomResource;
|
||||
use schemars::JsonSchema;
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use crate::modules::monitoring::kube_prometheus::crd::rhob_prometheuses::{
|
||||
LabelSelector, PrometheusSpec,
|
||||
};
|
||||
use crate::modules::monitoring::kube_prometheus::crd::rhob_prometheuses::LabelSelector;
|
||||
|
||||
/// MonitoringStack CRD for monitoring.rhobs/v1alpha1
|
||||
#[derive(CustomResource, Serialize, Deserialize, Debug, Clone, JsonSchema)]
|
||||
|
||||
@@ -45,12 +45,6 @@ service:
|
||||
|
||||
ingress:
|
||||
enabled: {ingress_enabled}
|
||||
hosts:
|
||||
- host: {host}
|
||||
paths:
|
||||
- path: /
|
||||
pathType: ImplementationSpecific
|
||||
|
||||
|
||||
route:
|
||||
enabled: {route_enabled}
|
||||
|
||||
@@ -21,8 +21,8 @@ pub fn pod_failed() -> PrometheusAlertRule {
pub fn alert_container_restarting() -> PrometheusAlertRule {
    PrometheusAlertRule {
        alert: "ContainerRestarting".into(),
        expr: "increase(kube_pod_container_status_restarts_total[30s]) > 3".into(),
        r#for: Some("30s".into()),
        expr: "increase(kube_pod_container_status_restarts_total[5m]) > 3".into(),
        r#for: Some("5m".into()),
        labels: HashMap::from([("severity".into(), "warning".into())]),
        annotations: HashMap::from([
            (
@@ -1,4 +1,3 @@
|
||||
use fqdn::fqdn;
|
||||
use std::fs;
|
||||
use std::{collections::BTreeMap, sync::Arc};
|
||||
use tempfile::tempdir;
|
||||
@@ -9,7 +8,6 @@ use log::{debug, info};
|
||||
use serde::Serialize;
|
||||
use std::process::Command;
|
||||
|
||||
use crate::modules::k8s::ingress::{K8sIngressScore, PathType};
|
||||
use crate::modules::monitoring::kube_prometheus::crd::grafana_default_dashboard::build_default_dashboard;
|
||||
use crate::modules::monitoring::kube_prometheus::crd::rhob_alertmanager_config::RHOBObservability;
|
||||
use crate::modules::monitoring::kube_prometheus::crd::rhob_alertmanagers::{
|
||||
@@ -25,18 +23,12 @@ use crate::modules::monitoring::kube_prometheus::crd::rhob_monitoring_stack::{
|
||||
use crate::modules::monitoring::kube_prometheus::crd::rhob_prometheus_rules::{
|
||||
PrometheusRule, PrometheusRuleSpec, RuleGroup,
|
||||
};
|
||||
use crate::modules::monitoring::kube_prometheus::crd::rhob_prometheuses::{
|
||||
AlertmanagerEndpoints, LabelSelector, PrometheusSpec, PrometheusSpecAlerting,
|
||||
};
|
||||
use crate::modules::monitoring::kube_prometheus::crd::rhob_prometheuses::LabelSelector;
|
||||
|
||||
use crate::modules::monitoring::kube_prometheus::crd::rhob_role::{
|
||||
build_prom_role, build_prom_rolebinding, build_prom_service_account,
|
||||
};
|
||||
use crate::modules::monitoring::kube_prometheus::crd::rhob_service_monitor::{
|
||||
ServiceMonitor, ServiceMonitorSpec,
|
||||
};
|
||||
use crate::score::Score;
|
||||
use crate::topology::ingress::Ingress;
|
||||
use crate::topology::oberservability::monitoring::AlertReceiver;
|
||||
use crate::topology::{K8sclient, Topology, k8s::K8sClient};
|
||||
use crate::{
|
||||
@@ -56,8 +48,8 @@ pub struct RHOBAlertingScore {
|
||||
pub prometheus_rules: Vec<RuleGroup>,
|
||||
}
|
||||
|
||||
impl<T: Topology + K8sclient + Ingress + PrometheusApplicationMonitoring<RHOBObservability>>
|
||||
Score<T> for RHOBAlertingScore
|
||||
impl<T: Topology + K8sclient + PrometheusApplicationMonitoring<RHOBObservability>> Score<T>
|
||||
for RHOBAlertingScore
|
||||
{
|
||||
fn create_interpret(&self) -> Box<dyn crate::interpret::Interpret<T>> {
|
||||
Box::new(RHOBAlertingInterpret {
|
||||
@@ -82,20 +74,19 @@ pub struct RHOBAlertingInterpret {
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl<T: Topology + K8sclient + Ingress + PrometheusApplicationMonitoring<RHOBObservability>>
|
||||
Interpret<T> for RHOBAlertingInterpret
|
||||
impl<T: Topology + K8sclient + PrometheusApplicationMonitoring<RHOBObservability>> Interpret<T>
|
||||
for RHOBAlertingInterpret
|
||||
{
|
||||
async fn execute(
|
||||
&self,
|
||||
inventory: &Inventory,
|
||||
_inventory: &Inventory,
|
||||
topology: &T,
|
||||
) -> Result<Outcome, InterpretError> {
|
||||
let client = topology.k8s_client().await.unwrap();
|
||||
self.ensure_grafana_operator().await?;
|
||||
self.install_prometheus(inventory, topology, &client)
|
||||
.await?;
|
||||
self.install_prometheus(&client).await?;
|
||||
self.install_client_kube_metrics().await?;
|
||||
self.install_grafana(inventory, topology, &client).await?;
|
||||
self.install_grafana(&client).await?;
|
||||
self.install_receivers(&self.sender, &self.receivers)
|
||||
.await?;
|
||||
self.install_rules(&self.prometheus_rules, &client).await?;
|
||||
@@ -221,8 +212,7 @@ impl RHOBAlertingInterpret {
|
||||
|
||||
let output = Command::new("helm")
|
||||
.args([
|
||||
"upgrade",
|
||||
"--install",
|
||||
"install",
|
||||
"grafana-operator",
|
||||
"grafana-operator/grafana-operator",
|
||||
"--namespace",
|
||||
@@ -236,7 +226,7 @@ impl RHOBAlertingInterpret {
|
||||
|
||||
if !output.status.success() {
|
||||
return Err(InterpretError::new(format!(
|
||||
"helm upgrade --install failed:\nstdout: {}\nstderr: {}",
|
||||
"helm install failed:\nstdout: {}\nstderr: {}",
|
||||
String::from_utf8_lossy(&output.stdout),
|
||||
String::from_utf8_lossy(&output.stderr)
|
||||
)));
|
||||
@@ -248,31 +238,25 @@ impl RHOBAlertingInterpret {
|
||||
)))
|
||||
}
|
||||
|
||||
async fn install_prometheus<T: Topology + K8sclient + Ingress>(
|
||||
&self,
|
||||
inventory: &Inventory,
|
||||
topology: &T,
|
||||
client: &Arc<K8sClient>,
|
||||
) -> Result<Outcome, InterpretError> {
|
||||
async fn install_prometheus(&self, client: &Arc<K8sClient>) -> Result<Outcome, InterpretError> {
|
||||
debug!(
|
||||
"installing crd-prometheuses in namespace {}",
|
||||
self.sender.namespace.clone()
|
||||
);
|
||||
debug!("building role/rolebinding/serviceaccount for crd-prometheus");
|
||||
|
||||
let stack = MonitoringStack {
|
||||
metadata: ObjectMeta {
|
||||
name: Some(format!("{}-monitoring", self.sender.namespace.clone()).into()),
|
||||
namespace: Some(self.sender.namespace.clone()),
|
||||
labels: Some([("monitoring-stack".into(), "true".into())].into()),
|
||||
labels: Some([("coo".into(), "example".into())].into()),
|
||||
..Default::default()
|
||||
},
|
||||
spec: MonitoringStackSpec {
|
||||
log_level: Some("debug".into()),
|
||||
retention: Some("1d".into()),
|
||||
resource_selector: Some(LabelSelector {
|
||||
match_labels: Default::default(),
|
||||
match_expressions: vec![],
|
||||
match_labels: [("app".into(), "demo".into())].into(),
|
||||
..Default::default()
|
||||
}),
|
||||
},
|
||||
};
|
||||
@@ -281,42 +265,6 @@ impl RHOBAlertingInterpret {
|
||||
.apply(&stack, Some(&self.sender.namespace.clone()))
|
||||
.await
|
||||
.map_err(|e| InterpretError::new(e.to_string()))?;
|
||||
|
||||
let alert_manager_domain = topology
|
||||
.get_domain(&format!("alert-manager-{}", self.sender.namespace.clone()))
|
||||
.await?;
|
||||
let name = format!("{}-alert-manager", self.sender.namespace.clone());
|
||||
let backend_service = format!("alertmanager-operated");
|
||||
let namespace = self.sender.namespace.clone();
|
||||
let alert_manager_ingress = K8sIngressScore {
|
||||
name: fqdn!(&name),
|
||||
host: fqdn!(&alert_manager_domain),
|
||||
backend_service: fqdn!(&backend_service),
|
||||
port: 9093,
|
||||
path: Some("/".to_string()),
|
||||
path_type: Some(PathType::Prefix),
|
||||
namespace: Some(fqdn!(&namespace)),
|
||||
ingress_class_name: Some("openshift-default".to_string()),
|
||||
};
|
||||
|
||||
let prometheus_domain = topology
|
||||
.get_domain(&format!("prometheus-{}", self.sender.namespace.clone()))
|
||||
.await?;
|
||||
let name = format!("{}-prometheus", self.sender.namespace.clone());
|
||||
let backend_service = format!("prometheus-operated");
|
||||
let prometheus_ingress = K8sIngressScore {
|
||||
name: fqdn!(&name),
|
||||
host: fqdn!(&prometheus_domain),
|
||||
backend_service: fqdn!(&backend_service),
|
||||
port: 9090,
|
||||
path: Some("/".to_string()),
|
||||
path_type: Some(PathType::Prefix),
|
||||
namespace: Some(fqdn!(&namespace)),
|
||||
ingress_class_name: Some("openshift-default".to_string()),
|
||||
};
|
||||
|
||||
alert_manager_ingress.interpret(inventory, topology).await?;
|
||||
prometheus_ingress.interpret(inventory, topology).await?;
|
||||
info!("installed rhob monitoring stack",);
|
||||
Ok(Outcome::success(format!(
|
||||
"successfully deployed rhob-prometheus {:#?}",
|
||||
@@ -324,6 +272,31 @@ impl RHOBAlertingInterpret {
|
||||
)))
|
||||
}
|
||||
|
||||
async fn install_alert_manager(
|
||||
&self,
|
||||
client: &Arc<K8sClient>,
|
||||
) -> Result<Outcome, InterpretError> {
|
||||
let am = Alertmanager {
|
||||
metadata: ObjectMeta {
|
||||
name: Some(self.sender.namespace.clone()),
|
||||
labels: Some(std::collections::BTreeMap::from([(
|
||||
"alertmanagerConfig".to_string(),
|
||||
"enabled".to_string(),
|
||||
)])),
|
||||
namespace: Some(self.sender.namespace.clone()),
|
||||
..Default::default()
|
||||
},
|
||||
spec: AlertmanagerSpec::default(),
|
||||
};
|
||||
client
|
||||
.apply(&am, Some(&self.sender.namespace.clone()))
|
||||
.await
|
||||
.map_err(|e| InterpretError::new(e.to_string()))?;
|
||||
Ok(Outcome::success(format!(
|
||||
"successfully deployed service monitor {:#?}",
|
||||
am.metadata.name
|
||||
)))
|
||||
}
|
||||
async fn install_monitors(
|
||||
&self,
|
||||
mut monitors: Vec<ServiceMonitor>,
|
||||
@@ -406,12 +379,7 @@ impl RHOBAlertingInterpret {
|
||||
)))
|
||||
}
|
||||
|
||||
async fn install_grafana<T: Topology + K8sclient + Ingress>(
|
||||
&self,
|
||||
inventory: &Inventory,
|
||||
topology: &T,
|
||||
client: &Arc<K8sClient>,
|
||||
) -> Result<Outcome, InterpretError> {
|
||||
async fn install_grafana(&self, client: &Arc<K8sClient>) -> Result<Outcome, InterpretError> {
|
||||
let mut label = BTreeMap::new();
|
||||
label.insert("dashboards".to_string(), "grafana".to_string());
|
||||
let labels = LabelSelector {
|
||||
@@ -497,23 +465,6 @@ impl RHOBAlertingInterpret {
|
||||
.apply(&grafana, Some(&self.sender.namespace.clone()))
|
||||
.await
|
||||
.map_err(|e| InterpretError::new(e.to_string()))?;
|
||||
let domain = topology
|
||||
.get_domain(&format!("grafana-{}", self.sender.namespace.clone()))
|
||||
.await?;
|
||||
let name = format!("{}-grafana", self.sender.namespace.clone());
|
||||
let backend_service = format!("grafana-{}-service", self.sender.namespace.clone());
|
||||
let grafana_ingress = K8sIngressScore {
|
||||
name: fqdn!(&name),
|
||||
host: fqdn!(&domain),
|
||||
backend_service: fqdn!(&backend_service),
|
||||
port: 3000,
|
||||
path: Some("/".to_string()),
|
||||
path_type: Some(PathType::Prefix),
|
||||
namespace: Some(fqdn!(&namespace)),
|
||||
ingress_class_name: Some("openshift-default".to_string()),
|
||||
};
|
||||
|
||||
grafana_ingress.interpret(inventory, topology).await?;
|
||||
Ok(Outcome::success(format!(
|
||||
"successfully deployed grafana instance {:#?}",
|
||||
grafana.metadata.name
|
||||
|
||||
@@ -120,26 +120,10 @@ impl SecretManager {
|
||||
|
||||
let ns = &manager.namespace;
|
||||
let key = T::KEY;
|
||||
let secret_json = inquire::Editor::new(&format!(
|
||||
"Secret not found for {ns} {key}, paste the JSON here :",
|
||||
let secret_json = inquire::Text::new(&format!(
|
||||
"Secret not found for {} {}, paste the JSON here :",
|
||||
ns, key
|
||||
))
|
||||
.with_formatter(&|data| {
|
||||
let char_count = data.chars().count();
|
||||
if char_count == 0 {
|
||||
String::from("<skipped>")
|
||||
} else if char_count <= 20 {
|
||||
data.into()
|
||||
} else {
|
||||
let mut substr: String = data.chars().take(17).collect();
|
||||
substr.push_str("...");
|
||||
substr
|
||||
}
|
||||
})
|
||||
.with_render_config(
|
||||
inquire::ui::RenderConfig::default().with_canceled_prompt_indicator(
|
||||
inquire::ui::Styled::new("<skipped>").with_fg(inquire::ui::Color::DarkYellow),
|
||||
),
|
||||
)
|
||||
.prompt()
|
||||
.map_err(|e| {
|
||||
SecretStoreError::Store(format!("Failed to prompt secret {ns} {key} : {e}").into())
|
||||
|
||||
@@ -2,8 +2,8 @@ mod downloadable_asset;
|
||||
use downloadable_asset::*;
|
||||
|
||||
use kube::Client;
|
||||
use log::{debug, info};
|
||||
use std::{ffi::OsStr, path::PathBuf};
|
||||
use log::debug;
|
||||
use std::path::PathBuf;
|
||||
|
||||
const K3D_BIN_FILE_NAME: &str = "k3d";
|
||||
|
||||
@@ -213,19 +213,15 @@ impl K3d {
|
||||
}
|
||||
}
|
||||
|
||||
let client;
|
||||
if !self.is_cluster_initialized() {
|
||||
debug!("Cluster is not initialized, initializing now");
|
||||
client = self.initialize_cluster().await?;
|
||||
} else {
|
||||
self.start_cluster().await?;
|
||||
|
||||
debug!("K3d and cluster are already properly set up");
|
||||
client = self.create_kubernetes_client().await?;
|
||||
return self.initialize_cluster().await;
|
||||
}
|
||||
|
||||
self.ensure_k3d_config_is_default(self.get_cluster_name()?)?;
|
||||
Ok(client)
|
||||
self.start_cluster().await?;
|
||||
|
||||
debug!("K3d and cluster are already properly set up");
|
||||
self.create_kubernetes_client().await
|
||||
}
|
||||
|
||||
// Private helper methods
|
||||
@@ -306,16 +302,7 @@ impl K3d {
|
||||
S: AsRef<std::ffi::OsStr>,
|
||||
{
|
||||
let binary_path = self.get_k3d_binary()?;
|
||||
self.run_command(binary_path, args)
|
||||
}
|
||||
|
||||
pub fn run_command<I, S, C>(&self, cmd: C, args: I) -> Result<std::process::Output, String>
|
||||
where
|
||||
I: IntoIterator<Item = S>,
|
||||
S: AsRef<std::ffi::OsStr>,
|
||||
C: AsRef<OsStr>,
|
||||
{
|
||||
let output = std::process::Command::new(cmd).args(args).output();
|
||||
let output = std::process::Command::new(binary_path).args(args).output();
|
||||
match output {
|
||||
Ok(output) => {
|
||||
let stderr = String::from_utf8_lossy(&output.stderr);
|
||||
@@ -324,7 +311,7 @@ impl K3d {
|
||||
debug!("stdout : {}", stdout);
|
||||
Ok(output)
|
||||
}
|
||||
Err(e) => Err(format!("Failed to execute command: {}", e)),
|
||||
Err(e) => Err(format!("Failed to execute k3d command: {}", e)),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -336,38 +323,12 @@ impl K3d {
|
||||
return Err(format!("Failed to create cluster: {}", stderr));
|
||||
}
|
||||
|
||||
info!("Successfully created k3d cluster '{}'", cluster_name);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn ensure_k3d_config_is_default(&self, cluster_name: &str) -> Result<(), String> {
|
||||
let output = self.run_k3d_command(["kubeconfig", "merge", "-d", cluster_name])?;
|
||||
|
||||
if !output.status.success() {
|
||||
let stderr = String::from_utf8_lossy(&output.stderr);
|
||||
return Err(format!("Failed to setup k3d kubeconfig : {}", stderr));
|
||||
}
|
||||
|
||||
let output = self.run_command(
|
||||
"kubectl",
|
||||
["config", "use-context", &format!("k3d-{cluster_name}")],
|
||||
)?;
|
||||
|
||||
if !output.status.success() {
|
||||
let stderr = String::from_utf8_lossy(&output.stderr);
|
||||
return Err(format!(
|
||||
"Failed to switch kubectl context to k3d : {}",
|
||||
stderr
|
||||
));
|
||||
}
|
||||
info!(
|
||||
"kubectl is now using 'k3d-{}' as default context",
|
||||
cluster_name
|
||||
);
|
||||
debug!("Successfully created k3d cluster '{}'", cluster_name);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn create_kubernetes_client(&self) -> Result<Client, String> {
|
||||
// TODO: Connect the client to the right k3d cluster (see https://git.nationtech.io/NationTech/harmony/issues/92)
|
||||
Client::try_default()
|
||||
.await
|
||||
.map_err(|e| format!("Failed to create Kubernetes client: {}", e))
|
||||
|
||||