fix/ingress #145
@@ -27,7 +27,6 @@ async fn main() {
     };

     let application = Arc::new(RustWebapp {
         name: "example-monitoring".to_string(),
-        domain: Url::Url(url::Url::parse("https://rustapp.harmony.example.com").unwrap()),
         project_root: PathBuf::from("./examples/rust/webapp"),
         framework: Some(RustWebFramework::Leptos),
         service_port: 3000,
@@ -17,7 +17,6 @@ use harmony_types::net::Url;
 async fn main() {
     let application = Arc::new(RustWebapp {
         name: "test-rhob-monitoring".to_string(),
-        domain: Url::Url(url::Url::parse("htps://some-fake-url").unwrap()),
         project_root: PathBuf::from("./webapp"), // Relative from 'harmony-path' param
         framework: Some(RustWebFramework::Leptos),
         service_port: 3000,
@@ -19,7 +19,6 @@ use harmony_macros::hurl;
 async fn main() {
     let application = Arc::new(RustWebapp {
         name: "harmony-example-rust-webapp".to_string(),
-        domain: hurl!("https://rustapp.harmony.example.com"),
         project_root: PathBuf::from("./webapp"),
         framework: Some(RustWebFramework::Leptos),
         service_port: 3000,
@@ -1,23 +1,21 @@
-use std::{path::PathBuf, sync::Arc};
-
 use harmony::{
     inventory::Inventory,
     modules::{
         application::{
             ApplicationScore, RustWebFramework, RustWebapp,
-            features::{ContinuousDelivery, Monitoring},
+            features::{ContinuousDelivery, Monitoring, rhob_monitoring::RHOBMonitoring},
         },
         monitoring::alert_channel::discord_alert_channel::DiscordWebhook,
     },
     topology::K8sAnywhereTopology,
 };
-use harmony_types::net::Url;
+use harmony_macros::hurl;
+use std::{path::PathBuf, sync::Arc};

 #[tokio::main]
 async fn main() {
     let application = Arc::new(RustWebapp {
         name: "harmony-example-tryrust".to_string(),
-        domain: Url::Url(url::Url::parse("https://tryrust.harmony.example.com").unwrap()),
         project_root: PathBuf::from("./tryrust.org"),
         framework: Some(RustWebFramework::Leptos),
         service_port: 8080,
@@ -25,7 +23,7 @@ async fn main() {

     let discord_receiver = DiscordWebhook {
         name: "test-discord".to_string(),
-        url: Url::Url(url::Url::parse("https://discord.doesnt.exist.com").unwrap()),
+        url: hurl!("https://discord.doesnt.exist.com"),
     };

     let app = ApplicationScore {
@@ -33,7 +31,7 @@ async fn main() {
             Box::new(ContinuousDelivery {
                 application: application.clone(),
             }),
-            Box::new(Monitoring {
+            Box::new(RHOBMonitoring {
                 application: application.clone(),
                 alert_receiver: vec![Box::new(discord_receiver)],
             }),
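The `url:` change above swaps the manual parse-and-wrap dance for the `hurl!` macro from `harmony_macros`. A minimal sketch of the presumed equivalence (assumption: `hurl!` validates the literal and yields a `harmony_types::net::Url`, which is why the `unwrap()` disappears from user code):

    use harmony_macros::hurl;
    use harmony_types::net::Url;

    // Before: runtime parse that can panic on a malformed literal.
    let verbose: Url = Url::Url(url::Url::parse("https://discord.doesnt.exist.com").unwrap());

    // After: the macro form used throughout this PR.
    let concise: Url = hurl!("https://discord.doesnt.exist.com");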
@@ -10,7 +10,11 @@ testing = []

 [dependencies]
 hex = "0.4"
-reqwest = { version = "0.11", features = ["blocking", "json", "rustls-tls"], default-features = false }
+reqwest = { version = "0.11", features = [
+    "blocking",
+    "json",
+    "rustls-tls",
+], default-features = false }
 russh = "0.45.0"
 rust-ipmi = "0.1.1"
 semver = "1.0.23"
harmony/src/domain/topology/ingress.rs (new file, +7)
@@ -0,0 +1,7 @@
+use crate::topology::PreparationError;
+use async_trait::async_trait;
+
+#[async_trait]
+pub trait Ingress {
+    async fn get_domain(&self, service: String) -> Result<String, PreparationError>;
+}
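The new trait is small enough that alternative topologies can implement it directly. A hypothetical sketch (not part of this PR) of a fixed-domain implementation, e.g. for tests:

    use async_trait::async_trait;

    use crate::topology::PreparationError;
    use crate::topology::ingress::Ingress;

    /// Hypothetical test topology that derives every service host from one
    /// preconfigured base domain.
    struct FixedDomainTopology {
        base_domain: String, // e.g. "apps.example.com" (illustrative value)
    }

    #[async_trait]
    impl Ingress for FixedDomainTopology {
        async fn get_domain(&self, service: String) -> Result<String, PreparationError> {
            // Mirrors the `{service}.{domain}` shape the K8sAnywhereTopology
            // implementation below returns for OpenShift clusters.
            Ok(format!("{service}.{}", self.base_domain))
        }
    }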
@@ -1,6 +1,7 @@
 use std::{process::Command, sync::Arc};

 use async_trait::async_trait;
+use kube::api::GroupVersionKind;
 use log::{debug, info, warn};
 use serde::Serialize;
 use tokio::sync::OnceCell;
@@ -22,6 +23,7 @@ use crate::{
         },
     },
     score::Score,
+    topology::ingress::Ingress,
 };

 use super::{
@@ -198,6 +200,26 @@ impl K8sAnywhereTopology {
         }
     }

+    async fn openshift_ingress_operator_available(&self) -> Result<(), PreparationError> {
+        let client = self.k8s_client().await?;
+        let gvk = GroupVersionKind {
+            group: "operator.openshift.io".into(),
+            version: "v1".into(),
+            kind: "IngressController".into(),
+        };
+        let ic = client
+            .get_resource_json_value("default", Some("openshift-ingress-operator"), &gvk)
+            .await?;
+        let ready_replicas = ic.data["status"]["availableReplicas"].as_i64().unwrap_or(0);
+        if ready_replicas >= 1 {
+            return Ok(());
+        } else {
+            return Err(PreparationError::new(
+                "openshift-ingress-operator not available".to_string(),
+            ));
+        }
+    }
+
     fn is_helm_available(&self) -> Result<(), String> {
         let version_result = Command::new("helm")
             .arg("version")
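The closing review note asks for a check of the ClusterOperator CRD status rather than replica counts alone. A minimal sketch of what that could look like, assuming the same `get_resource_json_value(name, namespace, gvk)` helper used above (ClusterOperator is cluster-scoped, and OpenShift's ingress operator reports an `Available` condition):

    async fn openshift_ingress_cluster_operator_available(&self) -> Result<(), PreparationError> {
        let client = self.k8s_client().await?;
        let gvk = GroupVersionKind {
            group: "config.openshift.io".into(),
            version: "v1".into(),
            kind: "ClusterOperator".into(),
        };
        // ClusterOperator is cluster-scoped, hence no namespace argument.
        let co = client
            .get_resource_json_value("ingress", None, &gvk)
            .await
            .map_err(|_| {
                PreparationError::new("Failed to fetch ClusterOperator 'ingress'".to_string())
            })?;

        // A healthy operator reports a condition of type "Available" with status "True".
        let available = co.data["status"]["conditions"]
            .as_array()
            .map(|conditions| {
                conditions.iter().any(|c| {
                    c["type"].as_str() == Some("Available") && c["status"].as_str() == Some("True")
                })
            })
            .unwrap_or(false);

        if available {
            Ok(())
        } else {
            Err(PreparationError::new(
                "ClusterOperator 'ingress' is not Available".to_string(),
            ))
        }
    }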
@@ -550,3 +572,45 @@ impl TenantManager for K8sAnywhereTopology {
         .await
     }
 }
+
+#[async_trait]
+impl Ingress for K8sAnywhereTopology {
+    //TODO this is specifically for openshift/okd which violates the k8sanywhere idea

johnride commented: Implement correctly various k8s implementations.

+    async fn get_domain(&self, service: String) -> Result<String, PreparationError> {
+        let client = self.k8s_client().await?;
+
+        if let Some(Some(k8s_state)) = self.k8s_state.get() {
+            match k8s_state.source {
+                K8sSource::LocalK3d => Ok("localhost".to_string()),
+                K8sSource::Kubeconfig => {
+                    self.openshift_ingress_operator_available().await?;
+
+                    let gvk = GroupVersionKind {
+                        group: "operator.openshift.io".into(),
+                        version: "v1".into(),
+                        kind: "IngressController".into(),
+                    };
+                    let ic = client
+                        .get_resource_json_value(
+                            "default",
+                            Some("openshift-ingress-operator"),
+                            &gvk,
+                        )
+                        .await
+                        .map_err(|_| {
+                            PreparationError::new("Failed to fetch IngressController".to_string())
+                        })?;
+
+                    match ic.data["status"]["domain"].as_str() {
+                        Some(domain) => Ok(format!("{service}.{domain}")),
+                        None => Err(PreparationError::new("Could not find domain".to_string())),
+                    }
+                }
+            }
+        } else {
+            Err(PreparationError::new(
+                "Cannot get domain: unable to detect K8s state".to_string(),
+            ))
+        }
+    }
+}
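For orientation, a sketch of what callers see from this implementation (the OpenShift host below is illustrative; the real value comes from the IngressController's `status.domain`):

    // Hypothetical call site; `topology` is a K8sAnywhereTopology.
    let host = topology.get_domain("argo".to_string()).await?;
    // LocalK3d   => "localhost"
    // Kubeconfig => "argo.<status.domain>", e.g. "argo.apps.my-cluster.example.com"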
@@ -1,4 +1,5 @@
 mod ha_cluster;
+pub mod ingress;
 use harmony_types::net::IpAddress;
 mod host_binding;
 mod http;
@@ -14,7 +14,9 @@ use crate::{
         features::{ArgoApplication, ArgoHelmScore},
     },
     score::Score,
-    topology::{DeploymentTarget, HelmCommand, K8sclient, MultiTargetTopology, Topology},
+    topology::{
+        DeploymentTarget, HelmCommand, K8sclient, MultiTargetTopology, Topology, ingress::Ingress,
+    },
 };

 /// ContinuousDelivery in Harmony provides this functionality :
@@ -136,18 +138,25 @@ impl<A: OCICompliant + HelmPackage> ContinuousDelivery<A> {
 #[async_trait]
 impl<
     A: OCICompliant + HelmPackage + Clone + 'static,
-    T: Topology + HelmCommand + MultiTargetTopology + K8sclient + 'static,
+    T: Topology + HelmCommand + MultiTargetTopology + K8sclient + Ingress + 'static,
 > ApplicationFeature<T> for ContinuousDelivery<A>
 {
     async fn ensure_installed(&self, topology: &T) -> Result<(), String> {
         let image = self.application.image_name();
+        let domain = topology
+            .get_domain(self.application.name())
+            .await
+            .map_err(|e| e.to_string())?;

         // TODO Write CI/CD workflow files
         // we can autodetect the CI type using the remote url (default to github action for github
         // url, etc..)
         // Or ask for it when unknown

-        let helm_chart = self.application.build_push_helm_package(&image).await?;
+        let helm_chart = self
+            .application
+            .build_push_helm_package(&image, &domain)
+            .await?;

         // TODO: Make building image configurable/skippable if image already exists (prompt)
         // https://git.nationtech.io/NationTech/harmony/issues/104
@@ -13,7 +13,8 @@ use crate::{
     modules::helm::chart::{HelmChartScore, HelmRepository},
     score::Score,
     topology::{
-        HelmCommand, K8sclient, PreparationError, PreparationOutcome, Topology, k8s::K8sClient,
+        HelmCommand, K8sclient, PreparationError, PreparationOutcome, Topology, ingress::Ingress,
+        k8s::K8sClient,
     },
 };
 use harmony_types::id::Id;
@@ -27,7 +28,7 @@ pub struct ArgoHelmScore {
     pub argo_apps: Vec<ArgoApplication>,
 }

-impl<T: Topology + HelmCommand + K8sclient> Score<T> for ArgoHelmScore {
+impl<T: Topology + HelmCommand + K8sclient + Ingress> Score<T> for ArgoHelmScore {
     fn create_interpret(&self) -> Box<dyn crate::interpret::Interpret<T>> {
         Box::new(ArgoInterpret {
             score: self.clone(),
@@ -47,17 +48,14 @@ pub struct ArgoInterpret {
 }

 #[async_trait]
-impl<T: Topology + K8sclient + HelmCommand> Interpret<T> for ArgoInterpret {
+impl<T: Topology + K8sclient + HelmCommand + Ingress> Interpret<T> for ArgoInterpret {
     async fn execute(
         &self,
         inventory: &Inventory,
         topology: &T,
     ) -> Result<Outcome, InterpretError> {
         let k8s_client = topology.k8s_client().await?;
-        let domain = self
-            .get_host_domain(k8s_client.clone(), self.score.openshift)
-            .await?;
-        let domain = format!("argo.{domain}");
+        let domain = topology.get_domain("argo".into()).await?;
         let helm_score =
             argo_helm_chart_score(&self.score.namespace, self.score.openshift, &domain);
@@ -1,10 +1,8 @@
-use std::sync::Arc;
-
 use crate::modules::application::{Application, ApplicationFeature};
 use crate::modules::monitoring::application_monitoring::application_monitoring_score::ApplicationMonitoringScore;
 use crate::modules::monitoring::kube_prometheus::crd::crd_alertmanager_config::CRDPrometheus;
-
 use crate::topology::MultiTargetTopology;
+use crate::topology::ingress::Ingress;
 use crate::{
     inventory::Inventory,
     modules::monitoring::{
@@ -19,8 +17,12 @@ use crate::{
 };
 use async_trait::async_trait;
 use base64::{Engine as _, engine::general_purpose};
+use harmony_secret::SecretManager;
+use harmony_secret_derive::Secret;
+use harmony_types::net::Url;
 use log::{debug, info};
 use serde::{Deserialize, Serialize};
+use std::sync::Arc;

 #[derive(Debug, Clone)]
 pub struct Monitoring {
@@ -36,8 +38,9 @@ impl<
         + TenantManager
         + K8sclient
         + MultiTargetTopology
-        + std::fmt::Debug
-        + PrometheusApplicationMonitoring<CRDPrometheus>,
+        + PrometheusApplicationMonitoring<CRDPrometheus>
+        + Ingress
+        + std::fmt::Debug,
     > ApplicationFeature<T> for Monitoring
 {
     async fn ensure_installed(&self, topology: &T) -> Result<(), String> {
@@ -47,6 +50,7 @@ impl<
             .await
             .map(|ns| ns.name.clone())
             .unwrap_or_else(|| self.application.name());
+        let domain = topology.get_domain("ntfy".into()).await.unwrap();

         let mut alerting_score = ApplicationMonitoringScore {
             sender: CRDPrometheus {
@@ -58,19 +62,17 @@ impl<
         };
         let ntfy = NtfyScore {
             namespace: namespace.clone(),
-            host: "ntfy.harmonydemo.apps.ncd0.harmony.mcd".to_string(),
+            host: domain,
         };
         ntfy.interpret(&Inventory::empty(), topology)
             .await
             .map_err(|e| e.to_string())?;

-        let ntfy_default_auth_username = "harmony";
-        let ntfy_default_auth_password = "harmony";
+        let config = SecretManager::get_or_prompt::<NtfyAuth>().await.unwrap();

         let ntfy_default_auth_header = format!(
             "Basic {}",
-            general_purpose::STANDARD.encode(format!(
-                "{ntfy_default_auth_username}:{ntfy_default_auth_password}"
-            ))
+            general_purpose::STANDARD.encode(format!("{}:{}", config.username, config.password))
         );

         debug!("ntfy_default_auth_header: {ntfy_default_auth_header}");
@@ -100,9 +102,17 @@ impl<
             .interpret(&Inventory::empty(), topology)
             .await
             .map_err(|e| e.to_string())?;

         Ok(())
     }

     fn name(&self) -> String {
         "Monitoring".to_string()
     }
 }
+
+#[derive(Secret, Serialize, Deserialize, Clone, Debug)]
+struct NtfyAuth {
+    username: String,
+    password: String,
+}
@@ -1,6 +1,5 @@
-use async_trait::async_trait;
-
 use super::Application;
+use async_trait::async_trait;

 #[async_trait]
 pub trait OCICompliant: Application {
@@ -17,5 +16,10 @@ pub trait HelmPackage: Application {
     ///
     /// # Arguments
     /// * `image_url` - The full URL of the OCI container image to be used in the Deployment.
-    async fn build_push_helm_package(&self, image_url: &str) -> Result<String, String>;
+    /// * `domain` - The domain where the application is hosted.
+    async fn build_push_helm_package(
+        &self,
+        image_url: &str,
+        domain: &str,
+    ) -> Result<String, String>;
 }
@@ -1,5 +1,4 @@
-use std::fs::{self, File};
-use std::io::Read;
+use std::fs::{self};
 use std::path::{Path, PathBuf};
 use std::process;
 use std::sync::Arc;
@@ -13,12 +12,11 @@ use dockerfile_builder::instruction_builder::CopyBuilder;
 use futures_util::StreamExt;
 use log::{debug, info, log_enabled};
 use serde::Serialize;
-use tar::{Archive, Builder, Header};
+use tar::{Builder, Header};
 use walkdir::WalkDir;

 use crate::config::{REGISTRY_PROJECT, REGISTRY_URL};
 use crate::{score::Score, topology::Topology};
-use harmony_types::net::Url;

 use super::{Application, ApplicationFeature, ApplicationInterpret, HelmPackage, OCICompliant};
@@ -58,7 +56,6 @@ pub enum RustWebFramework {
 #[derive(Debug, Clone, Serialize)]
 pub struct RustWebapp {
     pub name: String,
-    pub domain: Url,
     /// The path to the root of the Rust project to be containerized.
     pub project_root: PathBuf,
     pub service_port: u32,
@@ -73,12 +70,17 @@ impl Application for RustWebapp {

 #[async_trait]
 impl HelmPackage for RustWebapp {
-    async fn build_push_helm_package(&self, image_url: &str) -> Result<String, String> {
+    async fn build_push_helm_package(
+        &self,
+        image_url: &str,
+        domain: &str,
+    ) -> Result<String, String> {
         info!("Starting Helm chart build and push for '{}'", self.name);

         // 1. Create the Helm chart files on disk.
         let chart_dir = self
-            .create_helm_chart_files(image_url)
+            .create_helm_chart_files(image_url, domain)
             .await
             .map_err(|e| format!("Failed to create Helm chart files: {}", e))?;
         info!("Successfully created Helm chart files in {:?}", chart_dir);
@@ -408,9 +410,10 @@ impl RustWebapp {
     }

     /// Creates all necessary files for a basic Helm chart.
-    fn create_helm_chart_files(
+    async fn create_helm_chart_files(
         &self,
         image_url: &str,
+        domain: &str,
     ) -> Result<PathBuf, Box<dyn std::error::Error>> {
         let chart_name = format!("{}-chart", self.name);
         let chart_dir = self
@@ -422,6 +425,7 @@ impl RustWebapp {
         fs::create_dir_all(&templates_dir)?;

         let (image_repo, image_tag) = image_url.rsplit_once(':').unwrap_or((image_url, "latest"));
+        let domain = format!("{}.{domain}", self.name);

         // Create Chart.yaml
         let chart_yaml = format!(
@@ -460,21 +464,15 @@ ingress:
   enabled: true
   # Annotations for cert-manager to handle SSL.
   annotations:
     cert-manager.io/cluster-issuer: "letsencrypt-prod"
     # Add other annotations like nginx ingress class if needed
     # kubernetes.io/ingress.class: nginx
   hosts:
-    - host: chart-example.local
+    - host: {}
       paths:
         - path: /
           pathType: ImplementationSpecific
-  tls:
-    - secretName: {}-tls
-      hosts:
-        - chart-example.local

 "#,
-            chart_name, image_repo, image_tag, self.service_port, self.name
+            chart_name, image_repo, image_tag, self.service_port, domain,
         );
         fs::write(chart_dir.join("values.yaml"), values_yaml)?;
@@ -40,6 +40,7 @@ pub struct K8sIngressScore {
     pub path: Option<IngressPath>,
     pub path_type: Option<PathType>,
     pub namespace: Option<fqdn::FQDN>,
+    pub ingress_class_name: Option<String>,
 }

 impl<T: Topology + K8sclient> Score<T> for K8sIngressScore {
@@ -54,12 +55,18 @@ impl<T: Topology + K8sclient> Score<T> for K8sIngressScore {
             None => PathType::Prefix,
         };

+        let ingress_class = match self.ingress_class_name.clone() {
+            Some(ingress_class_name) => ingress_class_name,
+            None => format!("\"default\""),
+        };
+
         let ingress = json!(
             {
                 "metadata": {
                     "name": self.name.to_string(),
                 },
                 "spec": {
+                    "ingressClassName": ingress_class.as_str(),
                     "rules": [
                         { "host": self.host.to_string(),
                           "http": {
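A hypothetical usage sketch of the extended score (all values are illustrative; the field set matches the constructions in the RHOB hunks further down):

    use fqdn::fqdn;

    let score = K8sIngressScore {
        name: fqdn!("my-app"),
        host: fqdn!("my-app.apps.example.com"),
        backend_service: fqdn!("my-app-service"),
        port: 8080,
        path: None,      // resolved by create_interpret
        path_type: None, // resolved to PathType::Prefix per the match above
        namespace: Some(fqdn!("my-namespace")),
        // New field: pick the cluster's ingress class explicitly instead of
        // falling back to the quoted "default".
        ingress_class_name: Some("openshift-default".to_string()),
    };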
@@ -147,6 +147,7 @@ impl<T: Topology + K8sclient + HelmCommand> Interpret<T> for LAMPInterpret {
             port: 8080,
             path: Some(ingress_path),
             path_type: None,
+            ingress_class_name: None,
             namespace: self
                 .get_namespace()
                 .map(|nbs| fqdn!(nbs.to_string().as_str())),
@@ -1,3 +1,4 @@
+use fqdn::fqdn;
 use std::fs;
 use std::{collections::BTreeMap, sync::Arc};
 use tempfile::tempdir;
@@ -8,6 +9,7 @@ use log::{debug, info};
 use serde::Serialize;
 use std::process::Command;

+use crate::modules::k8s::ingress::{K8sIngressScore, PathType};
 use crate::modules::monitoring::kube_prometheus::crd::grafana_default_dashboard::build_default_dashboard;
 use crate::modules::monitoring::kube_prometheus::crd::rhob_alertmanager_config::RHOBObservability;
 use crate::modules::monitoring::kube_prometheus::crd::rhob_alertmanagers::{
@@ -29,6 +31,7 @@ use crate::modules::monitoring::kube_prometheus::crd::rhob_service_monitor::{
     ServiceMonitor, ServiceMonitorSpec,
 };
 use crate::score::Score;
+use crate::topology::ingress::Ingress;
 use crate::topology::oberservability::monitoring::AlertReceiver;
 use crate::topology::{K8sclient, Topology, k8s::K8sClient};
 use crate::{
@@ -48,8 +51,8 @@ pub struct RHOBAlertingScore {
     pub prometheus_rules: Vec<RuleGroup>,
 }

-impl<T: Topology + K8sclient + PrometheusApplicationMonitoring<RHOBObservability>> Score<T>
-    for RHOBAlertingScore
+impl<T: Topology + K8sclient + Ingress + PrometheusApplicationMonitoring<RHOBObservability>>
+    Score<T> for RHOBAlertingScore
 {
     fn create_interpret(&self) -> Box<dyn crate::interpret::Interpret<T>> {
         Box::new(RHOBAlertingInterpret {
@@ -74,19 +77,20 @@ pub struct RHOBAlertingInterpret {
 }

 #[async_trait]
-impl<T: Topology + K8sclient + PrometheusApplicationMonitoring<RHOBObservability>> Interpret<T>
-    for RHOBAlertingInterpret
+impl<T: Topology + K8sclient + Ingress + PrometheusApplicationMonitoring<RHOBObservability>>
+    Interpret<T> for RHOBAlertingInterpret
 {
     async fn execute(
         &self,
-        _inventory: &Inventory,
+        inventory: &Inventory,
         topology: &T,
     ) -> Result<Outcome, InterpretError> {
         let client = topology.k8s_client().await.unwrap();
         self.ensure_grafana_operator().await?;
-        self.install_prometheus(&client).await?;
+        self.install_prometheus(inventory, topology, &client)
+            .await?;
         self.install_client_kube_metrics().await?;
-        self.install_grafana(&client).await?;
+        self.install_grafana(inventory, topology, &client).await?;
         self.install_receivers(&self.sender, &self.receivers)
             .await?;
         self.install_rules(&self.prometheus_rules, &client).await?;
@@ -238,7 +242,12 @@ impl RHOBAlertingInterpret {
         )))
     }

-    async fn install_prometheus(&self, client: &Arc<K8sClient>) -> Result<Outcome, InterpretError> {
+    async fn install_prometheus<T: Topology + K8sclient + Ingress>(
+        &self,
+        inventory: &Inventory,
+        topology: &T,
+        client: &Arc<K8sClient>,
+    ) -> Result<Outcome, InterpretError> {
         debug!(
             "installing crd-prometheuses in namespace {}",
             self.sender.namespace.clone()
@@ -265,6 +274,42 @@ impl RHOBAlertingInterpret {
             .apply(&stack, Some(&self.sender.namespace.clone()))
             .await
             .map_err(|e| InterpretError::new(e.to_string()))?;
+
+        let alert_manager_domain = topology
+            .get_domain(format!("alert-manager-{}", self.sender.namespace.clone()))
+            .await?;
+        let name = format!("{}-alert-manager", self.sender.namespace.clone());
+        let backend_service = format!("alertmanager-operated");
+        let namespace = self.sender.namespace.clone();
+        let alert_manager_ingress = K8sIngressScore {
+            name: fqdn!(&name),
+            host: fqdn!(&alert_manager_domain),
+            backend_service: fqdn!(&backend_service),
+            port: 9093,
+            path: Some("/".to_string()),
+            path_type: Some(PathType::Prefix),
+            namespace: Some(fqdn!(&namespace)),
+            ingress_class_name: Some("openshift-default".to_string()),
+        };
+
+        let prometheus_domain = topology
+            .get_domain(format!("prometheus-{}", self.sender.namespace.clone()))
+            .await?;
+        let name = format!("{}-prometheus", self.sender.namespace.clone());
+        let backend_service = format!("prometheus-operated");
+        let prometheus_ingress = K8sIngressScore {
+            name: fqdn!(&name),
+            host: fqdn!(&prometheus_domain),
+            backend_service: fqdn!(&backend_service),
+            port: 9090,
+            path: Some("/".to_string()),
+            path_type: Some(PathType::Prefix),
+            namespace: Some(fqdn!(&namespace)),
+            ingress_class_name: Some("openshift-default".to_string()),
+        };
+
+        alert_manager_ingress.interpret(inventory, topology).await?;
+        prometheus_ingress.interpret(inventory, topology).await?;
         info!("installed rhob monitoring stack",);
         Ok(Outcome::success(format!(
             "successfully deployed rhob-prometheus {:#?}",
@@ -379,7 +424,12 @@ impl RHOBAlertingInterpret {
         )))
     }

-    async fn install_grafana(&self, client: &Arc<K8sClient>) -> Result<Outcome, InterpretError> {
+    async fn install_grafana<T: Topology + K8sclient + Ingress>(
+        &self,
+        inventory: &Inventory,
+        topology: &T,
+        client: &Arc<K8sClient>,
+    ) -> Result<Outcome, InterpretError> {
         let mut label = BTreeMap::new();
         label.insert("dashboards".to_string(), "grafana".to_string());
         let labels = LabelSelector {
@@ -465,6 +515,23 @@ impl RHOBAlertingInterpret {
             .apply(&grafana, Some(&self.sender.namespace.clone()))
             .await
             .map_err(|e| InterpretError::new(e.to_string()))?;
+        let domain = topology
+            .get_domain(format!("grafana-{}", self.sender.namespace.clone()))
+            .await?;
+        let name = format!("{}-grafana", self.sender.namespace.clone());
+        let backend_service = format!("grafana-{}-service", self.sender.namespace.clone());
+        let grafana_ingress = K8sIngressScore {
+            name: fqdn!(&name),
+            host: fqdn!(&domain),
+            backend_service: fqdn!(&backend_service),
+            port: 3000,
+            path: Some("/".to_string()),
+            path_type: Some(PathType::Prefix),
+            namespace: Some(fqdn!(&namespace)),
+            ingress_class_name: Some("openshift-default".to_string()),
+        };
+
+        grafana_ingress.interpret(inventory, topology).await?;
         Ok(Outcome::success(format!(
             "successfully deployed grafana instance {:#?}",
             grafana.metadata.name
Review comment: Should check the ClusterOperator CRD status.