fix/ingress #145
12
harmony/src/domain/topology/ingress.rs
Normal file
12
harmony/src/domain/topology/ingress.rs
Normal file
@@ -0,0 +1,12 @@
|
|||||||
|
use std::sync::Arc;
|
||||||
|
|
||||||
|
use async_trait::async_trait;
|
||||||
|
|
||||||
|
use crate::{
|
||||||
|
interpret::InterpretError,
|
||||||
|
topology::{PreparationError, k8s::K8sClient},
|
||||||
|
};
|
||||||
|
#[async_trait]
|
||||||
|
pub trait Ingress {
|
||||||
|
async fn get_domain(&self, client: Arc<K8sClient>) -> Result<String, PreparationError>;
|
||||||
|
}
|
||||||
@@ -1,6 +1,7 @@
|
|||||||
use std::{process::Command, sync::Arc};
|
use std::{process::Command, sync::Arc};
|
||||||
|
|
||||||
use async_trait::async_trait;
|
use async_trait::async_trait;
|
||||||
|
use kube::api::GroupVersionKind;
|
||||||
use log::{debug, info, warn};
|
use log::{debug, info, warn};
|
||||||
use serde::Serialize;
|
use serde::Serialize;
|
||||||
use tokio::sync::OnceCell;
|
use tokio::sync::OnceCell;
|
||||||
@@ -22,6 +23,7 @@ use crate::{
|
|||||||
},
|
},
|
||||||
},
|
},
|
||||||
score::Score,
|
score::Score,
|
||||||
|
topology::ingress::Ingress,
|
||||||
};
|
};
|
||||||
|
|
||||||
use super::{
|
use super::{
|
||||||
@@ -198,6 +200,26 @@ impl K8sAnywhereTopology {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
async fn openshift_ingress_operator_available(&self) -> Result<(), PreparationError> {
|
||||||
|
let client = self.k8s_client().await?;
|
||||||
|
let gvk = GroupVersionKind {
|
||||||
|
group: "operator.openshift.io".into(),
|
||||||
|
version: "v1".into(),
|
||||||
|
kind: "IngressController".into(),
|
||||||
|
};
|
||||||
|
let ic = client
|
||||||
|
.get_resource_json_value("default", Some("openshift-ingress-operator"), &gvk)
|
||||||
|
|
|||||||
|
.await?;
|
||||||
|
let ready_replicas = ic.data["status"]["availableReplicas"].as_i64().unwrap_or(0);
|
||||||
|
if ready_replicas >= 1 {
|
||||||
|
return Ok(());
|
||||||
|
} else {
|
||||||
|
return Err(PreparationError::new(
|
||||||
|
"openshift-ingress-operator not available".to_string(),
|
||||||
|
));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
fn is_helm_available(&self) -> Result<(), String> {
|
fn is_helm_available(&self) -> Result<(), String> {
|
||||||
let version_result = Command::new("helm")
|
let version_result = Command::new("helm")
|
||||||
.arg("version")
|
.arg("version")
|
||||||
@@ -550,3 +572,26 @@ impl TenantManager for K8sAnywhereTopology {
|
|||||||
.await
|
.await
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[async_trait]
|
||||||
|
impl Ingress for K8sAnywhereTopology {
|
||||||
|
//TODO this is specifically for openshift/okd which violates the k8sanywhere idea
|
||||||
|
johnride
commented
Implement correctly various k8s implementations Implement correctly various k8s implementations
|
|||||||
|
async fn get_domain(&self, client: Arc<K8sClient>) -> Result<String, PreparationError> {
|
||||||
|
self.openshift_ingress_operator_available().await?;
|
||||||
|
|
||||||
|
let gvk = GroupVersionKind {
|
||||||
|
group: "operator.openshift.io".into(),
|
||||||
|
version: "v1".into(),
|
||||||
|
kind: "IngressController".into(),
|
||||||
|
};
|
||||||
|
let ic = client
|
||||||
|
.get_resource_json_value("default", Some("openshift-ingress-operator"), &gvk)
|
||||||
|
.await
|
||||||
|
.map_err(|_| PreparationError::new("Failed to fetch IngressController".to_string()))?;
|
||||||
|
|
||||||
|
match ic.data["status"]["domain"].as_str() {
|
||||||
|
Some(domain) => Ok(domain.to_string()),
|
||||||
|
None => Err(PreparationError::new("Could not find domain".to_string())),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|||||||
@@ -1,4 +1,5 @@
|
|||||||
mod ha_cluster;
|
mod ha_cluster;
|
||||||
|
pub mod ingress;
|
||||||
use harmony_types::net::IpAddress;
|
use harmony_types::net::IpAddress;
|
||||||
mod host_binding;
|
mod host_binding;
|
||||||
mod http;
|
mod http;
|
||||||
|
|||||||
@@ -10,11 +10,10 @@ use crate::{
|
|||||||
data::Version,
|
data::Version,
|
||||||
inventory::Inventory,
|
inventory::Inventory,
|
||||||
modules::application::{
|
modules::application::{
|
||||||
ApplicationFeature, HelmPackage, OCICompliant,
|
features::{ArgoApplication, ArgoHelmScore}, ApplicationFeature, HelmPackage, OCICompliant
|
||||||
features::{ArgoApplication, ArgoHelmScore},
|
|
||||||
},
|
},
|
||||||
score::Score,
|
score::Score,
|
||||||
topology::{DeploymentTarget, HelmCommand, K8sclient, MultiTargetTopology, Topology},
|
topology::{ingress::Ingress, DeploymentTarget, HelmCommand, K8sclient, MultiTargetTopology, Topology},
|
||||||
};
|
};
|
||||||
|
|
||||||
/// ContinuousDelivery in Harmony provides this functionality :
|
/// ContinuousDelivery in Harmony provides this functionality :
|
||||||
@@ -136,7 +135,7 @@ impl<A: OCICompliant + HelmPackage> ContinuousDelivery<A> {
|
|||||||
#[async_trait]
|
#[async_trait]
|
||||||
impl<
|
impl<
|
||||||
A: OCICompliant + HelmPackage + Clone + 'static,
|
A: OCICompliant + HelmPackage + Clone + 'static,
|
||||||
T: Topology + HelmCommand + MultiTargetTopology + K8sclient + 'static,
|
T: Topology + HelmCommand + MultiTargetTopology + K8sclient + Ingress + 'static,
|
||||||
> ApplicationFeature<T> for ContinuousDelivery<A>
|
> ApplicationFeature<T> for ContinuousDelivery<A>
|
||||||
{
|
{
|
||||||
async fn ensure_installed(&self, topology: &T) -> Result<(), String> {
|
async fn ensure_installed(&self, topology: &T) -> Result<(), String> {
|
||||||
|
|||||||
@@ -13,7 +13,7 @@ use crate::{
|
|||||||
modules::helm::chart::{HelmChartScore, HelmRepository},
|
modules::helm::chart::{HelmChartScore, HelmRepository},
|
||||||
score::Score,
|
score::Score,
|
||||||
topology::{
|
topology::{
|
||||||
HelmCommand, K8sclient, PreparationError, PreparationOutcome, Topology, k8s::K8sClient,
|
ingress::Ingress, k8s::K8sClient, HelmCommand, K8sclient, PreparationError, PreparationOutcome, Topology
|
||||||
},
|
},
|
||||||
};
|
};
|
||||||
use harmony_types::id::Id;
|
use harmony_types::id::Id;
|
||||||
@@ -27,7 +27,7 @@ pub struct ArgoHelmScore {
|
|||||||
pub argo_apps: Vec<ArgoApplication>,
|
pub argo_apps: Vec<ArgoApplication>,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<T: Topology + HelmCommand + K8sclient> Score<T> for ArgoHelmScore {
|
impl<T: Topology + HelmCommand + K8sclient + Ingress> Score<T> for ArgoHelmScore {
|
||||||
fn create_interpret(&self) -> Box<dyn crate::interpret::Interpret<T>> {
|
fn create_interpret(&self) -> Box<dyn crate::interpret::Interpret<T>> {
|
||||||
Box::new(ArgoInterpret {
|
Box::new(ArgoInterpret {
|
||||||
score: self.clone(),
|
score: self.clone(),
|
||||||
@@ -47,16 +47,14 @@ pub struct ArgoInterpret {
|
|||||||
}
|
}
|
||||||
|
|
||||||
#[async_trait]
|
#[async_trait]
|
||||||
impl<T: Topology + K8sclient + HelmCommand> Interpret<T> for ArgoInterpret {
|
impl<T: Topology + K8sclient + HelmCommand + Ingress> Interpret<T> for ArgoInterpret {
|
||||||
async fn execute(
|
async fn execute(
|
||||||
&self,
|
&self,
|
||||||
inventory: &Inventory,
|
inventory: &Inventory,
|
||||||
topology: &T,
|
topology: &T,
|
||||||
) -> Result<Outcome, InterpretError> {
|
) -> Result<Outcome, InterpretError> {
|
||||||
let k8s_client = topology.k8s_client().await?;
|
let k8s_client = topology.k8s_client().await?;
|
||||||
let domain = self
|
let domain = topology.get_domain(k8s_client.clone()).await?;
|
||||||
.get_host_domain(k8s_client.clone(), self.score.openshift)
|
|
||||||
.await?;
|
|
||||||
let domain = format!("argo.{domain}");
|
let domain = format!("argo.{domain}");
|
||||||
let helm_score =
|
let helm_score =
|
||||||
argo_helm_chart_score(&self.score.namespace, self.score.openshift, &domain);
|
argo_helm_chart_score(&self.score.namespace, self.score.openshift, &domain);
|
||||||
|
|||||||
@@ -411,6 +411,7 @@ impl RustWebapp {
|
|||||||
fn create_helm_chart_files(
|
fn create_helm_chart_files(
|
||||||
&self,
|
&self,
|
||||||
image_url: &str,
|
image_url: &str,
|
||||||
|
topology: &T,
|
||||||
) -> Result<PathBuf, Box<dyn std::error::Error>> {
|
) -> Result<PathBuf, Box<dyn std::error::Error>> {
|
||||||
let chart_name = format!("{}-chart", self.name);
|
let chart_name = format!("{}-chart", self.name);
|
||||||
let chart_dir = self
|
let chart_dir = self
|
||||||
@@ -422,6 +423,9 @@ impl RustWebapp {
|
|||||||
fs::create_dir_all(&templates_dir)?;
|
fs::create_dir_all(&templates_dir)?;
|
||||||
|
|
||||||
let (image_repo, image_tag) = image_url.rsplit_once(':').unwrap_or((image_url, "latest"));
|
let (image_repo, image_tag) = image_url.rsplit_once(':').unwrap_or((image_url, "latest"));
|
||||||
|
|
||||||
|
//TODO need to find a way to use topology to get the domain
|
||||||
|
let domain = topology.get_domain(client.clone()).await?;
|
||||||
|
|
||||||
// Create Chart.yaml
|
// Create Chart.yaml
|
||||||
let chart_yaml = format!(
|
let chart_yaml = format!(
|
||||||
@@ -464,17 +468,17 @@ ingress:
|
|||||||
# Add other annotations like nginx ingress class if needed
|
# Add other annotations like nginx ingress class if needed
|
||||||
# kubernetes.io/ingress.class: nginx
|
# kubernetes.io/ingress.class: nginx
|
||||||
hosts:
|
hosts:
|
||||||
- host: chart-example.local
|
- host: {}
|
||||||
paths:
|
paths:
|
||||||
- path: /
|
- path: /
|
||||||
pathType: ImplementationSpecific
|
pathType: ImplementationSpecific
|
||||||
tls:
|
tls:
|
||||||
- secretName: {}-tls
|
- secretName: {}-tls
|
||||||
hosts:
|
hosts:
|
||||||
- chart-example.local
|
- {}
|
||||||
|
|
||||||
"#,
|
"#,
|
||||||
chart_name, image_repo, image_tag, self.service_port, self.name
|
chart_name, image_repo, image_tag, self.service_port, domain, self.name
|
||||||
);
|
);
|
||||||
fs::write(chart_dir.join("values.yaml"), values_yaml)?;
|
fs::write(chart_dir.join("values.yaml"), values_yaml)?;
|
||||||
|
|
||||||
|
|||||||
@@ -40,6 +40,7 @@ pub struct K8sIngressScore {
|
|||||||
pub path: Option<IngressPath>,
|
pub path: Option<IngressPath>,
|
||||||
pub path_type: Option<PathType>,
|
pub path_type: Option<PathType>,
|
||||||
pub namespace: Option<fqdn::FQDN>,
|
pub namespace: Option<fqdn::FQDN>,
|
||||||
|
pub ingress_class_name: Option<String>,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<T: Topology + K8sclient> Score<T> for K8sIngressScore {
|
impl<T: Topology + K8sclient> Score<T> for K8sIngressScore {
|
||||||
@@ -54,12 +55,20 @@ impl<T: Topology + K8sclient> Score<T> for K8sIngressScore {
|
|||||||
None => PathType::Prefix,
|
None => PathType::Prefix,
|
||||||
};
|
};
|
||||||
|
|
||||||
|
let ingress_class = match self.ingress_class_name.clone() {
|
||||||
|
Some(ingress_class_name) => {
|
||||||
|
ingress_class_name
|
||||||
|
}
|
||||||
|
None => format!("\"default\""),
|
||||||
|
};
|
||||||
|
|
||||||
let ingress = json!(
|
let ingress = json!(
|
||||||
{
|
{
|
||||||
"metadata": {
|
"metadata": {
|
||||||
"name": self.name.to_string(),
|
"name": self.name.to_string(),
|
||||||
},
|
},
|
||||||
"spec": {
|
"spec": {
|
||||||
|
"ingressClassName": ingress_class.as_str(),
|
||||||
"rules": [
|
"rules": [
|
||||||
{ "host": self.host.to_string(),
|
{ "host": self.host.to_string(),
|
||||||
"http": {
|
"http": {
|
||||||
|
|||||||
@@ -147,6 +147,7 @@ impl<T: Topology + K8sclient + HelmCommand> Interpret<T> for LAMPInterpret {
|
|||||||
port: 8080,
|
port: 8080,
|
||||||
path: Some(ingress_path),
|
path: Some(ingress_path),
|
||||||
path_type: None,
|
path_type: None,
|
||||||
|
ingress_class_name: None,
|
||||||
namespace: self
|
namespace: self
|
||||||
.get_namespace()
|
.get_namespace()
|
||||||
.map(|nbs| fqdn!(nbs.to_string().as_str())),
|
.map(|nbs| fqdn!(nbs.to_string().as_str())),
|
||||||
|
|||||||
@@ -1,3 +1,4 @@
|
|||||||
|
use fqdn::fqdn;
|
||||||
use std::fs;
|
use std::fs;
|
||||||
use std::{collections::BTreeMap, sync::Arc};
|
use std::{collections::BTreeMap, sync::Arc};
|
||||||
use tempfile::tempdir;
|
use tempfile::tempdir;
|
||||||
@@ -8,6 +9,7 @@ use log::{debug, info};
|
|||||||
use serde::Serialize;
|
use serde::Serialize;
|
||||||
use std::process::Command;
|
use std::process::Command;
|
||||||
|
|
||||||
|
use crate::modules::k8s::ingress::{K8sIngressScore, PathType};
|
||||||
use crate::modules::monitoring::kube_prometheus::crd::grafana_default_dashboard::build_default_dashboard;
|
use crate::modules::monitoring::kube_prometheus::crd::grafana_default_dashboard::build_default_dashboard;
|
||||||
use crate::modules::monitoring::kube_prometheus::crd::rhob_alertmanager_config::RHOBObservability;
|
use crate::modules::monitoring::kube_prometheus::crd::rhob_alertmanager_config::RHOBObservability;
|
||||||
use crate::modules::monitoring::kube_prometheus::crd::rhob_alertmanagers::{
|
use crate::modules::monitoring::kube_prometheus::crd::rhob_alertmanagers::{
|
||||||
@@ -29,6 +31,7 @@ use crate::modules::monitoring::kube_prometheus::crd::rhob_service_monitor::{
|
|||||||
ServiceMonitor, ServiceMonitorSpec,
|
ServiceMonitor, ServiceMonitorSpec,
|
||||||
};
|
};
|
||||||
use crate::score::Score;
|
use crate::score::Score;
|
||||||
|
use crate::topology::ingress::Ingress;
|
||||||
use crate::topology::oberservability::monitoring::AlertReceiver;
|
use crate::topology::oberservability::monitoring::AlertReceiver;
|
||||||
use crate::topology::{K8sclient, Topology, k8s::K8sClient};
|
use crate::topology::{K8sclient, Topology, k8s::K8sClient};
|
||||||
use crate::{
|
use crate::{
|
||||||
@@ -48,7 +51,7 @@ pub struct RHOBAlertingScore {
|
|||||||
pub prometheus_rules: Vec<RuleGroup>,
|
pub prometheus_rules: Vec<RuleGroup>,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<T: Topology + K8sclient + PrometheusApplicationMonitoring<RHOBObservability>> Score<T>
|
impl<T: Topology + K8sclient + Ingress + PrometheusApplicationMonitoring<RHOBObservability>> Score<T>
|
||||||
for RHOBAlertingScore
|
for RHOBAlertingScore
|
||||||
{
|
{
|
||||||
fn create_interpret(&self) -> Box<dyn crate::interpret::Interpret<T>> {
|
fn create_interpret(&self) -> Box<dyn crate::interpret::Interpret<T>> {
|
||||||
@@ -74,19 +77,20 @@ pub struct RHOBAlertingInterpret {
|
|||||||
}
|
}
|
||||||
|
|
||||||
#[async_trait]
|
#[async_trait]
|
||||||
impl<T: Topology + K8sclient + PrometheusApplicationMonitoring<RHOBObservability>> Interpret<T>
|
impl<T: Topology + K8sclient + Ingress + PrometheusApplicationMonitoring<RHOBObservability>> Interpret<T>
|
||||||
for RHOBAlertingInterpret
|
for RHOBAlertingInterpret
|
||||||
{
|
{
|
||||||
async fn execute(
|
async fn execute(
|
||||||
&self,
|
&self,
|
||||||
_inventory: &Inventory,
|
inventory: &Inventory,
|
||||||
topology: &T,
|
topology: &T,
|
||||||
) -> Result<Outcome, InterpretError> {
|
) -> Result<Outcome, InterpretError> {
|
||||||
let client = topology.k8s_client().await.unwrap();
|
let client = topology.k8s_client().await.unwrap();
|
||||||
self.ensure_grafana_operator().await?;
|
self.ensure_grafana_operator().await?;
|
||||||
self.install_prometheus(&client).await?;
|
self.install_prometheus(inventory, topology, &client)
|
||||||
|
.await?;
|
||||||
self.install_client_kube_metrics().await?;
|
self.install_client_kube_metrics().await?;
|
||||||
self.install_grafana(&client).await?;
|
self.install_grafana(inventory, topology,&client).await?;
|
||||||
self.install_receivers(&self.sender, &self.receivers)
|
self.install_receivers(&self.sender, &self.receivers)
|
||||||
.await?;
|
.await?;
|
||||||
self.install_rules(&self.prometheus_rules, &client).await?;
|
self.install_rules(&self.prometheus_rules, &client).await?;
|
||||||
@@ -238,7 +242,12 @@ impl RHOBAlertingInterpret {
|
|||||||
)))
|
)))
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn install_prometheus(&self, client: &Arc<K8sClient>) -> Result<Outcome, InterpretError> {
|
async fn install_prometheus<T: Topology + K8sclient + Ingress >(
|
||||||
|
&self,
|
||||||
|
inventory: &Inventory,
|
||||||
|
topology: &T,
|
||||||
|
client: &Arc<K8sClient>,
|
||||||
|
) -> Result<Outcome, InterpretError> {
|
||||||
debug!(
|
debug!(
|
||||||
"installing crd-prometheuses in namespace {}",
|
"installing crd-prometheuses in namespace {}",
|
||||||
self.sender.namespace.clone()
|
self.sender.namespace.clone()
|
||||||
@@ -265,6 +274,36 @@ impl RHOBAlertingInterpret {
|
|||||||
.apply(&stack, Some(&self.sender.namespace.clone()))
|
.apply(&stack, Some(&self.sender.namespace.clone()))
|
||||||
.await
|
.await
|
||||||
.map_err(|e| InterpretError::new(e.to_string()))?;
|
.map_err(|e| InterpretError::new(e.to_string()))?;
|
||||||
|
let domain = topology.get_domain(client.clone()).await?;
|
||||||
|
let name = format!("{}-alert-manager", self.sender.namespace.clone());
|
||||||
|
let backend_service = format!("{}-alert-manager", self.sender.namespace.clone());
|
||||||
|
let namespace = self.sender.namespace.clone();
|
||||||
|
let alert_manager_ingress = K8sIngressScore {
|
||||||
|
name: fqdn!(&name),
|
||||||
|
host: fqdn!(&domain),
|
||||||
|
backend_service: fqdn!(&backend_service),
|
||||||
|
port: 9093,
|
||||||
|
path: Some("/".to_string()),
|
||||||
|
path_type: Some(PathType::Prefix),
|
||||||
|
namespace: Some(fqdn!(&namespace)),
|
||||||
|
ingress_class_name: Some("openshift-default".to_string()),
|
||||||
|
};
|
||||||
|
|
||||||
|
let name = format!("{}-prometheus", self.sender.namespace.clone());
|
||||||
|
let backend_service = format!("{}-prometheus", self.sender.namespace.clone());
|
||||||
|
let prometheus_ingress = K8sIngressScore {
|
||||||
|
name: fqdn!(&name),
|
||||||
|
host: fqdn!(&domain),
|
||||||
|
backend_service: fqdn!(&backend_service),
|
||||||
|
port: 9090,
|
||||||
|
path: Some("/".to_string()),
|
||||||
|
path_type: Some(PathType::Prefix),
|
||||||
|
namespace: Some(fqdn!(&namespace)),
|
||||||
|
ingress_class_name: Some("openshift-default".to_string()),
|
||||||
|
};
|
||||||
|
|
||||||
|
alert_manager_ingress.interpret(inventory, topology).await?;
|
||||||
|
prometheus_ingress.interpret(inventory, topology).await?;
|
||||||
info!("installed rhob monitoring stack",);
|
info!("installed rhob monitoring stack",);
|
||||||
Ok(Outcome::success(format!(
|
Ok(Outcome::success(format!(
|
||||||
"successfully deployed rhob-prometheus {:#?}",
|
"successfully deployed rhob-prometheus {:#?}",
|
||||||
@@ -379,7 +418,12 @@ impl RHOBAlertingInterpret {
|
|||||||
)))
|
)))
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn install_grafana(&self, client: &Arc<K8sClient>) -> Result<Outcome, InterpretError> {
|
async fn install_grafana<T: Topology + K8sclient + Ingress>(
|
||||||
|
&self,
|
||||||
|
inventory: &Inventory,
|
||||||
|
topology: &T,
|
||||||
|
client: &Arc<K8sClient>,
|
||||||
|
) -> Result<Outcome, InterpretError> {
|
||||||
let mut label = BTreeMap::new();
|
let mut label = BTreeMap::new();
|
||||||
label.insert("dashboards".to_string(), "grafana".to_string());
|
label.insert("dashboards".to_string(), "grafana".to_string());
|
||||||
let labels = LabelSelector {
|
let labels = LabelSelector {
|
||||||
@@ -465,6 +509,21 @@ impl RHOBAlertingInterpret {
|
|||||||
.apply(&grafana, Some(&self.sender.namespace.clone()))
|
.apply(&grafana, Some(&self.sender.namespace.clone()))
|
||||||
.await
|
.await
|
||||||
.map_err(|e| InterpretError::new(e.to_string()))?;
|
.map_err(|e| InterpretError::new(e.to_string()))?;
|
||||||
|
let domain = topology.get_domain(client.clone()).await?;
|
||||||
|
let name = format!("{}-grafana", self.sender.namespace.clone());
|
||||||
|
let backend_service = format!("{}-grafana", self.sender.namespace.clone());
|
||||||
|
let grafana_ingress = K8sIngressScore {
|
||||||
|
name: fqdn!(&name),
|
||||||
|
host: fqdn!(&domain),
|
||||||
|
backend_service: fqdn!(&backend_service),
|
||||||
|
port: 9090,
|
||||||
|
path: Some("/".to_string()),
|
||||||
|
path_type: Some(PathType::Prefix),
|
||||||
|
namespace: Some(fqdn!(&namespace)),
|
||||||
|
ingress_class_name: Some("openshift-default".to_string()),
|
||||||
|
};
|
||||||
|
|
||||||
|
grafana_ingress.interpret(inventory, topology).await?;
|
||||||
Ok(Outcome::success(format!(
|
Ok(Outcome::success(format!(
|
||||||
"successfully deployed grafana instance {:#?}",
|
"successfully deployed grafana instance {:#?}",
|
||||||
grafana.metadata.name
|
grafana.metadata.name
|
||||||
|
|||||||
Reference in New Issue
Block a user
Should check the ClusterOperator CRD status.