fix: Multiple ingress fixes for localk3d; Application and ntfy ingress now work correctly. Also fix the k3d kubeconfig context by force-switching to it every time. Not perfect, but better and more intuitive for users viewing their resources.
This commit is contained in:
@@ -3,5 +3,5 @@ use async_trait::async_trait;
|
||||
|
||||
#[async_trait]
|
||||
pub trait Ingress {
|
||||
async fn get_domain(&self, service: String) -> Result<String, PreparationError>;
|
||||
async fn get_domain(&self, service: &str) -> Result<String, PreparationError>;
|
||||
}
|
||||
|
||||
@@ -372,6 +372,8 @@ impl K8sAnywhereTopology {
|
||||
if let Some(Some(k8s_state)) = self.k8s_state.get() {
|
||||
match k8s_state.source {
|
||||
K8sSource::LocalK3d => {
|
||||
warn!("Installing observability operator is not supported on LocalK3d source");
|
||||
return Ok(PreparationOutcome::Noop);
|
||||
debug!("installing cluster observability operator");
|
||||
todo!();
|
||||
let op_score =
|
||||
@@ -576,12 +578,12 @@ impl TenantManager for K8sAnywhereTopology {
|
||||
#[async_trait]
|
||||
impl Ingress for K8sAnywhereTopology {
|
||||
//TODO this is specifically for openshift/okd which violates the k8sanywhere idea
|
||||
async fn get_domain(&self, service: String) -> Result<String, PreparationError> {
|
||||
async fn get_domain(&self, service: &str) -> Result<String, PreparationError> {
|
||||
let client = self.k8s_client().await?;
|
||||
|
||||
if let Some(Some(k8s_state)) = self.k8s_state.get() {
|
||||
match k8s_state.source {
|
||||
K8sSource::LocalK3d => Ok("localhost".to_string()),
|
||||
K8sSource::LocalK3d => Ok(format!("{service}.local.k3d")),
|
||||
K8sSource::Kubeconfig => {
|
||||
self.openshift_ingress_operator_available().await?;
|
||||
|
||||
|
||||
@@ -144,7 +144,7 @@ impl<
|
||||
async fn ensure_installed(&self, topology: &T) -> Result<(), String> {
|
||||
let image = self.application.image_name();
|
||||
let domain = topology
|
||||
.get_domain(self.application.name())
|
||||
.get_domain(&self.application.name())
|
||||
.await
|
||||
.map_err(|e| e.to_string())?;
|
||||
|
||||
|
||||
@@ -55,7 +55,7 @@ impl<T: Topology + K8sclient + HelmCommand + Ingress> Interpret<T> for ArgoInter
|
||||
topology: &T,
|
||||
) -> Result<Outcome, InterpretError> {
|
||||
let k8s_client = topology.k8s_client().await?;
|
||||
let domain = topology.get_domain("argo".into()).await?;
|
||||
let domain = topology.get_domain("argo").await?;
|
||||
let helm_score =
|
||||
argo_helm_chart_score(&self.score.namespace, self.score.openshift, &domain);
|
||||
|
||||
|
||||
@@ -50,7 +50,7 @@ impl<
|
||||
.await
|
||||
.map(|ns| ns.name.clone())
|
||||
.unwrap_or_else(|| self.application.name());
|
||||
let domain = topology.get_domain("ntfy".into()).await.unwrap();
|
||||
let domain = topology.get_domain("ntfy").await.unwrap();
|
||||
|
||||
let mut alerting_score = ApplicationMonitoringScore {
|
||||
sender: CRDPrometheus {
|
||||
|
||||
@@ -6,6 +6,7 @@ use crate::modules::monitoring::application_monitoring::rhobs_application_monito
|
||||
|
||||
use crate::modules::monitoring::kube_prometheus::crd::rhob_alertmanager_config::RHOBObservability;
|
||||
use crate::topology::MultiTargetTopology;
|
||||
use crate::topology::ingress::Ingress;
|
||||
use crate::{
|
||||
inventory::Inventory,
|
||||
modules::monitoring::{
|
||||
@@ -37,6 +38,7 @@ impl<
|
||||
+ TenantManager
|
||||
+ K8sclient
|
||||
+ MultiTargetTopology
|
||||
+ Ingress
|
||||
+ std::fmt::Debug
|
||||
+ PrometheusApplicationMonitoring<RHOBObservability>,
|
||||
> ApplicationFeature<T> for RHOBMonitoring
|
||||
@@ -59,7 +61,10 @@ impl<
|
||||
};
|
||||
let ntfy = NtfyScore {
|
||||
namespace: namespace.clone(),
|
||||
host: "ntfy.harmonydemo.apps.ncd0.harmony.mcd".to_string(),
|
||||
host: topology
|
||||
.get_domain("ntfy")
|
||||
.await
|
||||
.map_err(|e| format!("Could not get domain {e}"))?,
|
||||
};
|
||||
ntfy.interpret(&Inventory::empty(), topology)
|
||||
.await
|
||||
|
||||
@@ -222,6 +222,7 @@ impl RustWebapp {
|
||||
".git",
|
||||
".github",
|
||||
".harmony_generated",
|
||||
"harmony",
|
||||
"node_modules",
|
||||
];
|
||||
let mut entries: Vec<_> = WalkDir::new(project_root)
|
||||
@@ -425,7 +426,6 @@ impl RustWebapp {
|
||||
fs::create_dir_all(&templates_dir)?;
|
||||
|
||||
let (image_repo, image_tag) = image_url.rsplit_once(':').unwrap_or((image_url, "latest"));
|
||||
let domain = format!("{}.{domain}", self.name);
|
||||
|
||||
// Create Chart.yaml
|
||||
let chart_yaml = format!(
|
||||
|
||||
@@ -153,6 +153,10 @@ impl<T: Topology + HelmCommand> Interpret<T> for HelmChartInterpret {
|
||||
let yaml_path: Option<&Path> = match self.score.values_yaml.as_ref() {
|
||||
Some(yaml_str) => {
|
||||
tf = temp_file::with_contents(yaml_str.as_bytes());
|
||||
debug!(
|
||||
"values yaml string for chart {} :\n {yaml_str}",
|
||||
self.score.chart_name
|
||||
);
|
||||
Some(tf.path())
|
||||
}
|
||||
None => None,
|
||||
|
||||
@@ -45,6 +45,12 @@ service:
|
||||
|
||||
ingress:
|
||||
enabled: {ingress_enabled}
|
||||
hosts:
|
||||
- host: {host}
|
||||
paths:
|
||||
- path: /
|
||||
pathType: ImplementationSpecific
|
||||
|
||||
|
||||
route:
|
||||
enabled: {route_enabled}
|
||||
|
||||
@@ -21,8 +21,8 @@ pub fn pod_failed() -> PrometheusAlertRule {
|
||||
pub fn alert_container_restarting() -> PrometheusAlertRule {
|
||||
PrometheusAlertRule {
|
||||
alert: "ContainerRestarting".into(),
|
||||
expr: "increase(kube_pod_container_status_restarts_total[5m]) > 3".into(),
|
||||
r#for: Some("5m".into()),
|
||||
expr: "increase(kube_pod_container_status_restarts_total[30s]) > 3".into(),
|
||||
r#for: Some("30s".into()),
|
||||
labels: HashMap::from([("severity".into(), "warning".into())]),
|
||||
annotations: HashMap::from([
|
||||
(
|
||||
|
||||
@@ -282,7 +282,7 @@ impl RHOBAlertingInterpret {
|
||||
.map_err(|e| InterpretError::new(e.to_string()))?;
|
||||
|
||||
let alert_manager_domain = topology
|
||||
.get_domain(format!("alert-manager-{}", self.sender.namespace.clone()))
|
||||
.get_domain(&format!("alert-manager-{}", self.sender.namespace.clone()))
|
||||
.await?;
|
||||
let name = format!("{}-alert-manager", self.sender.namespace.clone());
|
||||
let backend_service = format!("alertmanager-operated");
|
||||
@@ -299,7 +299,7 @@ impl RHOBAlertingInterpret {
|
||||
};
|
||||
|
||||
let prometheus_domain = topology
|
||||
.get_domain(format!("prometheus-{}", self.sender.namespace.clone()))
|
||||
.get_domain(&format!("prometheus-{}", self.sender.namespace.clone()))
|
||||
.await?;
|
||||
let name = format!("{}-prometheus", self.sender.namespace.clone());
|
||||
let backend_service = format!("prometheus-operated");
|
||||
@@ -497,7 +497,7 @@ impl RHOBAlertingInterpret {
|
||||
.await
|
||||
.map_err(|e| InterpretError::new(e.to_string()))?;
|
||||
let domain = topology
|
||||
.get_domain(format!("grafana-{}", self.sender.namespace.clone()))
|
||||
.get_domain(&format!("grafana-{}", self.sender.namespace.clone()))
|
||||
.await?;
|
||||
let name = format!("{}-grafana", self.sender.namespace.clone());
|
||||
let backend_service = format!("grafana-{}-service", self.sender.namespace.clone());
|
||||
|
||||
Reference in New Issue
Block a user