fix: multiple ingress fixes for local k3d; Application and ntfy ingress now work as expected. Also fix the k3d kubeconfig context by force-switching to it on every run. Not perfect, but better and more intuitive for users inspecting their resources.
parent 21dcb75408
commit 11481b16cd
@@ -3,5 +3,5 @@ use async_trait::async_trait;
 
 #[async_trait]
 pub trait Ingress {
-    async fn get_domain(&self, service: String) -> Result<String, PreparationError>;
+    async fn get_domain(&self, service: &str) -> Result<String, PreparationError>;
 }
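Call sites across the repo now pass `&str` (or `&format!(...)` for composed names) instead of owned `String`s. A minimal caller sketch against the new trait signature (the `deploy_hint` function and its printout are illustrative only, not part of this commit):

    // Illustrative caller: resolve the ingress host for a named service.
    async fn deploy_hint<T: Ingress>(topology: &T) -> Result<(), PreparationError> {
        // The &str parameter avoids the String allocations the old signature forced on callers.
        let domain = topology.get_domain("ntfy").await?;
        println!("ntfy will be reachable at http://{domain}");
        Ok(())
    }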
@@ -372,6 +372,8 @@ impl K8sAnywhereTopology {
         if let Some(Some(k8s_state)) = self.k8s_state.get() {
             match k8s_state.source {
                 K8sSource::LocalK3d => {
+                    warn!("Installing observability operator is not supported on LocalK3d source");
+                    return Ok(PreparationOutcome::Noop);
                     debug!("installing cluster observability operator");
                     todo!();
                     let op_score =
@@ -576,12 +578,12 @@ impl TenantManager for K8sAnywhereTopology {
 
 #[async_trait]
 impl Ingress for K8sAnywhereTopology {
     //TODO this is specifically for openshift/okd which violates the k8sanywhere idea
-    async fn get_domain(&self, service: String) -> Result<String, PreparationError> {
+    async fn get_domain(&self, service: &str) -> Result<String, PreparationError> {
        let client = self.k8s_client().await?;
 
        if let Some(Some(k8s_state)) = self.k8s_state.get() {
            match k8s_state.source {
-                K8sSource::LocalK3d => Ok("localhost".to_string()),
+                K8sSource::LocalK3d => Ok(format!("{service}.local.k3d")),
                K8sSource::Kubeconfig => {
                    self.openshift_ingress_operator_available().await?;
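With this change, a local k3d cluster no longer resolves every service to `localhost`; each service gets its own host under the `.local.k3d` suffix, so multiple ingresses can coexist. A standalone sketch of the mapping (the `local_k3d_domain` helper is illustrative, not part of the codebase):

    // Illustrative helper mirroring the LocalK3d arm above.
    fn local_k3d_domain(service: &str) -> String {
        format!("{service}.local.k3d")
    }

    fn main() {
        assert_eq!(local_k3d_domain("ntfy"), "ntfy.local.k3d");
        assert_eq!(local_k3d_domain("argo"), "argo.local.k3d");
    }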
@@ -144,7 +144,7 @@ impl<
     async fn ensure_installed(&self, topology: &T) -> Result<(), String> {
         let image = self.application.image_name();
         let domain = topology
-            .get_domain(self.application.name())
+            .get_domain(&self.application.name())
             .await
             .map_err(|e| e.to_string())?;
 
@@ -55,7 +55,7 @@ impl<T: Topology + K8sclient + HelmCommand + Ingress> Interpret<T> for ArgoInter
         topology: &T,
     ) -> Result<Outcome, InterpretError> {
         let k8s_client = topology.k8s_client().await?;
-        let domain = topology.get_domain("argo".into()).await?;
+        let domain = topology.get_domain("argo").await?;
         let helm_score =
             argo_helm_chart_score(&self.score.namespace, self.score.openshift, &domain);
 
@@ -50,7 +50,7 @@ impl<
             .await
             .map(|ns| ns.name.clone())
             .unwrap_or_else(|| self.application.name());
-        let domain = topology.get_domain("ntfy".into()).await.unwrap();
+        let domain = topology.get_domain("ntfy").await.unwrap();
 
         let mut alerting_score = ApplicationMonitoringScore {
             sender: CRDPrometheus {
@@ -6,6 +6,7 @@ use crate::modules::monitoring::application_monitoring::rhobs_application_monito
 
 use crate::modules::monitoring::kube_prometheus::crd::rhob_alertmanager_config::RHOBObservability;
 use crate::topology::MultiTargetTopology;
+use crate::topology::ingress::Ingress;
 use crate::{
     inventory::Inventory,
     modules::monitoring::{
@@ -37,6 +38,7 @@ impl<
         + TenantManager
         + K8sclient
         + MultiTargetTopology
+        + Ingress
         + std::fmt::Debug
         + PrometheusApplicationMonitoring<RHOBObservability>,
     > ApplicationFeature<T> for RHOBMonitoring
@@ -59,7 +61,10 @@ impl<
         };
         let ntfy = NtfyScore {
             namespace: namespace.clone(),
-            host: "ntfy.harmonydemo.apps.ncd0.harmony.mcd".to_string(),
+            host: topology
+                .get_domain("ntfy")
+                .await
+                .map_err(|e| format!("Could not get domain {e}"))?,
         };
         ntfy.interpret(&Inventory::empty(), topology)
             .await
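The hard-coded demo host is gone; the ntfy host now comes from whatever the topology's `Ingress` implementation reports. A condensed sketch of the lookup (the `ntfy_host` helper is hypothetical; the `String` error type matches the surrounding feature code):

    // Hypothetical helper: derive the ntfy host from the topology instead of a literal.
    async fn ntfy_host<T: Ingress>(topology: &T) -> Result<String, String> {
        topology
            .get_domain("ntfy")
            .await
            .map_err(|e| format!("Could not get domain {e}"))
    }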
@@ -222,6 +222,7 @@ impl RustWebapp {
             ".git",
             ".github",
+            ".harmony_generated",
             "harmony",
             "node_modules",
         ];
         let mut entries: Vec<_> = WalkDir::new(project_root)
@@ -425,7 +426,6 @@ impl RustWebapp {
         fs::create_dir_all(&templates_dir)?;
 
         let (image_repo, image_tag) = image_url.rsplit_once(':').unwrap_or((image_url, "latest"));
-        let domain = format!("{}.{domain}", self.name);
 
         // Create Chart.yaml
         let chart_yaml = format!(
@@ -153,6 +153,10 @@ impl<T: Topology + HelmCommand> Interpret<T> for HelmChartInterpret {
         let yaml_path: Option<&Path> = match self.score.values_yaml.as_ref() {
             Some(yaml_str) => {
                 tf = temp_file::with_contents(yaml_str.as_bytes());
+                debug!(
+                    "values yaml string for chart {} :\n {yaml_str}",
+                    self.score.chart_name
+                );
                 Some(tf.path())
             }
             None => None,
@@ -45,6 +45,12 @@ service:
 
 ingress:
   enabled: {ingress_enabled}
+  hosts:
+    - host: {host}
+      paths:
+        - path: /
+          pathType: ImplementationSpecific
+
 
 route:
   enabled: {route_enabled}
@@ -21,8 +21,8 @@ pub fn pod_failed() -> PrometheusAlertRule {
 pub fn alert_container_restarting() -> PrometheusAlertRule {
     PrometheusAlertRule {
         alert: "ContainerRestarting".into(),
-        expr: "increase(kube_pod_container_status_restarts_total[5m]) > 3".into(),
-        r#for: Some("5m".into()),
+        expr: "increase(kube_pod_container_status_restarts_total[30s]) > 3".into(),
+        r#for: Some("30s".into()),
         labels: HashMap::from([("severity".into(), "warning".into())]),
         annotations: HashMap::from([
             (
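The restart alert now evaluates over a 30s window, so it fires almost immediately in local demos instead of waiting five minutes. If both behaviours are needed, the window could be a parameter; a hedged sketch (the `container_restarting_with_window` helper is hypothetical, reusing the `PrometheusAlertRule` fields shown above, annotations omitted):

    // Hypothetical builder: same rule, but the evaluation window is caller-chosen
    // ("30s" for demos, "5m" for production).
    fn container_restarting_with_window(window: &str) -> PrometheusAlertRule {
        PrometheusAlertRule {
            alert: "ContainerRestarting".into(),
            expr: format!("increase(kube_pod_container_status_restarts_total[{window}]) > 3").into(),
            r#for: Some(window.into()),
            labels: HashMap::from([("severity".into(), "warning".into())]),
            annotations: HashMap::new(), // omitted for brevity
        }
    }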
@@ -282,7 +282,7 @@ impl RHOBAlertingInterpret {
             .map_err(|e| InterpretError::new(e.to_string()))?;
 
         let alert_manager_domain = topology
-            .get_domain(format!("alert-manager-{}", self.sender.namespace.clone()))
+            .get_domain(&format!("alert-manager-{}", self.sender.namespace.clone()))
             .await?;
         let name = format!("{}-alert-manager", self.sender.namespace.clone());
         let backend_service = format!("alertmanager-operated");
@@ -299,7 +299,7 @@ impl RHOBAlertingInterpret {
         };
 
         let prometheus_domain = topology
-            .get_domain(format!("prometheus-{}", self.sender.namespace.clone()))
+            .get_domain(&format!("prometheus-{}", self.sender.namespace.clone()))
             .await?;
         let name = format!("{}-prometheus", self.sender.namespace.clone());
         let backend_service = format!("prometheus-operated");
@@ -497,7 +497,7 @@ impl RHOBAlertingInterpret {
             .await
             .map_err(|e| InterpretError::new(e.to_string()))?;
         let domain = topology
-            .get_domain(format!("grafana-{}", self.sender.namespace.clone()))
+            .get_domain(&format!("grafana-{}", self.sender.namespace.clone()))
             .await?;
         let name = format!("{}-grafana", self.sender.namespace.clone());
         let backend_service = format!("grafana-{}-service", self.sender.namespace.clone());
@@ -2,8 +2,8 @@ mod downloadable_asset;
 use downloadable_asset::*;
 
 use kube::Client;
-use log::debug;
-use std::path::PathBuf;
+use log::{debug, info};
+use std::{ffi::OsStr, path::PathBuf};
 
 const K3D_BIN_FILE_NAME: &str = "k3d";
 
@@ -213,15 +213,19 @@ impl K3d {
             }
         }
 
+        let client;
         if !self.is_cluster_initialized() {
             debug!("Cluster is not initialized, initializing now");
-            return self.initialize_cluster().await;
-        }
-
+            client = self.initialize_cluster().await?;
+        } else {
        self.start_cluster().await?;
 
        debug!("K3d and cluster are already properly set up");
-        self.create_kubernetes_client().await
+            client = self.create_kubernetes_client().await?;
+        }
+
+        self.ensure_k3d_config_is_default(self.get_cluster_name()?)?;
+        Ok(client)
     }
 
     // Private helper methods
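The early `return` meant the kubeconfig was only touched when the cluster was first created; the reshaped flow always falls through to `ensure_k3d_config_is_default`, so the `k3d-<cluster>` context is forced on every run. A condensed sketch of the intent (signatures simplified, `String` errors assumed as in the surrounding code):

    // Condensed view of the new flow: obtain a client either way, then always
    // force the kubeconfig context before returning it.
    async fn setup(k3d: &K3d) -> Result<Client, String> {
        let client = if !k3d.is_cluster_initialized() {
            k3d.initialize_cluster().await?
        } else {
            k3d.start_cluster().await?;
            k3d.create_kubernetes_client().await?
        };
        k3d.ensure_k3d_config_is_default(k3d.get_cluster_name()?)?;
        Ok(client)
    }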
@@ -302,7 +306,16 @@ impl K3d {
         S: AsRef<std::ffi::OsStr>,
     {
         let binary_path = self.get_k3d_binary()?;
-        let output = std::process::Command::new(binary_path).args(args).output();
+        self.run_command(binary_path, args)
+    }
+
+    pub fn run_command<I, S, C>(&self, cmd: C, args: I) -> Result<std::process::Output, String>
+    where
+        I: IntoIterator<Item = S>,
+        S: AsRef<std::ffi::OsStr>,
+        C: AsRef<OsStr>,
+    {
+        let output = std::process::Command::new(cmd).args(args).output();
         match output {
             Ok(output) => {
                 let stderr = String::from_utf8_lossy(&output.stderr);
@@ -311,7 +324,7 @@ impl K3d {
                 debug!("stdout : {}", stdout);
                 Ok(output)
             }
-            Err(e) => Err(format!("Failed to execute k3d command: {}", e)),
+            Err(e) => Err(format!("Failed to execute command: {}", e)),
         }
     }
 
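`run_k3d_command` is now a thin wrapper over the generic `run_command`, which is also what the new `ensure_k3d_config_is_default` below uses to drive `kubectl`. A brief usage sketch (the `show_context` function is illustrative):

    // Illustrative calls: the k3d wrapper resolves the managed binary,
    // while run_command can invoke any executable, e.g. kubectl.
    fn show_context(k3d: &K3d) -> Result<(), String> {
        let clusters = k3d.run_k3d_command(["cluster", "list"])?;
        println!("{}", String::from_utf8_lossy(&clusters.stdout));

        let ctx = k3d.run_command("kubectl", ["config", "current-context"])?;
        println!("{}", String::from_utf8_lossy(&ctx.stdout));
        Ok(())
    }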
@@ -323,12 +336,38 @@ impl K3d {
             return Err(format!("Failed to create cluster: {}", stderr));
         }
 
-        debug!("Successfully created k3d cluster '{}'", cluster_name);
+        info!("Successfully created k3d cluster '{}'", cluster_name);
         Ok(())
     }
 
+    fn ensure_k3d_config_is_default(&self, cluster_name: &str) -> Result<(), String> {
+        let output = self.run_k3d_command(["kubeconfig", "merge", "-d", cluster_name])?;
+
+        if !output.status.success() {
+            let stderr = String::from_utf8_lossy(&output.stderr);
+            return Err(format!("Failed to setup k3d kubeconfig : {}", stderr));
+        }
+
+        let output = self.run_command(
+            "kubectl",
+            ["config", "use-context", &format!("k3d-{cluster_name}")],
+        )?;
+
+        if !output.status.success() {
+            let stderr = String::from_utf8_lossy(&output.stderr);
+            return Err(format!(
+                "Failed to switch kubectl context to k3d : {}",
+                stderr
+            ));
+        }
+        info!(
+            "kubectl is now using 'k3d-{}' as default context",
+            cluster_name
+        );
+        Ok(())
+    }
+
     async fn create_kubernetes_client(&self) -> Result<Client, String> {
         // TODO: Connect the client to the right k3d cluster (see https://git.nationtech.io/NationTech/harmony/issues/92)
         Client::try_default()
             .await
             .map_err(|e| format!("Failed to create Kubernetes client: {}", e))