Monitor an application within a tenant #86

Merged
letian merged 22 commits from feat/crd-alertmanager-configs into master 2025-08-04 21:42:05 +00:00
20 changed files with 285 additions and 148 deletions
Showing only changes of commit 1525ac2226

.gitignore vendored (1 line changed)
View File

@ -2,3 +2,4 @@ target
private_repos
log/
*.tgz
.gitignore

Cargo.lock generated (38 lines changed)
View File

@ -1355,6 +1355,7 @@ dependencies = [
name = "example-rust"
version = "0.1.0"
dependencies = [
"base64 0.22.1",
"env_logger",
"harmony",
"harmony_cli",
@ -1427,6 +1428,18 @@ version = "0.2.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "28dea519a9695b9977216879a3ebfddf92f1c08c05d984f8996aecd6ecdc811d"
[[package]]
name = "filetime"
version = "0.2.25"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "35c0522e981e68cbfa8c3f978441a5f34b30b96e146b33cd3359176b50fe8586"
dependencies = [
"cfg-if",
"libc",
"libredox",
"windows-sys 0.59.0",
]
[[package]]
name = "flate2"
version = "1.1.2"
@ -1726,6 +1739,8 @@ name = "harmony"
version = "0.1.0"
dependencies = [
"async-trait",
"base64 0.22.1",
"bollard",
"chrono",
"cidr",
"convert_case",
@ -1767,6 +1782,7 @@ dependencies = [
"serde_yaml",
"similar",
"strum 0.27.1",
"tar",
"temp-dir",
"temp-file",
"tempfile",
@ -2746,6 +2762,7 @@ checksum = "1580801010e535496706ba011c15f8532df6b42297d2e471fec38ceadd8c0638"
dependencies = [
"bitflags 2.9.1",
"libc",
"redox_syscall",
]
[[package]]
@ -4749,6 +4766,17 @@ version = "1.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369"
[[package]]
name = "tar"
version = "0.4.44"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1d863878d212c87a19c1a610eb53bb01fe12951c0501cf5a0d65f724914a667a"
dependencies = [
"filetime",
"libc",
"xattr",
]
[[package]]
name = "temp-dir"
version = "0.1.16"
@ -5794,6 +5822,16 @@ dependencies = [
"tap",
]
[[package]]
name = "xattr"
version = "1.5.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "af3a19837351dc82ba89f8a125e22a3c475f05aba604acc023d62b2739ae2909"
dependencies = [
"libc",
"rustix 1.0.7",
]
[[package]]
name = "xml-rs"
version = "0.8.26"

View File

@ -53,3 +53,6 @@ chrono = "0.4"
similar = "2"
uuid = { version = "1.11", features = ["v4", "fast-rng", "macro-diagnostics"] }
pretty_assertions = "1.4.1"
bollard = "0.19.1"
base64 = "0.22.1"
tar = "0.4.44"

View File

@ -14,6 +14,7 @@ async fn main() {
maestro.register_all(vec![Box::new(NtfyScore {
namespace: "monitoring".to_string(),
host: "localhost".to_string(),
})]);
harmony_cli::init(maestro, None).await.unwrap();
}

View File

@ -12,3 +12,4 @@ tokio = { workspace = true }
log = { workspace = true }
env_logger = { workspace = true }
url = { workspace = true }
base64.workspace = true

View File

@ -18,6 +18,12 @@ use harmony::{
#[tokio::main]
async fn main() {
env_logger::init();
let topology = K8sAnywhereTopology::from_env();
let mut maestro = Maestro::initialize(Inventory::autoload(), topology)
.await
.unwrap();
let application = Arc::new(RustWebapp {
name: "harmony-example-rust-webapp".to_string(),
domain: Url::Url(url::Url::parse("https://rustapp.harmony.example.com").unwrap()),
@ -39,7 +45,7 @@ async fn main() {
features: vec![
Box::new(ContinuousDelivery {
application: application.clone(),
}),
}), // TODO add monitoring, backups, multisite ha, etc
Box::new(PrometheusApplicationMonitoring {
application: application.clone(),
alert_receiver: vec![Box::new(discord_receiver), Box::new(webhook_receiver)],
Outdated review comment:

As JG mentioned, we want to keep this as simple as possible, as opinionated as possible. If we keep it named `Monitoring` we can just replace the inner implementation later.
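To illustrate the point of that comment, here is a minimal, self-contained sketch (hypothetical names, not code from this PR) of a feature that keeps the stable public name `Monitoring` while delegating to whatever backend is currently wired in, so the Prometheus-based implementation could be swapped later without touching call sites:

```rust
// Hypothetical sketch of the "stable facade, swappable backend" idea from the
// review comment above; names are illustrative, not this PR's actual types.
trait MonitoringBackend {
    fn ensure_installed(&self) -> Result<(), String>;
}

/// Today's implementation; could later be replaced by something else.
struct PrometheusBackend;

impl MonitoringBackend for PrometheusBackend {
    fn ensure_installed(&self) -> Result<(), String> {
        // ... install kube-prometheus, alert receivers, etc.
        Ok(())
    }
}

/// The feature users reference stays named `Monitoring`; only the boxed
/// backend changes if the inner implementation is replaced.
struct Monitoring {
    backend: Box<dyn MonitoringBackend>,
}

impl Monitoring {
    fn ensure_installed(&self) -> Result<(), String> {
        self.backend.ensure_installed()
    }
}

fn main() {
    let monitoring = Monitoring { backend: Box::new(PrometheusBackend) };
    monitoring.ensure_installed().unwrap();
}
```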
@ -49,10 +55,6 @@ async fn main() {
application,
};
let topology = K8sAnywhereTopology::from_env();
let mut maestro = Maestro::initialize(Inventory::autoload(), topology)
.await
.unwrap();
maestro.register_all(vec![Box::new(app)]);
harmony_cli::init(maestro, None).await.unwrap();
}

View File

@ -61,6 +61,9 @@ tempfile = "3.20.0"
serde_with = "3.14.0"
schemars = "0.8.22"
kube-derive = "1.1.0"
bollard.workspace = true
tar.workspace = true
base64.workspace = true
[dev-dependencies]
pretty_assertions.workspace = true

View File

@ -260,17 +260,33 @@ impl K8sClient {
) -> Result<(), Error> {
let obj: DynamicObject = serde_yaml::from_value(yaml.clone()).expect("TODO do not unwrap");
let name = obj.metadata.name.as_ref().expect("YAML must have a name");
let namespace = obj
let api_version = yaml
.get("apiVersion")
.expect("couldn't get apiVersion from YAML")
.as_str()
.expect("couldn't get apiVersion as str");
let kind = yaml
.get("kind")
.expect("couldn't get kind from YAML")
.as_str()
.expect("couldn't get kind as str");
let split: Vec<&str> = api_version.splitn(2, "/").collect();
let g = split[0];
let v = split[1];
let gvk = GroupVersionKind::gvk(g, v, kind);
let api_resource = ApiResource::from_gvk(&gvk);
let namespace = match ns {
Some(n) => n,
None => obj
.metadata
.namespace
.as_ref()
.expect("YAML must have a namespace");
// 4. Define the API resource type using the GVK from the object.
// The plural name 'applications' is taken from your CRD definition.
error!("This only supports argocd application harcoded, very rrrong");
let gvk = GroupVersionKind::gvk("argoproj.io", "v1alpha1", "Application");
let api_resource = ApiResource::from_gvk_with_plural(&gvk, "applications");
.expect("YAML must have a namespace"),
};
// 5. Create a dynamic API client for this resource type.
let api: Api<DynamicObject> =
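The hunk above derives the GroupVersionKind by splitting `apiVersion` on `/`. One detail worth noting: core-group resources (e.g. `apiVersion: v1`) contain no slash, so unconditionally indexing the second element of the split would panic for them. A small standalone sketch of a guarded split (std only, not this PR's code):

```rust
// Sketch (not the PR's code): deriving (group, version) from an apiVersion
// string. Core-group resources such as `v1` have no "/" and an empty group,
// so indexing split[1] unconditionally would panic for them.
fn group_version(api_version: &str) -> (&str, &str) {
    match api_version.split_once('/') {
        Some((group, version)) => (group, version),
        None => ("", api_version), // core API group, e.g. apiVersion: v1
    }
}

fn main() {
    assert_eq!(group_version("argoproj.io/v1alpha1"), ("argoproj.io", "v1alpha1"));
    assert_eq!(group_version("v1"), ("", "v1"));
}
```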

View File

@ -1,11 +1,7 @@
use std::{backtrace, collections::HashMap};
use k8s_openapi::{Metadata, NamespaceResourceScope, Resource};
use log::debug;
use serde::Serialize;
use serde_with::skip_serializing_none;
use serde_yaml::Value;
use url::Url;
use crate::modules::application::features::CDApplicationConfig;
@ -33,11 +29,14 @@ pub struct Helm {
#[derive(Clone, Debug, Serialize)]
#[serde(rename_all = "camelCase")]
pub struct Source {
// Using string for this because URL enforces a URL scheme at the beginning but Helm, ArgoCD, etc do not, and it can be counterproductive,
// as the only way I've found to get OCI working isn't by using oci:// but rather no scheme at all
#[serde(rename = "repoURL")]
pub repo_url: Url,
pub repo_url: String,
pub target_revision: Option<String>,
pub chart: String,
pub helm: Helm,
pub path: String,
}
#[derive(Clone, Debug, Serialize)]
@ -90,7 +89,7 @@ impl Default for ArgoApplication {
namespace: Default::default(),
project: Default::default(),
source: Source {
repo_url: Url::parse("http://asdf").expect("Couldn't parse to URL"),
repo_url: "http://asdf".to_string(),
target_revision: None,
chart: "".to_string(),
helm: Helm {
@ -109,6 +108,7 @@ impl Default for ArgoApplication {
api_versions: vec![],
namespace: None,
},
path: "".to_string(),
},
sync_policy: SyncPolicy {
automated: Automated {
@ -138,10 +138,10 @@ impl From<CDApplicationConfig> for ArgoApplication {
namespace: Some(value.namespace),
project: "default".to_string(),
source: Source {
repo_url: Url::parse(value.helm_chart_repo_url.to_string().as_str())
.expect("couldn't convert to URL"),
repo_url: value.helm_chart_repo_url,
target_revision: Some(value.version.to_string()),
chart: value.helm_chart_name,
chart: value.helm_chart_name.clone(),
path: value.helm_chart_name,
helm: Helm {
pass_credentials: None,
parameters: vec![],
@ -218,7 +218,7 @@ spec:
let mut yaml_value: Value =
serde_yaml::from_str(yaml_str.as_str()).expect("couldn't parse string to YAML");
let mut spec = yaml_value
let spec = yaml_value
.get_mut("spec")
.expect("couldn't get spec from yaml")
.as_mapping_mut()
@ -258,7 +258,6 @@ spec:
#[cfg(test)]
mod tests {
use pretty_assertions::assert_eq;
use url::Url;
use crate::modules::application::features::{
ArgoApplication, Automated, Backoff, Helm, Retry, Source, SyncPolicy,
@ -271,7 +270,7 @@ mod tests {
namespace: Some("test-ns".to_string()),
project: "test-project".to_string(),
source: Source {
repo_url: Url::parse("http://test").unwrap(),
repo_url: "http://test".to_string(),
target_revision: None,
chart: "test-chart".to_string(),
helm: Helm {
@ -290,6 +289,7 @@ mod tests {
api_versions: vec![],
namespace: None,
},
path: "".to_string(),
},
sync_policy: SyncPolicy {
automated: Automated {
@ -321,7 +321,7 @@ spec:
server: https://kubernetes.default.svc
namespace: test-ns
source:
repoURL: http://test/
repoURL: http://test
chart: test-chart
helm:
parameters: []
@ -329,6 +329,7 @@ spec:
releaseName: test-release-neame
valueFiles: []
apiVersions: []
path: ''
syncPolicy:
automated:
prune: false
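The comment in this file's hunk explains why `repo_url` switches from `url::Url` to `String`: OCI chart references are usually written without a scheme, which `url::Url` rejects as a relative URL. A tiny standalone check of that behaviour (assumes the `url` crate already used in this repo; the scheme-less form is what the PR passes as `repoURL`):

```rust
// Illustration of the repo_url comment above (assumes the `url` crate).
fn main() {
    // Rejected: no scheme, so url::Url cannot parse it as an absolute URL.
    assert!(url::Url::parse("hub.nationtech.io/harmony").is_err());

    // This form parses fine, but per the comment in the diff, the author found
    // OCI only worked with no scheme at all, hence the plain String field.
    assert!(url::Url::parse("oci://hub.nationtech.io/harmony").is_ok());

    let repo_url: String = "hub.nationtech.io/harmony".to_string();
    println!("{repo_url}");
}
```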

View File

@ -9,15 +9,12 @@ use crate::{
config::HARMONY_DATA_DIR,
data::Version,
inventory::Inventory,
modules::{
application::{
modules::application::{
Application, ApplicationFeature, HelmPackage, OCICompliant,
features::{ArgoApplication, ArgoHelmScore},
},
helm::chart::HelmChartScore,
},
score::Score,
topology::{DeploymentTarget, HelmCommand, K8sclient, MultiTargetTopology, Topology, Url},
topology::{DeploymentTarget, HelmCommand, K8sclient, MultiTargetTopology, Topology},
};
/// ContinuousDelivery in Harmony provides this functionality :
@ -188,12 +185,12 @@ impl<
info!("Deploying to target {target:?}");
let score = ArgoHelmScore {
namespace: "harmonydemo-staging".to_string(),
openshift: true,
openshift: false,
domain: "argo.harmonydemo.apps.st.mcd".to_string(),
argo_apps: vec![ArgoApplication::from(CDApplicationConfig {
// helm pull oci://hub.nationtech.io/harmony/harmony-example-rust-webapp-chart/harmony-example-rust-webapp-chart --version 0.1.0
// helm pull oci://hub.nationtech.io/harmony/harmony-example-rust-webapp-chart --version 0.1.0
version: Version::from("0.1.0").unwrap(),
helm_chart_repo_url: Url::Url(url::Url::parse("oci://hub.nationtech.io/harmony/harmony-example-rust-webapp-chart/harmony-example-rust-webapp-chart").unwrap()),
helm_chart_repo_url: "hub.nationtech.io/harmony".to_string(),
helm_chart_name: "harmony-example-rust-webapp-chart".to_string(),
values_overrides: None,
name: "harmony-demo-rust-webapp".to_string(),
@ -207,14 +204,7 @@ impl<
.unwrap();
}
};
todo!("1. Create ArgoCD score that installs argo using helm chart, see if Taha's already done it
- [X] Package app (docker image, helm chart)
- [X] Push to registry
- [X] Push only if staging or prod
- [X] Deploy to local k3d when target is local
- [ ] Poke Argo
- [ ] Ensure app is up")
Ok(())
}
fn name(&self) -> String {
"ContinuousDelivery".to_string()
@ -225,7 +215,7 @@ impl<
/// more CD systems
pub struct CDApplicationConfig {
pub version: Version,
pub helm_chart_repo_url: Url,
pub helm_chart_repo_url: String,
pub helm_chart_name: String,
pub values_overrides: Option<Value>,
pub name: String,

View File

@ -1,5 +1,4 @@
use async_trait::async_trait;
use k8s_openapi::Resource;
use log::error;
use non_blank_string_rs::NonBlankString;
use serde::Serialize;
@ -647,7 +646,7 @@ server:
# Argo CD server ingress configuration
ingress:
# -- Enable an ingress resource for the Argo CD server
enabled: false
enabled: true
# -- Specific implementation for ingress controller. One of `generic`, `aws` or `gke`
## Additional configuration might be required in related configuration sections
controller: generic

View File

@ -1,27 +1,33 @@
use std::sync::Arc;
use async_trait::async_trait;
use log::info;
use base64::{Engine as _, engine::general_purpose};
use log::{debug, info};
use crate::{
inventory::Inventory,
modules::{
application::{Application, ApplicationFeature},
monitoring::kube_prometheus::{
application::{Application, ApplicationFeature, OCICompliant},
monitoring::{
alert_channel::webhook_receiver::WebhookReceiver,
kube_prometheus::{
alert_manager_config::{CRDAlertManager, CRDAlertManagerReceiver},
helm_prometheus_application_alerting::HelmPrometheusApplicationAlertingScore,
types::{NamespaceSelector, ServiceMonitor},
},
ntfy::ntfy::NtfyScore,
},
},
score::Score,
topology::{
HelmCommand, K8sclient, Topology, oberservability::monitoring::AlertReceiver,
HelmCommand, K8sclient, Topology, Url, oberservability::monitoring::AlertReceiver,
tenant::TenantManager,
},
};
#[derive(Debug, Clone)]
pub struct PrometheusApplicationMonitoring {
pub application: Arc<dyn Application>,
pub application: Arc<dyn OCICompliant>,
pub alert_receiver: Vec<Box<dyn CRDAlertManagerReceiver>>,
}
@ -32,10 +38,66 @@ impl<T: Topology + HelmCommand + 'static + TenantManager + K8sclient + std::fmt:
async fn ensure_installed(&self, topology: &T) -> Result<(), String> {
info!("Ensuring monitoring is available for application");
let alerting_score = HelmPrometheusApplicationAlertingScore {
let mut alerting_score = HelmPrometheusApplicationAlertingScore {
namespace: self.application.name().clone(),
receivers: self.alert_receiver.clone(),
};
let ntfy = NtfyScore {
// namespace: topology
// .get_tenant_config()
// .await
// .expect("couldn't get tenant config")
// .name,
namespace: self.application.name(),
host: "localhost".to_string(),
};
ntfy.create_interpret()
.execute(&Inventory::empty(), topology)
.await
.expect("couldn't create interpret for ntfy");
let ntfy_default_auth_username = "harmony";
let ntfy_default_auth_password = "harmony";
let ntfy_default_auth_header = format!(
"Basic {}",
general_purpose::STANDARD.encode(format!(
"{ntfy_default_auth_username}:{ntfy_default_auth_password}"
))
);
debug!("ntfy_default_auth_header: {ntfy_default_auth_header}");
let ntfy_default_auth_param = general_purpose::STANDARD
.encode(ntfy_default_auth_header)
.replace("=", "");
debug!("ntfy_default_auth_param: {ntfy_default_auth_param}");
let ntfy_receiver = WebhookReceiver {
name: "ntfy-webhook".to_string(),
url: Url::Url(
url::Url::parse(
format!(
"http://ntfy.{}.svc.cluster.local/rust-web-app?auth={ntfy_default_auth_param}",
self.application.name()
)
.as_str(),
)
.unwrap(),
),
};
alerting_score.receivers.push(Box::new(ntfy_receiver));
//TODO add service monitors to PrometheusApplicationMonitoring which can be
//deployed for the namespace using prometheus crd-servicemonitors
let mut service_monitor = ServiceMonitor::default();
service_monitor.namespace_selector = Some(NamespaceSelector {
any: true,
match_names: vec![],
});
service_monitor.name = "rust-webapp".to_string();
alerting_score
.create_interpret()
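The hunk above builds the ntfy webhook URL in two steps: first a regular `Basic base64(user:password)` header value, then that whole header base64-encoded again with `=` padding stripped, passed as ntfy's `?auth=` query parameter. A standalone sketch of that construction (assumes the `base64` 0.22 crate this PR adds; host and topic are illustrative):

```rust
// Standalone sketch of the ntfy `?auth=` parameter construction used in the
// hunk above (assumes the base64 0.22 crate this PR adds as a dependency).
use base64::{Engine as _, engine::general_purpose};

fn ntfy_auth_query_param(user: &str, password: &str) -> String {
    // 1. The regular HTTP Basic authorization header value.
    let basic_header = format!(
        "Basic {}",
        general_purpose::STANDARD.encode(format!("{user}:{password}"))
    );
    // 2. As in the diff, the whole header is base64-encoded again and the
    //    '=' padding is removed before it is used as a query parameter.
    general_purpose::STANDARD.encode(basic_header).replace('=', "")
}

fn main() {
    let auth = ntfy_auth_query_param("harmony", "harmony");
    // Illustrative in-cluster URL, mirroring the format! call in the hunk.
    println!("http://ntfy.example.svc.cluster.local/rust-web-app?auth={auth}");
}
```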

View File

@ -59,9 +59,7 @@ impl<A: Application, T: Topology + std::fmt::Debug> Interpret<T> for Application
}
};
}
todo!(
"Do I need to do anything more than this here?? I feel like the Application trait itself should expose something like ensure_ready but its becoming redundant. We'll see as this evolves."
)
Ok(Outcome::success("successfully created app".to_string()))
}
fn get_name(&self) -> InterpretName {

View File

@ -4,11 +4,15 @@ use std::process;
use std::sync::Arc;
use async_trait::async_trait;
use bollard::query_parameters::PushImageOptionsBuilder;
use bollard::{Docker, body_full};
use dockerfile_builder::Dockerfile;
use dockerfile_builder::instruction::{CMD, COPY, ENV, EXPOSE, FROM, RUN, USER, WORKDIR};
use dockerfile_builder::instruction_builder::CopyBuilder;
use futures_util::StreamExt;
use log::{debug, error, info};
use serde::Serialize;
use tar::Archive;
use crate::config::{REGISTRY_PROJECT, REGISTRY_URL};
use crate::{
@ -108,6 +112,7 @@ impl OCICompliant for RustWebapp {
// 1. Build the local image by calling the synchronous helper function.
let local_image_name = self.local_image_name();
self.build_docker_image(&local_image_name)
.await
.map_err(|e| format!("Failed to build Docker image: {}", e))?;
info!(
"Successfully built local Docker image: {}",
@ -117,6 +122,7 @@ impl OCICompliant for RustWebapp {
let remote_image_name = self.image_name();
// 2. Push the image to the registry.
self.push_docker_image(&local_image_name, &remote_image_name)
.await
.map_err(|e| format!("Failed to push Docker image: {}", e))?;
info!("Successfully pushed Docker image to: {}", remote_image_name);
@ -153,66 +159,68 @@ impl RustWebapp {
}
/// Builds the Docker image using the generated Dockerfile.
pub fn build_docker_image(
pub async fn build_docker_image(
&self,
image_name: &str,
) -> Result<String, Box<dyn std::error::Error>> {
info!("Generating Dockerfile for '{}'", self.name);
let dockerfile_path = self.build_dockerfile()?;
let _dockerfile_path = self.build_dockerfile()?;
info!(
"Building Docker image with file {} from root {}",
dockerfile_path.to_string_lossy(),
self.project_root.to_string_lossy()
let docker = Docker::connect_with_socket_defaults().unwrap();
let build_image_options = bollard::query_parameters::BuildImageOptionsBuilder::default()
.dockerfile("Dockerfile.harmony")
.t(image_name)
.q(false)
.version(bollard::query_parameters::BuilderVersion::BuilderV1)
.platform("linux/x86_64");
let mut temp_tar_builder = tar::Builder::new(Vec::new());
let _ = temp_tar_builder
.append_dir_all("", self.project_root.clone())
.unwrap();
let archive = temp_tar_builder
.into_inner()
.expect("couldn't finish creating tar");
let archived_files = Archive::new(archive.as_slice())
.entries()
.unwrap()
.map(|entry| entry.unwrap().path().unwrap().into_owned())
.collect::<Vec<_>>();
debug!("files in docker tar: {:#?}", archived_files);
let mut image_build_stream = docker.build_image(
build_image_options.build(),
None,
Some(body_full(archive.into())),
);
let output = process::Command::new("docker")
.args([
"build",
"--file",
dockerfile_path.to_str().unwrap(),
"-t",
&image_name,
self.project_root.to_str().unwrap(),
])
.spawn()?
.wait_with_output()?;
self.check_output(&output, "Failed to build Docker image")?;
while let Some(msg) = image_build_stream.next().await {
println!("Message: {msg:?}");
}
Ok(image_name.to_string())
}
/// Tags and pushes a Docker image to the configured remote registry.
fn push_docker_image(
async fn push_docker_image(
&self,
image_name: &str,
full_tag: &str,
) -> Result<String, Box<dyn std::error::Error>> {
info!("Pushing docker image {full_tag}");
// Tag the image for the remote registry.
let output = process::Command::new("docker")
.args(["tag", image_name, &full_tag])
.spawn()?
.wait_with_output()?;
self.check_output(&output, "Tagging docker image failed")?;
debug!(
"docker tag output: stdout: {}, stderr: {}",
String::from_utf8_lossy(&output.stdout),
String::from_utf8_lossy(&output.stderr)
);
let docker = Docker::connect_with_socket_defaults().unwrap();
// Push the image.
let output = process::Command::new("docker")
.args(["push", &full_tag])
.spawn()?
.wait_with_output()?;
self.check_output(&output, "Pushing docker image failed")?;
debug!(
"docker push output: stdout: {}, stderr: {}",
String::from_utf8_lossy(&output.stdout),
String::from_utf8_lossy(&output.stderr)
);
// let push_options = PushImageOptionsBuilder::new().tag(tag);
let mut push_image_stream =
docker.push_image(full_tag, Some(PushImageOptionsBuilder::new().build()), None);
while let Some(msg) = push_image_stream.next().await {
println!("Message: {msg:?}");
}
Ok(full_tag.to_string())
}
@ -349,7 +357,11 @@ impl RustWebapp {
image_url: &str,
) -> Result<PathBuf, Box<dyn std::error::Error>> {
let chart_name = format!("{}-chart", self.name);
let chart_dir = self.project_root.join("helm").join(&chart_name);
let chart_dir = self
.project_root
.join(".harmony_generated")
.join("helm")
.join(&chart_name);
let templates_dir = chart_dir.join("templates");
fs::create_dir_all(&templates_dir)?;
@ -416,7 +428,7 @@ ingress:
Expand the name of the chart.
*/}}
{{- define "chart.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
{{- default .Chart.Name $.Values.nameOverride | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
@ -424,7 +436,7 @@ Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
*/}}
{{- define "chart.fullname" -}}
{{- $name := default .Chart.Name .Values.nameOverride }}
{{- $name := default .Chart.Name $.Values.nameOverride }}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
{{- end }}
"#;
@ -437,12 +449,12 @@ kind: Service
metadata:
name: {{ include "chart.fullname" . }}
spec:
type: {{ .Values.service.type }}
type: {{ $.Values.service.type }}
ports:
- port: {{ .Values.service.port }}
targetPort: 3000
- name: main
port: {{ $.Values.service.port | default 3000 }}
targetPort: {{ $.Values.service.port | default 3000 }}
protocol: TCP
name: http
selector:
app: {{ include "chart.name" . }}
"#;
@ -455,7 +467,7 @@ kind: Deployment
metadata:
name: {{ include "chart.fullname" . }}
spec:
replicas: {{ .Values.replicaCount }}
replicas: {{ $.Values.replicaCount }}
selector:
matchLabels:
app: {{ include "chart.name" . }}
@ -466,28 +478,28 @@ spec:
spec:
containers:
- name: {{ .Chart.Name }}
image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
imagePullPolicy: {{ .Values.image.pullPolicy }}
image: "{{ $.Values.image.repository }}:{{ $.Values.image.tag | default .Chart.AppVersion }}"
imagePullPolicy: {{ $.Values.image.pullPolicy }}
ports:
- name: http
containerPort: 3000
- name: main
containerPort: {{ $.Values.service.port | default 3000 }}
protocol: TCP
"#;
fs::write(templates_dir.join("deployment.yaml"), deployment_yaml)?;
// Create templates/ingress.yaml
let ingress_yaml = r#"
{{- if .Values.ingress.enabled -}}
{{- if $.Values.ingress.enabled -}}
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: {{ include "chart.fullname" . }}
annotations:
{{- toYaml .Values.ingress.annotations | nindent 4 }}
{{- toYaml $.Values.ingress.annotations | nindent 4 }}
spec:
{{- if .Values.ingress.tls }}
{{- if $.Values.ingress.tls }}
tls:
{{- range .Values.ingress.tls }}
{{- range $.Values.ingress.tls }}
- hosts:
{{- range .hosts }}
- {{ . | quote }}
@ -496,7 +508,7 @@ spec:
{{- end }}
{{- end }}
rules:
{{- range .Values.ingress.hosts }}
{{- range $.Values.ingress.hosts }}
- host: {{ .host | quote }}
http:
paths:
@ -507,7 +519,7 @@ spec:
service:
name: {{ include "chart.fullname" $ }}
port:
number: 3000
number: {{ $.Values.service.port | default 3000 }}
{{- end }}
{{- end }}
{{- end }}
@ -526,11 +538,15 @@ spec:
info!(
"Launching `helm package {}` cli with CWD {}",
chart_dirname.to_string_lossy(),
&self.project_root.join("helm").to_string_lossy()
&self
.project_root
.join(".harmony_generated")
.join("helm")
.to_string_lossy()
);
let output = process::Command::new("helm")
.args(["package", chart_dirname.to_str().unwrap()])
.current_dir(&self.project_root.join("helm")) // Run package from the parent dir
.current_dir(&self.project_root.join(".harmony_generated").join("helm")) // Run package from the parent dir
.output()?;
self.check_output(&output, "Failed to package Helm chart")?;
@ -547,7 +563,11 @@ spec:
}
// The output from helm is relative, so we join it with the execution directory.
Ok(self.project_root.join("helm").join(tgz_name))
Ok(self
.project_root
.join(".harmony_generated")
.join("helm")
.join(tgz_name))
}
/// Pushes a packaged Helm chart to an OCI registry.
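This file's diff replaces the `docker build`/`docker push` CLI calls with bollard, streaming an in-memory tar of the project root to the daemon as the build context. A small standalone sketch of that tarball step using the same `tar` crate calls, with error propagation instead of `unwrap` (paths are illustrative, not this PR's code):

```rust
// Sketch of the in-memory build-context tarball the diff creates with the
// `tar` crate before streaming it to the Docker daemon.
use std::path::Path;
use tar::{Archive, Builder};

fn build_context_tar(project_root: &Path) -> std::io::Result<Vec<u8>> {
    let mut builder = Builder::new(Vec::new());
    // Recursively add the project root at the root of the archive ("").
    builder.append_dir_all("", project_root)?;
    builder.into_inner()
}

fn main() -> std::io::Result<()> {
    let bytes = build_context_tar(Path::new("."))?;
    // Listing the entries back is a cheap sanity check that the context
    // contains what we expect (the diff logs this at debug level).
    let mut archive = Archive::new(bytes.as_slice());
    for entry in archive.entries()? {
        println!("{}", entry?.path()?.display());
    }
    Ok(())
}
```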

View File

@ -24,7 +24,10 @@ pub struct ApplicationPrometheusMonitoringScore {
impl<T: Topology + HelmCommand + TenantManager> Score<T> for ApplicationPrometheusMonitoringScore {
fn create_interpret(&self) -> Box<dyn crate::interpret::Interpret<T>> {
let config = Arc::new(Mutex::new(PrometheusConfig::new()));
let mut prom_config = PrometheusConfig::new();
prom_config.alert_manager = true;
let config = Arc::new(Mutex::new(prom_config));
config
.try_lock()
.expect("couldn't lock config")

View File

@ -28,8 +28,6 @@ use super::types::AlertManagerConfig;
namespaced
)]
pub struct AlertmanagerConfigSpec {
// Define the spec fields here, or use serde's `flatten` if you want to store arbitrary data
// Example placeholder:
#[serde(flatten)]
pub data: serde_json::Value,
}

View File

@ -28,7 +28,7 @@ impl<T: Topology + HelmCommand + TenantManager> Score<T> for HelmPrometheusAlert
.expect("couldn't lock config")
.additional_service_monitors = self.service_monitors.clone();
Box::new(AlertingInterpret {
sender: KubePrometheus::new(),
sender: KubePrometheus { config },
receivers: self.receivers.clone(),
rules: self.rules.clone(),
})

View File

@ -3,7 +3,7 @@ use std::str::FromStr;
use crate::modules::helm::chart::{HelmChartScore, HelmRepository};
pub fn ntfy_helm_chart_score(namespace: String) -> HelmChartScore {
pub fn ntfy_helm_chart_score(namespace: String, host: String) -> HelmChartScore {
let values = format!(
r#"
replicaCount: 1
@ -28,12 +28,12 @@ service:
port: 80
ingress:
enabled: false
enabled: true
# annotations:
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
hosts:
- host: ntfy.host.com
- host: {host}
paths:
- path: /
pathType: ImplementationSpecific

View File

@ -17,6 +17,7 @@ use crate::{
#[derive(Debug, Clone, Serialize)]
pub struct NtfyScore {
pub namespace: String,
pub host: String,
}
impl<T: Topology + HelmCommand + K8sclient> Score<T> for NtfyScore {
@ -126,7 +127,7 @@ impl<T: Topology + HelmCommand + K8sclient> Interpret<T> for NtfyInterpret {
inventory: &Inventory,
topology: &T,
) -> Result<Outcome, InterpretError> {
ntfy_helm_chart_score(self.score.namespace.clone())
ntfy_helm_chart_score(self.score.namespace.clone(), self.score.host.clone())
.create_interpret()
.execute(inventory, topology)
.await?;