Compare commits

..

4 Commits

24 changed files with 411 additions and 289 deletions

View File

@@ -1,5 +1,3 @@
-use std::time::Duration;
 use derive_new::new;
 use k8s_openapi::{
     ClusterResourceScope, NamespaceResourceScope,
@@ -20,9 +18,11 @@ use kube::{
 };
 use log::{debug, error, trace};
 use serde::{Serialize, de::DeserializeOwned};
-use serde_json::json;
+use serde_json::{Value, json};
 use similar::TextDiff;
-use tokio::{io::AsyncReadExt, time::sleep};
+use tokio::io::AsyncReadExt;
+
+use crate::interpret::Outcome;

 #[derive(new, Clone)]
 pub struct K8sClient {
@@ -56,6 +56,57 @@ impl K8sClient {
         })
     }

+    pub async fn ensure_deployment(
+        &self,
+        resource_name: &str,
+        resource_namespace: &str,
+    ) -> Result<Outcome, Error> {
+        match self
+            .get_deployment(resource_name, Some(&resource_namespace))
+            .await
+        {
+            Ok(Some(deployment)) => {
+                if let Some(status) = deployment.status {
+                    let ready_count = status.ready_replicas.unwrap_or(0);
+                    if ready_count >= 1 {
+                        Ok(Outcome::success(format!(
+                            "'{}' is ready with {} replica(s).",
+                            resource_name, ready_count
+                        )))
+                    } else {
+                        Err(Error::Discovery(DiscoveryError::MissingResource(format!(
+                            "Deployment '{}' in namespace '{}' has 0 ready replicas",
+                            resource_name, resource_namespace
+                        ))))
+                    }
+                } else {
+                    Err(Error::Api(ErrorResponse {
+                        status: "Failure".to_string(),
+                        message: format!(
+                            "No status found for deployment '{}' in namespace '{}'",
+                            resource_name, resource_namespace
+                        ),
+                        reason: "MissingStatus".to_string(),
+                        code: 404,
+                    }))
+                }
+            }
+            Ok(None) => Err(Error::Discovery(DiscoveryError::MissingResource(format!(
+                "Deployment '{}' not found in namespace '{}'",
+                resource_name, resource_namespace
+            )))),
+            Err(e) => Err(Error::Api(ErrorResponse {
+                status: "Failure".to_string(),
+                message: format!(
+                    "Failed to fetch deployment '{}' in namespace '{}': {}",
+                    resource_name, resource_namespace, e
+                ),
+                reason: "ApiError".to_string(),
+                code: 500,
+            })),
+        }
+    }
+
     pub async fn get_resource_json_value(
         &self,
         name: &str,
@@ -68,7 +119,7 @@ impl K8sClient {
         } else {
             Api::default_namespaced_with(self.client.clone(), &gvk)
         };
-        resource.get(name).await
+        Ok(resource.get(name).await?)
     }

     pub async fn get_deployment(
@@ -81,7 +132,7 @@ impl K8sClient {
         } else {
             Api::default_namespaced(self.client.clone())
         };
-        deps.get_opt(name).await
+        Ok(deps.get_opt(name).await?)
     }

     pub async fn get_pod(&self, name: &str, namespace: Option<&str>) -> Result<Option<Pod>, Error> {
@@ -90,7 +141,26 @@ impl K8sClient {
         } else {
             Api::default_namespaced(self.client.clone())
         };
-        pods.get_opt(name).await
+        Ok(pods.get_opt(name).await?)
+    }
+
+    pub async fn patch_resource_by_merge(
+        &self,
+        name: &str,
+        namespace: Option<&str>,
+        gvk: &GroupVersionKind,
+        patch: Value,
+    ) -> Result<(), Error> {
+        let gvk = ApiResource::from_gvk(gvk);
+        let resource: Api<DynamicObject> = if let Some(ns) = namespace {
+            Api::namespaced_with(self.client.clone(), ns, &gvk)
+        } else {
+            Api::default_namespaced_with(self.client.clone(), &gvk)
+        };
+        let pp = PatchParams::default();
+        let merge = Patch::Merge(&patch);
+        resource.patch(name, &pp, &merge).await?;
+        Ok(())
     }

     pub async fn scale_deployment(
@@ -156,39 +226,6 @@ impl K8sClient {
         }
     }

-    pub async fn wait_for_pod_ready(
-        &self,
-        pod_name: &str,
-        namespace: Option<&str>,
-    ) -> Result<(), Error> {
-        let mut elapsed = 0;
-        let interval = 5; // seconds between checks
-        let timeout_secs = 120;
-
-        loop {
-            let pod = self.get_pod(pod_name, namespace).await?;
-            if let Some(p) = pod
-                && let Some(status) = p.status
-                && let Some(phase) = status.phase
-                && phase.to_lowercase() == "running"
-            {
-                return Ok(());
-            }
-
-            if elapsed >= timeout_secs {
-                return Err(Error::Discovery(DiscoveryError::MissingResource(format!(
-                    "'{}' in ns '{}' did not become ready within {}s",
-                    pod_name,
-                    namespace.unwrap(),
-                    timeout_secs
-                ))));
-            }
-
-            sleep(Duration::from_secs(interval)).await;
-            elapsed += interval;
-        }
-    }
-
     /// Will execute a command in the first pod found that matches the specified label
     /// '{label}={name}'
     pub async fn exec_app_capture_output(
@@ -235,7 +272,7 @@ impl K8sClient {
                     if let Some(s) = status.status {
                         let mut stdout_buf = String::new();
-                        if let Some(mut stdout) = process.stdout() {
+                        if let Some(mut stdout) = process.stdout().take() {
                             stdout
                                 .read_to_string(&mut stdout_buf)
                                 .await
@@ -455,12 +492,9 @@
             .as_str()
             .expect("couldn't get kind as str");
-        let mut it = api_version.splitn(2, '/');
-        let first = it.next().unwrap();
-        let (g, v) = match it.next() {
-            Some(second) => (first, second),
-            None => ("", first),
-        };
+        let split: Vec<&str> = api_version.splitn(2, "/").collect();
+        let g = split[0];
+        let v = split[1];
         let gvk = GroupVersionKind::gvk(g, v, kind);
         let api_resource = ApiResource::from_gvk(&gvk);

View File

@@ -212,11 +212,11 @@ impl K8sAnywhereTopology {
             .await?;
         let ready_replicas = ic.data["status"]["availableReplicas"].as_i64().unwrap_or(0);
         if ready_replicas >= 1 {
-            Ok(())
+            return Ok(());
         } else {
-            Err(PreparationError::new(
+            return Err(PreparationError::new(
                 "openshift-ingress-operator not available".to_string(),
-            ))
+            ));
         }
     }

View File

@@ -11,7 +11,7 @@ pub struct InventoryRepositoryFactory;
 impl InventoryRepositoryFactory {
     pub async fn build() -> Result<Box<dyn InventoryRepository>, RepoError> {
         Ok(Box::new(
-            SqliteInventoryRepository::new(&DATABASE_URL).await?,
+            SqliteInventoryRepository::new(&(*DATABASE_URL)).await?,
         ))
     }
 }

View File

@@ -36,7 +36,7 @@ impl HttpServer for OPNSenseFirewall {
     async fn serve_file_content(&self, file: &FileContent) -> Result<(), ExecutorError> {
         let path = match &file.path {
             crate::data::FilePath::Relative(path) => {
-                format!("{OPNSENSE_HTTP_ROOT_PATH}/{}", path)
+                format!("{OPNSENSE_HTTP_ROOT_PATH}/{}", path.to_string())
             }
             crate::data::FilePath::Absolute(path) => {
                 return Err(ExecutorError::ConfigurationError(format!(

View File

@@ -182,12 +182,16 @@ pub(crate) fn get_health_check_for_backend(
     let uppercase = binding.as_str();
     match uppercase {
         "TCP" => {
-            if let Some(checkport) = haproxy_health_check.checkport.content.as_ref()
-                && !checkport.is_empty()
-            {
-                return Some(HealthCheck::TCP(Some(checkport.parse().unwrap_or_else(
-                    |_| panic!("HAProxy check port should be a valid port number, got {checkport}"),
-                ))));
+            if let Some(checkport) = haproxy_health_check.checkport.content.as_ref() {
+                if !checkport.is_empty() {
+                    return Some(HealthCheck::TCP(Some(checkport.parse().unwrap_or_else(
+                        |_| {
+                            panic!(
+                                "HAProxy check port should be a valid port number, got {checkport}"
+                            )
+                        },
+                    ))));
+                }
             }
             Some(HealthCheck::TCP(None))
         }

View File

@@ -8,6 +8,7 @@ mod tftp;
 use std::sync::Arc;

 pub use management::*;
+use opnsense_config_xml::Host;
 use tokio::sync::RwLock;

 use crate::{executors::ExecutorError, topology::LogicalHost};

View File

@@ -1,8 +1,10 @@
 use async_trait::async_trait;
-use kube::api::GroupVersionKind;
+use kube::{Api, api::GroupVersionKind};
+use log::{debug, warn};
 use non_blank_string_rs::NonBlankString;
 use serde::Serialize;
-use std::{str::FromStr, sync::Arc};
+use serde::de::DeserializeOwned;
+use std::{process::Command, str::FromStr, sync::Arc};

 use crate::{
     data::Version,
@@ -10,7 +12,10 @@ use crate::{
     inventory::Inventory,
     modules::helm::chart::{HelmChartScore, HelmRepository},
     score::Score,
-    topology::{HelmCommand, K8sclient, Topology, ingress::Ingress, k8s::K8sClient},
+    topology::{
+        HelmCommand, K8sclient, PreparationError, PreparationOutcome, Topology, ingress::Ingress,
+        k8s::K8sClient,
+    },
 };
 use harmony_types::id::Id;
@@ -114,13 +119,13 @@ impl ArgoInterpret {
                 match ic.data["status"]["domain"].as_str() {
                     Some(domain) => return Ok(domain.to_string()),
-                    None => Err(InterpretError::new("Could not find domain".to_string())),
+                    None => return Err(InterpretError::new("Could not find domain".to_string())),
                 }
             }
             false => {
                 todo!()
             }
-        }
+        };
     }
 }

View File

@@ -190,7 +190,7 @@ impl<
         info!("Deploying {} to target {target:?}", self.application.name());
         let score = ArgoHelmScore {
-            namespace: self.application.name().to_string(),
+            namespace: format!("{}", self.application.name()),
             openshift: true,
             argo_apps: vec![ArgoApplication::from(CDApplicationConfig {
                 // helm pull oci://hub.nationtech.io/harmony/harmony-example-rust-webapp-chart --version 0.1.0
@@ -198,8 +198,8 @@ impl<
                 helm_chart_repo_url: "hub.nationtech.io/harmony".to_string(),
                 helm_chart_name: format!("{}-chart", self.application.name()),
                 values_overrides: None,
-                name: self.application.name().to_string(),
-                namespace: self.application.name().to_string(),
+                name: format!("{}", self.application.name()),
+                namespace: format!("{}", self.application.name()),
             })],
         };
         score

View File

@@ -3,6 +3,7 @@ use std::sync::Arc;
 use crate::modules::application::{
     Application, ApplicationFeature, InstallationError, InstallationOutcome,
 };
+use crate::modules::monitoring::application_monitoring::application_monitoring_score::ApplicationMonitoringScore;
 use crate::modules::monitoring::application_monitoring::rhobs_application_monitoring_score::ApplicationRHOBMonitoringScore;
 use crate::modules::monitoring::kube_prometheus::crd::rhob_alertmanager_config::RHOBObservability;

View File

@@ -194,10 +194,10 @@ impl RustWebapp {
             Some(body_full(tar_data.into())),
         );

-        while let Some(msg) = image_build_stream.next().await {
+        while let Some(mut msg) = image_build_stream.next().await {
             trace!("Got bollard msg {msg:?}");
             match msg {
-                Ok(msg) => {
+                Ok(mut msg) => {
                     if let Some(progress) = msg.progress_detail {
                         info!(
                             "Build progress {}/{}",
@@ -511,23 +511,25 @@ ingress:
         fs::write(chart_dir.join("values.yaml"), values_yaml)?;

         // Create templates/_helpers.tpl
-        let helpers_tpl = r#"
-{{/*
+        let helpers_tpl = format!(
+            r#"
+{{{{/*
 Expand the name of the chart.
-*/}}
-{{- define "chart.name" -}}
-{{- default .Chart.Name $.Values.nameOverride | trunc 63 | trimSuffix "-" }}
-{{- end }}
+*/}}}}
+{{{{- define "chart.name" -}}}}
+{{{{- default .Chart.Name $.Values.nameOverride | trunc 63 | trimSuffix "-" }}}}
+{{{{- end }}}}
-{{/*
+{{{{/*
 Create a default fully qualified app name.
 We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
-*/}}
-{{- define "chart.fullname" -}}
-{{- $name := default .Chart.Name $.Values.nameOverride }}
-{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
-{{- end }}
-"#.to_string();
+*/}}}}
+{{{{- define "chart.fullname" -}}}}
+{{{{- $name := default .Chart.Name $.Values.nameOverride }}}}
+{{{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}}}
+{{{{- end }}}}
+"#
+        );

         fs::write(templates_dir.join("_helpers.tpl"), helpers_tpl)?;

         // Create templates/service.yaml

View File

@@ -1,2 +1,3 @@
 mod helm;
+pub mod update_default_okd_ingress_score;
 pub use helm::*;

View File

@@ -0,0 +1,223 @@
+use std::{
+    fs::File,
+    io::Read,
+    path::{Path, PathBuf},
+    sync::Arc,
+};
+
+use async_trait::async_trait;
+use base64::{Engine, prelude::BASE64_STANDARD};
+use harmony_types::id::Id;
+use kube::api::GroupVersionKind;
+use serde_json::json;
+
+use crate::{
+    data::Version,
+    interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
+    inventory::Inventory,
+    score::Score,
+    topology::{K8sclient, Topology, k8s::K8sClient},
+};
+
+#[derive(Clone)]
+pub struct UpdateDefaultOkdIngressScore {
+    operator_name: String,
+    operator_namespace: String,
+    ca_name: String,
+    path_to_tls_crt: PathBuf,
+    path_to_tls_key: PathBuf,
+    path_to_ca_cert: PathBuf,
+}
+
+impl<T: Topology + K8sclient> Score<T> for UpdateDefaultOkdIngressScore {
+    fn name(&self) -> String {
+        "UpdateDefaultOkdIngressScore".to_string()
+    }
+
+    #[doc(hidden)]
+    fn create_interpret(&self) -> Box<dyn Interpret<T>> {
+        Box::new(UpdateDefaultOkdIngressInterpret {
+            score: self.clone(),
+        })
+    }
+}
+
+pub struct UpdateDefaultOkdIngressInterpret {
+    score: UpdateDefaultOkdIngressScore,
+}
+
+#[async_trait]
+impl<T: Topology + K8sclient> Interpret<T> for UpdateDefaultOkdIngressInterpret {
+    async fn execute(
+        &self,
+        _inventory: &Inventory,
+        topology: &T,
+    ) -> Result<Outcome, InterpretError> {
+        let client = topology.k8s_client().await?;
+        let secret_name = "ingress-ca-secret";
+        self.ensure_ingress_operator(
+            &client,
+            &self.score.operator_name,
+            &self.score.operator_namespace,
+        )
+        .await?;
+        self.create_ca_cm(&client, &self.score.path_to_ca_cert, &self.score.ca_name)
+            .await?;
+        self.patch_proxy(&client, &self.score.ca_name).await?;
+        self.create_tls_secret(
+            &client,
+            &self.score.path_to_tls_crt,
+            &self.score.path_to_tls_key,
+            &self.score.operator_namespace,
+            secret_name,
+        )
+        .await?;
+        self.patch_ingress(&client, &self.score.operator_namespace, secret_name)
+            .await
+    }
+
+    fn get_name(&self) -> InterpretName {
+        InterpretName::Custom("UpdateDefaultOkdIngress")
+    }
+
+    fn get_version(&self) -> Version {
+        todo!()
+    }
+
+    fn get_status(&self) -> InterpretStatus {
+        todo!()
+    }
+
+    fn get_children(&self) -> Vec<Id> {
+        todo!()
+    }
+}
+
+impl UpdateDefaultOkdIngressInterpret {
+    async fn ensure_ingress_operator(
+        &self,
+        client: &Arc<K8sClient>,
+        operator_name: &str,
+        operator_namespace: &str,
+    ) -> Result<Outcome, InterpretError> {
+        Ok(client
+            .ensure_deployment(operator_name, operator_namespace)
+            .await?)
+    }
+
+    fn open_path(&self, path: &Path) -> Result<String, InterpretError> {
+        let mut file = File::open(path)
+            .map_err(|e| InterpretError::new(format!("Could not open file {}", e)))?;
+        let mut s = String::new();
+        file.read_to_string(&mut s)
+            .map_err(|e| InterpretError::new(format!("Could not read file {}", e)))?;
+        Ok(s)
+    }
+
+    async fn create_ca_cm(
+        &self,
+        client: &Arc<K8sClient>,
+        path_to_ca_cert: &Path,
+        ca_name: &str,
+    ) -> Result<Outcome, InterpretError> {
+        let ca_bundle = BASE64_STANDARD.encode(self.open_path(path_to_ca_cert)?.as_bytes());
+        let cm = format!(
+            r#"
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: {ca_name}
+  namespace: openshift-config
+data:
+  ca-bundle.crt: {ca_bundle}
+"#
+        );
+        let cm_value: serde_yaml::Value = serde_yaml::from_str(&cm)
+            .map_err(|e| InterpretError::new(format!("Could not parse ConfigMap yaml: {e}")))?;
+        client
+            .apply_yaml(cm_value, Some("openshift-config"))
+            .await?;
+        Ok(Outcome::success(format!(
+            "successfully created cm : {} in openshift-config namespace",
+            ca_name
+        )))
+    }
+
+    async fn patch_proxy(
+        &self,
+        client: &Arc<K8sClient>,
+        ca_name: &str,
+    ) -> Result<Outcome, InterpretError> {
+        let gvk = GroupVersionKind {
+            group: "config.openshift.io".to_string(),
+            version: "v1".to_string(),
+            kind: "Proxy".to_string(),
+        };
+        let patch = json!({
+            "spec": {
+                "trustedCA": {
+                    "name": ca_name
+                }
+            }
+        });
+        client
+            .patch_resource_by_merge("cluster", None, &gvk, patch)
+            .await?;
+        Ok(Outcome::success(format!(
+            "successfully merged trusted ca to cluster proxy"
+        )))
+    }
+
+    async fn create_tls_secret(
+        &self,
+        client: &Arc<K8sClient>,
+        tls_crt: &Path,
+        tls_key: &Path,
+        operator_namespace: &str,
+        secret_name: &str,
+    ) -> Result<Outcome, InterpretError> {
+        let base64_tls_crt = BASE64_STANDARD.encode(self.open_path(tls_crt)?.as_bytes());
+        let base64_tls_key = BASE64_STANDARD.encode(self.open_path(tls_key)?.as_bytes());
+        let secret = format!(
+            r#"
+apiVersion: v1
+kind: Secret
+metadata:
+  name: {secret_name}
+  namespace: {operator_namespace}
+type: kubernetes.io/tls
+data:
+  # values are base64 encoded, which obscures them but does NOT provide
+  # any useful level of confidentiality
+  tls.crt: "{base64_tls_crt}"
+  tls.key: "{base64_tls_key}"
+"#
+        );
+        let secret_value: serde_yaml::Value = serde_yaml::from_str(&secret)
+            .map_err(|e| InterpretError::new(format!("Could not parse Secret yaml: {e}")))?;
+        client
+            .apply_yaml(secret_value, Some(operator_namespace))
+            .await?;
+        Ok(Outcome::success(format!(
+            "successfully created tls secret {} for the default ingress controller",
+            secret_name
+        )))
+    }
+
+    async fn patch_ingress(
+        &self,
+        client: &Arc<K8sClient>,
+        operator_namespace: &str,
+        secret_name: &str,
+    ) -> Result<Outcome, InterpretError> {
+        let gvk = GroupVersionKind {
+            group: "operator.openshift.io".to_string(),
+            version: "v1".to_string(),
+            kind: "IngressController".to_string(),
+        };
+        let patch = json!({"spec": {"defaultCertificate": {"name": secret_name}}});
+        client
+            .patch_resource_by_merge("default", Some(operator_namespace), &gvk, patch)
+            .await?;
+        Ok(Outcome::success(format!(
+            "successfully patched ingress operator to use secret {}",
+            secret_name
+        )))
+    }
+}

View File

@@ -66,7 +66,8 @@ impl HelmCommandExecutor {
             .is_none()
         {
             if self.chart.repo.is_none() {
-                return Err(std::io::Error::other(
+                return Err(std::io::Error::new(
+                    ErrorKind::Other,
                     "Chart doesn't exist locally and no repo specified",
                 ));
             }
@@ -106,10 +107,10 @@
     }

     pub fn run_command(mut self, mut args: Vec<String>) -> Result<Output, std::io::Error> {
-        if let Some(d) = self.debug
-            && d
-        {
-            args.push("--debug".to_string());
-        }
+        if let Some(d) = self.debug {
+            if d {
+                args.push("--debug".to_string());
+            }
+        }

         let path = if let Some(p) = self.path {
@@ -233,28 +234,28 @@ impl HelmChart {
             args.push(kv);
         }

-        if let Some(crd) = self.include_crds
-            && crd
-        {
-            args.push("--include-crds".to_string());
-        }
+        if let Some(crd) = self.include_crds {
+            if crd {
+                args.push("--include-crds".to_string());
+            }
+        }

-        if let Some(st) = self.skip_tests
-            && st
-        {
-            args.push("--skip-tests".to_string());
-        }
+        if let Some(st) = self.skip_tests {
+            if st {
+                args.push("--skip-tests".to_string());
+            }
+        }

-        if let Some(sh) = self.skip_hooks
-            && sh
-        {
-            args.push("--no-hooks".to_string());
-        }
+        if let Some(sh) = self.skip_hooks {
+            if sh {
+                args.push("--no-hooks".to_string());
+            }
+        }

-        if let Some(d) = self.debug
-            && d
-        {
-            args.push("--debug".to_string());
-        }
+        if let Some(d) = self.debug {
+            if d {
+                args.push("--debug".to_string());
+            }
+        }

         args

View File

@@ -63,7 +63,7 @@ impl<T: Topology + HttpServer> Interpret<T> for StaticFilesHttpInterpret {
         }

         for f in self.score.files.iter() {
-            http_server.serve_file_content(f).await?
+            http_server.serve_file_content(&f).await?
         }
         http_server.commit_config().await?;

View File

@@ -92,7 +92,7 @@ impl<T: Topology> Interpret<T> for DiscoverHostForRoleInterpret {
                 );
                 return Err(InterpretError::new(format!(
                     "Could not select host : {}",
-                    e
+                    e.to_string()
                 )));
             }
         }

View File

@@ -9,7 +9,9 @@ use crate::{
     inventory::Inventory,
     modules::{
         application::Application,
-        monitoring::kube_prometheus::crd::rhob_alertmanager_config::RHOBObservability,
+        monitoring::kube_prometheus::crd::{
+            crd_alertmanager_config::CRDPrometheus, rhob_alertmanager_config::RHOBObservability,
+        },
         prometheus::prometheus::PrometheusApplicationMonitoring,
     },
     score::Score,

View File

@@ -1,8 +1,12 @@
+use std::collections::BTreeMap;
+
 use kube::CustomResource;
 use schemars::JsonSchema;
 use serde::{Deserialize, Serialize};

-use crate::modules::monitoring::kube_prometheus::crd::rhob_prometheuses::LabelSelector;
+use crate::modules::monitoring::kube_prometheus::crd::rhob_prometheuses::{
+    LabelSelector, PrometheusSpec,
+};

 /// MonitoringStack CRD for monitoring.rhobs/v1alpha1
 #[derive(CustomResource, Serialize, Deserialize, Debug, Clone, JsonSchema)]

View File

@@ -4,5 +4,4 @@ pub mod application_monitoring;
 pub mod grafana;
 pub mod kube_prometheus;
 pub mod ntfy;
-pub mod okd;
 pub mod prometheus;

View File

@@ -1,149 +0,0 @@
-use std::{collections::BTreeMap, sync::Arc};
-
-use crate::{
-    data::Version,
-    interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
-    inventory::Inventory,
-    score::Score,
-    topology::{K8sclient, Topology, k8s::K8sClient},
-};
-use async_trait::async_trait;
-use harmony_types::id::Id;
-use k8s_openapi::api::core::v1::ConfigMap;
-use kube::api::ObjectMeta;
-use serde::Serialize;
-
-#[derive(Clone, Debug, Serialize)]
-pub struct OpenshiftUserWorkloadMonitoring {}
-
-impl<T: Topology + K8sclient> Score<T> for OpenshiftUserWorkloadMonitoring {
-    fn name(&self) -> String {
-        "OpenshiftUserWorkloadMonitoringScore".to_string()
-    }
-
-    fn create_interpret(&self) -> Box<dyn Interpret<T>> {
-        Box::new(OpenshiftUserWorkloadMonitoringInterpret {})
-    }
-}
-
-#[derive(Clone, Debug, Serialize)]
-pub struct OpenshiftUserWorkloadMonitoringInterpret {}
-
-#[async_trait]
-impl<T: Topology + K8sclient> Interpret<T> for OpenshiftUserWorkloadMonitoringInterpret {
-    async fn execute(
-        &self,
-        _inventory: &Inventory,
-        topology: &T,
-    ) -> Result<Outcome, InterpretError> {
-        let client = topology.k8s_client().await.unwrap();
-        self.update_cluster_monitoring_config_cm(&client).await?;
-        self.update_user_workload_monitoring_config_cm(&client)
-            .await?;
-        self.verify_user_workload(&client).await?;
-        Ok(Outcome::success(
-            "successfully enabled user-workload-monitoring".to_string(),
-        ))
-    }
-
-    fn get_name(&self) -> InterpretName {
-        InterpretName::Custom("OpenshiftUserWorkloadMonitoring")
-    }
-
-    fn get_version(&self) -> Version {
-        todo!()
-    }
-
-    fn get_status(&self) -> InterpretStatus {
-        todo!()
-    }
-
-    fn get_children(&self) -> Vec<Id> {
-        todo!()
-    }
-}
-
-impl OpenshiftUserWorkloadMonitoringInterpret {
-    pub async fn update_cluster_monitoring_config_cm(
-        &self,
-        client: &Arc<K8sClient>,
-    ) -> Result<Outcome, InterpretError> {
-        let mut data = BTreeMap::new();
-        data.insert(
-            "config.yaml".to_string(),
-            r#"
-enableUserWorkload: true
-alertmanagerMain:
-  enableUserAlertmanagerConfig: true
-"#
-            .to_string(),
-        );
-        let cm = ConfigMap {
-            metadata: ObjectMeta {
-                name: Some("cluster-monitoring-config".to_string()),
-                namespace: Some("openshift-monitoring".to_string()),
-                ..Default::default()
-            },
-            data: Some(data),
-            ..Default::default()
-        };
-        client.apply(&cm, Some("openshift-monitoring")).await?;
-        Ok(Outcome::success(
-            "updated cluster-monitoring-config-map".to_string(),
-        ))
-    }
-
-    pub async fn update_user_workload_monitoring_config_cm(
-        &self,
-        client: &Arc<K8sClient>,
-    ) -> Result<Outcome, InterpretError> {
-        let mut data = BTreeMap::new();
-        data.insert(
-            "config.yaml".to_string(),
-            r#"
-alertmanager:
-  enabled: true
-  enableAlertmanagerConfig: true
-"#
-            .to_string(),
-        );
-        let cm = ConfigMap {
-            metadata: ObjectMeta {
-                name: Some("user-workload-monitoring-config".to_string()),
-                namespace: Some("openshift-user-workload-monitoring".to_string()),
-                ..Default::default()
-            },
-            data: Some(data),
-            ..Default::default()
-        };
-        client
-            .apply(&cm, Some("openshift-user-workload-monitoring"))
-            .await?;
-        Ok(Outcome::success(
-            "updated openshift-user-monitoring-config-map".to_string(),
-        ))
-    }
-
-    pub async fn verify_user_workload(
-        &self,
-        client: &Arc<K8sClient>,
-    ) -> Result<Outcome, InterpretError> {
-        let namespace = "openshift-user-workload-monitoring";
-        let alertmanager_name = "alertmanager-user-workload-0";
-        let prometheus_name = "prometheus-user-workload-0";
-        client
-            .wait_for_pod_ready(alertmanager_name, Some(namespace))
-            .await?;
-        client
-            .wait_for_pod_ready(prometheus_name, Some(namespace))
-            .await?;
-        Ok(Outcome::success(format!(
-            "pods: {}, {} ready in ns: {}",
-            alertmanager_name, prometheus_name, namespace
-        )))
-    }
-}

View File

@@ -1 +0,0 @@
-pub mod enable_user_workload;

View File

@@ -52,12 +52,6 @@ pub struct OKDSetup02BootstrapInterpret {
     status: InterpretStatus,
 }

-impl Default for OKDSetup02BootstrapInterpret {
-    fn default() -> Self {
-        Self::new()
-    }
-}
-
 impl OKDSetup02BootstrapInterpret {
     pub fn new() -> Self {
         let version = Version::from("1.0.0").unwrap();
@@ -104,9 +98,9 @@ impl OKDSetup02BootstrapInterpret {
                 InterpretError::new(format!("Failed to create okd installation directory : {e}"))
             })?;
             if !exit_status.success() {
-                return Err(InterpretError::new(
-                    "Failed to create okd installation directory".to_string(),
-                ));
+                return Err(InterpretError::new(format!(
+                    "Failed to create okd installation directory"
+                )));
             } else {
                 info!(
                     "Created OKD installation directory {}",

View File

@@ -254,7 +254,7 @@ impl RHOBAlertingInterpret {
         let stack = MonitoringStack {
             metadata: ObjectMeta {
-                name: Some(format!("{}-monitoring", self.sender.namespace.clone())),
+                name: Some(format!("{}-monitoring", self.sender.namespace.clone()).into()),
                 namespace: Some(self.sender.namespace.clone()),
                 labels: Some([("monitoring-stack".into(), "true".into())].into()),
                 ..Default::default()
@@ -278,7 +278,7 @@ impl RHOBAlertingInterpret {
             .get_domain(&format!("alert-manager-{}", self.sender.namespace.clone()))
             .await?;
         let name = format!("{}-alert-manager", self.sender.namespace.clone());
-        let backend_service = "alertmanager-operated".to_string();
+        let backend_service = format!("alertmanager-operated");
         let namespace = self.sender.namespace.clone();
         let alert_manager_ingress = K8sIngressScore {
             name: fqdn!(&name),
@@ -295,7 +295,7 @@ impl RHOBAlertingInterpret {
             .get_domain(&format!("prometheus-{}", self.sender.namespace.clone()))
             .await?;
         let name = format!("{}-prometheus", self.sender.namespace.clone());
-        let backend_service = "prometheus-operated".to_string();
+        let backend_service = format!("prometheus-operated");
         let prometheus_ingress = K8sIngressScore {
             name: fqdn!(&name),
             host: fqdn!(&prometheus_domain),

View File

@@ -25,7 +25,7 @@ pub struct CephRemoveOsd {
 impl<T: Topology + K8sclient> Score<T> for CephRemoveOsd {
     fn name(&self) -> String {
-        "CephRemoveOsdScore".to_string()
+        format!("CephRemoveOsdScore")
     }

     #[doc(hidden)]
@@ -118,14 +118,14 @@ impl CephRemoveOsdInterpret {
                 if let Some(status) = deployment.status {
                     let ready_count = status.ready_replicas.unwrap_or(0);
                     if ready_count >= 1 {
-                        Ok(Outcome::success(format!(
+                        return Ok(Outcome::success(format!(
                             "'{}' is ready with {} replica(s).",
                             &toolbox_dep, ready_count
-                        )))
+                        )));
                     } else {
-                        Err(InterpretError::new(
+                        return Err(InterpretError::new(
                             "ceph-tool-box not ready in cluster".to_string(),
-                        ))
+                        ));
                     }
                 } else {
                     Err(InterpretError::new(format!(
@@ -181,14 +181,15 @@ impl CephRemoveOsdInterpret {
             )
             .await?;

-            if let Some(deployment) = dep
-                && let Some(status) = deployment.status
-                && status.replicas.unwrap_or(1) == 0
-                && status.ready_replicas.unwrap_or(1) == 0
-            {
-                return Ok(Outcome::success(
-                    "Deployment successfully scaled down.".to_string(),
-                ));
+            if let Some(deployment) = dep {
+                if let Some(status) = deployment.status {
+                    if status.replicas.unwrap_or(1) == 0 && status.ready_replicas.unwrap_or(1) == 0
+                    {
+                        return Ok(Outcome::success(
+                            "Deployment successfully scaled down.".to_string(),
+                        ));
+                    }
+                }
             }

             if start.elapsed() > timeout {

View File

@@ -20,7 +20,7 @@ pub struct CephVerifyClusterHealth {
 impl<T: Topology + K8sclient> Score<T> for CephVerifyClusterHealth {
     fn name(&self) -> String {
-        "CephValidateClusterHealth".to_string()
+        format!("CephValidateClusterHealth")
     }

     fn create_interpret(&self) -> Box<dyn Interpret<T>> {
@@ -80,14 +80,14 @@ impl CephVerifyClusterHealthInterpret {
                 if let Some(status) = deployment.status {
                     let ready_count = status.ready_replicas.unwrap_or(0);
                     if ready_count >= 1 {
-                        Ok(Outcome::success(format!(
+                        return Ok(Outcome::success(format!(
                             "'{}' is ready with {} replica(s).",
                             &toolbox_dep, ready_count
-                        )))
+                        )));
                     } else {
-                        Err(InterpretError::new(
+                        return Err(InterpretError::new(
                             "ceph-tool-box not ready in cluster".to_string(),
-                        ))
+                        ));
                     }
                 } else {
                     Err(InterpretError::new(format!(
@@ -123,9 +123,9 @@ impl CephVerifyClusterHealthInterpret {
             .await?;

         if health.contains("HEALTH_OK") {
-            Ok(Outcome::success(
+            return Ok(Outcome::success(
                 "Ceph Cluster in healthy state".to_string(),
-            ))
+            ));
         } else {
             Err(InterpretError::new(format!(
                 "Ceph cluster unhealthy {}",