Compare commits

6 Commits

SHA1 Message Date
6e53397a58 commit 2025-10-02 17:11:02 -04:00
8baf75a4fd commit 2025-10-02 16:59:08 -04:00
d53d040bac commit 2025-10-02 16:56:17 -04:00
687f11b261 commit 2025-10-02 16:53:25 -04:00
58e609767b cargo fmt 2025-10-02 16:46:43 -04:00
f75765408d fix: clippy 2025-10-02 16:25:07 -04:00
25 changed files with 133 additions and 252 deletions

View File

@@ -4,10 +4,9 @@ use derive_new::new;
use k8s_openapi::{
ClusterResourceScope, NamespaceResourceScope,
api::{apps::v1::Deployment, core::v1::Pod},
-apimachinery::pkg::version::Info,
};
use kube::{
-Client, Config, Discovery, Error, Resource,
+Client, Config, Error, Resource,
api::{Api, AttachParams, DeleteParams, ListParams, Patch, PatchParams, ResourceExt},
config::{KubeConfigOptions, Kubeconfig},
core::ErrorResponse,
@@ -21,7 +20,7 @@ use kube::{
};
use log::{debug, error, trace};
use serde::{Serialize, de::DeserializeOwned};
-use serde_json::{Value, json};
+use serde_json::json;
use similar::TextDiff;
use tokio::{io::AsyncReadExt, time::sleep};
@@ -57,17 +56,6 @@ impl K8sClient {
})
}
-pub async fn get_apiserver_version(&self) -> Result<Info, Error> {
-let client: Client = self.client.clone();
-let version_info: Info = client.apiserver_version().await?;
-Ok(version_info)
-}
-pub async fn discovery(&self) -> Result<Discovery, Error> {
-let discovery: Discovery = Discovery::new(self.client.clone()).run().await?;
-Ok(discovery)
-}
pub async fn get_resource_json_value(
&self,
name: &str,
@@ -80,7 +68,7 @@ impl K8sClient {
} else {
Api::default_namespaced_with(self.client.clone(), &gvk)
};
-Ok(resource.get(name).await?)
+resource.get(name).await
}
pub async fn get_deployment(
@@ -93,7 +81,7 @@ impl K8sClient {
} else {
Api::default_namespaced(self.client.clone())
};
-Ok(deps.get_opt(name).await?)
+deps.get_opt(name).await
}
pub async fn get_pod(&self, name: &str, namespace: Option<&str>) -> Result<Option<Pod>, Error> {
@@ -102,7 +90,7 @@ impl K8sClient {
} else {
Api::default_namespaced(self.client.clone())
};
-Ok(pods.get_opt(name).await?)
+pods.get_opt(name).await
}
pub async fn scale_deployment(
@@ -179,14 +167,12 @@ impl K8sClient {
loop {
let pod = self.get_pod(pod_name, namespace).await?;
-if let Some(p) = pod {
-if let Some(status) = p.status {
-if let Some(phase) = status.phase {
-if phase.to_lowercase() == "running" {
-return Ok(());
-}
-}
-}
+if let Some(p) = pod
+&& let Some(status) = p.status
+&& let Some(phase) = status.phase
+&& phase.to_lowercase() == "running"
+{
+return Ok(());
}
if elapsed >= timeout_secs {
@@ -249,7 +235,7 @@ impl K8sClient {
if let Some(s) = status.status {
let mut stdout_buf = String::new();
-if let Some(mut stdout) = process.stdout().take() {
+if let Some(mut stdout) = process.stdout() {
stdout
.read_to_string(&mut stdout_buf)
.await
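
The wait_for_pod change above collapses a pyramid of nested if lets into a single let-chain. A minimal standalone sketch of the pattern (it assumes a toolchain with let-chains stabilized, i.e. the Rust 2024 edition; the types here are illustrative stand-ins, not the real kube ones):

struct Status { phase: Option<String> }
struct Pod { status: Option<Status> }

// One chain replaces three nested `if let` blocks plus a comparison.
fn is_running(pod: Option<Pod>) -> bool {
    if let Some(p) = pod
        && let Some(status) = p.status
        && let Some(phase) = status.phase
        && phase.to_lowercase() == "running"
    {
        return true;
    }
    false
}

fn main() {
    assert!(!is_running(None));
}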

View File

@@ -47,13 +47,6 @@ struct K8sState {
message: String,
}
-#[derive(Debug, Clone)]
-pub enum KubernetesDistribution {
-OpenshiftFamily,
-K3sFamily,
-Default,
-}
#[derive(Debug, Clone)]
enum K8sSource {
LocalK3d,
@@ -64,7 +57,6 @@ enum K8sSource {
pub struct K8sAnywhereTopology {
k8s_state: Arc<OnceCell<Option<K8sState>>>,
tenant_manager: Arc<OnceCell<K8sTenantManager>>,
-flavour: Arc<OnceCell<KubernetesDistribution>>,
config: Arc<K8sAnywhereConfig>,
}
@@ -170,7 +162,6 @@ impl K8sAnywhereTopology {
Self {
k8s_state: Arc::new(OnceCell::new()),
tenant_manager: Arc::new(OnceCell::new()),
-flavour: Arc::new(OnceCell::new()),
config: Arc::new(K8sAnywhereConfig::from_env()),
}
}
@@ -179,42 +170,10 @@ impl K8sAnywhereTopology {
Self {
k8s_state: Arc::new(OnceCell::new()),
tenant_manager: Arc::new(OnceCell::new()),
-flavour: Arc::new(OnceCell::new()),
config: Arc::new(config),
}
}
-pub async fn get_k8s_distribution(&self) -> Result<&KubernetesDistribution, PreparationError> {
-self.flavour
-.get_or_try_init(async || {
-let client = self.k8s_client().await.unwrap();
-let discovery = client.discovery().await.map_err(|e| {
-PreparationError::new(format!("Could not discover API groups: {}", e))
-})?;
-let version = client.get_apiserver_version().await.map_err(|e| {
-PreparationError::new(format!("Could not get server version: {}", e))
-})?;
-// OpenShift / OKD
-if discovery
-.groups()
-.any(|g| g.name() == "project.openshift.io")
-{
-return Ok(KubernetesDistribution::OpenshiftFamily);
-}
-// K3d / K3s
-if version.git_version.contains("k3s") {
-return Ok(KubernetesDistribution::K3sFamily);
-}
-return Ok(KubernetesDistribution::Default);
-})
-.await
-}
async fn get_cluster_observability_operator_prometheus_application_score(
&self,
sender: RHOBObservability,
@@ -253,11 +212,11 @@ impl K8sAnywhereTopology {
.await?;
let ready_replicas = ic.data["status"]["availableReplicas"].as_i64().unwrap_or(0);
if ready_replicas >= 1 {
-return Ok(());
+Ok(())
} else {
-return Err(PreparationError::new(
+Err(PreparationError::new(
"openshift-ingress-operator not available".to_string(),
-));
+))
}
}
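
The deleted get_k8s_distribution cached its detection result in the flavour once-cell, so the field, its OnceCell::new() initializers, and the method all go together. For reference, a minimal sketch of the get_or_try_init caching pattern it relied on (assuming tokio's async OnceCell; the names and string payload here are illustrative):

use tokio::sync::OnceCell;

struct Detector {
    flavour: OnceCell<String>,
}

impl Detector {
    // The first caller runs the fallible async init; later callers
    // get the cached reference without re-running it.
    async fn flavour(&self) -> Result<&String, String> {
        self.flavour
            .get_or_try_init(|| async { Ok("Default".to_string()) })
            .await
    }
}

#[tokio::main]
async fn main() {
    let d = Detector { flavour: OnceCell::new() };
    assert_eq!(d.flavour().await.unwrap(), "Default");
}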

View File

@@ -186,7 +186,7 @@ impl TopologyState {
}
}
-#[derive(Debug, PartialEq)]
+#[derive(Debug)]
pub enum DeploymentTarget {
LocalDev,
Staging,

View File

@@ -11,7 +11,7 @@ pub struct InventoryRepositoryFactory;
impl InventoryRepositoryFactory {
pub async fn build() -> Result<Box<dyn InventoryRepository>, RepoError> {
Ok(Box::new(
-SqliteInventoryRepository::new(&(*DATABASE_URL)).await?,
+SqliteInventoryRepository::new(&DATABASE_URL).await?,
))
}
}
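
Dropping &(*DATABASE_URL) in favour of &DATABASE_URL works because deref coercion already unwraps the lazy static; clippy flags the explicit reborrow. A sketch under the assumption that DATABASE_URL is a lazily initialized String (the real declaration may differ):

use std::sync::LazyLock;

static DATABASE_URL: LazyLock<String> =
    LazyLock::new(|| std::env::var("DATABASE_URL").unwrap_or_default());

fn connect(url: &str) -> usize {
    url.len()
}

fn main() {
    // &LazyLock<String> derefs to &String, then to &str, automatically.
    let _ = connect(&DATABASE_URL);
}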

View File

@@ -36,7 +36,7 @@ impl HttpServer for OPNSenseFirewall {
async fn serve_file_content(&self, file: &FileContent) -> Result<(), ExecutorError> {
let path = match &file.path {
crate::data::FilePath::Relative(path) => {
-format!("{OPNSENSE_HTTP_ROOT_PATH}/{}", path.to_string())
+format!("{OPNSENSE_HTTP_ROOT_PATH}/{}", path)
}
crate::data::FilePath::Absolute(path) => {
return Err(ExecutorError::ConfigurationError(format!(
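
The serve_file_content fix removes a to_string() call on a value that format! will Display anyway, which clippy reports (the to_string_in_format_args lint). The equivalence, as a tiny sketch with &str stand-ins for the real path type:

fn main() {
    let root = "/srv/http";
    let path = "pxe/boot.ipxe";
    // Before: allocates an intermediate String; after: Display is used directly.
    assert_eq!(
        format!("{root}/{}", path.to_string()),
        format!("{root}/{}", path),
    );
}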

View File

@@ -182,16 +182,12 @@ pub(crate) fn get_health_check_for_backend(
let uppercase = binding.as_str();
match uppercase {
"TCP" => {
-if let Some(checkport) = haproxy_health_check.checkport.content.as_ref() {
-if !checkport.is_empty() {
-return Some(HealthCheck::TCP(Some(checkport.parse().unwrap_or_else(
-|_| {
-panic!(
-"HAProxy check port should be a valid port number, got {checkport}"
-)
-},
-))));
-}
+if let Some(checkport) = haproxy_health_check.checkport.content.as_ref()
+&& !checkport.is_empty()
+{
+return Some(HealthCheck::TCP(Some(checkport.parse().unwrap_or_else(
+|_| panic!("HAProxy check port should be a valid port number, got {checkport}"),
+))));
}
Some(HealthCheck::TCP(None))
}

View File

@@ -8,7 +8,6 @@ mod tftp;
use std::sync::Arc;
pub use management::*;
use opnsense_config_xml::Host;
-use tokio::sync::RwLock;
use crate::{executors::ExecutorError, topology::LogicalHost};

View File

@@ -1,10 +1,8 @@
use async_trait::async_trait;
-use kube::{Api, api::GroupVersionKind};
-use log::{debug, warn};
+use kube::api::GroupVersionKind;
use non_blank_string_rs::NonBlankString;
use serde::Serialize;
use serde::de::DeserializeOwned;
-use std::{process::Command, str::FromStr, sync::Arc};
+use std::{str::FromStr, sync::Arc};
use crate::{
data::Version,
@@ -12,10 +10,7 @@ use crate::{
inventory::Inventory,
modules::helm::chart::{HelmChartScore, HelmRepository},
score::Score,
-topology::{
-HelmCommand, K8sclient, PreparationError, PreparationOutcome, Topology, ingress::Ingress,
-k8s::K8sClient,
-},
+topology::{HelmCommand, K8sclient, Topology, ingress::Ingress, k8s::K8sClient},
};
use harmony_types::id::Id;
@@ -57,10 +52,8 @@ impl<T: Topology + K8sclient + HelmCommand + Ingress> Interpret<T> for ArgoInter
let k8s_client = topology.k8s_client().await?;
let svc = format!("argo-{}", self.score.namespace.clone());
let domain = topology.get_domain(&svc).await?;
-// FIXME we now have a way to know if we're running on openshift family
let helm_score =
-argo_helm_chart_score(&self.score.namespace, self.score.openshift, &domain);
+argo_helm_chart_score(&self.score.namespace, self.score.openshift, &domain);
helm_score.interpret(inventory, topology).await?;
@@ -121,13 +114,13 @@ impl ArgoInterpret {
match ic.data["status"]["domain"].as_str() {
Some(domain) => return Ok(domain.to_string()),
-None => return Err(InterpretError::new("Could not find domain".to_string())),
+None => Err(InterpretError::new("Could not find domain".to_string())),
}
}
false => {
todo!()
}
};
}
}
}
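
The get_domain arm above loses its return keyword where the match already sits in tail position; this is clippy's needless_return. A sketch of the shape (simplified signature, not the real InterpretError):

fn domain(found: Option<&str>) -> Result<String, String> {
    // As the tail expression, each arm's value is the return value;
    // an explicit `return` inside the arm is redundant.
    match found {
        Some(d) => Ok(d.to_string()),
        None => Err("Could not find domain".to_string()),
    }
}

fn main() {
    assert!(domain(Some("example.org")).is_ok());
}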

View File

@@ -10,11 +10,12 @@ use crate::{
data::Version,
inventory::Inventory,
modules::application::{
-features::{ArgoApplication, ArgoHelmScore}, webapp::Webapp, ApplicationFeature, HelmPackage, InstallationError, InstallationOutcome, OCICompliant
+ApplicationFeature, HelmPackage, InstallationError, InstallationOutcome, OCICompliant,
+features::{ArgoApplication, ArgoHelmScore},
},
score::Score,
topology::{
-ingress::Ingress, DeploymentTarget, HelmCommand, K8sclient, MultiTargetTopology, Topology
+DeploymentTarget, HelmCommand, K8sclient, MultiTargetTopology, Topology, ingress::Ingress,
},
};
@@ -46,11 +47,11 @@ use crate::{
/// - ArgoCD to install/upgrade/rollback/inspect k8s resources
/// - Kubernetes for runtime orchestration
#[derive(Debug, Default, Clone)]
-pub struct PackagingDeployment<A: OCICompliant + HelmPackage + Webapp> {
+pub struct PackagingDeployment<A: OCICompliant + HelmPackage> {
pub application: Arc<A>,
}
-impl<A: OCICompliant + HelmPackage + Webapp> PackagingDeployment<A> {
+impl<A: OCICompliant + HelmPackage> PackagingDeployment<A> {
async fn deploy_to_local_k3d(
&self,
app_name: String,
@@ -136,7 +137,7 @@ impl<A: OCICompliant + HelmPackage + Webapp> PackagingDeployment<A> {
#[async_trait]
impl<
-A: OCICompliant + HelmPackage + Webapp + Clone + 'static,
+A: OCICompliant + HelmPackage + Clone + 'static,
T: Topology + HelmCommand + MultiTargetTopology + K8sclient + Ingress + 'static,
> ApplicationFeature<T> for PackagingDeployment<A>
{
@@ -145,15 +146,10 @@ impl<
topology: &T,
) -> Result<InstallationOutcome, InstallationError> {
let image = self.application.image_name();
-let domain = if topology.current_target() == DeploymentTarget::Production {
-self.application.dns()
-} else {
-topology
+let domain = topology
.get_domain(&self.application.name())
.await
-.map_err(|e| e.to_string())?
-};
+.map_err(|e| e.to_string())?;
// TODO Write CI/CD workflow files
// we can autotedect the CI type using the remote url (default to github action for github
@@ -194,7 +190,7 @@ impl<
info!("Deploying {} to target {target:?}", self.application.name());
let score = ArgoHelmScore {
-namespace: format!("{}", self.application.name()),
+namespace: self.application.name().to_string(),
openshift: true,
argo_apps: vec![ArgoApplication::from(CDApplicationConfig {
// helm pull oci://hub.nationtech.io/harmony/harmony-example-rust-webapp-chart --version 0.1.0
@@ -202,8 +198,8 @@ impl<
helm_chart_repo_url: "hub.nationtech.io/harmony".to_string(),
helm_chart_name: format!("{}-chart", self.application.name()),
values_overrides: None,
-name: format!("{}", self.application.name()),
-namespace: format!("{}", self.application.name()),
+name: self.application.name().to_string(),
+namespace: self.application.name().to_string(),
})],
};
score

View File

@@ -3,7 +3,6 @@ use std::sync::Arc;
use crate::modules::application::{
Application, ApplicationFeature, InstallationError, InstallationOutcome,
};
-use crate::modules::monitoring::application_monitoring::application_monitoring_score::ApplicationMonitoringScore;
use crate::modules::monitoring::application_monitoring::rhobs_application_monitoring_score::ApplicationRHOBMonitoringScore;
use crate::modules::monitoring::kube_prometheus::crd::rhob_alertmanager_config::RHOBObservability;

View File

@@ -2,7 +2,6 @@ mod feature;
pub mod features;
pub mod oci;
mod rust;
-mod webapp;
use std::sync::Arc;
pub use feature::*;

View File

@@ -16,7 +16,6 @@ use tar::{Builder, Header};
use walkdir::WalkDir;
use crate::config::{REGISTRY_PROJECT, REGISTRY_URL};
-use crate::modules::application::webapp::Webapp;
use crate::{score::Score, topology::Topology};
use super::{Application, ApplicationFeature, ApplicationInterpret, HelmPackage, OCICompliant};
@@ -61,10 +60,6 @@ pub struct RustWebapp {
pub project_root: PathBuf,
pub service_port: u32,
pub framework: Option<RustWebFramework>,
-/// Host name that will be used in production environment.
-///
-/// This is the place to put the public host name if this is a public facing webapp.
-pub dns: String,
}
impl Application for RustWebapp {
@@ -73,12 +68,6 @@ impl Application for RustWebapp {
}
}
-impl Webapp for RustWebapp {
-fn dns(&self) -> String {
-self.dns.clone()
-}
-}
#[async_trait]
impl HelmPackage for RustWebapp {
async fn build_push_helm_package(
@@ -205,10 +194,10 @@ impl RustWebapp {
Some(body_full(tar_data.into())),
);
-while let Some(mut msg) = image_build_stream.next().await {
+while let Some(msg) = image_build_stream.next().await {
trace!("Got bollard msg {msg:?}");
match msg {
-Ok(mut msg) => {
+Ok(msg) => {
if let Some(progress) = msg.progress_detail {
info!(
"Build progress {}/{}",
@@ -268,6 +257,7 @@ impl RustWebapp {
".harmony_generated",
"harmony",
"node_modules",
"Dockerfile.harmony",
];
let mut entries: Vec<_> = WalkDir::new(project_root)
.into_iter()
@@ -471,76 +461,73 @@ impl RustWebapp {
let (image_repo, image_tag) = image_url.rsplit_once(':').unwrap_or((image_url, "latest"));
let app_name = &self.name;
let service_port = self.service_port;
// Create Chart.yaml
let chart_yaml = format!(
r#"
apiVersion: v2
-name: {chart_name}
-description: A Helm chart for the {app_name} web application.
+name: {}
+description: A Helm chart for the {} web application.
type: application
-version: 0.2.0
-appVersion: "{image_tag}"
+version: 0.1.0
+appVersion: "{}"
"#,
+chart_name, self.name, image_tag
);
fs::write(chart_dir.join("Chart.yaml"), chart_yaml)?;
// Create values.yaml
let values_yaml = format!(
r#"
-# Default values for {chart_name}.
+# Default values for {}.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
replicaCount: 1
image:
-repository: {image_repo}
+repository: {}
pullPolicy: IfNotPresent
# Overridden by the chart's appVersion
-tag: "{image_tag}"
+tag: "{}"
service:
type: ClusterIP
-port: {service_port}
+port: {}
ingress:
enabled: true
tls: true
# Annotations for cert-manager to handle SSL.
annotations:
# Add other annotations like nginx ingress class if needed
# kubernetes.io/ingress.class: nginx
hosts:
-- host: {domain}
+- host: {}
paths:
- path: /
pathType: ImplementationSpecific
"#,
+chart_name, image_repo, image_tag, self.service_port, domain,
);
fs::write(chart_dir.join("values.yaml"), values_yaml)?;
// Create templates/_helpers.tpl
-let helpers_tpl = format!(
-r#"
-{{{{/*
+let helpers_tpl = r#"
+{{/*
Expand the name of the chart.
-*/}}}}
-{{{{- define "chart.name" -}}}}
-{{{{- default .Chart.Name $.Values.nameOverride | trunc 63 | trimSuffix "-" }}}}
-{{{{- end }}}}
+*/}}
+{{- define "chart.name" -}}
+{{- default .Chart.Name $.Values.nameOverride | trunc 63 | trimSuffix "-" }}
+{{- end }}
-{{{{/*
+{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
-*/}}}}
-{{{{- define "chart.fullname" -}}}}
-{{{{- $name := default .Chart.Name $.Values.nameOverride }}}}
-{{{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}}}
-{{{{- end }}}}
-"#
-);
+*/}}
+{{- define "chart.fullname" -}}
+{{- $name := default .Chart.Name $.Values.nameOverride }}
+{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
+{{- end }}
+"#.to_string();
fs::write(templates_dir.join("_helpers.tpl"), helpers_tpl)?;
// Create templates/service.yaml
@@ -594,11 +581,7 @@ spec:
);
fs::write(templates_dir.join("deployment.yaml"), deployment_yaml)?;
-let service_port = self.service_port;
// Create templates/ingress.yaml
-// TODO get issuer name and tls config from topology as it may be different from one
-// cluster to another, also from one version to another
let ingress_yaml = format!(
r#"
{{{{- if $.Values.ingress.enabled -}}}}
@@ -611,11 +594,13 @@ metadata:
spec:
{{{{- if $.Values.ingress.tls }}}}
tls:
-- secretName: {{{{ include "chart.fullname" . }}}}-tls
-hosts:
-{{{{- range $.Values.ingress.hosts }}}}
-- {{{{ .host | quote }}}}
+{{{{- range $.Values.ingress.tls }}}}
+- hosts:
+{{{{- range .hosts }}}}
+- {{{{ . | quote }}}}
{{{{- end }}}}
+secretName: {{{{ .secretName }}}}
+{{{{- end }}}}
{{{{- end }}}}
rules:
{{{{- range $.Values.ingress.hosts }}}}
@@ -629,11 +614,12 @@ spec:
service:
name: {{{{ include "chart.fullname" $ }}}}
port:
-number: {{{{ $.Values.service.port | default {service_port} }}}}
+number: {{{{ $.Values.service.port | default {} }}}}
{{{{- end }}}}
{{{{- end }}}}
{{{{- end }}}}
"#,
+self.service_port
);
fs::write(templates_dir.join("ingress.yaml"), ingress_yaml)?;
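
A note on the brace noise above: inside a format! string every literal { must be doubled, so a Go-template token such as {{- end }} has to be written {{{{- end }}}}. The _helpers.tpl block interpolates nothing, so turning it into a plain raw string halves the braces; ingress.yaml still substitutes the port, so it keeps format! and the escaped form. A self-contained illustration:

fn main() {
    let port = 8080;
    // Escaped braces collapse: {{ -> { in the rendered output.
    let rendered = format!("number: {{{{ .Values.service.port | default {} }}}}", port);
    assert_eq!(rendered, "number: {{ .Values.service.port | default 8080 }}");

    // No interpolation: a raw string keeps the template verbatim.
    let verbatim = r#"{{- define "chart.name" -}}"#;
    assert_eq!(verbatim, "{{- define \"chart.name\" -}}");
}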

View File

@@ -1,7 +0,0 @@
-use super::Application;
-use async_trait::async_trait;
-#[async_trait]
-pub trait Webapp: Application {
-fn dns(&self) -> String;
-}

View File

@@ -1,20 +0,0 @@
-/// Discover the current ArgoCD setup
-///
-/// 1. No argo installed
-/// 2. Argo installed in current namespace
-/// 3. Argo installed in different namespace (assuming cluster wide access)
-///
-/// For now we will go ahead with this very basic logic, there are many intricacies that can be
-/// dealt with later, such as multitenant management in a single argo instance, credentials setup t
-#[async_trait]
-pub trait ArgoCD {
-async fn ensure_installed() {
-}
-}
-struct CurrentNamespaceArgo;
-impl ArgoCD for CurrentNamespaceArgo {
-}

View File

@@ -1,2 +0,0 @@
-mod discover;
-pub use discover::*;

View File

@@ -66,8 +66,7 @@ impl HelmCommandExecutor {
.is_none()
{
if self.chart.repo.is_none() {
-return Err(std::io::Error::new(
-ErrorKind::Other,
+return Err(std::io::Error::other(
"Chart doesn't exist locally and no repo specified",
));
}
@@ -107,10 +106,10 @@ impl HelmCommandExecutor {
}
pub fn run_command(mut self, mut args: Vec<String>) -> Result<Output, std::io::Error> {
-if let Some(d) = self.debug {
-if d {
-args.push("--debug".to_string());
-}
+if let Some(d) = self.debug
+&& d
+{
+args.push("--debug".to_string());
}
let path = if let Some(p) = self.path {
@@ -234,28 +233,28 @@ impl HelmChart {
args.push(kv);
}
-if let Some(crd) = self.include_crds {
-if crd {
-args.push("--include-crds".to_string());
-}
+if let Some(crd) = self.include_crds
+&& crd
+{
+args.push("--include-crds".to_string());
}
-if let Some(st) = self.skip_tests {
-if st {
-args.push("--skip-tests".to_string());
-}
+if let Some(st) = self.skip_tests
+&& st
+{
+args.push("--skip-tests".to_string());
}
-if let Some(sh) = self.skip_hooks {
-if sh {
-args.push("--no-hooks".to_string());
-}
+if let Some(sh) = self.skip_hooks
+&& sh
+{
+args.push("--no-hooks".to_string());
}
-if let Some(d) = self.debug {
-if d {
-args.push("--debug".to_string());
-}
+if let Some(d) = self.debug
+&& d
+{
+args.push("--debug".to_string());
}
args
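
std::io::Error::other, adopted in the first hunk of this file, is the one-argument shorthand for Error::new(ErrorKind::Other, ...) and has been stable since Rust 1.74; a current clippy lint suggests the migration. Quick equivalence check:

use std::io::{Error, ErrorKind};

fn main() {
    let old_style = Error::new(ErrorKind::Other, "chart missing");
    let new_style = Error::other("chart missing");
    // Same kind; only the construction differs.
    assert_eq!(old_style.kind(), new_style.kind());
}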

View File

@@ -63,7 +63,7 @@ impl<T: Topology + HttpServer> Interpret<T> for StaticFilesHttpInterpret {
}
for f in self.score.files.iter() {
-http_server.serve_file_content(&f).await?
+http_server.serve_file_content(f).await?
}
http_server.commit_config().await?;

View File

@@ -92,7 +92,7 @@ impl<T: Topology> Interpret<T> for DiscoverHostForRoleInterpret {
);
return Err(InterpretError::new(format!(
"Could not select host : {}",
-e.to_string()
+e
)));
}
}

View File

@@ -17,4 +17,3 @@ pub mod prometheus;
pub mod storage;
pub mod tenant;
pub mod tftp;
-pub mod argocd;

View File

@@ -9,9 +9,7 @@ use crate::{
inventory::Inventory,
modules::{
application::Application,
-monitoring::kube_prometheus::crd::{
-crd_alertmanager_config::CRDPrometheus, rhob_alertmanager_config::RHOBObservability,
-},
+monitoring::kube_prometheus::crd::rhob_alertmanager_config::RHOBObservability,
prometheus::prometheus::PrometheusApplicationMonitoring,
},
score::Score,

View File

@@ -1,12 +1,8 @@
use std::collections::BTreeMap;
use kube::CustomResource;
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
-use crate::modules::monitoring::kube_prometheus::crd::rhob_prometheuses::{
-LabelSelector, PrometheusSpec,
-};
+use crate::modules::monitoring::kube_prometheus::crd::rhob_prometheuses::LabelSelector;
/// MonitoringStack CRD for monitoring.rhobs/v1alpha1
#[derive(CustomResource, Serialize, Deserialize, Debug, Clone, JsonSchema)]

View File

@@ -52,6 +52,12 @@ pub struct OKDSetup02BootstrapInterpret {
status: InterpretStatus,
}
+impl Default for OKDSetup02BootstrapInterpret {
+fn default() -> Self {
+Self::new()
+}
+}
impl OKDSetup02BootstrapInterpret {
pub fn new() -> Self {
let version = Version::from("1.0.0").unwrap();
@@ -98,9 +104,9 @@ impl OKDSetup02BootstrapInterpret {
InterpretError::new(format!("Failed to create okd installation directory : {e}"))
})?;
if !exit_status.success() {
-return Err(InterpretError::new(format!(
-"Failed to create okd installation directory"
-)));
+return Err(InterpretError::new(
+"Failed to create okd installation directory".to_string(),
+));
} else {
info!(
"Created OKD installation directory {}",

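The added Default impl satisfies clippy's new_without_default: a public new() taking no arguments should come with a Default impl so generic code (and ..Default::default() struct update syntax) can construct the type. The delegation is mechanical, as in this sketch:

struct Interpret {
    version: u32,
}

impl Interpret {
    fn new() -> Self {
        Self { version: 1 }
    }
}

// Mirrors the impl added in the hunk above: Default just calls new().
impl Default for Interpret {
    fn default() -> Self {
        Self::new()
    }
}

fn main() {
    let _ = Interpret::default();
}
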
View File

@@ -254,7 +254,7 @@ impl RHOBAlertingInterpret {
let stack = MonitoringStack {
metadata: ObjectMeta {
-name: Some(format!("{}-monitoring", self.sender.namespace.clone()).into()),
+name: Some(format!("{}-monitoring", self.sender.namespace.clone())),
namespace: Some(self.sender.namespace.clone()),
labels: Some([("monitoring-stack".into(), "true".into())].into()),
..Default::default()
@@ -278,7 +278,7 @@ impl RHOBAlertingInterpret {
.get_domain(&format!("alert-manager-{}", self.sender.namespace.clone()))
.await?;
let name = format!("{}-alert-manager", self.sender.namespace.clone());
-let backend_service = format!("alertmanager-operated");
+let backend_service = "alertmanager-operated".to_string();
let namespace = self.sender.namespace.clone();
let alert_manager_ingress = K8sIngressScore {
name: fqdn!(&name),
@@ -295,7 +295,7 @@ impl RHOBAlertingInterpret {
.get_domain(&format!("prometheus-{}", self.sender.namespace.clone()))
.await?;
let name = format!("{}-prometheus", self.sender.namespace.clone());
-let backend_service = format!("prometheus-operated");
+let backend_service = "prometheus-operated".to_string();
let prometheus_ingress = K8sIngressScore {
name: fqdn!(&name),
host: fqdn!(&prometheus_domain),
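
The backend_service rewrites above are clippy's useless_format: with no arguments to interpolate, format! is just a roundabout String allocation. Equivalence sketch:

fn main() {
    let via_format = format!("alertmanager-operated"); // what clippy flags
    let direct = "alertmanager-operated".to_string(); // the suggested form
    assert_eq!(via_format, direct);
}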

View File

@@ -25,7 +25,7 @@ pub struct CephRemoveOsd {
impl<T: Topology + K8sclient> Score<T> for CephRemoveOsd {
fn name(&self) -> String {
-format!("CephRemoveOsdScore")
+"CephRemoveOsdScore".to_string()
}
#[doc(hidden)]
@@ -118,14 +118,14 @@ impl CephRemoveOsdInterpret {
if let Some(status) = deployment.status {
let ready_count = status.ready_replicas.unwrap_or(0);
if ready_count >= 1 {
-return Ok(Outcome::success(format!(
+Ok(Outcome::success(format!(
"'{}' is ready with {} replica(s).",
&toolbox_dep, ready_count
-)));
+)))
} else {
-return Err(InterpretError::new(
+Err(InterpretError::new(
"ceph-tool-box not ready in cluster".to_string(),
-));
+))
}
} else {
Err(InterpretError::new(format!(
@@ -181,15 +181,14 @@ impl CephRemoveOsdInterpret {
)
.await?;
-if let Some(deployment) = dep {
-if let Some(status) = deployment.status {
-if status.replicas.unwrap_or(1) == 0 && status.ready_replicas.unwrap_or(1) == 0
-{
-return Ok(Outcome::success(
-"Deployment successfully scaled down.".to_string(),
-));
-}
-}
+if let Some(deployment) = dep
+&& let Some(status) = deployment.status
+&& status.replicas.unwrap_or(1) == 0
+&& status.ready_replicas.unwrap_or(1) == 0
+{
+return Ok(Outcome::success(
+"Deployment successfully scaled down.".to_string(),
+));
+}
if start.elapsed() > timeout {

View File

@@ -20,7 +20,7 @@ pub struct CephVerifyClusterHealth {
impl<T: Topology + K8sclient> Score<T> for CephVerifyClusterHealth {
fn name(&self) -> String {
-format!("CephValidateClusterHealth")
+"CephValidateClusterHealth".to_string()
}
fn create_interpret(&self) -> Box<dyn Interpret<T>> {
@@ -80,14 +80,14 @@ impl CephVerifyClusterHealthInterpret {
if let Some(status) = deployment.status {
let ready_count = status.ready_replicas.unwrap_or(0);
if ready_count >= 1 {
-return Ok(Outcome::success(format!(
+Ok(Outcome::success(format!(
"'{}' is ready with {} replica(s).",
&toolbox_dep, ready_count
-)));
+)))
} else {
-return Err(InterpretError::new(
+Err(InterpretError::new(
"ceph-tool-box not ready in cluster".to_string(),
-));
+))
}
} else {
Err(InterpretError::new(format!(
@@ -123,9 +123,9 @@ impl CephVerifyClusterHealthInterpret {
.await?;
if health.contains("HEALTH_OK") {
-return Ok(Outcome::success(
+Ok(Outcome::success(
"Ceph Cluster in healthy state".to_string(),
-));
+))
} else {
Err(InterpretError::new(format!(
"Ceph cluster unhealthy {}",