Compare commits

..

2 Commits

Author SHA1 Message Date
1802b10ddf fix:translated documentaion notes into English 2025-10-23 15:31:45 -04:00
dd3f07e5b7 doc for removing worker flag from cp on UPI
All checks were successful
Run Check Script / check (pull_request) Successful in 1m13s
2025-10-09 15:28:42 -04:00
20 changed files with 179 additions and 103 deletions

View File

@@ -0,0 +1,56 @@
## **Remove Worker flag from OKD Control Planes**
### **Context**
On OKD user-provisioned infrastructure (UPI), the control plane nodes can carry the label `node-role.kubernetes.io/worker`, which allows non-critical workloads to be scheduled on the control planes.
### **Observed Symptoms**
- After adding the servers to the HAProxy backend, each backend appears down
- Traffic is redirected to the control planes instead of the workers
- The `router-default` pods are incorrectly scheduled on the control planes rather than on the workers
- Pods are being scheduled on the control planes causing cluster instability
```
ss -tlnp | grep 80
```
- shows that the `haproxy` process is listening on 0.0.0.0:80 on the control planes
- same problem for port 443
- In the `rook-ceph` namespace, certain pods are deployed on the control planes rather than on the worker nodes
### **Cause**
- When installing with UPI, the node roles (master, worker) are not managed by the Machine Config Operator, and the control planes are schedulable by default.
### **Diagnostic**
Check the node labels:
```
oc get nodes --show-labels | grep control-plane
```
Inspect the kubelet configuration:
```
cat /etc/systemd/system/kubelet.service
```
Find the line:
```
--node-labels=node-role.kubernetes.io/control-plane,node-role.kubernetes.io/master,node-role.kubernetes.io/worker
```
→ The presence of the `worker` label confirms the problem.
Verify that the label doesn't come from the MCO (Machine Config Operator):
```
oc get machineconfig | grep rendered-master
```
**Solution:**
To make the control planes non-schedulable, patch the cluster `scheduler` resource:
```
oc patch scheduler cluster --type merge -p '{"spec":{"mastersSchedulable":false}}'
```
After the patch is applied, the workloads can be evicted from the control planes by cordoning and draining the nodes:
```
oc adm cordon <cp-node>
oc adm drain <cp-node> --ignore-daemonsets --delete-emptydir-data
```

View File

@@ -3,7 +3,10 @@ use std::time::Duration;
use derive_new::new; use derive_new::new;
use k8s_openapi::{ use k8s_openapi::{
ClusterResourceScope, NamespaceResourceScope, ClusterResourceScope, NamespaceResourceScope,
api::{apps::v1::Deployment, core::v1::Pod}, api::{
apps::v1::Deployment,
core::v1::{Pod, PodStatus},
},
}; };
use kube::{ use kube::{
Client, Config, Error, Resource, Client, Config, Error, Resource,
@@ -20,7 +23,7 @@ use kube::{
}; };
use log::{debug, error, trace}; use log::{debug, error, trace};
use serde::{Serialize, de::DeserializeOwned}; use serde::{Serialize, de::DeserializeOwned};
use serde_json::json; use serde_json::{Value, json};
use similar::TextDiff; use similar::TextDiff;
use tokio::{io::AsyncReadExt, time::sleep}; use tokio::{io::AsyncReadExt, time::sleep};
@@ -68,7 +71,7 @@ impl K8sClient {
} else { } else {
Api::default_namespaced_with(self.client.clone(), &gvk) Api::default_namespaced_with(self.client.clone(), &gvk)
}; };
resource.get(name).await Ok(resource.get(name).await?)
} }
pub async fn get_deployment( pub async fn get_deployment(
@@ -81,7 +84,7 @@ impl K8sClient {
} else { } else {
Api::default_namespaced(self.client.clone()) Api::default_namespaced(self.client.clone())
}; };
deps.get_opt(name).await Ok(deps.get_opt(name).await?)
} }
pub async fn get_pod(&self, name: &str, namespace: Option<&str>) -> Result<Option<Pod>, Error> { pub async fn get_pod(&self, name: &str, namespace: Option<&str>) -> Result<Option<Pod>, Error> {
@@ -90,7 +93,7 @@ impl K8sClient {
} else { } else {
Api::default_namespaced(self.client.clone()) Api::default_namespaced(self.client.clone())
}; };
pods.get_opt(name).await Ok(pods.get_opt(name).await?)
} }
pub async fn scale_deployment( pub async fn scale_deployment(
@@ -167,13 +170,15 @@ impl K8sClient {
loop { loop {
let pod = self.get_pod(pod_name, namespace).await?; let pod = self.get_pod(pod_name, namespace).await?;
if let Some(p) = pod if let Some(p) = pod {
&& let Some(status) = p.status if let Some(status) = p.status {
&& let Some(phase) = status.phase if let Some(phase) = status.phase {
&& phase.to_lowercase() == "running" if phase.to_lowercase() == "running" {
{
return Ok(()); return Ok(());
} }
}
}
}
if elapsed >= timeout_secs { if elapsed >= timeout_secs {
return Err(Error::Discovery(DiscoveryError::MissingResource(format!( return Err(Error::Discovery(DiscoveryError::MissingResource(format!(
@@ -235,7 +240,7 @@ impl K8sClient {
if let Some(s) = status.status { if let Some(s) = status.status {
let mut stdout_buf = String::new(); let mut stdout_buf = String::new();
if let Some(mut stdout) = process.stdout() { if let Some(mut stdout) = process.stdout().take() {
stdout stdout
.read_to_string(&mut stdout_buf) .read_to_string(&mut stdout_buf)
.await .await

View File

@@ -212,11 +212,11 @@ impl K8sAnywhereTopology {
.await?; .await?;
let ready_replicas = ic.data["status"]["availableReplicas"].as_i64().unwrap_or(0); let ready_replicas = ic.data["status"]["availableReplicas"].as_i64().unwrap_or(0);
if ready_replicas >= 1 { if ready_replicas >= 1 {
Ok(()) return Ok(());
} else { } else {
Err(PreparationError::new( return Err(PreparationError::new(
"openshift-ingress-operator not available".to_string(), "openshift-ingress-operator not available".to_string(),
)) ));
} }
} }

View File

@@ -11,7 +11,7 @@ pub struct InventoryRepositoryFactory;
impl InventoryRepositoryFactory { impl InventoryRepositoryFactory {
pub async fn build() -> Result<Box<dyn InventoryRepository>, RepoError> { pub async fn build() -> Result<Box<dyn InventoryRepository>, RepoError> {
Ok(Box::new( Ok(Box::new(
SqliteInventoryRepository::new(&DATABASE_URL).await?, SqliteInventoryRepository::new(&(*DATABASE_URL)).await?,
)) ))
} }
} }

View File

@@ -36,7 +36,7 @@ impl HttpServer for OPNSenseFirewall {
async fn serve_file_content(&self, file: &FileContent) -> Result<(), ExecutorError> { async fn serve_file_content(&self, file: &FileContent) -> Result<(), ExecutorError> {
let path = match &file.path { let path = match &file.path {
crate::data::FilePath::Relative(path) => { crate::data::FilePath::Relative(path) => {
format!("{OPNSENSE_HTTP_ROOT_PATH}/{}", path) format!("{OPNSENSE_HTTP_ROOT_PATH}/{}", path.to_string())
} }
crate::data::FilePath::Absolute(path) => { crate::data::FilePath::Absolute(path) => {
return Err(ExecutorError::ConfigurationError(format!( return Err(ExecutorError::ConfigurationError(format!(

View File

@@ -182,13 +182,17 @@ pub(crate) fn get_health_check_for_backend(
let uppercase = binding.as_str(); let uppercase = binding.as_str();
match uppercase { match uppercase {
"TCP" => { "TCP" => {
if let Some(checkport) = haproxy_health_check.checkport.content.as_ref() if let Some(checkport) = haproxy_health_check.checkport.content.as_ref() {
&& !checkport.is_empty() if !checkport.is_empty() {
{
return Some(HealthCheck::TCP(Some(checkport.parse().unwrap_or_else( return Some(HealthCheck::TCP(Some(checkport.parse().unwrap_or_else(
|_| panic!("HAProxy check port should be a valid port number, got {checkport}"), |_| {
panic!(
"HAProxy check port should be a valid port number, got {checkport}"
)
},
)))); ))));
} }
}
Some(HealthCheck::TCP(None)) Some(HealthCheck::TCP(None))
} }
"HTTP" => { "HTTP" => {

View File

@@ -8,6 +8,7 @@ mod tftp;
use std::sync::Arc; use std::sync::Arc;
pub use management::*; pub use management::*;
use opnsense_config_xml::Host;
use tokio::sync::RwLock; use tokio::sync::RwLock;
use crate::{executors::ExecutorError, topology::LogicalHost}; use crate::{executors::ExecutorError, topology::LogicalHost};

View File

@@ -1,8 +1,10 @@
use async_trait::async_trait; use async_trait::async_trait;
use kube::api::GroupVersionKind; use kube::{Api, api::GroupVersionKind};
use log::{debug, warn};
use non_blank_string_rs::NonBlankString; use non_blank_string_rs::NonBlankString;
use serde::Serialize; use serde::Serialize;
use std::{str::FromStr, sync::Arc}; use serde::de::DeserializeOwned;
use std::{process::Command, str::FromStr, sync::Arc};
use crate::{ use crate::{
data::Version, data::Version,
@@ -10,7 +12,10 @@ use crate::{
inventory::Inventory, inventory::Inventory,
modules::helm::chart::{HelmChartScore, HelmRepository}, modules::helm::chart::{HelmChartScore, HelmRepository},
score::Score, score::Score,
topology::{HelmCommand, K8sclient, Topology, ingress::Ingress, k8s::K8sClient}, topology::{
HelmCommand, K8sclient, PreparationError, PreparationOutcome, Topology, ingress::Ingress,
k8s::K8sClient,
},
}; };
use harmony_types::id::Id; use harmony_types::id::Id;
@@ -114,13 +119,13 @@ impl ArgoInterpret {
match ic.data["status"]["domain"].as_str() { match ic.data["status"]["domain"].as_str() {
Some(domain) => return Ok(domain.to_string()), Some(domain) => return Ok(domain.to_string()),
None => Err(InterpretError::new("Could not find domain".to_string())), None => return Err(InterpretError::new("Could not find domain".to_string())),
} }
} }
false => { false => {
todo!() todo!()
} }
} };
} }
} }

View File

@@ -190,7 +190,7 @@ impl<
info!("Deploying {} to target {target:?}", self.application.name()); info!("Deploying {} to target {target:?}", self.application.name());
let score = ArgoHelmScore { let score = ArgoHelmScore {
namespace: self.application.name().to_string(), namespace: format!("{}", self.application.name()),
openshift: true, openshift: true,
argo_apps: vec![ArgoApplication::from(CDApplicationConfig { argo_apps: vec![ArgoApplication::from(CDApplicationConfig {
// helm pull oci://hub.nationtech.io/harmony/harmony-example-rust-webapp-chart --version 0.1.0 // helm pull oci://hub.nationtech.io/harmony/harmony-example-rust-webapp-chart --version 0.1.0
@@ -198,8 +198,8 @@ impl<
helm_chart_repo_url: "hub.nationtech.io/harmony".to_string(), helm_chart_repo_url: "hub.nationtech.io/harmony".to_string(),
helm_chart_name: format!("{}-chart", self.application.name()), helm_chart_name: format!("{}-chart", self.application.name()),
values_overrides: None, values_overrides: None,
name: self.application.name().to_string(), name: format!("{}", self.application.name()),
namespace: self.application.name().to_string(), namespace: format!("{}", self.application.name()),
})], })],
}; };
score score

View File

@@ -3,6 +3,7 @@ use std::sync::Arc;
use crate::modules::application::{ use crate::modules::application::{
Application, ApplicationFeature, InstallationError, InstallationOutcome, Application, ApplicationFeature, InstallationError, InstallationOutcome,
}; };
use crate::modules::monitoring::application_monitoring::application_monitoring_score::ApplicationMonitoringScore;
use crate::modules::monitoring::application_monitoring::rhobs_application_monitoring_score::ApplicationRHOBMonitoringScore; use crate::modules::monitoring::application_monitoring::rhobs_application_monitoring_score::ApplicationRHOBMonitoringScore;
use crate::modules::monitoring::kube_prometheus::crd::rhob_alertmanager_config::RHOBObservability; use crate::modules::monitoring::kube_prometheus::crd::rhob_alertmanager_config::RHOBObservability;

View File

@@ -194,10 +194,10 @@ impl RustWebapp {
Some(body_full(tar_data.into())), Some(body_full(tar_data.into())),
); );
while let Some(msg) = image_build_stream.next().await { while let Some(mut msg) = image_build_stream.next().await {
trace!("Got bollard msg {msg:?}"); trace!("Got bollard msg {msg:?}");
match msg { match msg {
Ok(msg) => { Ok(mut msg) => {
if let Some(progress) = msg.progress_detail { if let Some(progress) = msg.progress_detail {
info!( info!(
"Build progress {}/{}", "Build progress {}/{}",
@@ -511,23 +511,25 @@ ingress:
fs::write(chart_dir.join("values.yaml"), values_yaml)?; fs::write(chart_dir.join("values.yaml"), values_yaml)?;
// Create templates/_helpers.tpl // Create templates/_helpers.tpl
let helpers_tpl = r#" let helpers_tpl = format!(
{{/* r#"
{{{{/*
Expand the name of the chart. Expand the name of the chart.
*/}} */}}}}
{{- define "chart.name" -}} {{{{- define "chart.name" -}}}}
{{- default .Chart.Name $.Values.nameOverride | trunc 63 | trimSuffix "-" }} {{{{- default .Chart.Name $.Values.nameOverride | trunc 63 | trimSuffix "-" }}}}
{{- end }} {{{{- end }}}}
{{/* {{{{/*
Create a default fully qualified app name. Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
*/}} */}}}}
{{- define "chart.fullname" -}} {{{{- define "chart.fullname" -}}}}
{{- $name := default .Chart.Name $.Values.nameOverride }} {{{{- $name := default .Chart.Name $.Values.nameOverride }}}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} {{{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}}}
{{- end }} {{{{- end }}}}
"#.to_string(); "#
);
fs::write(templates_dir.join("_helpers.tpl"), helpers_tpl)?; fs::write(templates_dir.join("_helpers.tpl"), helpers_tpl)?;
// Create templates/service.yaml // Create templates/service.yaml

View File

@@ -66,7 +66,8 @@ impl HelmCommandExecutor {
.is_none() .is_none()
{ {
if self.chart.repo.is_none() { if self.chart.repo.is_none() {
return Err(std::io::Error::other( return Err(std::io::Error::new(
ErrorKind::Other,
"Chart doesn't exist locally and no repo specified", "Chart doesn't exist locally and no repo specified",
)); ));
} }
@@ -106,11 +107,11 @@ impl HelmCommandExecutor {
} }
pub fn run_command(mut self, mut args: Vec<String>) -> Result<Output, std::io::Error> { pub fn run_command(mut self, mut args: Vec<String>) -> Result<Output, std::io::Error> {
if let Some(d) = self.debug if let Some(d) = self.debug {
&& d if d {
{
args.push("--debug".to_string()); args.push("--debug".to_string());
} }
}
let path = if let Some(p) = self.path { let path = if let Some(p) = self.path {
p p
@@ -233,29 +234,29 @@ impl HelmChart {
args.push(kv); args.push(kv);
} }
if let Some(crd) = self.include_crds if let Some(crd) = self.include_crds {
&& crd if crd {
{
args.push("--include-crds".to_string()); args.push("--include-crds".to_string());
} }
}
if let Some(st) = self.skip_tests if let Some(st) = self.skip_tests {
&& st if st {
{
args.push("--skip-tests".to_string()); args.push("--skip-tests".to_string());
} }
if let Some(sh) = self.skip_hooks
&& sh
{
args.push("--no-hooks".to_string());
} }
if let Some(d) = self.debug if let Some(sh) = self.skip_hooks {
&& d if sh {
{ args.push("--no-hooks".to_string());
}
}
if let Some(d) = self.debug {
if d {
args.push("--debug".to_string()); args.push("--debug".to_string());
} }
}
args args
} }

View File

@@ -63,7 +63,7 @@ impl<T: Topology + HttpServer> Interpret<T> for StaticFilesHttpInterpret {
} }
for f in self.score.files.iter() { for f in self.score.files.iter() {
http_server.serve_file_content(f).await? http_server.serve_file_content(&f).await?
} }
http_server.commit_config().await?; http_server.commit_config().await?;

View File

@@ -92,7 +92,7 @@ impl<T: Topology> Interpret<T> for DiscoverHostForRoleInterpret {
); );
return Err(InterpretError::new(format!( return Err(InterpretError::new(format!(
"Could not select host : {}", "Could not select host : {}",
e e.to_string()
))); )));
} }
} }

View File

@@ -9,7 +9,9 @@ use crate::{
inventory::Inventory, inventory::Inventory,
modules::{ modules::{
application::Application, application::Application,
monitoring::kube_prometheus::crd::rhob_alertmanager_config::RHOBObservability, monitoring::kube_prometheus::crd::{
crd_alertmanager_config::CRDPrometheus, rhob_alertmanager_config::RHOBObservability,
},
prometheus::prometheus::PrometheusApplicationMonitoring, prometheus::prometheus::PrometheusApplicationMonitoring,
}, },
score::Score, score::Score,

View File

@@ -1,8 +1,12 @@
use std::collections::BTreeMap;
use kube::CustomResource; use kube::CustomResource;
use schemars::JsonSchema; use schemars::JsonSchema;
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use crate::modules::monitoring::kube_prometheus::crd::rhob_prometheuses::LabelSelector; use crate::modules::monitoring::kube_prometheus::crd::rhob_prometheuses::{
LabelSelector, PrometheusSpec,
};
/// MonitoringStack CRD for monitoring.rhobs/v1alpha1 /// MonitoringStack CRD for monitoring.rhobs/v1alpha1
#[derive(CustomResource, Serialize, Deserialize, Debug, Clone, JsonSchema)] #[derive(CustomResource, Serialize, Deserialize, Debug, Clone, JsonSchema)]

View File

@@ -52,12 +52,6 @@ pub struct OKDSetup02BootstrapInterpret {
status: InterpretStatus, status: InterpretStatus,
} }
impl Default for OKDSetup02BootstrapInterpret {
fn default() -> Self {
Self::new()
}
}
impl OKDSetup02BootstrapInterpret { impl OKDSetup02BootstrapInterpret {
pub fn new() -> Self { pub fn new() -> Self {
let version = Version::from("1.0.0").unwrap(); let version = Version::from("1.0.0").unwrap();
@@ -104,9 +98,9 @@ impl OKDSetup02BootstrapInterpret {
InterpretError::new(format!("Failed to create okd installation directory : {e}")) InterpretError::new(format!("Failed to create okd installation directory : {e}"))
})?; })?;
if !exit_status.success() { if !exit_status.success() {
return Err(InterpretError::new( return Err(InterpretError::new(format!(
"Failed to create okd installation directory".to_string(), "Failed to create okd installation directory"
)); )));
} else { } else {
info!( info!(
"Created OKD installation directory {}", "Created OKD installation directory {}",

View File

@@ -254,7 +254,7 @@ impl RHOBAlertingInterpret {
let stack = MonitoringStack { let stack = MonitoringStack {
metadata: ObjectMeta { metadata: ObjectMeta {
name: Some(format!("{}-monitoring", self.sender.namespace.clone())), name: Some(format!("{}-monitoring", self.sender.namespace.clone()).into()),
namespace: Some(self.sender.namespace.clone()), namespace: Some(self.sender.namespace.clone()),
labels: Some([("monitoring-stack".into(), "true".into())].into()), labels: Some([("monitoring-stack".into(), "true".into())].into()),
..Default::default() ..Default::default()
@@ -278,7 +278,7 @@ impl RHOBAlertingInterpret {
.get_domain(&format!("alert-manager-{}", self.sender.namespace.clone())) .get_domain(&format!("alert-manager-{}", self.sender.namespace.clone()))
.await?; .await?;
let name = format!("{}-alert-manager", self.sender.namespace.clone()); let name = format!("{}-alert-manager", self.sender.namespace.clone());
let backend_service = "alertmanager-operated".to_string(); let backend_service = format!("alertmanager-operated");
let namespace = self.sender.namespace.clone(); let namespace = self.sender.namespace.clone();
let alert_manager_ingress = K8sIngressScore { let alert_manager_ingress = K8sIngressScore {
name: fqdn!(&name), name: fqdn!(&name),
@@ -295,7 +295,7 @@ impl RHOBAlertingInterpret {
.get_domain(&format!("prometheus-{}", self.sender.namespace.clone())) .get_domain(&format!("prometheus-{}", self.sender.namespace.clone()))
.await?; .await?;
let name = format!("{}-prometheus", self.sender.namespace.clone()); let name = format!("{}-prometheus", self.sender.namespace.clone());
let backend_service = "prometheus-operated".to_string(); let backend_service = format!("prometheus-operated");
let prometheus_ingress = K8sIngressScore { let prometheus_ingress = K8sIngressScore {
name: fqdn!(&name), name: fqdn!(&name),
host: fqdn!(&prometheus_domain), host: fqdn!(&prometheus_domain),

View File

@@ -25,7 +25,7 @@ pub struct CephRemoveOsd {
impl<T: Topology + K8sclient> Score<T> for CephRemoveOsd { impl<T: Topology + K8sclient> Score<T> for CephRemoveOsd {
fn name(&self) -> String { fn name(&self) -> String {
"CephRemoveOsdScore".to_string() format!("CephRemoveOsdScore")
} }
#[doc(hidden)] #[doc(hidden)]
@@ -118,14 +118,14 @@ impl CephRemoveOsdInterpret {
if let Some(status) = deployment.status { if let Some(status) = deployment.status {
let ready_count = status.ready_replicas.unwrap_or(0); let ready_count = status.ready_replicas.unwrap_or(0);
if ready_count >= 1 { if ready_count >= 1 {
Ok(Outcome::success(format!( return Ok(Outcome::success(format!(
"'{}' is ready with {} replica(s).", "'{}' is ready with {} replica(s).",
&toolbox_dep, ready_count &toolbox_dep, ready_count
))) )));
} else { } else {
Err(InterpretError::new( return Err(InterpretError::new(
"ceph-tool-box not ready in cluster".to_string(), "ceph-tool-box not ready in cluster".to_string(),
)) ));
} }
} else { } else {
Err(InterpretError::new(format!( Err(InterpretError::new(format!(
@@ -181,15 +181,16 @@ impl CephRemoveOsdInterpret {
) )
.await?; .await?;
if let Some(deployment) = dep if let Some(deployment) = dep {
&& let Some(status) = deployment.status if let Some(status) = deployment.status {
&& status.replicas.unwrap_or(1) == 0 if status.replicas.unwrap_or(1) == 0 && status.ready_replicas.unwrap_or(1) == 0
&& status.ready_replicas.unwrap_or(1) == 0
{ {
return Ok(Outcome::success( return Ok(Outcome::success(
"Deployment successfully scaled down.".to_string(), "Deployment successfully scaled down.".to_string(),
)); ));
} }
}
}
if start.elapsed() > timeout { if start.elapsed() > timeout {
return Err(InterpretError::new(format!( return Err(InterpretError::new(format!(

View File

@@ -20,7 +20,7 @@ pub struct CephVerifyClusterHealth {
impl<T: Topology + K8sclient> Score<T> for CephVerifyClusterHealth { impl<T: Topology + K8sclient> Score<T> for CephVerifyClusterHealth {
fn name(&self) -> String { fn name(&self) -> String {
"CephValidateClusterHealth".to_string() format!("CephValidateClusterHealth")
} }
fn create_interpret(&self) -> Box<dyn Interpret<T>> { fn create_interpret(&self) -> Box<dyn Interpret<T>> {
@@ -80,14 +80,14 @@ impl CephVerifyClusterHealthInterpret {
if let Some(status) = deployment.status { if let Some(status) = deployment.status {
let ready_count = status.ready_replicas.unwrap_or(0); let ready_count = status.ready_replicas.unwrap_or(0);
if ready_count >= 1 { if ready_count >= 1 {
Ok(Outcome::success(format!( return Ok(Outcome::success(format!(
"'{}' is ready with {} replica(s).", "'{}' is ready with {} replica(s).",
&toolbox_dep, ready_count &toolbox_dep, ready_count
))) )));
} else { } else {
Err(InterpretError::new( return Err(InterpretError::new(
"ceph-tool-box not ready in cluster".to_string(), "ceph-tool-box not ready in cluster".to_string(),
)) ));
} }
} else { } else {
Err(InterpretError::new(format!( Err(InterpretError::new(format!(
@@ -123,9 +123,9 @@ impl CephVerifyClusterHealthInterpret {
.await?; .await?;
if health.contains("HEALTH_OK") { if health.contains("HEALTH_OK") {
Ok(Outcome::success( return Ok(Outcome::success(
"Ceph Cluster in healthy state".to_string(), "Ceph Cluster in healthy state".to_string(),
)) ));
} else { } else {
Err(InterpretError::new(format!( Err(InterpretError::new(format!(
"Ceph cluster unhealthy {}", "Ceph cluster unhealthy {}",