feat: add a score that prepares a Rook Ceph cluster for drive removal based on the rook-ceph-osd deployment name; add K8sClient functions to scale a deployment to a desired replica count and to get a pod by name and namespace

Willem 2025-08-15 14:51:16 -04:00
parent 67f3a23071
commit b43ca7c740
5 changed files with 115 additions and 0 deletions

View File

@@ -17,6 +17,7 @@ use kube::{
};
use log::{debug, error, trace};
use serde::{Serialize, de::DeserializeOwned};
use serde_json::json;
use similar::TextDiff;
#[derive(new, Clone)]
@@ -51,6 +52,38 @@ impl K8sClient {
        })
    }

    pub async fn get_pod(&self, name: &str, namespace: Option<&str>) -> Result<Option<Pod>, Error> {
        let pods: Api<Pod> = if let Some(ns) = namespace {
            Api::namespaced(self.client.clone(), ns)
        } else {
            Api::default_namespaced(self.client.clone())
        };
        Ok(pods.get_opt(name).await?)
    }
    pub async fn scale_deployment(
        &self,
        name: &str,
        namespace: Option<&str>,
        replicas: u32,
    ) -> Result<(), Error> {
        let deployments: Api<Deployment> = if let Some(ns) = namespace {
            Api::namespaced(self.client.clone(), ns)
        } else {
            Api::default_namespaced(self.client.clone())
        };

        let patch = json!({
            "spec": {
                "replicas": replicas
            }
        });

        // Patch the Deployment's scale subresource with a JSON merge patch; a
        // server-side apply patch would additionally require a fieldManager to
        // be set on the PatchParams.
        let pp = PatchParams::default();
        let scale = Patch::Merge(&patch);
        deployments.patch_scale(name, &pp, &scale).await?;
        Ok(())
    }

    pub async fn wait_until_deployment_ready(
        &self,
        name: String,

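The two helpers are intended to be used together. A minimal usage sketch, assuming an existing K8sClient instance plus the same Error type and imports as the file above; the deployment name, pod name, and namespace below are placeholders, not values from this commit:

async fn stop_osd(client: &K8sClient) -> Result<(), Error> {
    // Scale the rook-ceph-osd deployment down to zero replicas.
    client
        .scale_deployment("rook-ceph-osd-2", Some("rook-ceph"), 0)
        .await?;

    // get_pod returns Ok(None) once the pod no longer exists.
    let pod = client
        .get_pod("rook-ceph-osd-2-abc123", Some("rook-ceph"))
        .await?;
    println!("OSD pod still present: {}", pod.is_some());
    Ok(())
}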
View File

@@ -14,5 +14,6 @@ pub mod monitoring;
pub mod okd;
pub mod opnsense;
pub mod prometheus;
pub mod storage;
pub mod tenant;
pub mod tftp;

View File

@@ -0,0 +1,79 @@
use async_trait::async_trait;
use serde::Serialize;

use crate::{
    data::{Id, Version},
    interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
    inventory::Inventory,
    score::Score,
    topology::{K8sclient, Topology},
};
#[derive(Debug, Clone, Serialize)]
pub struct PrepCephOsdReplacement {
    /// Name of the rook-ceph-osd deployment (and its pod) to scale down.
    osd_name: String,
    /// Namespace the Rook Ceph cluster runs in, e.g. "rook-ceph".
    rook_ceph_namespace: String,
}

impl<T: Topology + K8sclient> Score<T> for PrepCephOsdReplacement {
    fn name(&self) -> String {
        "CephOsdReplacementScore".to_string()
    }

    #[doc(hidden)]
    fn create_interpret(&self) -> Box<dyn Interpret<T>> {
        Box::new(PrepCephOsdReplacementInterpret {
            score: self.clone(),
        })
    }
}
#[derive(Debug, Clone)]
pub struct PrepCephOsdReplacementInterpret {
    score: PrepCephOsdReplacement,
}

#[async_trait]
impl<T: Topology + K8sclient> Interpret<T> for PrepCephOsdReplacementInterpret {
    async fn execute(
        &self,
        _inventory: &Inventory,
        topology: &T,
    ) -> Result<Outcome, InterpretError> {
        let client = topology.k8s_client().await.unwrap();

        // Scale the rook-ceph-osd deployment down to zero replicas so the OSD
        // stops before the drive is pulled.
        client
            .scale_deployment(
                &self.score.osd_name,
                Some(&self.score.rook_ceph_namespace),
                0,
            )
            .await?;

        // Look up the OSD pod by the same name; the result is not yet inspected.
        client
            .get_pod(&self.score.osd_name, Some(&self.score.rook_ceph_namespace))
            .await?;

        Ok(Outcome::success(
            "Successfully prepared rook-ceph-cluster for disk replacement".to_string(),
        ))
    }

    fn get_name(&self) -> InterpretName {
        todo!()
    }

    fn get_version(&self) -> Version {
        todo!()
    }

    fn get_status(&self) -> InterpretStatus {
        todo!()
    }

    fn get_children(&self) -> Vec<Id> {
        todo!()
    }
}
impl PrepCephOsdReplacementInterpret {}
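A minimal sketch of how the score might be constructed, assuming construction happens inside this module since the fields are private and no constructor is derived yet; the OSD deployment name and namespace are placeholders:

fn example_score() -> PrepCephOsdReplacement {
    // Placeholder values identifying which rook-ceph-osd deployment backs the
    // drive to be replaced; "rook-ceph" is the conventional Rook namespace.
    PrepCephOsdReplacement {
        osd_name: "rook-ceph-osd-2".to_string(),
        rook_ceph_namespace: "rook-ceph".to_string(),
    }
}

// example_score().create_interpret() then yields the interpret whose execute()
// scales the deployment to zero and looks up the pod.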

View File

@@ -0,0 +1 @@
pub mod ceph_osd_replacement_score;

View File

@@ -0,0 +1 @@
pub mod ceph;