reafactor/k8sclient #243
2495
Cargo.lock
generated
2495
Cargo.lock
generated
File diff suppressed because it is too large
Load Diff
@@ -18,8 +18,7 @@ members = [
|
|||||||
"adr/agent_discovery/mdns",
|
"adr/agent_discovery/mdns",
|
||||||
"brocade",
|
"brocade",
|
||||||
"harmony_agent",
|
"harmony_agent",
|
||||||
"harmony_agent/deploy", "harmony_node_readiness",
|
"harmony_agent/deploy", "harmony_node_readiness", "harmony-k8s",
|
||||||
"examples/*",
|
|
||||||
]
|
]
|
||||||
|
|
||||||
[workspace.package]
|
[workspace.package]
|
||||||
@@ -38,6 +37,8 @@ tokio = { version = "1.40", features = [
|
|||||||
"macros",
|
"macros",
|
||||||
"rt-multi-thread",
|
"rt-multi-thread",
|
||||||
] }
|
] }
|
||||||
|
tokio-retry = "0.3.0"
|
||||||
|
tokio-util = "0.7.15"
|
||||||
cidr = { features = ["serde"], version = "0.2" }
|
cidr = { features = ["serde"], version = "0.2" }
|
||||||
russh = "0.45"
|
russh = "0.45"
|
||||||
russh-keys = "0.45"
|
russh-keys = "0.45"
|
||||||
|
|||||||
@@ -1,8 +1,7 @@
|
|||||||
use super::BrocadeClient;
|
use super::BrocadeClient;
|
||||||
use crate::{
|
use crate::{
|
||||||
BrocadeInfo, Error, ExecutionMode, InterSwitchLink, InterfaceInfo, MacAddressEntry,
|
BrocadeInfo, Error, ExecutionMode, InterSwitchLink, InterfaceInfo, MacAddressEntry,
|
||||||
PortChannelId, PortOperatingMode, SecurityLevel, parse_brocade_mac_address,
|
PortChannelId, PortOperatingMode, parse_brocade_mac_address, shell::BrocadeShell,
|
||||||
shell::BrocadeShell,
|
|
||||||
};
|
};
|
||||||
|
|
||||||
use async_trait::async_trait;
|
use async_trait::async_trait;
|
||||||
|
|||||||
@@ -8,7 +8,7 @@ use regex::Regex;
|
|||||||
use crate::{
|
use crate::{
|
||||||
BrocadeClient, BrocadeInfo, Error, ExecutionMode, InterSwitchLink, InterfaceInfo,
|
BrocadeClient, BrocadeInfo, Error, ExecutionMode, InterSwitchLink, InterfaceInfo,
|
||||||
InterfaceStatus, InterfaceType, MacAddressEntry, PortChannelId, PortOperatingMode,
|
InterfaceStatus, InterfaceType, MacAddressEntry, PortChannelId, PortOperatingMode,
|
||||||
SecurityLevel, parse_brocade_mac_address, shell::BrocadeShell,
|
parse_brocade_mac_address, shell::BrocadeShell,
|
||||||
};
|
};
|
||||||
|
|
||||||
#[derive(Debug)]
|
#[derive(Debug)]
|
||||||
|
|||||||
@@ -1,8 +1,8 @@
|
|||||||
use harmony::{
|
use harmony::{
|
||||||
inventory::Inventory,
|
inventory::Inventory,
|
||||||
modules::cert_manager::{
|
modules::cert_manager::{
|
||||||
capability::CertificateManagementConfig, score_cert_management::CertificateManagementScore,
|
capability::CertificateManagementConfig, score_certificate::CertificateScore,
|
||||||
score_certificate::CertificateScore, score_issuer::CertificateIssuerScore,
|
score_issuer::CertificateIssuerScore,
|
||||||
},
|
},
|
||||||
topology::K8sAnywhereTopology,
|
topology::K8sAnywhereTopology,
|
||||||
};
|
};
|
||||||
|
|||||||
@@ -10,9 +10,10 @@ publish = false
|
|||||||
harmony = { path = "../../harmony" }
|
harmony = { path = "../../harmony" }
|
||||||
harmony_cli = { path = "../../harmony_cli" }
|
harmony_cli = { path = "../../harmony_cli" }
|
||||||
harmony_types = { path = "../../harmony_types" }
|
harmony_types = { path = "../../harmony_types" }
|
||||||
|
harmony_macros = { path = "../../harmony_macros" }
|
||||||
|
harmony-k8s = { path = "../../harmony-k8s" }
|
||||||
cidr.workspace = true
|
cidr.workspace = true
|
||||||
tokio.workspace = true
|
tokio.workspace = true
|
||||||
harmony_macros = { path = "../../harmony_macros" }
|
|
||||||
log.workspace = true
|
log.workspace = true
|
||||||
env_logger.workspace = true
|
env_logger.workspace = true
|
||||||
url.workspace = true
|
url.workspace = true
|
||||||
|
|||||||
@@ -1,6 +1,6 @@
|
|||||||
use std::time::Duration;
|
use std::time::Duration;
|
||||||
|
|
||||||
use harmony::topology::k8s::{DrainOptions, K8sClient};
|
use harmony_k8s::{DrainOptions, K8sClient};
|
||||||
use log::{info, trace};
|
use log::{info, trace};
|
||||||
|
|
||||||
#[tokio::main]
|
#[tokio::main]
|
||||||
|
|||||||
@@ -10,9 +10,10 @@ publish = false
|
|||||||
harmony = { path = "../../harmony" }
|
harmony = { path = "../../harmony" }
|
||||||
harmony_cli = { path = "../../harmony_cli" }
|
harmony_cli = { path = "../../harmony_cli" }
|
||||||
harmony_types = { path = "../../harmony_types" }
|
harmony_types = { path = "../../harmony_types" }
|
||||||
|
harmony_macros = { path = "../../harmony_macros" }
|
||||||
|
harmony-k8s = { path = "../../harmony-k8s" }
|
||||||
cidr.workspace = true
|
cidr.workspace = true
|
||||||
tokio.workspace = true
|
tokio.workspace = true
|
||||||
harmony_macros = { path = "../../harmony_macros" }
|
|
||||||
log.workspace = true
|
log.workspace = true
|
||||||
env_logger.workspace = true
|
env_logger.workspace = true
|
||||||
url.workspace = true
|
url.workspace = true
|
||||||
|
|||||||
@@ -1,4 +1,4 @@
|
|||||||
use harmony::topology::k8s::{DrainOptions, K8sClient, NodeFile};
|
use harmony_k8s::{K8sClient, NodeFile};
|
||||||
use log::{info, trace};
|
use log::{info, trace};
|
||||||
|
|
||||||
#[tokio::main]
|
#[tokio::main]
|
||||||
|
|||||||
@@ -5,7 +5,7 @@ use harmony::{
|
|||||||
#[tokio::main]
|
#[tokio::main]
|
||||||
async fn main() {
|
async fn main() {
|
||||||
let openbao = OpenbaoScore {
|
let openbao = OpenbaoScore {
|
||||||
host: String::new(),
|
host: "openbao.sebastien.sto1.nationtech.io".to_string(),
|
||||||
};
|
};
|
||||||
|
|
||||||
harmony_cli::run(
|
harmony_cli::run(
|
||||||
|
|||||||
@@ -1,5 +1,3 @@
|
|||||||
use std::str::FromStr;
|
|
||||||
|
|
||||||
use harmony::{
|
use harmony::{
|
||||||
inventory::Inventory,
|
inventory::Inventory,
|
||||||
modules::{k8s::apps::OperatorHubCatalogSourceScore, postgresql::CloudNativePgOperatorScore},
|
modules::{k8s::apps::OperatorHubCatalogSourceScore, postgresql::CloudNativePgOperatorScore},
|
||||||
@@ -9,7 +7,7 @@ use harmony::{
|
|||||||
#[tokio::main]
|
#[tokio::main]
|
||||||
async fn main() {
|
async fn main() {
|
||||||
let operatorhub_catalog = OperatorHubCatalogSourceScore::default();
|
let operatorhub_catalog = OperatorHubCatalogSourceScore::default();
|
||||||
let cnpg_operator = CloudNativePgOperatorScore::default();
|
let cnpg_operator = CloudNativePgOperatorScore::default_openshift();
|
||||||
|
|
||||||
harmony_cli::run(
|
harmony_cli::run(
|
||||||
Inventory::autoload(),
|
Inventory::autoload(),
|
||||||
|
|||||||
@@ -1,22 +1,13 @@
|
|||||||
use std::{
|
use std::sync::Arc;
|
||||||
net::{IpAddr, Ipv4Addr},
|
|
||||||
sync::Arc,
|
|
||||||
};
|
|
||||||
|
|
||||||
use async_trait::async_trait;
|
use async_trait::async_trait;
|
||||||
use cidr::Ipv4Cidr;
|
|
||||||
use harmony::{
|
use harmony::{
|
||||||
executors::ExecutorError,
|
executors::ExecutorError,
|
||||||
hardware::{HostCategory, Location, PhysicalHost, SwitchGroup},
|
|
||||||
infra::opnsense::OPNSenseManagementInterface,
|
|
||||||
inventory::Inventory,
|
inventory::Inventory,
|
||||||
modules::opnsense::node_exporter::NodeExporterScore,
|
modules::opnsense::node_exporter::NodeExporterScore,
|
||||||
topology::{
|
topology::{PreparationError, PreparationOutcome, Topology, node_exporter::NodeExporter},
|
||||||
HAClusterTopology, LogicalHost, PreparationError, PreparationOutcome, Topology,
|
|
||||||
UnmanagedRouter, node_exporter::NodeExporter,
|
|
||||||
},
|
|
||||||
};
|
};
|
||||||
use harmony_macros::{ip, ipv4, mac_address};
|
use harmony_macros::ip;
|
||||||
|
|
||||||
#[derive(Debug)]
|
#[derive(Debug)]
|
||||||
struct OpnSenseTopology {
|
struct OpnSenseTopology {
|
||||||
|
|||||||
@@ -1,8 +1,7 @@
|
|||||||
use harmony::{
|
use harmony::{
|
||||||
inventory::Inventory,
|
inventory::Inventory,
|
||||||
modules::postgresql::{
|
modules::postgresql::{
|
||||||
K8sPostgreSQLScore, PostgreSQLConnectionScore, PublicPostgreSQLScore,
|
PostgreSQLConnectionScore, PublicPostgreSQLScore, capability::PostgreSQLConfig,
|
||||||
capability::PostgreSQLConfig,
|
|
||||||
},
|
},
|
||||||
topology::K8sAnywhereTopology,
|
topology::K8sAnywhereTopology,
|
||||||
};
|
};
|
||||||
|
|||||||
@@ -1,4 +1,4 @@
|
|||||||
use std::{collections::HashMap, path::PathBuf, sync::Arc};
|
use std::{path::PathBuf, sync::Arc};
|
||||||
|
|
||||||
use harmony::{
|
use harmony::{
|
||||||
inventory::Inventory,
|
inventory::Inventory,
|
||||||
|
|||||||
@@ -1,4 +1,4 @@
|
|||||||
use std::{collections::HashMap, path::PathBuf, sync::Arc};
|
use std::{path::PathBuf, sync::Arc};
|
||||||
|
|
||||||
use harmony::{
|
use harmony::{
|
||||||
inventory::Inventory,
|
inventory::Inventory,
|
||||||
|
|||||||
14
examples/zitadel/Cargo.toml
Normal file
14
examples/zitadel/Cargo.toml
Normal file
@@ -0,0 +1,14 @@
|
|||||||
|
[package]
|
||||||
|
name = "example-zitadel"
|
||||||
|
edition = "2024"
|
||||||
|
version.workspace = true
|
||||||
|
readme.workspace = true
|
||||||
|
license.workspace = true
|
||||||
|
|
||||||
|
[dependencies]
|
||||||
|
harmony = { path = "../../harmony" }
|
||||||
|
harmony_cli = { path = "../../harmony_cli" }
|
||||||
|
harmony_macros = { path = "../../harmony_macros" }
|
||||||
|
harmony_types = { path = "../../harmony_types" }
|
||||||
|
tokio.workspace = true
|
||||||
|
url.workspace = true
|
||||||
20
examples/zitadel/src/main.rs
Normal file
20
examples/zitadel/src/main.rs
Normal file
@@ -0,0 +1,20 @@
|
|||||||
|
use harmony::{
|
||||||
|
inventory::Inventory, modules::zitadel::ZitadelScore, topology::K8sAnywhereTopology,
|
||||||
|
};
|
||||||
|
|
||||||
|
#[tokio::main]
|
||||||
|
async fn main() {
|
||||||
|
let zitadel = ZitadelScore {
|
||||||
|
host: "sso.sto1.nationtech.io".to_string(),
|
||||||
|
zitadel_version: "v4.12.1".to_string(),
|
||||||
|
};
|
||||||
|
|
||||||
|
harmony_cli::run(
|
||||||
|
Inventory::autoload(),
|
||||||
|
K8sAnywhereTopology::from_env(),
|
||||||
|
vec![Box::new(zitadel)],
|
||||||
|
None,
|
||||||
|
)
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
}
|
||||||
BIN
examples/zitadel/zitadel-9.24.0.tgz
Normal file
BIN
examples/zitadel/zitadel-9.24.0.tgz
Normal file
Binary file not shown.
23
harmony-k8s/Cargo.toml
Normal file
23
harmony-k8s/Cargo.toml
Normal file
@@ -0,0 +1,23 @@
|
|||||||
|
[package]
|
||||||
|
name = "harmony-k8s"
|
||||||
|
edition = "2024"
|
||||||
|
version.workspace = true
|
||||||
|
readme.workspace = true
|
||||||
|
license.workspace = true
|
||||||
|
|
||||||
|
[dependencies]
|
||||||
|
kube.workspace = true
|
||||||
|
k8s-openapi.workspace = true
|
||||||
|
tokio.workspace = true
|
||||||
|
tokio-retry.workspace = true
|
||||||
|
serde.workspace = true
|
||||||
|
serde_json.workspace = true
|
||||||
|
serde_yaml.workspace = true
|
||||||
|
log.workspace = true
|
||||||
|
similar.workspace = true
|
||||||
|
reqwest.workspace = true
|
||||||
|
url.workspace = true
|
||||||
|
inquire.workspace = true
|
||||||
|
|
||||||
|
[dev-dependencies]
|
||||||
|
pretty_assertions.workspace = true
|
||||||
593
harmony-k8s/src/apply.rs
Normal file
593
harmony-k8s/src/apply.rs
Normal file
@@ -0,0 +1,593 @@
|
|||||||
|
use kube::{
|
||||||
|
Client, Error, Resource,
|
||||||
|
api::{
|
||||||
|
Api, ApiResource, DynamicObject, GroupVersionKind, Patch, PatchParams, PostParams,
|
||||||
|
ResourceExt,
|
||||||
|
},
|
||||||
|
core::ErrorResponse,
|
||||||
|
discovery::Scope,
|
||||||
|
error::DiscoveryError,
|
||||||
|
};
|
||||||
|
use log::{debug, error, trace, warn};
|
||||||
|
use serde::{Serialize, de::DeserializeOwned};
|
||||||
|
use serde_json::Value;
|
||||||
|
use similar::TextDiff;
|
||||||
|
use url::Url;
|
||||||
|
|
||||||
|
use crate::client::K8sClient;
|
||||||
|
use crate::helper;
|
||||||
|
use crate::types::WriteMode;
|
||||||
|
|
||||||
|
/// The field-manager token sent with every server-side apply request.
|
||||||
|
pub const FIELD_MANAGER: &str = "harmony-k8s";
|
||||||
|
|
||||||
|
// ── Private helpers ──────────────────────────────────────────────────────────
|
||||||
|
|
||||||
|
/// Serialise any `Serialize` payload to a [`DynamicObject`] via JSON.
|
||||||
|
fn to_dynamic<T: Serialize>(payload: &T) -> Result<DynamicObject, Error> {
|
||||||
|
serde_json::from_value(serde_json::to_value(payload).map_err(Error::SerdeError)?)
|
||||||
|
.map_err(Error::SerdeError)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Fetch the current resource, display a unified diff against `payload`, and
|
||||||
|
/// return `()`. All output goes to stdout (same behaviour as before).
|
||||||
|
///
|
||||||
|
/// A 404 is treated as "resource would be created" — not an error.
|
||||||
|
async fn show_dry_run<T: Serialize>(
|
||||||
|
api: &Api<DynamicObject>,
|
||||||
|
name: &str,
|
||||||
|
payload: &T,
|
||||||
|
) -> Result<(), Error> {
|
||||||
|
let new_yaml = serde_yaml::to_string(payload)
|
||||||
|
.unwrap_or_else(|_| "Failed to serialize new resource".to_string());
|
||||||
|
|
||||||
|
match api.get(name).await {
|
||||||
|
Ok(current) => {
|
||||||
|
println!("\nDry-run for resource: '{name}'");
|
||||||
|
let mut current_val = serde_yaml::to_value(¤t).unwrap_or(serde_yaml::Value::Null);
|
||||||
|
if let Some(map) = current_val.as_mapping_mut() {
|
||||||
|
map.remove(&serde_yaml::Value::String("status".to_string()));
|
||||||
|
}
|
||||||
|
let current_yaml = serde_yaml::to_string(¤t_val)
|
||||||
|
.unwrap_or_else(|_| "Failed to serialize current resource".to_string());
|
||||||
|
|
||||||
|
if current_yaml == new_yaml {
|
||||||
|
println!("No changes detected.");
|
||||||
|
} else {
|
||||||
|
println!("Changes detected:");
|
||||||
|
let diff = TextDiff::from_lines(¤t_yaml, &new_yaml);
|
||||||
|
for change in diff.iter_all_changes() {
|
||||||
|
let sign = match change.tag() {
|
||||||
|
similar::ChangeTag::Delete => "-",
|
||||||
|
similar::ChangeTag::Insert => "+",
|
||||||
|
similar::ChangeTag::Equal => " ",
|
||||||
|
};
|
||||||
|
print!("{sign}{change}");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
Err(Error::Api(ErrorResponse { code: 404, .. })) => {
|
||||||
|
println!("\nDry-run for new resource: '{name}'");
|
||||||
|
println!("Resource does not exist. Would be created:");
|
||||||
|
for line in new_yaml.lines() {
|
||||||
|
println!("+{line}");
|
||||||
|
}
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
Err(e) => {
|
||||||
|
error!("Failed to fetch resource '{name}' for dry-run: {e}");
|
||||||
|
Err(e)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Execute the real (non-dry-run) apply, respecting [`WriteMode`].
|
||||||
|
async fn do_apply<T: Serialize + std::fmt::Debug>(
|
||||||
|
api: &Api<DynamicObject>,
|
||||||
|
name: &str,
|
||||||
|
payload: &T,
|
||||||
|
patch_params: &PatchParams,
|
||||||
|
write_mode: &WriteMode,
|
||||||
|
) -> Result<DynamicObject, Error> {
|
||||||
|
match write_mode {
|
||||||
|
WriteMode::CreateOrUpdate => {
|
||||||
|
// TODO refactor this arm to perform self.update and if fail with 404 self.create
|
||||||
|
// This will avoid the repetition of the api.patch and api.create calls within this
|
||||||
|
// function body. This makes the code more maintainable
|
||||||
|
match api.patch(name, patch_params, &Patch::Apply(payload)).await {
|
||||||
|
Ok(obj) => Ok(obj),
|
||||||
|
Err(Error::Api(ErrorResponse { code: 404, .. })) => {
|
||||||
|
debug!("Resource '{name}' not found via SSA, falling back to POST");
|
||||||
|
let dyn_obj = to_dynamic(payload)?;
|
||||||
|
api.create(&PostParams::default(), &dyn_obj)
|
||||||
|
.await
|
||||||
|
.map_err(|e| {
|
||||||
|
error!("Failed to create '{name}': {e}");
|
||||||
|
e
|
||||||
|
})
|
||||||
|
}
|
||||||
|
Err(e) => {
|
||||||
|
error!("Failed to apply '{name}': {e}");
|
||||||
|
Err(e)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
WriteMode::Create => {
|
||||||
|
let dyn_obj = to_dynamic(payload)?;
|
||||||
|
api.create(&PostParams::default(), &dyn_obj)
|
||||||
|
.await
|
||||||
|
.map_err(|e| {
|
||||||
|
error!("Failed to create '{name}': {e}");
|
||||||
|
e
|
||||||
|
})
|
||||||
|
}
|
||||||
|
WriteMode::Update => match api.patch(name, patch_params, &Patch::Apply(payload)).await {
|
||||||
|
Ok(obj) => Ok(obj),
|
||||||
|
Err(Error::Api(ErrorResponse { code: 404, .. })) => Err(Error::Api(ErrorResponse {
|
||||||
|
code: 404,
|
||||||
|
message: format!("Resource '{name}' not found and WriteMode is UpdateOnly"),
|
||||||
|
reason: "NotFound".to_string(),
|
||||||
|
status: "Failure".to_string(),
|
||||||
|
})),
|
||||||
|
Err(e) => {
|
||||||
|
error!("Failed to update '{name}': {e}");
|
||||||
|
Err(e)
|
||||||
|
}
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ── Public API ───────────────────────────────────────────────────────────────
|
||||||
|
|
||||||
|
impl K8sClient {
|
||||||
|
/// Server-side apply: create if absent, update if present.
|
||||||
|
/// Equivalent to `kubectl apply`.
|
||||||
|
pub async fn apply<K>(&self, resource: &K, namespace: Option<&str>) -> Result<K, Error>
|
||||||
|
where
|
||||||
|
K: Resource + Clone + std::fmt::Debug + DeserializeOwned + Serialize,
|
||||||
|
<K as Resource>::DynamicType: Default,
|
||||||
|
{
|
||||||
|
self.apply_with_strategy(resource, namespace, WriteMode::CreateOrUpdate)
|
||||||
|
.await
|
||||||
|
}
|
||||||
|
|
||||||
|
/// POST only — returns an error if the resource already exists.
|
||||||
|
pub async fn create<K>(&self, resource: &K, namespace: Option<&str>) -> Result<K, Error>
|
||||||
|
where
|
||||||
|
K: Resource + Clone + std::fmt::Debug + DeserializeOwned + Serialize,
|
||||||
|
<K as Resource>::DynamicType: Default,
|
||||||
|
{
|
||||||
|
self.apply_with_strategy(resource, namespace, WriteMode::Create)
|
||||||
|
.await
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Server-side apply only — returns an error if the resource does not exist.
|
||||||
|
pub async fn update<K>(&self, resource: &K, namespace: Option<&str>) -> Result<K, Error>
|
||||||
|
where
|
||||||
|
K: Resource + Clone + std::fmt::Debug + DeserializeOwned + Serialize,
|
||||||
|
<K as Resource>::DynamicType: Default,
|
||||||
|
{
|
||||||
|
self.apply_with_strategy(resource, namespace, WriteMode::Update)
|
||||||
|
.await
|
||||||
|
}
|
||||||
|
|
||||||
|
pub async fn apply_with_strategy<K>(
|
||||||
|
&self,
|
||||||
|
resource: &K,
|
||||||
|
namespace: Option<&str>,
|
||||||
|
write_mode: WriteMode,
|
||||||
|
) -> Result<K, Error>
|
||||||
|
where
|
||||||
|
K: Resource + Clone + std::fmt::Debug + DeserializeOwned + Serialize,
|
||||||
|
<K as Resource>::DynamicType: Default,
|
||||||
|
{
|
||||||
|
debug!(
|
||||||
|
"apply_with_strategy: {:?} ns={:?}",
|
||||||
|
resource.meta().name,
|
||||||
|
namespace
|
||||||
|
);
|
||||||
|
trace!("{:#}", serde_json::to_value(resource).unwrap_or_default());
|
||||||
|
|
||||||
|
let dyntype = K::DynamicType::default();
|
||||||
|
let gvk = GroupVersionKind {
|
||||||
|
group: K::group(&dyntype).to_string(),
|
||||||
|
version: K::version(&dyntype).to_string(),
|
||||||
|
kind: K::kind(&dyntype).to_string(),
|
||||||
|
};
|
||||||
|
|
||||||
|
let discovery = self.discovery().await?;
|
||||||
|
let (ar, caps) = discovery.resolve_gvk(&gvk).ok_or_else(|| {
|
||||||
|
Error::Discovery(DiscoveryError::MissingResource(format!(
|
||||||
|
"Cannot resolve GVK: {gvk:?}"
|
||||||
|
)))
|
||||||
|
})?;
|
||||||
|
|
||||||
|
let effective_ns = if caps.scope == Scope::Cluster {
|
||||||
|
None
|
||||||
|
} else {
|
||||||
|
namespace.or_else(|| resource.meta().namespace.as_deref())
|
||||||
|
};
|
||||||
|
|
||||||
|
let api: Api<DynamicObject> =
|
||||||
|
get_dynamic_api(ar, caps, self.client.clone(), effective_ns, false);
|
||||||
|
|
||||||
|
let name = resource
|
||||||
|
.meta()
|
||||||
|
.name
|
||||||
|
.as_deref()
|
||||||
|
.expect("Kubernetes resource must have a name");
|
||||||
|
|
||||||
|
if self.dry_run {
|
||||||
|
show_dry_run(&api, name, resource).await?;
|
||||||
|
return Ok(resource.clone());
|
||||||
|
}
|
||||||
|
|
||||||
|
let patch_params = PatchParams::apply(FIELD_MANAGER);
|
||||||
|
do_apply(&api, name, resource, &patch_params, &write_mode)
|
||||||
|
.await
|
||||||
|
.and_then(helper::dyn_to_typed)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Applies resources in order, one at a time
|
||||||
|
pub async fn apply_many<K>(&self, resources: &[K], ns: Option<&str>) -> Result<Vec<K>, Error>
|
||||||
|
where
|
||||||
|
K: Resource + Clone + std::fmt::Debug + DeserializeOwned + Serialize,
|
||||||
|
<K as Resource>::DynamicType: Default,
|
||||||
|
{
|
||||||
|
let mut result = Vec::new();
|
||||||
|
for r in resources.iter() {
|
||||||
|
let res = self.apply(r, ns).await;
|
||||||
|
if res.is_err() {
|
||||||
|
// NOTE: this may log sensitive data; downgrade to debug if needed.
|
||||||
|
warn!(
|
||||||
|
"Failed to apply k8s resource: {}",
|
||||||
|
serde_json::to_string_pretty(r).map_err(Error::SerdeError)?
|
||||||
|
);
|
||||||
|
}
|
||||||
|
result.push(res?);
|
||||||
|
}
|
||||||
|
Ok(result)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Apply a [`DynamicObject`] resource using server-side apply.
|
||||||
|
pub async fn apply_dynamic(
|
||||||
|
&self,
|
||||||
|
resource: &DynamicObject,
|
||||||
|
namespace: Option<&str>,
|
||||||
|
force_conflicts: bool,
|
||||||
|
) -> Result<DynamicObject, Error> {
|
||||||
|
trace!("apply_dynamic {resource:#?} ns={namespace:?} force={force_conflicts}");
|
||||||
|
|
||||||
|
let discovery = self.discovery().await?;
|
||||||
|
let type_meta = resource.types.as_ref().ok_or_else(|| {
|
||||||
|
Error::BuildRequest(kube::core::request::Error::Validation(
|
||||||
|
"DynamicObject must have types (apiVersion and kind)".to_string(),
|
||||||
|
))
|
||||||
|
})?;
|
||||||
|
|
||||||
|
let gvk = GroupVersionKind::try_from(type_meta).map_err(|_| {
|
||||||
|
Error::BuildRequest(kube::core::request::Error::Validation(format!(
|
||||||
|
"Invalid GVK in DynamicObject: {type_meta:?}"
|
||||||
|
)))
|
||||||
|
})?;
|
||||||
|
|
||||||
|
let (ar, caps) = discovery.resolve_gvk(&gvk).ok_or_else(|| {
|
||||||
|
Error::Discovery(DiscoveryError::MissingResource(format!(
|
||||||
|
"Cannot resolve GVK: {gvk:?}"
|
||||||
|
)))
|
||||||
|
})?;
|
||||||
|
|
||||||
|
let effective_ns = if caps.scope == Scope::Cluster {
|
||||||
|
None
|
||||||
|
} else {
|
||||||
|
namespace.or_else(|| resource.metadata.namespace.as_deref())
|
||||||
|
};
|
||||||
|
|
||||||
|
let api = get_dynamic_api(ar, caps, self.client.clone(), effective_ns, false);
|
||||||
|
let name = resource.metadata.name.as_deref().ok_or_else(|| {
|
||||||
|
Error::BuildRequest(kube::core::request::Error::Validation(
|
||||||
|
"DynamicObject must have metadata.name".to_string(),
|
||||||
|
))
|
||||||
|
})?;
|
||||||
|
|
||||||
|
debug!(
|
||||||
|
"apply_dynamic kind={:?} name='{name}' ns={effective_ns:?}",
|
||||||
|
resource.types.as_ref().map(|t| &t.kind),
|
||||||
|
);
|
||||||
|
|
||||||
|
// NOTE would be nice to improve cohesion between the dynamic and typed apis and avoid copy
|
||||||
|
// pasting the dry_run and some more logic
|
||||||
|
if self.dry_run {
|
||||||
|
show_dry_run(&api, name, resource).await?;
|
||||||
|
return Ok(resource.clone());
|
||||||
|
}
|
||||||
|
|
||||||
|
let mut patch_params = PatchParams::apply(FIELD_MANAGER);
|
||||||
|
patch_params.force = force_conflicts;
|
||||||
|
|
||||||
|
do_apply(
|
||||||
|
&api,
|
||||||
|
name,
|
||||||
|
resource,
|
||||||
|
&patch_params,
|
||||||
|
&WriteMode::CreateOrUpdate,
|
||||||
|
)
|
||||||
|
.await
|
||||||
|
}
|
||||||
|
|
||||||
|
pub async fn apply_dynamic_many(
|
||||||
|
&self,
|
||||||
|
resources: &[DynamicObject],
|
||||||
|
namespace: Option<&str>,
|
||||||
|
force_conflicts: bool,
|
||||||
|
) -> Result<Vec<DynamicObject>, Error> {
|
||||||
|
let mut result = Vec::new();
|
||||||
|
for r in resources.iter() {
|
||||||
|
result.push(self.apply_dynamic(r, namespace, force_conflicts).await?);
|
||||||
|
}
|
||||||
|
Ok(result)
|
||||||
|
}
|
||||||
|
|
||||||
|
pub async fn apply_yaml_many(
|
||||||
|
&self,
|
||||||
|
#[allow(clippy::ptr_arg)] yaml: &Vec<serde_yaml::Value>,
|
||||||
|
ns: Option<&str>,
|
||||||
|
) -> Result<(), Error> {
|
||||||
|
for y in yaml.iter() {
|
||||||
|
self.apply_yaml(y, ns).await?;
|
||||||
|
}
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
pub async fn apply_yaml(
|
||||||
|
&self,
|
||||||
|
yaml: &serde_yaml::Value,
|
||||||
|
ns: Option<&str>,
|
||||||
|
) -> Result<(), Error> {
|
||||||
|
// NOTE wouldn't it be possible to parse this into a DynamicObject and simply call
|
||||||
|
// apply_dynamic instead of reimplementing api interactions?
|
||||||
|
let obj: DynamicObject =
|
||||||
|
serde_yaml::from_value(yaml.clone()).expect("YAML must deserialise to DynamicObject");
|
||||||
|
let name = obj.metadata.name.as_ref().expect("YAML must have a name");
|
||||||
|
|
||||||
|
let api_version = yaml["apiVersion"].as_str().expect("missing apiVersion");
|
||||||
|
let kind = yaml["kind"].as_str().expect("missing kind");
|
||||||
|
|
||||||
|
let mut it = api_version.splitn(2, '/');
|
||||||
|
let first = it.next().unwrap();
|
||||||
|
let (g, v) = match it.next() {
|
||||||
|
Some(second) => (first, second),
|
||||||
|
None => ("", first),
|
||||||
|
};
|
||||||
|
|
||||||
|
let api_resource = ApiResource::from_gvk(&GroupVersionKind::gvk(g, v, kind));
|
||||||
|
let namespace = ns.unwrap_or_else(|| {
|
||||||
|
obj.metadata
|
||||||
|
.namespace
|
||||||
|
.as_deref()
|
||||||
|
.expect("YAML must have a namespace when ns is not provided")
|
||||||
|
});
|
||||||
|
|
||||||
|
let api: Api<DynamicObject> =
|
||||||
|
Api::namespaced_with(self.client.clone(), namespace, &api_resource);
|
||||||
|
|
||||||
|
println!("Applying '{name}' in namespace '{namespace}'...");
|
||||||
|
let patch_params = PatchParams::apply(FIELD_MANAGER);
|
||||||
|
let result = api.patch(name, &patch_params, &Patch::Apply(&obj)).await?;
|
||||||
|
println!("Successfully applied '{}'.", result.name_any());
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Equivalent to `kubectl apply -f <url>`.
|
||||||
|
pub async fn apply_url(&self, url: Url, ns: Option<&str>) -> Result<(), Error> {
|
||||||
|
let patch_params = PatchParams::apply(FIELD_MANAGER);
|
||||||
|
let discovery = self.discovery().await?;
|
||||||
|
|
||||||
|
let yaml = reqwest::get(url)
|
||||||
|
.await
|
||||||
|
.expect("Could not fetch URL")
|
||||||
|
.text()
|
||||||
|
.await
|
||||||
|
.expect("Could not read response body");
|
||||||
|
|
||||||
|
for doc in multidoc_deserialize(&yaml).expect("Failed to parse YAML from URL") {
|
||||||
|
let obj: DynamicObject =
|
||||||
|
serde_yaml::from_value(doc).expect("YAML document is not a valid object");
|
||||||
|
let namespace = obj.metadata.namespace.as_deref().or(ns);
|
||||||
|
let type_meta = obj.types.as_ref().expect("Object is missing TypeMeta");
|
||||||
|
let gvk =
|
||||||
|
GroupVersionKind::try_from(type_meta).expect("Object has invalid GroupVersionKind");
|
||||||
|
let name = obj.name_any();
|
||||||
|
|
||||||
|
if let Some((ar, caps)) = discovery.resolve_gvk(&gvk) {
|
||||||
|
let api = get_dynamic_api(ar, caps, self.client.clone(), namespace, false);
|
||||||
|
trace!(
|
||||||
|
"Applying {}:\n{}",
|
||||||
|
gvk.kind,
|
||||||
|
serde_yaml::to_string(&obj).unwrap_or_default()
|
||||||
|
);
|
||||||
|
let data: Value = serde_json::to_value(&obj).expect("serialisation failed");
|
||||||
|
let _r = api.patch(&name, &patch_params, &Patch::Apply(data)).await?;
|
||||||
|
debug!("Applied {} '{name}'", gvk.kind);
|
||||||
|
} else {
|
||||||
|
warn!("Skipping document with unknown GVK: {gvk:?}");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Build a dynamic API client from a [`DynamicObject`]'s type metadata.
|
||||||
|
pub(crate) fn get_api_for_dynamic_object(
|
||||||
|
&self,
|
||||||
|
object: &DynamicObject,
|
||||||
|
ns: Option<&str>,
|
||||||
|
) -> Result<Api<DynamicObject>, Error> {
|
||||||
|
let ar = object
|
||||||
|
.types
|
||||||
|
.as_ref()
|
||||||
|
.and_then(|t| {
|
||||||
|
let parts: Vec<&str> = t.api_version.split('/').collect();
|
||||||
|
match parts.as_slice() {
|
||||||
|
[version] => Some(ApiResource::from_gvk(&GroupVersionKind::gvk(
|
||||||
|
"", version, &t.kind,
|
||||||
|
))),
|
||||||
|
[group, version] => Some(ApiResource::from_gvk(&GroupVersionKind::gvk(
|
||||||
|
group, version, &t.kind,
|
||||||
|
))),
|
||||||
|
_ => None,
|
||||||
|
}
|
||||||
|
})
|
||||||
|
.ok_or_else(|| {
|
||||||
|
Error::BuildRequest(kube::core::request::Error::Validation(format!(
|
||||||
|
"Invalid apiVersion in DynamicObject: {object:#?}"
|
||||||
|
)))
|
||||||
|
})?;
|
||||||
|
|
||||||
|
Ok(match ns {
|
||||||
|
Some(ns) => Api::namespaced_with(self.client.clone(), ns, &ar),
|
||||||
|
None => Api::default_namespaced_with(self.client.clone(), &ar),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ── Free functions ───────────────────────────────────────────────────────────
|
||||||
|
|
||||||
|
pub(crate) fn get_dynamic_api(
|
||||||
|
resource: kube::api::ApiResource,
|
||||||
|
capabilities: kube::discovery::ApiCapabilities,
|
||||||
|
client: Client,
|
||||||
|
ns: Option<&str>,
|
||||||
|
all: bool,
|
||||||
|
) -> Api<DynamicObject> {
|
||||||
|
if capabilities.scope == Scope::Cluster || all {
|
||||||
|
Api::all_with(client, &resource)
|
||||||
|
} else if let Some(namespace) = ns {
|
||||||
|
Api::namespaced_with(client, namespace, &resource)
|
||||||
|
} else {
|
||||||
|
Api::default_namespaced_with(client, &resource)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) fn multidoc_deserialize(
|
||||||
|
data: &str,
|
||||||
|
) -> Result<Vec<serde_yaml::Value>, serde_yaml::Error> {
|
||||||
|
use serde::Deserialize;
|
||||||
|
let mut docs = vec![];
|
||||||
|
for de in serde_yaml::Deserializer::from_str(data) {
|
||||||
|
docs.push(serde_yaml::Value::deserialize(de)?);
|
||||||
|
}
|
||||||
|
Ok(docs)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ── Tests ────────────────────────────────────────────────────────────────────
|
||||||
|
|
||||||
|
#[cfg(test)]
|
||||||
|
mod apply_tests {
|
||||||
|
use std::collections::BTreeMap;
|
||||||
|
use std::time::{SystemTime, UNIX_EPOCH};
|
||||||
|
|
||||||
|
use k8s_openapi::api::core::v1::ConfigMap;
|
||||||
|
use k8s_openapi::apimachinery::pkg::apis::meta::v1::ObjectMeta;
|
||||||
|
use kube::api::{DeleteParams, TypeMeta};
|
||||||
|
|
||||||
|
use super::*;
|
||||||
|
|
||||||
|
#[tokio::test]
|
||||||
|
#[ignore = "requires kubernetes cluster"]
|
||||||
|
async fn apply_creates_new_configmap() {
|
||||||
|
let client = K8sClient::try_default().await.unwrap();
|
||||||
|
let ns = "default";
|
||||||
|
let name = format!(
|
||||||
|
"test-cm-{}",
|
||||||
|
SystemTime::now()
|
||||||
|
.duration_since(UNIX_EPOCH)
|
||||||
|
.unwrap()
|
||||||
|
.as_millis()
|
||||||
|
);
|
||||||
|
|
||||||
|
let cm = ConfigMap {
|
||||||
|
metadata: ObjectMeta {
|
||||||
|
name: Some(name.clone()),
|
||||||
|
namespace: Some(ns.to_string()),
|
||||||
|
..Default::default()
|
||||||
|
},
|
||||||
|
data: Some(BTreeMap::from([("key1".to_string(), "value1".to_string())])),
|
||||||
|
..Default::default()
|
||||||
|
};
|
||||||
|
|
||||||
|
assert!(client.apply(&cm, Some(ns)).await.is_ok());
|
||||||
|
|
||||||
|
let api: Api<ConfigMap> = Api::namespaced(client.client.clone(), ns);
|
||||||
|
let _ = api.delete(&name, &DeleteParams::default()).await;
|
||||||
|
}
|
||||||
|
|
||||||
|
#[tokio::test]
|
||||||
|
#[ignore = "requires kubernetes cluster"]
|
||||||
|
async fn apply_is_idempotent() {
|
||||||
|
let client = K8sClient::try_default().await.unwrap();
|
||||||
|
let ns = "default";
|
||||||
|
let name = format!(
|
||||||
|
"test-idem-{}",
|
||||||
|
SystemTime::now()
|
||||||
|
.duration_since(UNIX_EPOCH)
|
||||||
|
.unwrap()
|
||||||
|
.as_millis()
|
||||||
|
);
|
||||||
|
|
||||||
|
let cm = ConfigMap {
|
||||||
|
metadata: ObjectMeta {
|
||||||
|
name: Some(name.clone()),
|
||||||
|
namespace: Some(ns.to_string()),
|
||||||
|
..Default::default()
|
||||||
|
},
|
||||||
|
data: Some(BTreeMap::from([("key".to_string(), "value".to_string())])),
|
||||||
|
..Default::default()
|
||||||
|
};
|
||||||
|
|
||||||
|
assert!(
|
||||||
|
client.apply(&cm, Some(ns)).await.is_ok(),
|
||||||
|
"first apply failed"
|
||||||
|
);
|
||||||
|
assert!(
|
||||||
|
client.apply(&cm, Some(ns)).await.is_ok(),
|
||||||
|
"second apply failed (not idempotent)"
|
||||||
|
);
|
||||||
|
|
||||||
|
let api: Api<ConfigMap> = Api::namespaced(client.client.clone(), ns);
|
||||||
|
let _ = api.delete(&name, &DeleteParams::default()).await;
|
||||||
|
}
|
||||||
|
|
||||||
|
#[tokio::test]
|
||||||
|
#[ignore = "requires kubernetes cluster"]
|
||||||
|
async fn apply_dynamic_creates_new_resource() {
|
||||||
|
let client = K8sClient::try_default().await.unwrap();
|
||||||
|
let ns = "default";
|
||||||
|
let name = format!(
|
||||||
|
"test-dyn-{}",
|
||||||
|
SystemTime::now()
|
||||||
|
.duration_since(UNIX_EPOCH)
|
||||||
|
.unwrap()
|
||||||
|
.as_millis()
|
||||||
|
);
|
||||||
|
|
||||||
|
let obj = DynamicObject {
|
||||||
|
types: Some(TypeMeta {
|
||||||
|
api_version: "v1".to_string(),
|
||||||
|
kind: "ConfigMap".to_string(),
|
||||||
|
}),
|
||||||
|
metadata: ObjectMeta {
|
||||||
|
name: Some(name.clone()),
|
||||||
|
namespace: Some(ns.to_string()),
|
||||||
|
..Default::default()
|
||||||
|
},
|
||||||
|
data: serde_json::json!({}),
|
||||||
|
};
|
||||||
|
|
||||||
|
let result = client.apply_dynamic(&obj, Some(ns), false).await;
|
||||||
|
assert!(result.is_ok(), "apply_dynamic failed: {:?}", result.err());
|
||||||
|
|
||||||
|
let api: Api<ConfigMap> = Api::namespaced(client.client.clone(), ns);
|
||||||
|
let _ = api.delete(&name, &DeleteParams::default()).await;
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -25,9 +25,9 @@
|
|||||||
//!
|
//!
|
||||||
//! ## Example
|
//! ## Example
|
||||||
//!
|
//!
|
||||||
//! ```rust,no_run
|
//! ```
|
||||||
//! use harmony::topology::k8s::{K8sClient, helper};
|
//! use harmony_k8s::{K8sClient, helper};
|
||||||
//! use harmony::topology::KubernetesDistribution;
|
//! use harmony_k8s::KubernetesDistribution;
|
||||||
//!
|
//!
|
||||||
//! async fn write_network_config(client: &K8sClient, node: &str) {
|
//! async fn write_network_config(client: &K8sClient, node: &str) {
|
||||||
//! // Create a bundle with platform-specific RBAC
|
//! // Create a bundle with platform-specific RBAC
|
||||||
@@ -56,7 +56,7 @@ use kube::{Error, Resource, ResourceExt, api::DynamicObject};
|
|||||||
use serde::Serialize;
|
use serde::Serialize;
|
||||||
use serde_json;
|
use serde_json;
|
||||||
|
|
||||||
use crate::domain::topology::k8s::K8sClient;
|
use crate::K8sClient;
|
||||||
|
|
||||||
/// A ResourceBundle represents a logical unit of work consisting of multiple
|
/// A ResourceBundle represents a logical unit of work consisting of multiple
|
||||||
/// Kubernetes resources that should be applied or deleted together.
|
/// Kubernetes resources that should be applied or deleted together.
|
||||||
99
harmony-k8s/src/client.rs
Normal file
99
harmony-k8s/src/client.rs
Normal file
@@ -0,0 +1,99 @@
|
|||||||
|
use std::sync::Arc;
|
||||||
|
|
||||||
|
use kube::config::{KubeConfigOptions, Kubeconfig};
|
||||||
|
use kube::{Client, Config, Discovery, Error};
|
||||||
|
use log::error;
|
||||||
|
use serde::Serialize;
|
||||||
|
use tokio::sync::OnceCell;
|
||||||
|
|
||||||
|
use crate::types::KubernetesDistribution;
|
||||||
|
|
||||||
|
// TODO not cool, should use a proper configuration mechanism
|
||||||
|
// cli arg, env var, config file
|
||||||
|
fn read_dry_run_from_env() -> bool {
|
||||||
|
std::env::var("DRY_RUN")
|
||||||
|
.map(|v| v == "true" || v == "1")
|
||||||
|
.unwrap_or(false)
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Clone)]
|
||||||
|
pub struct K8sClient {
|
||||||
|
pub(crate) client: Client,
|
||||||
|
/// When `true` no mutation is sent to the API server; diffs are printed
|
||||||
|
/// to stdout instead. Initialised from the `DRY_RUN` environment variable.
|
||||||
|
pub(crate) dry_run: bool,
|
||||||
|
pub(crate) k8s_distribution: Arc<OnceCell<KubernetesDistribution>>,
|
||||||
|
pub(crate) discovery: Arc<OnceCell<Discovery>>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Serialize for K8sClient {
|
||||||
|
fn serialize<S>(&self, _serializer: S) -> Result<S::Ok, S::Error>
|
||||||
|
where
|
||||||
|
S: serde::Serializer,
|
||||||
|
{
|
||||||
|
todo!("K8sClient serialization is not meaningful; remove this impl if unused")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl std::fmt::Debug for K8sClient {
|
||||||
|
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||||
|
f.write_fmt(format_args!(
|
||||||
|
"K8sClient {{ namespace: {}, dry_run: {} }}",
|
||||||
|
self.client.default_namespace(),
|
||||||
|
self.dry_run,
|
||||||
|
))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl K8sClient {
|
||||||
|
/// Create a client, reading `DRY_RUN` from the environment.
|
||||||
|
pub fn new(client: Client) -> Self {
|
||||||
|
Self {
|
||||||
|
dry_run: read_dry_run_from_env(),
|
||||||
|
client,
|
||||||
|
k8s_distribution: Arc::new(OnceCell::new()),
|
||||||
|
discovery: Arc::new(OnceCell::new()),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Create a client that always operates in dry-run mode, regardless of
|
||||||
|
/// the environment variable.
|
||||||
|
pub fn new_dry_run(client: Client) -> Self {
|
||||||
|
Self {
|
||||||
|
dry_run: true,
|
||||||
|
..Self::new(client)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Returns `true` if this client is operating in dry-run mode.
|
||||||
|
pub fn is_dry_run(&self) -> bool {
|
||||||
|
self.dry_run
|
||||||
|
}
|
||||||
|
|
||||||
|
pub async fn try_default() -> Result<Self, Error> {
|
||||||
|
Ok(Self::new(Client::try_default().await?))
|
||||||
|
}
|
||||||
|
|
||||||
|
pub async fn from_kubeconfig(path: &str) -> Option<Self> {
|
||||||
|
Self::from_kubeconfig_with_opts(path, &KubeConfigOptions::default()).await
|
||||||
|
}
|
||||||
|
|
||||||
|
pub async fn from_kubeconfig_with_context(path: &str, context: Option<String>) -> Option<Self> {
|
||||||
|
let mut opts = KubeConfigOptions::default();
|
||||||
|
opts.context = context;
|
||||||
|
Self::from_kubeconfig_with_opts(path, &opts).await
|
||||||
|
}
|
||||||
|
|
||||||
|
pub async fn from_kubeconfig_with_opts(path: &str, opts: &KubeConfigOptions) -> Option<Self> {
|
||||||
|
let k = match Kubeconfig::read_from(path) {
|
||||||
|
Ok(k) => k,
|
||||||
|
Err(e) => {
|
||||||
|
error!("Failed to load kubeconfig from {path}: {e}");
|
||||||
|
return None;
|
||||||
|
}
|
||||||
|
};
|
||||||
|
Some(Self::new(
|
||||||
|
Client::try_from(Config::from_custom_kubeconfig(k, opts).await.unwrap()).unwrap(),
|
||||||
|
))
|
||||||
|
}
|
||||||
|
}
|
||||||
83
harmony-k8s/src/discovery.rs
Normal file
83
harmony-k8s/src/discovery.rs
Normal file
@@ -0,0 +1,83 @@
|
|||||||
|
use std::time::Duration;
|
||||||
|
|
||||||
|
use kube::{Discovery, Error};
|
||||||
|
use log::{debug, error, info, trace, warn};
|
||||||
|
use tokio::sync::Mutex;
|
||||||
|
use tokio_retry::{Retry, strategy::ExponentialBackoff};
|
||||||
|
|
||||||
|
use crate::client::K8sClient;
|
||||||
|
use crate::types::KubernetesDistribution;
|
||||||
|
|
||||||
|
impl K8sClient {
|
||||||
|
pub async fn get_apiserver_version(
|
||||||
|
&self,
|
||||||
|
) -> Result<k8s_openapi::apimachinery::pkg::version::Info, Error> {
|
||||||
|
self.client.clone().apiserver_version().await
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Runs (and caches) Kubernetes API discovery with exponential-backoff retries.
|
||||||
|
pub async fn discovery(&self) -> Result<&Discovery, Error> {
|
||||||
|
let retry_strategy = ExponentialBackoff::from_millis(1000)
|
||||||
|
.max_delay(Duration::from_secs(32))
|
||||||
|
.take(6);
|
||||||
|
|
||||||
|
let attempt = Mutex::new(0u32);
|
||||||
|
Retry::spawn(retry_strategy, || async {
|
||||||
|
let mut n = attempt.lock().await;
|
||||||
|
*n += 1;
|
||||||
|
match self
|
||||||
|
.discovery
|
||||||
|
.get_or_try_init(async || {
|
||||||
|
debug!("Running Kubernetes API discovery (attempt {})", *n);
|
||||||
|
let d = Discovery::new(self.client.clone()).run().await?;
|
||||||
|
debug!("Kubernetes API discovery completed");
|
||||||
|
Ok(d)
|
||||||
|
})
|
||||||
|
.await
|
||||||
|
{
|
||||||
|
Ok(d) => Ok(d),
|
||||||
|
Err(e) => {
|
||||||
|
warn!("Kubernetes API discovery failed (attempt {}): {}", *n, e);
|
||||||
|
Err(e)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
})
|
||||||
|
.await
|
||||||
|
.map_err(|e| {
|
||||||
|
error!("Kubernetes API discovery failed after all retries: {}", e);
|
||||||
|
e
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Detect which Kubernetes distribution is running. Result is cached for
|
||||||
|
/// the lifetime of the client.
|
||||||
|
pub async fn get_k8s_distribution(&self) -> Result<KubernetesDistribution, Error> {
|
||||||
|
self.k8s_distribution
|
||||||
|
.get_or_try_init(async || {
|
||||||
|
debug!("Detecting Kubernetes distribution");
|
||||||
|
let api_groups = self.client.list_api_groups().await?;
|
||||||
|
trace!("list_api_groups: {:?}", api_groups);
|
||||||
|
|
||||||
|
let version = self.get_apiserver_version().await?;
|
||||||
|
|
||||||
|
if api_groups
|
||||||
|
.groups
|
||||||
|
.iter()
|
||||||
|
.any(|g| g.name == "project.openshift.io")
|
||||||
|
{
|
||||||
|
info!("Detected distribution: OpenshiftFamily");
|
||||||
|
return Ok(KubernetesDistribution::OpenshiftFamily);
|
||||||
|
}
|
||||||
|
|
||||||
|
if version.git_version.contains("k3s") {
|
||||||
|
info!("Detected distribution: K3sFamily");
|
||||||
|
return Ok(KubernetesDistribution::K3sFamily);
|
||||||
|
}
|
||||||
|
|
||||||
|
info!("Distribution not identified, using Default");
|
||||||
|
Ok(KubernetesDistribution::Default)
|
||||||
|
})
|
||||||
|
.await
|
||||||
|
.cloned()
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -1,7 +1,7 @@
|
|||||||
use std::collections::BTreeMap;
|
use std::collections::BTreeMap;
|
||||||
use std::time::Duration;
|
use std::time::Duration;
|
||||||
|
|
||||||
use crate::topology::KubernetesDistribution;
|
use crate::KubernetesDistribution;
|
||||||
|
|
||||||
use super::bundle::ResourceBundle;
|
use super::bundle::ResourceBundle;
|
||||||
use super::config::PRIVILEGED_POD_IMAGE;
|
use super::config::PRIVILEGED_POD_IMAGE;
|
||||||
@@ -133,9 +133,9 @@ pub fn host_root_volume() -> (Volume, VolumeMount) {
|
|||||||
///
|
///
|
||||||
/// # Example
|
/// # Example
|
||||||
///
|
///
|
||||||
/// ```rust,no_run
|
/// ```
|
||||||
/// # use harmony::topology::k8s::helper::{build_privileged_bundle, PrivilegedPodConfig};
|
/// use harmony_k8s::helper::{build_privileged_bundle, PrivilegedPodConfig};
|
||||||
/// # use harmony::topology::KubernetesDistribution;
|
/// use harmony_k8s::KubernetesDistribution;
|
||||||
/// let bundle = build_privileged_bundle(
|
/// let bundle = build_privileged_bundle(
|
||||||
/// PrivilegedPodConfig {
|
/// PrivilegedPodConfig {
|
||||||
/// name: "network-setup".to_string(),
|
/// name: "network-setup".to_string(),
|
||||||
13
harmony-k8s/src/lib.rs
Normal file
13
harmony-k8s/src/lib.rs
Normal file
@@ -0,0 +1,13 @@
|
|||||||
|
pub mod apply;
|
||||||
|
pub mod bundle;
|
||||||
|
pub mod client;
|
||||||
|
pub mod config;
|
||||||
|
pub mod discovery;
|
||||||
|
pub mod helper;
|
||||||
|
pub mod node;
|
||||||
|
pub mod pod;
|
||||||
|
pub mod resources;
|
||||||
|
pub mod types;
|
||||||
|
|
||||||
|
pub use client::K8sClient;
|
||||||
|
pub use types::{DrainOptions, KubernetesDistribution, NodeFile, ScopeResolver, WriteMode};
|
||||||
3
harmony-k8s/src/main.rs
Normal file
3
harmony-k8s/src/main.rs
Normal file
@@ -0,0 +1,3 @@
|
|||||||
|
fn main() {
|
||||||
|
println!("Hello, world!");
|
||||||
|
}
|
||||||
722
harmony-k8s/src/node.rs
Normal file
722
harmony-k8s/src/node.rs
Normal file
@@ -0,0 +1,722 @@
|
|||||||
|
use std::collections::BTreeMap;
|
||||||
|
use std::time::{Duration, SystemTime, UNIX_EPOCH};
|
||||||
|
|
||||||
|
use k8s_openapi::api::core::v1::{
|
||||||
|
ConfigMap, ConfigMapVolumeSource, Node, Pod, Volume, VolumeMount,
|
||||||
|
};
|
||||||
|
use k8s_openapi::apimachinery::pkg::apis::meta::v1::ObjectMeta;
|
||||||
|
use kube::{
|
||||||
|
Error,
|
||||||
|
api::{Api, DeleteParams, EvictParams, ListParams, PostParams},
|
||||||
|
core::ErrorResponse,
|
||||||
|
error::DiscoveryError,
|
||||||
|
};
|
||||||
|
use log::{debug, error, info, warn};
|
||||||
|
use tokio::time::sleep;
|
||||||
|
|
||||||
|
use crate::client::K8sClient;
|
||||||
|
use crate::helper::{self, PrivilegedPodConfig};
|
||||||
|
use crate::types::{DrainOptions, NodeFile};
|
||||||
|
|
||||||
|
impl K8sClient {
|
||||||
|
pub async fn cordon_node(&self, node_name: &str) -> Result<(), Error> {
|
||||||
|
Api::<Node>::all(self.client.clone())
|
||||||
|
.cordon(node_name)
|
||||||
|
.await?;
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
pub async fn uncordon_node(&self, node_name: &str) -> Result<(), Error> {
|
||||||
|
Api::<Node>::all(self.client.clone())
|
||||||
|
.uncordon(node_name)
|
||||||
|
.await?;
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
pub async fn wait_for_node_ready(&self, node_name: &str) -> Result<(), Error> {
|
||||||
|
self.wait_for_node_ready_with_timeout(node_name, Duration::from_secs(600))
|
||||||
|
.await
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn wait_for_node_ready_with_timeout(
|
||||||
|
&self,
|
||||||
|
node_name: &str,
|
||||||
|
timeout: Duration,
|
||||||
|
) -> Result<(), Error> {
|
||||||
|
let api: Api<Node> = Api::all(self.client.clone());
|
||||||
|
let start = tokio::time::Instant::now();
|
||||||
|
let poll = Duration::from_secs(5);
|
||||||
|
loop {
|
||||||
|
if start.elapsed() > timeout {
|
||||||
|
return Err(Error::Discovery(DiscoveryError::MissingResource(format!(
|
||||||
|
"Node '{node_name}' did not become Ready within {timeout:?}"
|
||||||
|
))));
|
||||||
|
}
|
||||||
|
match api.get(node_name).await {
|
||||||
|
Ok(node) => {
|
||||||
|
if node
|
||||||
|
.status
|
||||||
|
.as_ref()
|
||||||
|
.and_then(|s| s.conditions.as_ref())
|
||||||
|
.map(|conds| {
|
||||||
|
conds
|
||||||
|
.iter()
|
||||||
|
.any(|c| c.type_ == "Ready" && c.status == "True")
|
||||||
|
})
|
||||||
|
.unwrap_or(false)
|
||||||
|
{
|
||||||
|
debug!("Node '{node_name}' is Ready");
|
||||||
|
return Ok(());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
Err(e) => debug!("Error polling node '{node_name}': {e}"),
|
||||||
|
}
|
||||||
|
sleep(poll).await;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn wait_for_node_not_ready(
|
||||||
|
&self,
|
||||||
|
node_name: &str,
|
||||||
|
timeout: Duration,
|
||||||
|
) -> Result<(), Error> {
|
||||||
|
let api: Api<Node> = Api::all(self.client.clone());
|
||||||
|
let start = tokio::time::Instant::now();
|
||||||
|
let poll = Duration::from_secs(5);
|
||||||
|
loop {
|
||||||
|
if start.elapsed() > timeout {
|
||||||
|
return Err(Error::Discovery(DiscoveryError::MissingResource(format!(
|
||||||
|
"Node '{node_name}' did not become NotReady within {timeout:?}"
|
||||||
|
))));
|
||||||
|
}
|
||||||
|
match api.get(node_name).await {
|
||||||
|
Ok(node) => {
|
||||||
|
let is_ready = node
|
||||||
|
.status
|
||||||
|
.as_ref()
|
||||||
|
.and_then(|s| s.conditions.as_ref())
|
||||||
|
.map(|conds| {
|
||||||
|
conds
|
||||||
|
.iter()
|
||||||
|
.any(|c| c.type_ == "Ready" && c.status == "True")
|
||||||
|
})
|
||||||
|
.unwrap_or(false);
|
||||||
|
if !is_ready {
|
||||||
|
debug!("Node '{node_name}' is NotReady");
|
||||||
|
return Ok(());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
Err(e) => debug!("Error polling node '{node_name}': {e}"),
|
||||||
|
}
|
||||||
|
sleep(poll).await;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn list_pods_on_node(&self, node_name: &str) -> Result<Vec<Pod>, Error> {
|
||||||
|
let api: Api<Pod> = Api::all(self.client.clone());
|
||||||
|
Ok(api
|
||||||
|
.list(&ListParams::default().fields(&format!("spec.nodeName={node_name}")))
|
||||||
|
.await?
|
||||||
|
.items)
|
||||||
|
}
|
||||||
|
|
||||||
|
fn is_mirror_pod(pod: &Pod) -> bool {
|
||||||
|
pod.metadata
|
||||||
|
.annotations
|
||||||
|
.as_ref()
|
||||||
|
.map(|a| a.contains_key("kubernetes.io/config.mirror"))
|
||||||
|
.unwrap_or(false)
|
||||||
|
}
|
||||||
|
|
||||||
|
fn is_daemonset_pod(pod: &Pod) -> bool {
|
||||||
|
pod.metadata
|
||||||
|
.owner_references
|
||||||
|
.as_ref()
|
||||||
|
.map(|refs| refs.iter().any(|r| r.kind == "DaemonSet"))
|
||||||
|
.unwrap_or(false)
|
||||||
|
}
|
||||||
|
|
||||||
|
fn has_emptydir_volume(pod: &Pod) -> bool {
|
||||||
|
pod.spec
|
||||||
|
.as_ref()
|
||||||
|
.and_then(|s| s.volumes.as_ref())
|
||||||
|
.map(|vols| vols.iter().any(|v| v.empty_dir.is_some()))
|
||||||
|
.unwrap_or(false)
|
||||||
|
}
|
||||||
|
|
||||||
|
fn is_completed_pod(pod: &Pod) -> bool {
|
||||||
|
pod.status
|
||||||
|
.as_ref()
|
||||||
|
.and_then(|s| s.phase.as_deref())
|
||||||
|
.map(|phase| phase == "Succeeded" || phase == "Failed")
|
||||||
|
.unwrap_or(false)
|
||||||
|
}
|
||||||
|
|
||||||
|
fn classify_pods_for_drain(
|
||||||
|
pods: &[Pod],
|
||||||
|
options: &DrainOptions,
|
||||||
|
) -> Result<(Vec<Pod>, Vec<String>), String> {
|
||||||
|
let mut evictable = Vec::new();
|
||||||
|
let mut skipped = Vec::new();
|
||||||
|
let mut blocking = Vec::new();
|
||||||
|
|
||||||
|
for pod in pods {
|
||||||
|
let name = pod.metadata.name.as_deref().unwrap_or("<unknown>");
|
||||||
|
let ns = pod.metadata.namespace.as_deref().unwrap_or("<unknown>");
|
||||||
|
let qualified = format!("{ns}/{name}");
|
||||||
|
|
||||||
|
if Self::is_mirror_pod(pod) {
|
||||||
|
skipped.push(format!("{qualified} (mirror pod)"));
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
if Self::is_completed_pod(pod) {
|
||||||
|
skipped.push(format!("{qualified} (completed)"));
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
if Self::is_daemonset_pod(pod) {
|
||||||
|
if options.ignore_daemonsets {
|
||||||
|
skipped.push(format!("{qualified} (DaemonSet-managed)"));
|
||||||
|
} else {
|
||||||
|
blocking.push(format!(
|
||||||
|
"{qualified} is managed by a DaemonSet (set ignore_daemonsets to skip)"
|
||||||
|
));
|
||||||
|
}
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
if Self::has_emptydir_volume(pod) && !options.delete_emptydir_data {
|
||||||
|
blocking.push(format!(
|
||||||
|
"{qualified} uses emptyDir volumes (set delete_emptydir_data to allow eviction)"
|
||||||
|
));
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
evictable.push(pod.clone());
|
||||||
|
}
|
||||||
|
|
||||||
|
if !blocking.is_empty() {
|
||||||
|
return Err(format!(
|
||||||
|
"Cannot drain node — the following pods block eviction:\n - {}",
|
||||||
|
blocking.join("\n - ")
|
||||||
|
));
|
||||||
|
}
|
||||||
|
Ok((evictable, skipped))
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn evict_pod(&self, pod: &Pod) -> Result<(), Error> {
|
||||||
|
let name = pod.metadata.name.as_deref().unwrap_or_default();
|
||||||
|
let ns = pod.metadata.namespace.as_deref().unwrap_or_default();
|
||||||
|
debug!("Evicting pod {ns}/{name}");
|
||||||
|
Api::<Pod>::namespaced(self.client.clone(), ns)
|
||||||
|
.evict(name, &EvictParams::default())
|
||||||
|
.await
|
||||||
|
.map(|_| ())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Drains a node: cordon → classify → evict & wait.
|
||||||
|
pub async fn drain_node(&self, node_name: &str, options: &DrainOptions) -> Result<(), Error> {
|
||||||
|
debug!("Cordoning '{node_name}'");
|
||||||
|
self.cordon_node(node_name).await?;
|
||||||
|
|
||||||
|
let pods = self.list_pods_on_node(node_name).await?;
|
||||||
|
debug!("Found {} pod(s) on '{node_name}'", pods.len());
|
||||||
|
|
||||||
|
let (evictable, skipped) =
|
||||||
|
Self::classify_pods_for_drain(&pods, options).map_err(|msg| {
|
||||||
|
error!("{msg}");
|
||||||
|
Error::Discovery(DiscoveryError::MissingResource(msg))
|
||||||
|
})?;
|
||||||
|
|
||||||
|
for s in &skipped {
|
||||||
|
info!("Skipping pod: {s}");
|
||||||
|
}
|
||||||
|
if evictable.is_empty() {
|
||||||
|
info!("No pods to evict on '{node_name}'");
|
||||||
|
return Ok(());
|
||||||
|
}
|
||||||
|
info!("Evicting {} pod(s) from '{node_name}'", evictable.len());
|
||||||
|
|
||||||
|
let mut start = tokio::time::Instant::now();
|
||||||
|
let poll = Duration::from_secs(5);
|
||||||
|
let mut pending = evictable;
|
||||||
|
|
||||||
|
loop {
|
||||||
|
for pod in &pending {
|
||||||
|
match self.evict_pod(pod).await {
|
||||||
|
Ok(()) => {}
|
||||||
|
Err(Error::Api(ErrorResponse { code: 404, .. })) => {}
|
||||||
|
Err(Error::Api(ErrorResponse { code: 429, .. })) => {
|
||||||
|
warn!(
|
||||||
|
"PDB blocked eviction of {}/{}; will retry",
|
||||||
|
pod.metadata.namespace.as_deref().unwrap_or(""),
|
||||||
|
pod.metadata.name.as_deref().unwrap_or("")
|
||||||
|
);
|
||||||
|
}
|
||||||
|
Err(e) => {
|
||||||
|
error!(
|
||||||
|
"Failed to evict {}/{}: {e}",
|
||||||
|
pod.metadata.namespace.as_deref().unwrap_or(""),
|
||||||
|
pod.metadata.name.as_deref().unwrap_or("")
|
||||||
|
);
|
||||||
|
return Err(e);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
sleep(poll).await;
|
||||||
|
|
||||||
|
let mut still_present = Vec::new();
|
||||||
|
for pod in pending {
|
||||||
|
let ns = pod.metadata.namespace.as_deref().unwrap_or_default();
|
||||||
|
let name = pod.metadata.name.as_deref().unwrap_or_default();
|
||||||
|
match self.get_pod(name, Some(ns)).await? {
|
||||||
|
Some(_) => still_present.push(pod),
|
||||||
|
None => debug!("Pod {ns}/{name} evicted"),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
pending = still_present;
|
||||||
|
|
||||||
|
if pending.is_empty() {
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
if start.elapsed() > options.timeout {
|
||||||
|
match helper::prompt_drain_timeout_action(
|
||||||
|
node_name,
|
||||||
|
pending.len(),
|
||||||
|
options.timeout,
|
||||||
|
)? {
|
||||||
|
helper::DrainTimeoutAction::Accept => break,
|
||||||
|
helper::DrainTimeoutAction::Retry => {
|
||||||
|
start = tokio::time::Instant::now();
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
helper::DrainTimeoutAction::Abort => {
|
||||||
|
return Err(Error::Discovery(DiscoveryError::MissingResource(format!(
|
||||||
|
"Drain aborted. {} pod(s) remaining on '{node_name}'",
|
||||||
|
pending.len()
|
||||||
|
))));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
debug!("Waiting for {} pod(s) on '{node_name}'", pending.len());
|
||||||
|
}
|
||||||
|
|
||||||
|
debug!("'{node_name}' drained successfully");
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Safely reboots a node: drain → reboot → wait for Ready → uncordon.
|
||||||
|
pub async fn reboot_node(
|
||||||
|
&self,
|
||||||
|
node_name: &str,
|
||||||
|
drain_options: &DrainOptions,
|
||||||
|
timeout: Duration,
|
||||||
|
) -> Result<(), Error> {
|
||||||
|
info!("Starting reboot for '{node_name}'");
|
||||||
|
let node_api: Api<Node> = Api::all(self.client.clone());
|
||||||
|
|
||||||
|
let boot_id_before = node_api
|
||||||
|
.get(node_name)
|
||||||
|
.await?
|
||||||
|
.status
|
||||||
|
.as_ref()
|
||||||
|
.and_then(|s| s.node_info.as_ref())
|
||||||
|
.map(|ni| ni.boot_id.clone())
|
||||||
|
.ok_or_else(|| {
|
||||||
|
Error::Discovery(DiscoveryError::MissingResource(format!(
|
||||||
|
"Node '{node_name}' has no boot_id in status"
|
||||||
|
)))
|
||||||
|
})?;
|
||||||
|
|
||||||
|
info!("Draining '{node_name}'");
|
||||||
|
self.drain_node(node_name, drain_options).await?;
|
||||||
|
|
||||||
|
let start = tokio::time::Instant::now();
|
||||||
|
|
||||||
|
info!("Scheduling reboot for '{node_name}'");
|
||||||
|
let reboot_cmd =
|
||||||
|
"echo rebooting ; nohup bash -c 'sleep 5 && nsenter -t 1 -m -- systemctl reboot'";
|
||||||
|
match self
|
||||||
|
.run_privileged_command_on_node(node_name, reboot_cmd)
|
||||||
|
.await
|
||||||
|
{
|
||||||
|
Ok(_) => debug!("Reboot command dispatched"),
|
||||||
|
Err(e) => debug!("Reboot command error (expected if node began shutdown): {e}"),
|
||||||
|
}
|
||||||
|
|
||||||
|
info!("Waiting for '{node_name}' to begin shutdown");
|
||||||
|
self.wait_for_node_not_ready(node_name, timeout.saturating_sub(start.elapsed()))
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
if start.elapsed() > timeout {
|
||||||
|
return Err(Error::Discovery(DiscoveryError::MissingResource(format!(
|
||||||
|
"Timeout during reboot of '{node_name}' (shutdown phase)"
|
||||||
|
))));
|
||||||
|
}
|
||||||
|
|
||||||
|
info!("Waiting for '{node_name}' to come back online");
|
||||||
|
self.wait_for_node_ready_with_timeout(node_name, timeout.saturating_sub(start.elapsed()))
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
if start.elapsed() > timeout {
|
||||||
|
return Err(Error::Discovery(DiscoveryError::MissingResource(format!(
|
||||||
|
"Timeout during reboot of '{node_name}' (ready phase)"
|
||||||
|
))));
|
||||||
|
}
|
||||||
|
|
||||||
|
let boot_id_after = node_api
|
||||||
|
.get(node_name)
|
||||||
|
.await?
|
||||||
|
.status
|
||||||
|
.as_ref()
|
||||||
|
.and_then(|s| s.node_info.as_ref())
|
||||||
|
.map(|ni| ni.boot_id.clone())
|
||||||
|
.ok_or_else(|| {
|
||||||
|
Error::Discovery(DiscoveryError::MissingResource(format!(
|
||||||
|
"Node '{node_name}' has no boot_id after reboot"
|
||||||
|
)))
|
||||||
|
})?;
|
||||||
|
|
||||||
|
if boot_id_before == boot_id_after {
|
||||||
|
return Err(Error::Discovery(DiscoveryError::MissingResource(format!(
|
||||||
|
"Node '{node_name}' did not actually reboot (boot_id unchanged: {boot_id_before})"
|
||||||
|
))));
|
||||||
|
}
|
||||||
|
|
||||||
|
info!("'{node_name}' rebooted ({boot_id_before} → {boot_id_after})");
|
||||||
|
self.uncordon_node(node_name).await?;
|
||||||
|
info!("'{node_name}' reboot complete ({:?})", start.elapsed());
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Write a set of files to a node's filesystem via a privileged ephemeral pod.
|
||||||
|
pub async fn write_files_to_node(
|
||||||
|
&self,
|
||||||
|
node_name: &str,
|
||||||
|
files: &[NodeFile],
|
||||||
|
) -> Result<String, Error> {
|
||||||
|
let ns = self.client.default_namespace();
|
||||||
|
let suffix = SystemTime::now()
|
||||||
|
.duration_since(UNIX_EPOCH)
|
||||||
|
.unwrap()
|
||||||
|
.as_millis();
|
||||||
|
let name = format!("harmony-k8s-writer-{suffix}");
|
||||||
|
|
||||||
|
debug!("Writing {} file(s) to '{node_name}'", files.len());
|
||||||
|
|
||||||
|
let mut data = BTreeMap::new();
|
||||||
|
let mut script = String::from("set -e\n");
|
||||||
|
for (i, file) in files.iter().enumerate() {
|
||||||
|
let key = format!("f{i}");
|
||||||
|
data.insert(key.clone(), file.content.clone());
|
||||||
|
script.push_str(&format!("mkdir -p \"$(dirname \"/host{}\")\"\n", file.path));
|
||||||
|
script.push_str(&format!("cp \"/payload/{key}\" \"/host{}\"\n", file.path));
|
||||||
|
script.push_str(&format!("chmod {:o} \"/host{}\"\n", file.mode, file.path));
|
||||||
|
}
|
||||||
|
|
||||||
|
let cm = ConfigMap {
|
||||||
|
metadata: ObjectMeta {
|
||||||
|
name: Some(name.clone()),
|
||||||
|
namespace: Some(ns.to_string()),
|
||||||
|
..Default::default()
|
||||||
|
},
|
||||||
|
data: Some(data),
|
||||||
|
..Default::default()
|
||||||
|
};
|
||||||
|
|
||||||
|
let cm_api: Api<ConfigMap> = Api::namespaced(self.client.clone(), ns);
|
||||||
|
cm_api.create(&PostParams::default(), &cm).await?;
|
||||||
|
debug!("Created ConfigMap '{name}'");
|
||||||
|
|
||||||
|
let (host_vol, host_mount) = helper::host_root_volume();
|
||||||
|
let payload_vol = Volume {
|
||||||
|
name: "payload".to_string(),
|
||||||
|
config_map: Some(ConfigMapVolumeSource {
|
||||||
|
name: name.clone(),
|
||||||
|
..Default::default()
|
||||||
|
}),
|
||||||
|
..Default::default()
|
||||||
|
};
|
||||||
|
let payload_mount = VolumeMount {
|
||||||
|
name: "payload".to_string(),
|
||||||
|
mount_path: "/payload".to_string(),
|
||||||
|
..Default::default()
|
||||||
|
};
|
||||||
|
|
||||||
|
let bundle = helper::build_privileged_bundle(
|
||||||
|
PrivilegedPodConfig {
|
||||||
|
name: name.clone(),
|
||||||
|
namespace: ns.to_string(),
|
||||||
|
node_name: node_name.to_string(),
|
||||||
|
container_name: "writer".to_string(),
|
||||||
|
command: vec!["/bin/bash".to_string(), "-c".to_string(), script],
|
||||||
|
volumes: vec![payload_vol, host_vol],
|
||||||
|
volume_mounts: vec![payload_mount, host_mount],
|
||||||
|
host_pid: false,
|
||||||
|
host_network: false,
|
||||||
|
},
|
||||||
|
&self.get_k8s_distribution().await?,
|
||||||
|
);
|
||||||
|
|
||||||
|
bundle.apply(self).await?;
|
||||||
|
debug!("Created privileged pod bundle '{name}'");
|
||||||
|
|
||||||
|
let result = self.wait_for_pod_completion(&name, ns).await;
|
||||||
|
|
||||||
|
debug!("Cleaning up '{name}'");
|
||||||
|
let _ = bundle.delete(self).await;
|
||||||
|
let _ = cm_api.delete(&name, &DeleteParams::default()).await;
|
||||||
|
|
||||||
|
result
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Run a privileged command on a node via an ephemeral pod.
|
||||||
|
pub async fn run_privileged_command_on_node(
|
||||||
|
&self,
|
||||||
|
node_name: &str,
|
||||||
|
command: &str,
|
||||||
|
) -> Result<String, Error> {
|
||||||
|
let namespace = self.client.default_namespace();
|
||||||
|
let suffix = SystemTime::now()
|
||||||
|
.duration_since(UNIX_EPOCH)
|
||||||
|
.unwrap()
|
||||||
|
.as_millis();
|
||||||
|
let name = format!("harmony-k8s-cmd-{suffix}");
|
||||||
|
|
||||||
|
debug!("Running privileged command on '{node_name}': {command}");
|
||||||
|
|
||||||
|
let (host_vol, host_mount) = helper::host_root_volume();
|
||||||
|
let bundle = helper::build_privileged_bundle(
|
||||||
|
PrivilegedPodConfig {
|
||||||
|
name: name.clone(),
|
||||||
|
namespace: namespace.to_string(),
|
||||||
|
node_name: node_name.to_string(),
|
||||||
|
container_name: "runner".to_string(),
|
||||||
|
command: vec![
|
||||||
|
"/bin/bash".to_string(),
|
||||||
|
"-c".to_string(),
|
||||||
|
command.to_string(),
|
||||||
|
],
|
||||||
|
volumes: vec![host_vol],
|
||||||
|
volume_mounts: vec![host_mount],
|
||||||
|
host_pid: true,
|
||||||
|
host_network: true,
|
||||||
|
},
|
||||||
|
&self.get_k8s_distribution().await?,
|
||||||
|
);
|
||||||
|
|
||||||
|
bundle.apply(self).await?;
|
||||||
|
debug!("Privileged pod '{name}' created");
|
||||||
|
|
||||||
|
let result = self.wait_for_pod_completion(&name, namespace).await;
|
||||||
|
|
||||||
|
debug!("Cleaning up '{name}'");
|
||||||
|
let _ = bundle.delete(self).await;
|
||||||
|
|
||||||
|
result
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ── Tests ────────────────────────────────────────────────────────────────────
|
||||||
|
|
||||||
|
#[cfg(test)]
mod tests {
    //! Unit tests for `K8sClient::classify_pods_for_drain` (defined elsewhere
    //! in this module). The classifier partitions a pod list into
    //! (evictable, skipped-with-reason) and errors when a pod blocks the drain.

    use k8s_openapi::api::core::v1::{EmptyDirVolumeSource, PodSpec, PodStatus, Volume};
    use k8s_openapi::apimachinery::pkg::apis::meta::v1::{ObjectMeta, OwnerReference};

    use super::*;

    // ── Fixtures ─────────────────────────────────────────────────────────

    /// Minimal "Running" pod with name + namespace: the evictable baseline.
    fn base_pod(name: &str, ns: &str) -> Pod {
        Pod {
            metadata: ObjectMeta {
                name: Some(name.to_string()),
                namespace: Some(ns.to_string()),
                ..Default::default()
            },
            spec: Some(PodSpec::default()),
            status: Some(PodStatus {
                phase: Some("Running".to_string()),
                ..Default::default()
            }),
        }
    }

    /// Static (kubelet-managed) pod: carries the mirror-pod annotation.
    fn mirror_pod(name: &str, ns: &str) -> Pod {
        let mut pod = base_pod(name, ns);
        pod.metadata.annotations = Some(std::collections::BTreeMap::from([(
            "kubernetes.io/config.mirror".to_string(),
            "abc123".to_string(),
        )]));
        pod
    }

    /// Pod owned by a DaemonSet controller (via ownerReferences).
    fn daemonset_pod(name: &str, ns: &str) -> Pod {
        let mut pod = base_pod(name, ns);
        pod.metadata.owner_references = Some(vec![OwnerReference {
            api_version: "apps/v1".to_string(),
            kind: "DaemonSet".to_string(),
            name: "some-ds".to_string(),
            uid: "uid-ds".to_string(),
            ..Default::default()
        }]);
        pod
    }

    /// Pod mounting an `emptyDir` volume (data lost on eviction).
    fn emptydir_pod(name: &str, ns: &str) -> Pod {
        let mut pod = base_pod(name, ns);
        pod.spec = Some(PodSpec {
            volumes: Some(vec![Volume {
                name: "scratch".to_string(),
                empty_dir: Some(EmptyDirVolumeSource::default()),
                ..Default::default()
            }]),
            ..Default::default()
        });
        pod
    }

    /// Pod in a terminal phase (`"Succeeded"` or `"Failed"`).
    fn completed_pod(name: &str, ns: &str, phase: &str) -> Pod {
        let mut pod = base_pod(name, ns);
        pod.status = Some(PodStatus {
            phase: Some(phase.to_string()),
            ..Default::default()
        });
        pod
    }

    fn default_opts() -> DrainOptions {
        DrainOptions::default()
    }

    // All test bodies are identical to the original — only the module path changed.

    // ── Basic classification ─────────────────────────────────────────────

    #[test]
    fn empty_pod_list_returns_empty_vecs() {
        let (e, s) = K8sClient::classify_pods_for_drain(&[], &default_opts()).unwrap();
        assert!(e.is_empty());
        assert!(s.is_empty());
    }

    #[test]
    fn normal_pod_is_evictable() {
        let pods = vec![base_pod("web", "default")];
        let (e, s) = K8sClient::classify_pods_for_drain(&pods, &default_opts()).unwrap();
        assert_eq!(e.len(), 1);
        assert!(s.is_empty());
    }

    #[test]
    fn mirror_pod_is_skipped() {
        let pods = vec![mirror_pod("kube-apiserver", "kube-system")];
        let (e, s) = K8sClient::classify_pods_for_drain(&pods, &default_opts()).unwrap();
        assert!(e.is_empty());
        assert!(s[0].contains("mirror pod"));
    }

    #[test]
    fn completed_pods_are_skipped() {
        // Both terminal phases are treated the same way.
        for phase in ["Succeeded", "Failed"] {
            let pods = vec![completed_pod("job", "batch", phase)];
            let (e, s) = K8sClient::classify_pods_for_drain(&pods, &default_opts()).unwrap();
            assert!(e.is_empty());
            assert!(s[0].contains("completed"));
        }
    }

    // ── DaemonSet / emptyDir option handling ─────────────────────────────

    #[test]
    fn daemonset_skipped_when_ignored() {
        let pods = vec![daemonset_pod("fluentd", "logging")];
        let opts = DrainOptions {
            ignore_daemonsets: true,
            ..default_opts()
        };
        let (e, s) = K8sClient::classify_pods_for_drain(&pods, &opts).unwrap();
        assert!(e.is_empty());
        assert!(s[0].contains("DaemonSet-managed"));
    }

    #[test]
    fn daemonset_blocks_when_not_ignored() {
        let pods = vec![daemonset_pod("fluentd", "logging")];
        let opts = DrainOptions {
            ignore_daemonsets: false,
            ..default_opts()
        };
        let err = K8sClient::classify_pods_for_drain(&pods, &opts).unwrap_err();
        // Error message must identify both the cause and the offending pod.
        assert!(err.contains("DaemonSet") && err.contains("logging/fluentd"));
    }

    #[test]
    fn emptydir_blocks_without_flag() {
        let pods = vec![emptydir_pod("cache", "default")];
        let opts = DrainOptions {
            delete_emptydir_data: false,
            ..default_opts()
        };
        let err = K8sClient::classify_pods_for_drain(&pods, &opts).unwrap_err();
        assert!(err.contains("emptyDir") && err.contains("default/cache"));
    }

    #[test]
    fn emptydir_evictable_with_flag() {
        let pods = vec![emptydir_pod("cache", "default")];
        let opts = DrainOptions {
            delete_emptydir_data: true,
            ..default_opts()
        };
        let (e, s) = K8sClient::classify_pods_for_drain(&pods, &opts).unwrap();
        assert_eq!(e.len(), 1);
        assert!(s.is_empty());
    }

    #[test]
    fn multiple_blocking_all_reported() {
        // When several pods block the drain, the error lists all of them.
        let pods = vec![daemonset_pod("ds", "ns1"), emptydir_pod("ed", "ns2")];
        let opts = DrainOptions {
            ignore_daemonsets: false,
            delete_emptydir_data: false,
            ..default_opts()
        };
        let err = K8sClient::classify_pods_for_drain(&pods, &opts).unwrap_err();
        assert!(err.contains("ns1/ds") && err.contains("ns2/ed"));
    }

    #[test]
    fn mixed_pods_classified_correctly() {
        let pods = vec![
            base_pod("web", "default"),
            mirror_pod("kube-apiserver", "kube-system"),
            daemonset_pod("fluentd", "logging"),
            completed_pod("job", "batch", "Succeeded"),
            base_pod("api", "default"),
        ];
        let (e, s) = K8sClient::classify_pods_for_drain(&pods, &default_opts()).unwrap();
        // Input order of evictable pods is preserved.
        let names: Vec<&str> = e
            .iter()
            .map(|p| p.metadata.name.as_deref().unwrap())
            .collect();
        assert_eq!(names, vec!["web", "api"]);
        assert_eq!(s.len(), 3);
    }

    // ── Check-ordering guarantees ────────────────────────────────────────

    #[test]
    fn mirror_checked_before_completed() {
        // A mirror pod in a terminal phase must still be reported as "mirror".
        let mut pod = mirror_pod("static-etcd", "kube-system");
        pod.status = Some(PodStatus {
            phase: Some("Succeeded".to_string()),
            ..Default::default()
        });
        let (_, s) = K8sClient::classify_pods_for_drain(&[pod], &default_opts()).unwrap();
        assert!(s[0].contains("mirror pod"), "got: {}", s[0]);
    }

    #[test]
    fn completed_checked_before_daemonset() {
        // A terminated DaemonSet pod is reported as "completed", not DS-skipped.
        let mut pod = daemonset_pod("collector", "monitoring");
        pod.status = Some(PodStatus {
            phase: Some("Failed".to_string()),
            ..Default::default()
        });
        let (_, s) = K8sClient::classify_pods_for_drain(&[pod], &default_opts()).unwrap();
        assert!(s[0].contains("completed"), "got: {}", s[0]);
    }
}
|
||||||
193
harmony-k8s/src/pod.rs
Normal file
193
harmony-k8s/src/pod.rs
Normal file
@@ -0,0 +1,193 @@
|
|||||||
|
use std::time::Duration;
|
||||||
|
|
||||||
|
use k8s_openapi::api::core::v1::Pod;
|
||||||
|
use kube::{
|
||||||
|
Error,
|
||||||
|
api::{Api, AttachParams, ListParams},
|
||||||
|
error::DiscoveryError,
|
||||||
|
runtime::reflector::Lookup,
|
||||||
|
};
|
||||||
|
use log::debug;
|
||||||
|
use tokio::io::AsyncReadExt;
|
||||||
|
use tokio::time::sleep;
|
||||||
|
|
||||||
|
use crate::client::K8sClient;
|
||||||
|
|
||||||
|
impl K8sClient {
|
||||||
|
pub async fn get_pod(&self, name: &str, namespace: Option<&str>) -> Result<Option<Pod>, Error> {
|
||||||
|
let api: Api<Pod> = match namespace {
|
||||||
|
Some(ns) => Api::namespaced(self.client.clone(), ns),
|
||||||
|
None => Api::default_namespaced(self.client.clone()),
|
||||||
|
};
|
||||||
|
api.get_opt(name).await
|
||||||
|
}
|
||||||
|
|
||||||
|
pub async fn wait_for_pod_ready(
|
||||||
|
&self,
|
||||||
|
pod_name: &str,
|
||||||
|
namespace: Option<&str>,
|
||||||
|
) -> Result<(), Error> {
|
||||||
|
let mut elapsed = 0u64;
|
||||||
|
let interval = 5u64;
|
||||||
|
let timeout_secs = 120u64;
|
||||||
|
loop {
|
||||||
|
if let Some(p) = self.get_pod(pod_name, namespace).await? {
|
||||||
|
if let Some(phase) = p.status.and_then(|s| s.phase) {
|
||||||
|
if phase.to_lowercase() == "running" {
|
||||||
|
return Ok(());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if elapsed >= timeout_secs {
|
||||||
|
return Err(Error::Discovery(DiscoveryError::MissingResource(format!(
|
||||||
|
"Pod '{}' in '{}' did not become ready within {timeout_secs}s",
|
||||||
|
pod_name,
|
||||||
|
namespace.unwrap_or("<default>"),
|
||||||
|
))));
|
||||||
|
}
|
||||||
|
sleep(Duration::from_secs(interval)).await;
|
||||||
|
elapsed += interval;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Polls a pod until it reaches `Succeeded` or `Failed`, then returns its
|
||||||
|
/// logs. Used internally by node operations.
|
||||||
|
pub(crate) async fn wait_for_pod_completion(
|
||||||
|
&self,
|
||||||
|
name: &str,
|
||||||
|
namespace: &str,
|
||||||
|
) -> Result<String, Error> {
|
||||||
|
let api: Api<Pod> = Api::namespaced(self.client.clone(), namespace);
|
||||||
|
let poll_interval = Duration::from_secs(2);
|
||||||
|
for _ in 0..60 {
|
||||||
|
sleep(poll_interval).await;
|
||||||
|
let p = api.get(name).await?;
|
||||||
|
match p.status.and_then(|s| s.phase).as_deref() {
|
||||||
|
Some("Succeeded") => {
|
||||||
|
let logs = api
|
||||||
|
.logs(name, &Default::default())
|
||||||
|
.await
|
||||||
|
.unwrap_or_default();
|
||||||
|
debug!("Pod {namespace}/{name} succeeded. Logs: {logs}");
|
||||||
|
return Ok(logs);
|
||||||
|
}
|
||||||
|
Some("Failed") => {
|
||||||
|
let logs = api
|
||||||
|
.logs(name, &Default::default())
|
||||||
|
.await
|
||||||
|
.unwrap_or_default();
|
||||||
|
debug!("Pod {namespace}/{name} failed. Logs: {logs}");
|
||||||
|
return Err(Error::Discovery(DiscoveryError::MissingResource(format!(
|
||||||
|
"Pod '{name}' failed.\n{logs}"
|
||||||
|
))));
|
||||||
|
}
|
||||||
|
_ => {}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
Err(Error::Discovery(DiscoveryError::MissingResource(format!(
|
||||||
|
"Timed out waiting for pod '{name}'"
|
||||||
|
))))
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Execute a command in the first pod matching `{label}={name}`.
|
||||||
|
pub async fn exec_app_capture_output(
|
||||||
|
&self,
|
||||||
|
name: String,
|
||||||
|
label: String,
|
||||||
|
namespace: Option<&str>,
|
||||||
|
command: Vec<&str>,
|
||||||
|
) -> Result<String, String> {
|
||||||
|
let api: Api<Pod> = match namespace {
|
||||||
|
Some(ns) => Api::namespaced(self.client.clone(), ns),
|
||||||
|
None => Api::default_namespaced(self.client.clone()),
|
||||||
|
};
|
||||||
|
let pod_list = api
|
||||||
|
.list(&ListParams::default().labels(&format!("{label}={name}")))
|
||||||
|
.await
|
||||||
|
.expect("Failed to list pods");
|
||||||
|
|
||||||
|
let pod_name = pod_list
|
||||||
|
.items
|
||||||
|
.first()
|
||||||
|
.expect("No matching pod")
|
||||||
|
.name()
|
||||||
|
.expect("Pod has no name")
|
||||||
|
.into_owned();
|
||||||
|
|
||||||
|
match api
|
||||||
|
.exec(
|
||||||
|
&pod_name,
|
||||||
|
command,
|
||||||
|
&AttachParams::default().stdout(true).stderr(true),
|
||||||
|
)
|
||||||
|
.await
|
||||||
|
{
|
||||||
|
Err(e) => Err(e.to_string()),
|
||||||
|
Ok(mut process) => {
|
||||||
|
let status = process
|
||||||
|
.take_status()
|
||||||
|
.expect("No status handle")
|
||||||
|
.await
|
||||||
|
.expect("Status channel closed");
|
||||||
|
|
||||||
|
if let Some(s) = status.status {
|
||||||
|
let mut buf = String::new();
|
||||||
|
if let Some(mut stdout) = process.stdout() {
|
||||||
|
stdout
|
||||||
|
.read_to_string(&mut buf)
|
||||||
|
.await
|
||||||
|
.map_err(|e| format!("Failed to read stdout: {e}"))?;
|
||||||
|
}
|
||||||
|
debug!("exec status: {} - {:?}", s, status.details);
|
||||||
|
if s == "Success" { Ok(buf) } else { Err(s) }
|
||||||
|
} else {
|
||||||
|
Err("No inner status from pod exec".to_string())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Execute a command in the first pod matching
|
||||||
|
/// `app.kubernetes.io/name={name}`.
|
||||||
|
pub async fn exec_app(
|
||||||
|
&self,
|
||||||
|
name: String,
|
||||||
|
namespace: Option<&str>,
|
||||||
|
command: Vec<&str>,
|
||||||
|
) -> Result<(), String> {
|
||||||
|
let api: Api<Pod> = match namespace {
|
||||||
|
Some(ns) => Api::namespaced(self.client.clone(), ns),
|
||||||
|
None => Api::default_namespaced(self.client.clone()),
|
||||||
|
};
|
||||||
|
let pod_list = api
|
||||||
|
.list(&ListParams::default().labels(&format!("app.kubernetes.io/name={name}")))
|
||||||
|
.await
|
||||||
|
.expect("Failed to list pods");
|
||||||
|
|
||||||
|
let pod_name = pod_list
|
||||||
|
.items
|
||||||
|
.first()
|
||||||
|
.expect("No matching pod")
|
||||||
|
.name()
|
||||||
|
.expect("Pod has no name")
|
||||||
|
.into_owned();
|
||||||
|
|
||||||
|
match api.exec(&pod_name, command, &AttachParams::default()).await {
|
||||||
|
Err(e) => Err(e.to_string()),
|
||||||
|
Ok(mut process) => {
|
||||||
|
let status = process
|
||||||
|
.take_status()
|
||||||
|
.expect("No status handle")
|
||||||
|
.await
|
||||||
|
.expect("Status channel closed");
|
||||||
|
|
||||||
|
if let Some(s) = status.status {
|
||||||
|
debug!("exec status: {} - {:?}", s, status.details);
|
||||||
|
if s == "Success" { Ok(()) } else { Err(s) }
|
||||||
|
} else {
|
||||||
|
Err("No inner status from pod exec".to_string())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
316
harmony-k8s/src/resources.rs
Normal file
316
harmony-k8s/src/resources.rs
Normal file
@@ -0,0 +1,316 @@
|
|||||||
|
use std::collections::HashMap;
|
||||||
|
|
||||||
|
use k8s_openapi::api::{
|
||||||
|
apps::v1::Deployment,
|
||||||
|
core::v1::{Node, ServiceAccount},
|
||||||
|
};
|
||||||
|
use k8s_openapi::apiextensions_apiserver::pkg::apis::apiextensions::v1::CustomResourceDefinition;
|
||||||
|
use kube::api::ApiResource;
|
||||||
|
use kube::{
|
||||||
|
Error, Resource,
|
||||||
|
api::{Api, DynamicObject, GroupVersionKind, ListParams, ObjectList},
|
||||||
|
runtime::conditions,
|
||||||
|
runtime::wait::await_condition,
|
||||||
|
};
|
||||||
|
use log::debug;
|
||||||
|
use serde::de::DeserializeOwned;
|
||||||
|
use serde_json::Value;
|
||||||
|
use std::time::Duration;
|
||||||
|
|
||||||
|
use crate::client::K8sClient;
|
||||||
|
use crate::types::ScopeResolver;
|
||||||
|
|
||||||
|
impl K8sClient {
|
||||||
|
pub async fn has_healthy_deployment_with_label(
|
||||||
|
&self,
|
||||||
|
namespace: &str,
|
||||||
|
label_selector: &str,
|
||||||
|
) -> Result<bool, Error> {
|
||||||
|
let api: Api<Deployment> = Api::namespaced(self.client.clone(), namespace);
|
||||||
|
let list = api
|
||||||
|
.list(&ListParams::default().labels(label_selector))
|
||||||
|
.await?;
|
||||||
|
for d in list.items {
|
||||||
|
let available = d
|
||||||
|
.status
|
||||||
|
.as_ref()
|
||||||
|
.and_then(|s| s.available_replicas)
|
||||||
|
.unwrap_or(0);
|
||||||
|
if available > 0 {
|
||||||
|
return Ok(true);
|
||||||
|
}
|
||||||
|
if let Some(conds) = d.status.as_ref().and_then(|s| s.conditions.as_ref()) {
|
||||||
|
if conds
|
||||||
|
.iter()
|
||||||
|
.any(|c| c.type_ == "Available" && c.status == "True")
|
||||||
|
{
|
||||||
|
return Ok(true);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
Ok(false)
|
||||||
|
}
|
||||||
|
|
||||||
|
pub async fn list_namespaces_with_healthy_deployments(
|
||||||
|
&self,
|
||||||
|
label_selector: &str,
|
||||||
|
) -> Result<Vec<String>, Error> {
|
||||||
|
let api: Api<Deployment> = Api::all(self.client.clone());
|
||||||
|
let list = api
|
||||||
|
.list(&ListParams::default().labels(label_selector))
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
let mut healthy_ns: HashMap<String, bool> = HashMap::new();
|
||||||
|
for d in list.items {
|
||||||
|
let ns = match d.metadata.namespace.clone() {
|
||||||
|
Some(n) => n,
|
||||||
|
None => continue,
|
||||||
|
};
|
||||||
|
let available = d
|
||||||
|
.status
|
||||||
|
.as_ref()
|
||||||
|
.and_then(|s| s.available_replicas)
|
||||||
|
.unwrap_or(0);
|
||||||
|
let is_healthy = if available > 0 {
|
||||||
|
true
|
||||||
|
} else {
|
||||||
|
d.status
|
||||||
|
.as_ref()
|
||||||
|
.and_then(|s| s.conditions.as_ref())
|
||||||
|
.map(|c| {
|
||||||
|
c.iter()
|
||||||
|
.any(|c| c.type_ == "Available" && c.status == "True")
|
||||||
|
})
|
||||||
|
.unwrap_or(false)
|
||||||
|
};
|
||||||
|
if is_healthy {
|
||||||
|
healthy_ns.insert(ns, true);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
Ok(healthy_ns.into_keys().collect())
|
||||||
|
}
|
||||||
|
|
||||||
|
pub async fn get_controller_service_account_name(
|
||||||
|
&self,
|
||||||
|
ns: &str,
|
||||||
|
) -> Result<Option<String>, Error> {
|
||||||
|
let api: Api<Deployment> = Api::namespaced(self.client.clone(), ns);
|
||||||
|
let list = api
|
||||||
|
.list(&ListParams::default().labels("app.kubernetes.io/component=controller"))
|
||||||
|
.await?;
|
||||||
|
if let Some(dep) = list.items.first() {
|
||||||
|
if let Some(sa) = dep
|
||||||
|
.spec
|
||||||
|
.as_ref()
|
||||||
|
.and_then(|s| s.template.spec.as_ref())
|
||||||
|
.and_then(|s| s.service_account_name.clone())
|
||||||
|
{
|
||||||
|
return Ok(Some(sa));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
Ok(None)
|
||||||
|
}
|
||||||
|
|
||||||
|
pub async fn list_clusterrolebindings_json(&self) -> Result<Vec<Value>, Error> {
|
||||||
|
let gvk = GroupVersionKind::gvk("rbac.authorization.k8s.io", "v1", "ClusterRoleBinding");
|
||||||
|
let ar = ApiResource::from_gvk(&gvk);
|
||||||
|
let api: Api<DynamicObject> = Api::all_with(self.client.clone(), &ar);
|
||||||
|
let list = api.list(&ListParams::default()).await?;
|
||||||
|
Ok(list
|
||||||
|
.items
|
||||||
|
.into_iter()
|
||||||
|
.map(|o| serde_json::to_value(&o).unwrap_or(Value::Null))
|
||||||
|
.collect())
|
||||||
|
}
|
||||||
|
|
||||||
|
pub async fn is_service_account_cluster_wide(&self, sa: &str, ns: &str) -> Result<bool, Error> {
|
||||||
|
let sa_user = format!("system:serviceaccount:{ns}:{sa}");
|
||||||
|
for crb in self.list_clusterrolebindings_json().await? {
|
||||||
|
if let Some(subjects) = crb.get("subjects").and_then(|s| s.as_array()) {
|
||||||
|
for subj in subjects {
|
||||||
|
let kind = subj.get("kind").and_then(|v| v.as_str()).unwrap_or("");
|
||||||
|
let name = subj.get("name").and_then(|v| v.as_str()).unwrap_or("");
|
||||||
|
let subj_ns = subj.get("namespace").and_then(|v| v.as_str()).unwrap_or("");
|
||||||
|
if (kind == "ServiceAccount" && name == sa && subj_ns == ns)
|
||||||
|
|| (kind == "User" && name == sa_user)
|
||||||
|
{
|
||||||
|
return Ok(true);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
Ok(false)
|
||||||
|
}
|
||||||
|
|
||||||
|
pub async fn has_crd(&self, name: &str) -> Result<bool, Error> {
|
||||||
|
let api: Api<CustomResourceDefinition> = Api::all(self.client.clone());
|
||||||
|
let crds = api
|
||||||
|
.list(&ListParams::default().fields(&format!("metadata.name={name}")))
|
||||||
|
.await?;
|
||||||
|
Ok(!crds.items.is_empty())
|
||||||
|
}
|
||||||
|
|
||||||
|
pub async fn service_account_api(&self, namespace: &str) -> Api<ServiceAccount> {
|
||||||
|
Api::namespaced(self.client.clone(), namespace)
|
||||||
|
}
|
||||||
|
|
||||||
|
pub async fn get_resource_json_value(
|
||||||
|
&self,
|
||||||
|
name: &str,
|
||||||
|
namespace: Option<&str>,
|
||||||
|
gvk: &GroupVersionKind,
|
||||||
|
) -> Result<DynamicObject, Error> {
|
||||||
|
let ar = ApiResource::from_gvk(gvk);
|
||||||
|
let api: Api<DynamicObject> = match namespace {
|
||||||
|
Some(ns) => Api::namespaced_with(self.client.clone(), ns, &ar),
|
||||||
|
None => Api::default_namespaced_with(self.client.clone(), &ar),
|
||||||
|
};
|
||||||
|
api.get(name).await
|
||||||
|
}
|
||||||
|
|
||||||
|
pub async fn get_secret_json_value(
|
||||||
|
&self,
|
||||||
|
name: &str,
|
||||||
|
namespace: Option<&str>,
|
||||||
|
) -> Result<DynamicObject, Error> {
|
||||||
|
self.get_resource_json_value(
|
||||||
|
name,
|
||||||
|
namespace,
|
||||||
|
&GroupVersionKind {
|
||||||
|
group: String::new(),
|
||||||
|
version: "v1".to_string(),
|
||||||
|
kind: "Secret".to_string(),
|
||||||
|
},
|
||||||
|
)
|
||||||
|
.await
|
||||||
|
}
|
||||||
|
|
||||||
|
pub async fn get_deployment(
|
||||||
|
&self,
|
||||||
|
name: &str,
|
||||||
|
namespace: Option<&str>,
|
||||||
|
) -> Result<Option<Deployment>, Error> {
|
||||||
|
let api: Api<Deployment> = match namespace {
|
||||||
|
Some(ns) => {
|
||||||
|
debug!("Getting namespaced deployment '{name}' in '{ns}'");
|
||||||
|
Api::namespaced(self.client.clone(), ns)
|
||||||
|
}
|
||||||
|
None => {
|
||||||
|
debug!("Getting deployment '{name}' in default namespace");
|
||||||
|
Api::default_namespaced(self.client.clone())
|
||||||
|
}
|
||||||
|
};
|
||||||
|
api.get_opt(name).await
|
||||||
|
}
|
||||||
|
|
||||||
|
pub async fn scale_deployment(
|
||||||
|
&self,
|
||||||
|
name: &str,
|
||||||
|
namespace: Option<&str>,
|
||||||
|
replicas: u32,
|
||||||
|
) -> Result<(), Error> {
|
||||||
|
let api: Api<Deployment> = match namespace {
|
||||||
|
Some(ns) => Api::namespaced(self.client.clone(), ns),
|
||||||
|
None => Api::default_namespaced(self.client.clone()),
|
||||||
|
};
|
||||||
|
use kube::api::{Patch, PatchParams};
|
||||||
|
use serde_json::json;
|
||||||
|
let patch = json!({ "spec": { "replicas": replicas } });
|
||||||
|
api.patch_scale(name, &PatchParams::default(), &Patch::Merge(&patch))
|
||||||
|
.await?;
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
pub async fn delete_deployment(
|
||||||
|
&self,
|
||||||
|
name: &str,
|
||||||
|
namespace: Option<&str>,
|
||||||
|
) -> Result<(), Error> {
|
||||||
|
let api: Api<Deployment> = match namespace {
|
||||||
|
Some(ns) => Api::namespaced(self.client.clone(), ns),
|
||||||
|
None => Api::default_namespaced(self.client.clone()),
|
||||||
|
};
|
||||||
|
api.delete(name, &kube::api::DeleteParams::default())
|
||||||
|
.await?;
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
pub async fn wait_until_deployment_ready(
|
||||||
|
&self,
|
||||||
|
name: &str,
|
||||||
|
namespace: Option<&str>,
|
||||||
|
timeout: Option<Duration>,
|
||||||
|
) -> Result<(), String> {
|
||||||
|
let api: Api<Deployment> = match namespace {
|
||||||
|
Some(ns) => Api::namespaced(self.client.clone(), ns),
|
||||||
|
None => Api::default_namespaced(self.client.clone()),
|
||||||
|
};
|
||||||
|
let timeout = timeout.unwrap_or(Duration::from_secs(120));
|
||||||
|
let establish = await_condition(api, name, conditions::is_deployment_completed());
|
||||||
|
tokio::time::timeout(timeout, establish)
|
||||||
|
.await
|
||||||
|
.map(|_| ())
|
||||||
|
.map_err(|_| "Timed out waiting for deployment".to_string())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Gets a single named resource, using the correct API scope for `K`.
|
||||||
|
pub async fn get_resource<K>(
|
||||||
|
&self,
|
||||||
|
name: &str,
|
||||||
|
namespace: Option<&str>,
|
||||||
|
) -> Result<Option<K>, Error>
|
||||||
|
where
|
||||||
|
K: Resource + Clone + std::fmt::Debug + DeserializeOwned,
|
||||||
|
<K as Resource>::Scope: ScopeResolver<K>,
|
||||||
|
<K as Resource>::DynamicType: Default,
|
||||||
|
{
|
||||||
|
let api: Api<K> =
|
||||||
|
<<K as Resource>::Scope as ScopeResolver<K>>::get_api(&self.client, namespace);
|
||||||
|
api.get_opt(name).await
|
||||||
|
}
|
||||||
|
|
||||||
|
pub async fn list_resources<K>(
|
||||||
|
&self,
|
||||||
|
namespace: Option<&str>,
|
||||||
|
list_params: Option<ListParams>,
|
||||||
|
) -> Result<ObjectList<K>, Error>
|
||||||
|
where
|
||||||
|
K: Resource + Clone + std::fmt::Debug + DeserializeOwned,
|
||||||
|
<K as Resource>::Scope: ScopeResolver<K>,
|
||||||
|
<K as Resource>::DynamicType: Default,
|
||||||
|
{
|
||||||
|
let api: Api<K> =
|
||||||
|
<<K as Resource>::Scope as ScopeResolver<K>>::get_api(&self.client, namespace);
|
||||||
|
api.list(&list_params.unwrap_or_default()).await
|
||||||
|
}
|
||||||
|
|
||||||
|
pub async fn list_all_resources_with_labels<K>(&self, labels: &str) -> Result<Vec<K>, Error>
|
||||||
|
where
|
||||||
|
K: Resource + Clone + std::fmt::Debug + DeserializeOwned,
|
||||||
|
<K as Resource>::DynamicType: Default,
|
||||||
|
{
|
||||||
|
Api::<K>::all(self.client.clone())
|
||||||
|
.list(&ListParams::default().labels(labels))
|
||||||
|
.await
|
||||||
|
.map(|l| l.items)
|
||||||
|
}
|
||||||
|
|
||||||
|
pub async fn get_all_resource_in_all_namespace<K>(&self) -> Result<Vec<K>, Error>
|
||||||
|
where
|
||||||
|
K: Resource + Clone + std::fmt::Debug + DeserializeOwned,
|
||||||
|
<K as Resource>::Scope: ScopeResolver<K>,
|
||||||
|
<K as Resource>::DynamicType: Default,
|
||||||
|
{
|
||||||
|
Api::<K>::all(self.client.clone())
|
||||||
|
.list(&Default::default())
|
||||||
|
.await
|
||||||
|
.map(|l| l.items)
|
||||||
|
}
|
||||||
|
|
||||||
|
pub async fn get_nodes(
|
||||||
|
&self,
|
||||||
|
list_params: Option<ListParams>,
|
||||||
|
) -> Result<ObjectList<Node>, Error> {
|
||||||
|
self.list_resources(None, list_params).await
|
||||||
|
}
|
||||||
|
}
|
||||||
100
harmony-k8s/src/types.rs
Normal file
100
harmony-k8s/src/types.rs
Normal file
@@ -0,0 +1,100 @@
|
|||||||
|
use std::time::Duration;
|
||||||
|
|
||||||
|
use k8s_openapi::{ClusterResourceScope, NamespaceResourceScope};
|
||||||
|
use kube::{Api, Client, Resource};
|
||||||
|
use serde::Serialize;
|
||||||
|
|
||||||
|
/// Which Kubernetes distribution is running. Detected once at runtime via
/// [`crate::discovery::K8sClient::get_k8s_distribution`].
#[derive(Debug, Clone, PartialEq, Eq, Serialize)]
pub enum KubernetesDistribution {
    /// Fallback: vanilla upstream Kubernetes, or any distribution the
    /// detection logic does not recognize.
    Default,
    /// OpenShift-style clusters (presumably OpenShift/OKD — confirm against
    /// the detection logic in `get_k8s_distribution`).
    OpenshiftFamily,
    /// K3s-style lightweight clusters (presumably k3s and derivatives —
    /// confirm against the detection logic).
    K3sFamily,
}
|
||||||
|
|
||||||
|
/// A file to be written to a node's filesystem.
#[derive(Debug, Clone)]
pub struct NodeFile {
    /// Absolute path on the host where the file should be written.
    pub path: String,
    /// Content of the file. Stored as a `String`, so only valid UTF-8 text
    /// can be represented (no binary payloads).
    pub content: String,
    /// UNIX permissions (e.g. `0o600`).
    pub mode: u32,
}
|
||||||
|
|
||||||
|
/// Options controlling the behaviour of a [`crate::K8sClient::drain_node`] operation.
///
/// The flags mirror their `kubectl drain` counterparts; see each field.
#[derive(Debug, Clone)]
pub struct DrainOptions {
    /// Evict pods that use `emptyDir` volumes (ephemeral data is lost).
    /// Equivalent to `kubectl drain --delete-emptydir-data`.
    pub delete_emptydir_data: bool,
    /// Silently skip DaemonSet-managed pods instead of blocking the drain.
    /// Equivalent to `kubectl drain --ignore-daemonsets`.
    pub ignore_daemonsets: bool,
    /// Maximum wall-clock time to wait for all evictions to complete.
    /// See the `Default` impl for the default value.
    pub timeout: Duration,
}
|
||||||
|
|
||||||
|
impl Default for DrainOptions {
    /// Conservative defaults: keep `emptyDir` data, skip DaemonSet pods.
    fn default() -> Self {
        Self {
            delete_emptydir_data: false,
            ignore_daemonsets: true,
            // NOTE(review): 1 s is a very short drain timeout — evictions
            // routinely take longer than this. Confirm it is intentional
            // (e.g. overridden by every real caller) and not a leftover.
            timeout: Duration::from_secs(1),
        }
    }
}
|
||||||
|
|
||||||
|
impl DrainOptions {
|
||||||
|
pub fn default_ignore_daemonset_delete_emptydir_data() -> Self {
|
||||||
|
Self {
|
||||||
|
delete_emptydir_data: true,
|
||||||
|
ignore_daemonsets: true,
|
||||||
|
..Self::default()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Controls how [`crate::K8sClient::apply_with_strategy`] behaves when the
/// resource already exists (or does not).
///
/// NOTE(review): no `Debug`/`Clone` derives — presumably the mode is
/// consumed by value once per apply call; confirm callers never need to
/// reuse or log it.
pub enum WriteMode {
    /// Server-side apply; create if absent, update if present (default).
    CreateOrUpdate,
    /// POST only; return an error if the resource already exists.
    Create,
    /// Server-side apply only; return an error if the resource does not exist.
    Update,
}
|
||||||
|
|
||||||
|
// ── Scope resolution trait ───────────────────────────────────────────────────
|
||||||
|
|
||||||
|
/// Resolves the correct [`kube::Api`] for a resource type based on its scope
/// (cluster-wide vs. namespace-scoped).
///
/// Implemented for `k8s_openapi`'s two scope marker types below, letting
/// generic client methods pick `Api::all` vs. `Api::namespaced` at compile
/// time from `K::Scope`.
pub trait ScopeResolver<K: Resource> {
    /// Builds the API handle. `ns` is only meaningful for namespace-scoped
    /// resources; cluster-scoped implementations ignore it.
    fn get_api(client: &Client, ns: Option<&str>) -> Api<K>;
}
|
||||||
|
|
||||||
|
impl<K> ScopeResolver<K> for ClusterResourceScope
where
    K: Resource<Scope = ClusterResourceScope>,
    <K as Resource>::DynamicType: Default,
{
    /// Cluster-scoped resources have no namespace; `_ns` is ignored and the
    /// cluster-wide API is always returned.
    fn get_api(client: &Client, _ns: Option<&str>) -> Api<K> {
        Api::all(client.clone())
    }
}
|
||||||
|
|
||||||
|
impl<K> ScopeResolver<K> for NamespaceResourceScope
|
||||||
|
where
|
||||||
|
K: Resource<Scope = NamespaceResourceScope>,
|
||||||
|
<K as Resource>::DynamicType: Default,
|
||||||
|
{
|
||||||
|
fn get_api(client: &Client, ns: Option<&str>) -> Api<K> {
|
||||||
|
match ns {
|
||||||
|
Some(ns) => Api::namespaced(client.clone(), ns),
|
||||||
|
None => Api::default_namespaced(client.clone()),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -21,6 +21,8 @@ semver = "1.0.23"
|
|||||||
serde.workspace = true
|
serde.workspace = true
|
||||||
serde_json.workspace = true
|
serde_json.workspace = true
|
||||||
tokio.workspace = true
|
tokio.workspace = true
|
||||||
|
tokio-retry.workspace = true
|
||||||
|
tokio-util.workspace = true
|
||||||
derive-new.workspace = true
|
derive-new.workspace = true
|
||||||
log.workspace = true
|
log.workspace = true
|
||||||
env_logger.workspace = true
|
env_logger.workspace = true
|
||||||
@@ -31,6 +33,7 @@ opnsense-config-xml = { path = "../opnsense-config-xml" }
|
|||||||
harmony_macros = { path = "../harmony_macros" }
|
harmony_macros = { path = "../harmony_macros" }
|
||||||
harmony_types = { path = "../harmony_types" }
|
harmony_types = { path = "../harmony_types" }
|
||||||
harmony_execution = { path = "../harmony_execution" }
|
harmony_execution = { path = "../harmony_execution" }
|
||||||
|
harmony-k8s = { path = "../harmony-k8s" }
|
||||||
uuid.workspace = true
|
uuid.workspace = true
|
||||||
url.workspace = true
|
url.workspace = true
|
||||||
kube = { workspace = true, features = ["derive"] }
|
kube = { workspace = true, features = ["derive"] }
|
||||||
@@ -60,7 +63,6 @@ temp-dir = "0.1.14"
|
|||||||
dyn-clone = "1.0.19"
|
dyn-clone = "1.0.19"
|
||||||
similar.workspace = true
|
similar.workspace = true
|
||||||
futures-util = "0.3.31"
|
futures-util = "0.3.31"
|
||||||
tokio-util = "0.7.15"
|
|
||||||
strum = { version = "0.27.1", features = ["derive"] }
|
strum = { version = "0.27.1", features = ["derive"] }
|
||||||
tempfile.workspace = true
|
tempfile.workspace = true
|
||||||
serde_with = "3.14.0"
|
serde_with = "3.14.0"
|
||||||
@@ -80,7 +82,7 @@ sqlx.workspace = true
|
|||||||
inquire.workspace = true
|
inquire.workspace = true
|
||||||
brocade = { path = "../brocade" }
|
brocade = { path = "../brocade" }
|
||||||
option-ext = "0.2.0"
|
option-ext = "0.2.0"
|
||||||
tokio-retry = "0.3.0"
|
rand.workspace = true
|
||||||
|
|
||||||
[dev-dependencies]
|
[dev-dependencies]
|
||||||
pretty_assertions.workspace = true
|
pretty_assertions.workspace = true
|
||||||
|
|||||||
@@ -4,8 +4,6 @@ use std::error::Error;
|
|||||||
use async_trait::async_trait;
|
use async_trait::async_trait;
|
||||||
use derive_new::new;
|
use derive_new::new;
|
||||||
|
|
||||||
use crate::inventory::HostRole;
|
|
||||||
|
|
||||||
use super::{
|
use super::{
|
||||||
data::Version, executors::ExecutorError, inventory::Inventory, topology::PreparationError,
|
data::Version, executors::ExecutorError, inventory::Inventory, topology::PreparationError,
|
||||||
};
|
};
|
||||||
|
|||||||
@@ -1,4 +1,5 @@
|
|||||||
use async_trait::async_trait;
|
use async_trait::async_trait;
|
||||||
|
use harmony_k8s::K8sClient;
|
||||||
use harmony_macros::ip;
|
use harmony_macros::ip;
|
||||||
use harmony_types::{
|
use harmony_types::{
|
||||||
id::Id,
|
id::Id,
|
||||||
@@ -16,7 +17,7 @@ use super::{
|
|||||||
DHCPStaticEntry, DhcpServer, DnsRecord, DnsRecordType, DnsServer, Firewall, HostNetworkConfig,
|
DHCPStaticEntry, DhcpServer, DnsRecord, DnsRecordType, DnsServer, Firewall, HostNetworkConfig,
|
||||||
HttpServer, IpAddress, K8sclient, LoadBalancer, LoadBalancerService, LogicalHost, NetworkError,
|
HttpServer, IpAddress, K8sclient, LoadBalancer, LoadBalancerService, LogicalHost, NetworkError,
|
||||||
NetworkManager, PreparationError, PreparationOutcome, Router, Switch, SwitchClient,
|
NetworkManager, PreparationError, PreparationOutcome, Router, Switch, SwitchClient,
|
||||||
SwitchError, TftpServer, Topology, k8s::K8sClient,
|
SwitchError, TftpServer, Topology,
|
||||||
};
|
};
|
||||||
use std::{
|
use std::{
|
||||||
process::Command,
|
process::Command,
|
||||||
|
|||||||
File diff suppressed because it is too large
Load Diff
@@ -2,6 +2,7 @@ use std::{collections::BTreeMap, process::Command, sync::Arc, time::Duration};
|
|||||||
|
|
||||||
use async_trait::async_trait;
|
use async_trait::async_trait;
|
||||||
use base64::{Engine, engine::general_purpose};
|
use base64::{Engine, engine::general_purpose};
|
||||||
|
use harmony_k8s::{K8sClient, KubernetesDistribution};
|
||||||
use harmony_types::rfc1123::Rfc1123Name;
|
use harmony_types::rfc1123::Rfc1123Name;
|
||||||
use k8s_openapi::api::{
|
use k8s_openapi::api::{
|
||||||
core::v1::{Pod, Secret},
|
core::v1::{Pod, Secret},
|
||||||
@@ -58,7 +59,6 @@ use crate::{
|
|||||||
use super::super::{
|
use super::super::{
|
||||||
DeploymentTarget, HelmCommand, K8sclient, MultiTargetTopology, PreparationError,
|
DeploymentTarget, HelmCommand, K8sclient, MultiTargetTopology, PreparationError,
|
||||||
PreparationOutcome, Topology,
|
PreparationOutcome, Topology,
|
||||||
k8s::K8sClient,
|
|
||||||
oberservability::monitoring::AlertReceiver,
|
oberservability::monitoring::AlertReceiver,
|
||||||
tenant::{
|
tenant::{
|
||||||
TenantConfig, TenantManager,
|
TenantConfig, TenantManager,
|
||||||
@@ -76,13 +76,6 @@ struct K8sState {
|
|||||||
message: String,
|
message: String,
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Debug, Clone, Serialize)]
|
|
||||||
pub enum KubernetesDistribution {
|
|
||||||
OpenshiftFamily,
|
|
||||||
K3sFamily,
|
|
||||||
Default,
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Debug, Clone)]
|
#[derive(Debug, Clone)]
|
||||||
enum K8sSource {
|
enum K8sSource {
|
||||||
LocalK3d,
|
LocalK3d,
|
||||||
|
|||||||
@@ -1,7 +1,6 @@
|
|||||||
use async_trait::async_trait;
|
use async_trait::async_trait;
|
||||||
|
|
||||||
use crate::{
|
use crate::{
|
||||||
interpret::Outcome,
|
|
||||||
inventory::Inventory,
|
inventory::Inventory,
|
||||||
modules::postgresql::{
|
modules::postgresql::{
|
||||||
K8sPostgreSQLScore,
|
K8sPostgreSQLScore,
|
||||||
|
|||||||
@@ -16,7 +16,6 @@ pub mod tenant;
|
|||||||
use derive_new::new;
|
use derive_new::new;
|
||||||
pub use k8s_anywhere::*;
|
pub use k8s_anywhere::*;
|
||||||
pub use localhost::*;
|
pub use localhost::*;
|
||||||
pub mod k8s;
|
|
||||||
mod load_balancer;
|
mod load_balancer;
|
||||||
pub mod router;
|
pub mod router;
|
||||||
mod tftp;
|
mod tftp;
|
||||||
|
|||||||
@@ -9,6 +9,7 @@ use std::{
|
|||||||
use async_trait::async_trait;
|
use async_trait::async_trait;
|
||||||
use brocade::PortOperatingMode;
|
use brocade::PortOperatingMode;
|
||||||
use derive_new::new;
|
use derive_new::new;
|
||||||
|
use harmony_k8s::K8sClient;
|
||||||
use harmony_types::{
|
use harmony_types::{
|
||||||
id::Id,
|
id::Id,
|
||||||
net::{IpAddress, MacAddress},
|
net::{IpAddress, MacAddress},
|
||||||
@@ -18,7 +19,7 @@ use serde::Serialize;
|
|||||||
|
|
||||||
use crate::executors::ExecutorError;
|
use crate::executors::ExecutorError;
|
||||||
|
|
||||||
use super::{LogicalHost, k8s::K8sClient};
|
use super::LogicalHost;
|
||||||
|
|
||||||
#[derive(Debug)]
|
#[derive(Debug)]
|
||||||
pub struct DHCPStaticEntry {
|
pub struct DHCPStaticEntry {
|
||||||
|
|||||||
@@ -1,10 +1,8 @@
|
|||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
|
|
||||||
use crate::{
|
use crate::executors::ExecutorError;
|
||||||
executors::ExecutorError,
|
|
||||||
topology::k8s::{ApplyStrategy, K8sClient},
|
|
||||||
};
|
|
||||||
use async_trait::async_trait;
|
use async_trait::async_trait;
|
||||||
|
use harmony_k8s::K8sClient;
|
||||||
use k8s_openapi::{
|
use k8s_openapi::{
|
||||||
api::{
|
api::{
|
||||||
core::v1::{LimitRange, Namespace, ResourceQuota},
|
core::v1::{LimitRange, Namespace, ResourceQuota},
|
||||||
@@ -14,7 +12,7 @@ use k8s_openapi::{
|
|||||||
},
|
},
|
||||||
apimachinery::pkg::util::intstr::IntOrString,
|
apimachinery::pkg::util::intstr::IntOrString,
|
||||||
};
|
};
|
||||||
use kube::{Resource, api::DynamicObject};
|
use kube::Resource;
|
||||||
use log::debug;
|
use log::debug;
|
||||||
use serde::de::DeserializeOwned;
|
use serde::de::DeserializeOwned;
|
||||||
use serde_json::json;
|
use serde_json::json;
|
||||||
@@ -59,7 +57,6 @@ impl K8sTenantManager {
|
|||||||
) -> Result<K, ExecutorError>
|
) -> Result<K, ExecutorError>
|
||||||
where
|
where
|
||||||
<K as kube::Resource>::DynamicType: Default,
|
<K as kube::Resource>::DynamicType: Default,
|
||||||
<K as kube::Resource>::Scope: ApplyStrategy<K>,
|
|
||||||
{
|
{
|
||||||
self.apply_labels(&mut resource, config);
|
self.apply_labels(&mut resource, config);
|
||||||
self.k8s_client
|
self.k8s_client
|
||||||
|
|||||||
@@ -5,6 +5,7 @@ use std::{
|
|||||||
|
|
||||||
use askama::Template;
|
use askama::Template;
|
||||||
use async_trait::async_trait;
|
use async_trait::async_trait;
|
||||||
|
use harmony_k8s::{DrainOptions, K8sClient, NodeFile};
|
||||||
use harmony_types::id::Id;
|
use harmony_types::id::Id;
|
||||||
use k8s_openapi::api::core::v1::Node;
|
use k8s_openapi::api::core::v1::Node;
|
||||||
use kube::{
|
use kube::{
|
||||||
@@ -15,10 +16,7 @@ use log::{debug, info, warn};
|
|||||||
|
|
||||||
use crate::{
|
use crate::{
|
||||||
modules::okd::crd::nmstate,
|
modules::okd::crd::nmstate,
|
||||||
topology::{
|
topology::{HostNetworkConfig, NetworkError, NetworkManager},
|
||||||
HostNetworkConfig, NetworkError, NetworkManager,
|
|
||||||
k8s::{DrainOptions, K8sClient, NodeFile},
|
|
||||||
},
|
|
||||||
};
|
};
|
||||||
|
|
||||||
/// NetworkManager bond configuration template
|
/// NetworkManager bond configuration template
|
||||||
|
|||||||
@@ -1,5 +1,5 @@
|
|||||||
use async_trait::async_trait;
|
use async_trait::async_trait;
|
||||||
use log::{debug, info, trace};
|
use log::{debug, info};
|
||||||
use serde::Serialize;
|
use serde::Serialize;
|
||||||
use std::path::PathBuf;
|
use std::path::PathBuf;
|
||||||
|
|
||||||
|
|||||||
@@ -1,4 +1,5 @@
|
|||||||
use async_trait::async_trait;
|
use async_trait::async_trait;
|
||||||
|
use harmony_k8s::K8sClient;
|
||||||
use harmony_macros::hurl;
|
use harmony_macros::hurl;
|
||||||
use log::{debug, info, trace, warn};
|
use log::{debug, info, trace, warn};
|
||||||
use non_blank_string_rs::NonBlankString;
|
use non_blank_string_rs::NonBlankString;
|
||||||
@@ -14,7 +15,7 @@ use crate::{
|
|||||||
helm::chart::{HelmChartScore, HelmRepository},
|
helm::chart::{HelmChartScore, HelmRepository},
|
||||||
},
|
},
|
||||||
score::Score,
|
score::Score,
|
||||||
topology::{HelmCommand, K8sclient, Topology, ingress::Ingress, k8s::K8sClient},
|
topology::{HelmCommand, K8sclient, Topology, ingress::Ingress},
|
||||||
};
|
};
|
||||||
use harmony_types::id::Id;
|
use harmony_types::id::Id;
|
||||||
|
|
||||||
|
|||||||
@@ -1,8 +1,9 @@
|
|||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
|
|
||||||
|
use harmony_k8s::K8sClient;
|
||||||
use log::{debug, info};
|
use log::{debug, info};
|
||||||
|
|
||||||
use crate::{interpret::InterpretError, topology::k8s::K8sClient};
|
use crate::interpret::InterpretError;
|
||||||
|
|
||||||
#[derive(Clone, Debug, PartialEq, Eq)]
|
#[derive(Clone, Debug, PartialEq, Eq)]
|
||||||
pub enum ArgoScope {
|
pub enum ArgoScope {
|
||||||
|
|||||||
@@ -1,3 +1,4 @@
|
|||||||
|
use harmony_k8s::K8sClient;
|
||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
|
|
||||||
use async_trait::async_trait;
|
use async_trait::async_trait;
|
||||||
@@ -11,7 +12,7 @@ use crate::{
|
|||||||
interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
|
interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
|
||||||
inventory::Inventory,
|
inventory::Inventory,
|
||||||
score::Score,
|
score::Score,
|
||||||
topology::{K8sclient, Topology, k8s::K8sClient},
|
topology::{K8sclient, Topology},
|
||||||
};
|
};
|
||||||
|
|
||||||
#[derive(Clone, Debug, Serialize)]
|
#[derive(Clone, Debug, Serialize)]
|
||||||
|
|||||||
@@ -3,7 +3,8 @@ use std::sync::Arc;
|
|||||||
use async_trait::async_trait;
|
use async_trait::async_trait;
|
||||||
use log::warn;
|
use log::warn;
|
||||||
|
|
||||||
use crate::topology::{FailoverTopology, K8sclient, k8s::K8sClient};
|
use crate::topology::{FailoverTopology, K8sclient};
|
||||||
|
use harmony_k8s::K8sClient;
|
||||||
|
|
||||||
#[async_trait]
|
#[async_trait]
|
||||||
impl<T: K8sclient> K8sclient for FailoverTopology<T> {
|
impl<T: K8sclient> K8sclient for FailoverTopology<T> {
|
||||||
|
|||||||
@@ -1,5 +1,5 @@
|
|||||||
use async_trait::async_trait;
|
use async_trait::async_trait;
|
||||||
use k8s_openapi::{NamespaceResourceScope, ResourceScope};
|
use k8s_openapi::ResourceScope;
|
||||||
use kube::Resource;
|
use kube::Resource;
|
||||||
use log::info;
|
use log::info;
|
||||||
use serde::{Serialize, de::DeserializeOwned};
|
use serde::{Serialize, de::DeserializeOwned};
|
||||||
@@ -109,7 +109,7 @@ where
|
|||||||
topology
|
topology
|
||||||
.k8s_client()
|
.k8s_client()
|
||||||
.await
|
.await
|
||||||
.expect("Environment should provide enough information to instanciate a client")
|
.map_err(|e| InterpretError::new(format!("Failed to get k8s client : {e}")))?
|
||||||
.apply_many(&self.score.resource, self.score.namespace.as_deref())
|
.apply_many(&self.score.resource, self.score.namespace.as_deref())
|
||||||
.await?;
|
.await?;
|
||||||
|
|
||||||
|
|||||||
@@ -6,7 +6,7 @@ use schemars::JsonSchema;
|
|||||||
use serde::{Deserialize, Serialize};
|
use serde::{Deserialize, Serialize};
|
||||||
|
|
||||||
use crate::{
|
use crate::{
|
||||||
interpret::{InterpretError, Outcome},
|
interpret::InterpretError,
|
||||||
inventory::Inventory,
|
inventory::Inventory,
|
||||||
modules::{
|
modules::{
|
||||||
monitoring::{
|
monitoring::{
|
||||||
@@ -17,10 +17,10 @@ use crate::{
|
|||||||
topology::{
|
topology::{
|
||||||
K8sclient, Topology,
|
K8sclient, Topology,
|
||||||
installable::Installable,
|
installable::Installable,
|
||||||
k8s::K8sClient,
|
|
||||||
oberservability::monitoring::{AlertReceiver, AlertSender, ScrapeTarget},
|
oberservability::monitoring::{AlertReceiver, AlertSender, ScrapeTarget},
|
||||||
},
|
},
|
||||||
};
|
};
|
||||||
|
use harmony_k8s::K8sClient;
|
||||||
|
|
||||||
#[derive(CustomResource, Serialize, Deserialize, Debug, Clone, JsonSchema)]
|
#[derive(CustomResource, Serialize, Deserialize, Debug, Clone, JsonSchema)]
|
||||||
#[kube(
|
#[kube(
|
||||||
|
|||||||
@@ -4,10 +4,8 @@ use kube::CustomResource;
|
|||||||
use schemars::JsonSchema;
|
use schemars::JsonSchema;
|
||||||
use serde::{Deserialize, Serialize};
|
use serde::{Deserialize, Serialize};
|
||||||
|
|
||||||
use crate::topology::{
|
use crate::topology::oberservability::monitoring::{AlertReceiver, AlertSender};
|
||||||
k8s::K8sClient,
|
use harmony_k8s::K8sClient;
|
||||||
oberservability::monitoring::{AlertReceiver, AlertSender},
|
|
||||||
};
|
|
||||||
|
|
||||||
#[derive(CustomResource, Serialize, Deserialize, Debug, Clone, JsonSchema)]
|
#[derive(CustomResource, Serialize, Deserialize, Debug, Clone, JsonSchema)]
|
||||||
#[kube(
|
#[kube(
|
||||||
|
|||||||
@@ -11,8 +11,9 @@ use crate::{
|
|||||||
inventory::Inventory,
|
inventory::Inventory,
|
||||||
modules::monitoring::ntfy::helm::ntfy_helm_chart::ntfy_helm_chart_score,
|
modules::monitoring::ntfy::helm::ntfy_helm_chart::ntfy_helm_chart_score,
|
||||||
score::Score,
|
score::Score,
|
||||||
topology::{HelmCommand, K8sclient, MultiTargetTopology, Topology, k8s::K8sClient},
|
topology::{HelmCommand, K8sclient, MultiTargetTopology, Topology},
|
||||||
};
|
};
|
||||||
|
use harmony_k8s::K8sClient;
|
||||||
use harmony_types::id::Id;
|
use harmony_types::id::Id;
|
||||||
|
|
||||||
#[derive(Debug, Clone, Serialize)]
|
#[derive(Debug, Clone, Serialize)]
|
||||||
|
|||||||
@@ -1,9 +1,7 @@
|
|||||||
use std::{collections::BTreeMap, sync::Arc};
|
use std::{collections::BTreeMap, sync::Arc};
|
||||||
|
|
||||||
use crate::{
|
use crate::interpret::{InterpretError, Outcome};
|
||||||
interpret::{InterpretError, Outcome},
|
use harmony_k8s::K8sClient;
|
||||||
topology::k8s::K8sClient,
|
|
||||||
};
|
|
||||||
use k8s_openapi::api::core::v1::ConfigMap;
|
use k8s_openapi::api::core::v1::ConfigMap;
|
||||||
use kube::api::ObjectMeta;
|
use kube::api::ObjectMeta;
|
||||||
|
|
||||||
|
|||||||
@@ -1,6 +1,7 @@
|
|||||||
use std::{collections::BTreeMap, str::FromStr};
|
use std::{collections::BTreeMap, str::FromStr};
|
||||||
|
|
||||||
use async_trait::async_trait;
|
use async_trait::async_trait;
|
||||||
|
use harmony_k8s::KubernetesDistribution;
|
||||||
use harmony_macros::hurl;
|
use harmony_macros::hurl;
|
||||||
use harmony_secret::{Secret, SecretManager};
|
use harmony_secret::{Secret, SecretManager};
|
||||||
use harmony_types::id::Id;
|
use harmony_types::id::Id;
|
||||||
@@ -25,7 +26,7 @@ use crate::{
|
|||||||
},
|
},
|
||||||
},
|
},
|
||||||
score::Score,
|
score::Score,
|
||||||
topology::{HelmCommand, K8sclient, KubernetesDistribution, TlsRouter, Topology},
|
topology::{HelmCommand, K8sclient, TlsRouter, Topology},
|
||||||
};
|
};
|
||||||
|
|
||||||
#[derive(Debug, Clone, Serialize)]
|
#[derive(Debug, Clone, Serialize)]
|
||||||
|
|||||||
@@ -1,3 +1,5 @@
|
|||||||
|
use std::collections::BTreeMap;
|
||||||
|
|
||||||
use kube::{CustomResource, api::ObjectMeta};
|
use kube::{CustomResource, api::ObjectMeta};
|
||||||
use serde::{Deserialize, Serialize};
|
use serde::{Deserialize, Serialize};
|
||||||
|
|
||||||
@@ -13,9 +15,14 @@ use serde::{Deserialize, Serialize};
|
|||||||
#[serde(rename_all = "camelCase")]
|
#[serde(rename_all = "camelCase")]
|
||||||
pub struct ClusterSpec {
|
pub struct ClusterSpec {
|
||||||
pub instances: u32,
|
pub instances: u32,
|
||||||
|
#[serde(skip_serializing_if = "Option::is_none")]
|
||||||
pub image_name: Option<String>,
|
pub image_name: Option<String>,
|
||||||
pub storage: Storage,
|
pub storage: Storage,
|
||||||
pub bootstrap: Bootstrap,
|
pub bootstrap: Bootstrap,
|
||||||
|
/// This must be set to None if you want cnpg to generate a superuser secret
|
||||||
|
#[serde(skip_serializing_if = "Option::is_none")]
|
||||||
|
pub superuser_secret: Option<BTreeMap<String, String>>,
|
||||||
|
pub enable_superuser_access: bool,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl Default for Cluster {
|
impl Default for Cluster {
|
||||||
@@ -34,6 +41,8 @@ impl Default for ClusterSpec {
|
|||||||
image_name: None,
|
image_name: None,
|
||||||
storage: Storage::default(),
|
storage: Storage::default(),
|
||||||
bootstrap: Bootstrap::default(),
|
bootstrap: Bootstrap::default(),
|
||||||
|
superuser_secret: None,
|
||||||
|
enable_superuser_access: false,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -20,7 +20,7 @@ use crate::topology::{K8sclient, Topology};
|
|||||||
/// # Usage
|
/// # Usage
|
||||||
/// ```
|
/// ```
|
||||||
/// use harmony::modules::postgresql::CloudNativePgOperatorScore;
|
/// use harmony::modules::postgresql::CloudNativePgOperatorScore;
|
||||||
/// let score = CloudNativePgOperatorScore::default();
|
/// let score = CloudNativePgOperatorScore::default_openshift();
|
||||||
/// ```
|
/// ```
|
||||||
///
|
///
|
||||||
/// Or, you can take control of most relevant fiedls this way :
|
/// Or, you can take control of most relevant fiedls this way :
|
||||||
@@ -52,8 +52,8 @@ pub struct CloudNativePgOperatorScore {
|
|||||||
pub source_namespace: String,
|
pub source_namespace: String,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl Default for CloudNativePgOperatorScore {
|
impl CloudNativePgOperatorScore {
|
||||||
fn default() -> Self {
|
pub fn default_openshift() -> Self {
|
||||||
Self {
|
Self {
|
||||||
namespace: "openshift-operators".to_string(),
|
namespace: "openshift-operators".to_string(),
|
||||||
channel: "stable-v1".to_string(),
|
channel: "stable-v1".to_string(),
|
||||||
@@ -68,7 +68,7 @@ impl CloudNativePgOperatorScore {
|
|||||||
pub fn new(namespace: &str) -> Self {
|
pub fn new(namespace: &str) -> Self {
|
||||||
Self {
|
Self {
|
||||||
namespace: namespace.to_string(),
|
namespace: namespace.to_string(),
|
||||||
..Default::default()
|
..Self::default_openshift()
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,3 +1,5 @@
|
|||||||
|
use std::collections::BTreeMap;
|
||||||
|
|
||||||
use serde::Serialize;
|
use serde::Serialize;
|
||||||
|
|
||||||
use crate::interpret::Interpret;
|
use crate::interpret::Interpret;
|
||||||
@@ -66,6 +68,11 @@ impl<T: Topology + K8sclient> Score<T> for K8sPostgreSQLScore {
|
|||||||
owner: "app".to_string(),
|
owner: "app".to_string(),
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
|
// superuser_secret: Some(BTreeMap::from([(
|
||||||
|
// "name".to_string(),
|
||||||
|
// format!("{}-superuser", self.config.cluster_name.clone()),
|
||||||
|
// )])),
|
||||||
|
enable_superuser_access: true,
|
||||||
..ClusterSpec::default()
|
..ClusterSpec::default()
|
||||||
};
|
};
|
||||||
|
|
||||||
|
|||||||
@@ -12,8 +12,7 @@ use crate::modules::monitoring::kube_prometheus::crd::crd_alertmanager_config::C
|
|||||||
use crate::modules::monitoring::kube_prometheus::crd::crd_default_rules::build_default_application_rules;
|
use crate::modules::monitoring::kube_prometheus::crd::crd_default_rules::build_default_application_rules;
|
||||||
use crate::modules::monitoring::kube_prometheus::crd::crd_grafana::{
|
use crate::modules::monitoring::kube_prometheus::crd::crd_grafana::{
|
||||||
Grafana, GrafanaDashboard, GrafanaDashboardSpec, GrafanaDatasource, GrafanaDatasourceConfig,
|
Grafana, GrafanaDashboard, GrafanaDashboardSpec, GrafanaDatasource, GrafanaDatasourceConfig,
|
||||||
GrafanaDatasourceJsonData, GrafanaDatasourceSpec, GrafanaSecretKeyRef, GrafanaSpec,
|
GrafanaDatasourceJsonData, GrafanaDatasourceSpec, GrafanaSpec,
|
||||||
GrafanaValueFrom, GrafanaValueSource,
|
|
||||||
};
|
};
|
||||||
use crate::modules::monitoring::kube_prometheus::crd::crd_prometheus_rules::{
|
use crate::modules::monitoring::kube_prometheus::crd::crd_prometheus_rules::{
|
||||||
PrometheusRule, PrometheusRuleSpec, RuleGroup,
|
PrometheusRule, PrometheusRuleSpec, RuleGroup,
|
||||||
@@ -23,7 +22,7 @@ use crate::modules::monitoring::kube_prometheus::crd::service_monitor::{
|
|||||||
ServiceMonitor, ServiceMonitorSpec,
|
ServiceMonitor, ServiceMonitorSpec,
|
||||||
};
|
};
|
||||||
use crate::topology::oberservability::monitoring::AlertReceiver;
|
use crate::topology::oberservability::monitoring::AlertReceiver;
|
||||||
use crate::topology::{K8sclient, Topology, k8s::K8sClient};
|
use crate::topology::{K8sclient, Topology};
|
||||||
use crate::{
|
use crate::{
|
||||||
data::Version,
|
data::Version,
|
||||||
interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
|
interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
|
||||||
@@ -38,6 +37,7 @@ use crate::{
|
|||||||
},
|
},
|
||||||
score::Score,
|
score::Score,
|
||||||
};
|
};
|
||||||
|
use harmony_k8s::K8sClient;
|
||||||
use harmony_types::id::Id;
|
use harmony_types::id::Id;
|
||||||
|
|
||||||
use super::prometheus::PrometheusMonitoring;
|
use super::prometheus::PrometheusMonitoring;
|
||||||
|
|||||||
@@ -30,12 +30,13 @@ use crate::modules::monitoring::kube_prometheus::crd::rhob_service_monitor::{
|
|||||||
use crate::score::Score;
|
use crate::score::Score;
|
||||||
use crate::topology::ingress::Ingress;
|
use crate::topology::ingress::Ingress;
|
||||||
use crate::topology::oberservability::monitoring::AlertReceiver;
|
use crate::topology::oberservability::monitoring::AlertReceiver;
|
||||||
use crate::topology::{K8sclient, Topology, k8s::K8sClient};
|
use crate::topology::{K8sclient, Topology};
|
||||||
use crate::{
|
use crate::{
|
||||||
data::Version,
|
data::Version,
|
||||||
interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
|
interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
|
||||||
inventory::Inventory,
|
inventory::Inventory,
|
||||||
};
|
};
|
||||||
|
use harmony_k8s::K8sClient;
|
||||||
use harmony_types::id::Id;
|
use harmony_types::id::Id;
|
||||||
|
|
||||||
use super::prometheus::PrometheusMonitoring;
|
use super::prometheus::PrometheusMonitoring;
|
||||||
|
|||||||
@@ -4,6 +4,7 @@ use std::{
|
|||||||
};
|
};
|
||||||
|
|
||||||
use async_trait::async_trait;
|
use async_trait::async_trait;
|
||||||
|
use harmony_k8s::K8sClient;
|
||||||
use log::{debug, warn};
|
use log::{debug, warn};
|
||||||
use serde::{Deserialize, Serialize};
|
use serde::{Deserialize, Serialize};
|
||||||
use tokio::time::sleep;
|
use tokio::time::sleep;
|
||||||
@@ -13,7 +14,7 @@ use crate::{
|
|||||||
interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
|
interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
|
||||||
inventory::Inventory,
|
inventory::Inventory,
|
||||||
score::Score,
|
score::Score,
|
||||||
topology::{K8sclient, Topology, k8s::K8sClient},
|
topology::{K8sclient, Topology},
|
||||||
};
|
};
|
||||||
use harmony_types::id::Id;
|
use harmony_types::id::Id;
|
||||||
|
|
||||||
|
|||||||
@@ -9,8 +9,9 @@ use crate::{
|
|||||||
interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
|
interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
|
||||||
inventory::Inventory,
|
inventory::Inventory,
|
||||||
score::Score,
|
score::Score,
|
||||||
topology::{K8sclient, Topology, k8s::K8sClient},
|
topology::{K8sclient, Topology},
|
||||||
};
|
};
|
||||||
|
use harmony_k8s::K8sClient;
|
||||||
use harmony_types::id::Id;
|
use harmony_types::id::Id;
|
||||||
|
|
||||||
#[derive(Clone, Debug, Serialize)]
|
#[derive(Clone, Debug, Serialize)]
|
||||||
|
|||||||
@@ -1,43 +1,476 @@
|
|||||||
|
use k8s_openapi::api::core::v1::Namespace;
|
||||||
|
use k8s_openapi::apimachinery::pkg::apis::meta::v1::ObjectMeta;
|
||||||
|
use k8s_openapi::{ByteString, api::core::v1::Secret};
|
||||||
|
use kube::{Error as KubeError, core::ErrorResponse};
|
||||||
|
use rand::distr::Distribution;
|
||||||
|
use rand::{Rng, rng, seq::SliceRandom};
|
||||||
|
use std::collections::BTreeMap;
|
||||||
use std::str::FromStr;
|
use std::str::FromStr;
|
||||||
|
|
||||||
|
use async_trait::async_trait;
|
||||||
use harmony_macros::hurl;
|
use harmony_macros::hurl;
|
||||||
|
use harmony_types::id::Id;
|
||||||
|
use harmony_types::storage::StorageSize;
|
||||||
|
use log::{debug, error, info, trace, warn};
|
||||||
use non_blank_string_rs::NonBlankString;
|
use non_blank_string_rs::NonBlankString;
|
||||||
use serde::Serialize;
|
use serde::Serialize;
|
||||||
|
|
||||||
use crate::{
|
use crate::{
|
||||||
interpret::Interpret,
|
data::Version,
|
||||||
|
interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
|
||||||
|
inventory::Inventory,
|
||||||
modules::helm::chart::{HelmChartScore, HelmRepository},
|
modules::helm::chart::{HelmChartScore, HelmRepository},
|
||||||
|
modules::k8s::resource::K8sResourceScore,
|
||||||
|
modules::postgresql::capability::{PostgreSQL, PostgreSQLClusterRole, PostgreSQLConfig},
|
||||||
score::Score,
|
score::Score,
|
||||||
topology::{HelmCommand, K8sclient, Topology},
|
topology::{HelmCommand, K8sclient, Topology},
|
||||||
};
|
};
|
||||||
|
|
||||||
|
const NAMESPACE: &str = "zitadel";
|
||||||
|
const PG_CLUSTER_NAME: &str = "zitadel-pg";
|
||||||
|
const MASTERKEY_SECRET_NAME: &str = "zitadel-masterkey";
|
||||||
|
|
||||||
|
/// Opinionated Zitadel deployment score.
|
||||||
|
///
|
||||||
|
/// Deploys a PostgreSQL cluster (via the [`PostgreSQL`] trait) and the Zitadel
|
||||||
|
/// Helm chart into the same namespace. Intended as a central multi-tenant IdP
|
||||||
|
/// with SSO for OKD/OpenShift, OpenBao, Harbor, Grafana, Nextcloud, Ente
|
||||||
|
/// Photos, and others.
|
||||||
|
///
|
||||||
|
/// # Ingress annotations
|
||||||
|
/// No controller-specific ingress annotations are set by default. On
|
||||||
|
/// OKD/OpenShift, the ingress should request TLS so the generated Route is
|
||||||
|
/// edge-terminated instead of HTTP-only. Optional cert-manager annotations are
|
||||||
|
/// included for clusters that have cert-manager installed; clusters without
|
||||||
|
/// cert-manager will ignore them.
|
||||||
|
/// Add or adjust annotations via `values_overrides` depending on your
|
||||||
|
/// distribution:
|
||||||
|
/// - NGINX: `nginx.ingress.kubernetes.io/backend-protocol: GRPC`
|
||||||
|
/// - OpenShift HAProxy: `route.openshift.io/termination: edge`
|
||||||
|
/// - AWS ALB: set `ingress.controller: aws`
|
||||||
|
|
||||||
|
///
|
||||||
|
/// # Database credentials
|
||||||
|
/// CNPG creates a `<cluster>-superuser` secret with key `password`. Because
|
||||||
|
/// `envVarsSecret` injects secret keys verbatim as env var names and the CNPG
|
||||||
|
/// key (`password`) does not match ZITADEL's expected name
|
||||||
|
/// (`ZITADEL_DATABASE_POSTGRES_USER_PASSWORD`), individual `env` entries with
|
||||||
|
/// `valueFrom.secretKeyRef` are used instead. For environments with an
|
||||||
|
/// External Secrets Operator or similar, create a dedicated secret with the
|
||||||
|
/// correct ZITADEL env var names and switch to `envVarsSecret`.
|
||||||
#[derive(Debug, Serialize, Clone)]
|
#[derive(Debug, Serialize, Clone)]
|
||||||
pub struct ZitadelScore {
|
pub struct ZitadelScore {
|
||||||
/// Host used for external access (ingress)
|
/// External domain (e.g. `"auth.example.com"`).
|
||||||
pub host: String,
|
pub host: String,
|
||||||
|
pub zitadel_version: String,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<T: Topology + K8sclient + HelmCommand> Score<T> for ZitadelScore {
|
impl<T: Topology + K8sclient + HelmCommand + PostgreSQL> Score<T> for ZitadelScore {
|
||||||
fn name(&self) -> String {
|
fn name(&self) -> String {
|
||||||
"ZitadelScore".to_string()
|
"ZitadelScore".to_string()
|
||||||
}
|
}
|
||||||
|
|
||||||
#[doc(hidden)]
|
#[doc(hidden)]
|
||||||
fn create_interpret(&self) -> Box<dyn Interpret<T>> {
|
fn create_interpret(&self) -> Box<dyn Interpret<T>> {
|
||||||
// TODO exec pod commands to initialize secret store if not already done
|
Box::new(ZitadelInterpret {
|
||||||
|
host: self.host.clone(),
|
||||||
|
zitadel_version: self.zitadel_version.clone(),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ---------------------------------------------------------------------------
|
||||||
|
|
||||||
|
#[derive(Debug, Clone)]
|
||||||
|
struct ZitadelInterpret {
|
||||||
|
host: String,
|
||||||
|
zitadel_version: String,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[async_trait]
|
||||||
|
impl<T: Topology + K8sclient + HelmCommand + PostgreSQL> Interpret<T> for ZitadelInterpret {
|
||||||
|
async fn execute(
|
||||||
|
&self,
|
||||||
|
inventory: &Inventory,
|
||||||
|
topology: &T,
|
||||||
|
) -> Result<Outcome, InterpretError> {
|
||||||
|
info!(
|
||||||
|
"[Zitadel] Starting full deployment — namespace: '{NAMESPACE}', host: '{}'",
|
||||||
|
self.host
|
||||||
|
);
|
||||||
|
|
||||||
|
info!("Creating namespace {NAMESPACE} if it does not exist");
|
||||||
|
K8sResourceScore::single(
|
||||||
|
Namespace {
|
||||||
|
metadata: ObjectMeta {
|
||||||
|
name: Some(NAMESPACE.to_string()),
|
||||||
|
..Default::default()
|
||||||
|
},
|
||||||
|
..Default::default()
|
||||||
|
},
|
||||||
|
None,
|
||||||
|
)
|
||||||
|
.interpret(inventory, topology)
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
// --- Step 1: PostgreSQL -------------------------------------------
|
||||||
|
|
||||||
|
let pg_config = PostgreSQLConfig {
|
||||||
|
cluster_name: PG_CLUSTER_NAME.to_string(),
|
||||||
|
instances: 2,
|
||||||
|
storage_size: StorageSize::gi(10),
|
||||||
|
role: PostgreSQLClusterRole::Primary,
|
||||||
|
namespace: NAMESPACE.to_string(),
|
||||||
|
};
|
||||||
|
|
||||||
|
debug!(
|
||||||
|
"[Zitadel] Deploying PostgreSQL cluster '{}' — instances: {}, storage: 10Gi, namespace: '{}'",
|
||||||
|
pg_config.cluster_name, pg_config.instances, pg_config.namespace
|
||||||
|
);
|
||||||
|
|
||||||
|
topology.deploy(&pg_config).await.map_err(|e| {
|
||||||
|
let msg = format!(
|
||||||
|
"[Zitadel] PostgreSQL deployment failed for '{}': {e}",
|
||||||
|
pg_config.cluster_name
|
||||||
|
);
|
||||||
|
error!("{msg}");
|
||||||
|
InterpretError::new(msg)
|
||||||
|
})?;
|
||||||
|
|
||||||
|
info!(
|
||||||
|
"[Zitadel] PostgreSQL cluster '{}' deployed",
|
||||||
|
pg_config.cluster_name
|
||||||
|
);
|
||||||
|
|
||||||
|
// --- Step 2: Resolve internal DB endpoint -------------------------
|
||||||
|
|
||||||
|
debug!(
|
||||||
|
"[Zitadel] Resolving internal endpoint for cluster '{}'",
|
||||||
|
pg_config.cluster_name
|
||||||
|
);
|
||||||
|
|
||||||
|
let endpoint = topology.get_endpoint(&pg_config).await.map_err(|e| {
|
||||||
|
let msg = format!(
|
||||||
|
"[Zitadel] Failed to resolve endpoint for cluster '{}': {e}",
|
||||||
|
pg_config.cluster_name
|
||||||
|
);
|
||||||
|
error!("{msg}");
|
||||||
|
InterpretError::new(msg)
|
||||||
|
})?;
|
||||||
|
|
||||||
|
info!(
|
||||||
|
"[Zitadel] DB endpoint resolved — host: '{}', port: {}",
|
||||||
|
endpoint.host, endpoint.port
|
||||||
|
);
|
||||||
|
|
||||||
|
// The CNPG-managed superuser secret contains 'password', 'username',
|
||||||
|
// 'host', 'port', 'dbname', 'uri'. We reference 'password' directly
|
||||||
|
// via env.valueFrom.secretKeyRef because CNPG's key names do not
|
||||||
|
// match ZITADEL's required env var names.
|
||||||
|
let pg_user_secret = format!("{PG_CLUSTER_NAME}-app");
|
||||||
|
let pg_superuser_secret = format!("{PG_CLUSTER_NAME}-superuser");
|
||||||
|
let db_host = &endpoint.host;
|
||||||
|
let db_port = endpoint.port;
|
||||||
let host = &self.host;
|
let host = &self.host;
|
||||||
|
|
||||||
let values_yaml = Some(format!(r#""#));
|
debug!("[Zitadel] DB credentials source — secret: '{pg_user_secret}', key: 'password'");
|
||||||
|
debug!(
|
||||||
|
"[Zitadel] DB credentials source — superuser secret: '{pg_superuser_secret}', key: 'password'"
|
||||||
|
);
|
||||||
|
|
||||||
todo!("This is not complete yet");
|
// Zitadel requires one symbol, one number and more. So let's force it.
|
||||||
|
fn generate_secure_password(length: usize) -> String {
|
||||||
|
const ALPHA_UPPER: &[u8] = b"ABCDEFGHIJKLMNOPQRSTUVWXYZ";
|
||||||
|
const ALPHA_LOWER: &[u8] = b"abcdefghijklmnopqrstuvwxyz";
|
||||||
|
const DIGITS: &[u8] = b"0123456789";
|
||||||
|
const SYMBOLS: &[u8] = b"!@#$%^&*()_+-=[]{}|;:',.<>?/";
|
||||||
|
|
||||||
HelmChartScore {
|
let mut rng = rand::rng();
|
||||||
namespace: Some(NonBlankString::from_str("zitadel").unwrap()),
|
let uniform_alpha_upper = rand::distr::Uniform::new(0, ALPHA_UPPER.len())
|
||||||
|
.expect("Failed to create distribution");
|
||||||
|
let uniform_alpha_lower = rand::distr::Uniform::new(0, ALPHA_LOWER.len())
|
||||||
|
.expect("Failed to create distribution");
|
||||||
|
let uniform_digits =
|
||||||
|
rand::distr::Uniform::new(0, DIGITS.len()).expect("Failed to create distribution");
|
||||||
|
let uniform_symbols =
|
||||||
|
rand::distr::Uniform::new(0, SYMBOLS.len()).expect("Failed to create distribution");
|
||||||
|
|
||||||
|
let mut chars: Vec<char> = Vec::with_capacity(length);
|
||||||
|
|
||||||
|
// Ensure at least one of each: upper, lower, digit, symbol
|
||||||
|
chars.push(ALPHA_UPPER[uniform_alpha_upper.sample(&mut rng)] as char);
|
||||||
|
chars.push(ALPHA_LOWER[uniform_alpha_lower.sample(&mut rng)] as char);
|
||||||
|
chars.push(DIGITS[uniform_digits.sample(&mut rng)] as char);
|
||||||
|
chars.push(SYMBOLS[uniform_symbols.sample(&mut rng)] as char);
|
||||||
|
|
||||||
|
// Fill remaining with random from all categories
|
||||||
|
let all_chars: Vec<u8> = [ALPHA_UPPER, ALPHA_LOWER, DIGITS, SYMBOLS].concat();
|
||||||
|
|
||||||
|
let uniform_all = rand::distr::Uniform::new(0, all_chars.len())
|
||||||
|
.expect("Failed to create distribution");
|
||||||
|
|
||||||
|
for _ in 0..(length - 4) {
|
||||||
|
chars.push(all_chars[uniform_all.sample(&mut rng)] as char);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Shuffle
|
||||||
|
let mut shuffled = chars;
|
||||||
|
shuffled.shuffle(&mut rng);
|
||||||
|
|
||||||
|
return shuffled.iter().collect();
|
||||||
|
}
|
||||||
|
|
||||||
|
let admin_password = generate_secure_password(16);
|
||||||
|
|
||||||
|
// --- Step 3: Create masterkey secret ------------------------------------
|
||||||
|
|
||||||
|
debug!(
|
||||||
|
"[Zitadel] Creating masterkey secret '{}' in namespace '{}'",
|
||||||
|
MASTERKEY_SECRET_NAME, NAMESPACE
|
||||||
|
);
|
||||||
|
|
||||||
|
// Masterkey for symmetric encryption — must be exactly 32 ASCII bytes (alphanumeric only).
|
||||||
|
let masterkey = rng()
|
||||||
|
.sample_iter(&rand::distr::Alphanumeric)
|
||||||
|
.take(32)
|
||||||
|
.map(char::from)
|
||||||
|
.collect::<String>();
|
||||||
|
|
||||||
|
debug!(
|
||||||
|
"[Zitadel] Created masterkey secret '{}' in namespace '{}'",
|
||||||
|
MASTERKEY_SECRET_NAME, NAMESPACE
|
||||||
|
);
|
||||||
|
|
||||||
|
let mut masterkey_data: BTreeMap<String, ByteString> = BTreeMap::new();
|
||||||
|
masterkey_data.insert("masterkey".to_string(), ByteString(masterkey.into()));
|
||||||
|
|
||||||
|
let masterkey_secret = Secret {
|
||||||
|
metadata: ObjectMeta {
|
||||||
|
name: Some(MASTERKEY_SECRET_NAME.to_string()),
|
||||||
|
namespace: Some(NAMESPACE.to_string()),
|
||||||
|
..ObjectMeta::default()
|
||||||
|
},
|
||||||
|
data: Some(masterkey_data),
|
||||||
|
..Secret::default()
|
||||||
|
};
|
||||||
|
|
||||||
|
match topology
|
||||||
|
.k8s_client()
|
||||||
|
.await
|
||||||
|
.map_err(|e| InterpretError::new(format!("Failed to get k8s client : {e}")))?
|
||||||
|
.create(&masterkey_secret, Some(NAMESPACE))
|
||||||
|
.await
|
||||||
|
{
|
||||||
|
Ok(_) => {
|
||||||
|
info!(
|
||||||
|
"[Zitadel] Masterkey secret '{}' created",
|
||||||
|
MASTERKEY_SECRET_NAME
|
||||||
|
);
|
||||||
|
}
|
||||||
|
Err(KubeError::Api(ErrorResponse { code: 409, .. })) => {
|
||||||
|
info!(
|
||||||
|
"[Zitadel] Masterkey secret '{}' already exists, leaving it untouched",
|
||||||
|
MASTERKEY_SECRET_NAME
|
||||||
|
);
|
||||||
|
}
|
||||||
|
Err(other) => {
|
||||||
|
let msg = format!(
|
||||||
|
"[Zitadel] Failed to create masterkey secret '{}': {other}",
|
||||||
|
MASTERKEY_SECRET_NAME
|
||||||
|
);
|
||||||
|
error!("{msg}");
|
||||||
|
return Err(InterpretError::new(msg));
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
debug!(
|
||||||
|
"[Zitadel] Masterkey secret '{}' created successfully",
|
||||||
|
MASTERKEY_SECRET_NAME
|
||||||
|
);
|
||||||
|
|
||||||
|
// --- Step 4: Build Helm values ------------------------------------
|
||||||
|
|
||||||
|
warn!(
|
||||||
|
"[Zitadel] Applying TLS-enabled ingress defaults for OKD/OpenShift. \
|
||||||
|
cert-manager annotations are included as optional hints and are \
|
||||||
|
ignored on clusters without cert-manager."
|
||||||
|
);
|
||||||
|
|
||||||
|
let values_yaml = format!(
|
||||||
|
r#"image:
|
||||||
|
tag: {zitadel_version}
|
||||||
|
zitadel:
|
||||||
|
masterkeySecretName: "{MASTERKEY_SECRET_NAME}"
|
||||||
|
configmapConfig:
|
||||||
|
ExternalDomain: "{host}"
|
||||||
|
ExternalSecure: true
|
||||||
|
FirstInstance:
|
||||||
|
Org:
|
||||||
|
Human:
|
||||||
|
UserName: "admin"
|
||||||
|
Password: "{admin_password}"
|
||||||
|
FirstName: "Zitadel"
|
||||||
|
LastName: "Admin"
|
||||||
|
Email: "admin@zitadel.example.com"
|
||||||
|
PasswordChangeRequired: true
|
||||||
|
TLS:
|
||||||
|
Enabled: false
|
||||||
|
Database:
|
||||||
|
Postgres:
|
||||||
|
Host: "{db_host}"
|
||||||
|
Port: {db_port}
|
||||||
|
Database: zitadel
|
||||||
|
MaxOpenConns: 20
|
||||||
|
MaxIdleConns: 10
|
||||||
|
User:
|
||||||
|
Username: postgres
|
||||||
|
SSL:
|
||||||
|
Mode: require
|
||||||
|
Admin:
|
||||||
|
Username: postgres
|
||||||
|
SSL:
|
||||||
|
Mode: require
|
||||||
|
# Directly import credentials from the postgres secret
|
||||||
|
# TODO : use a less privileged postgres user
|
||||||
|
env:
|
||||||
|
- name: ZITADEL_DATABASE_POSTGRES_USER_USERNAME
|
||||||
|
valueFrom:
|
||||||
|
secretKeyRef:
|
||||||
|
name: "{pg_superuser_secret}"
|
||||||
|
key: user
|
||||||
|
- name: ZITADEL_DATABASE_POSTGRES_USER_PASSWORD
|
||||||
|
valueFrom:
|
||||||
|
secretKeyRef:
|
||||||
|
name: "{pg_superuser_secret}"
|
||||||
|
key: password
|
||||||
|
- name: ZITADEL_DATABASE_POSTGRES_ADMIN_USERNAME
|
||||||
|
valueFrom:
|
||||||
|
secretKeyRef:
|
||||||
|
name: "{pg_superuser_secret}"
|
||||||
|
key: user
|
||||||
|
- name: ZITADEL_DATABASE_POSTGRES_ADMIN_PASSWORD
|
||||||
|
valueFrom:
|
||||||
|
secretKeyRef:
|
||||||
|
name: "{pg_superuser_secret}"
|
||||||
|
key: password
|
||||||
|
# Security context for OpenShift restricted PSA compliance
|
||||||
|
podSecurityContext:
|
||||||
|
runAsNonRoot: true
|
||||||
|
runAsUser: null
|
||||||
|
fsGroup: null
|
||||||
|
seccompProfile:
|
||||||
|
type: RuntimeDefault
|
||||||
|
securityContext:
|
||||||
|
allowPrivilegeEscalation: false
|
||||||
|
capabilities:
|
||||||
|
drop:
|
||||||
|
- ALL
|
||||||
|
runAsNonRoot: true
|
||||||
|
runAsUser: null
|
||||||
|
fsGroup: null
|
||||||
|
seccompProfile:
|
||||||
|
type: RuntimeDefault
|
||||||
|
# Init job security context (runs before main deployment)
|
||||||
|
initJob:
|
||||||
|
podSecurityContext:
|
||||||
|
runAsNonRoot: true
|
||||||
|
runAsUser: null
|
||||||
|
fsGroup: null
|
||||||
|
seccompProfile:
|
||||||
|
type: RuntimeDefault
|
||||||
|
securityContext:
|
||||||
|
allowPrivilegeEscalation: false
|
||||||
|
capabilities:
|
||||||
|
drop:
|
||||||
|
- ALL
|
||||||
|
runAsNonRoot: true
|
||||||
|
runAsUser: null
|
||||||
|
fsGroup: null
|
||||||
|
seccompProfile:
|
||||||
|
type: RuntimeDefault
|
||||||
|
# Setup job security context
|
||||||
|
setupJob:
|
||||||
|
podSecurityContext:
|
||||||
|
runAsNonRoot: true
|
||||||
|
runAsUser: null
|
||||||
|
fsGroup: null
|
||||||
|
seccompProfile:
|
||||||
|
type: RuntimeDefault
|
||||||
|
securityContext:
|
||||||
|
allowPrivilegeEscalation: false
|
||||||
|
capabilities:
|
||||||
|
drop:
|
||||||
|
- ALL
|
||||||
|
runAsNonRoot: true
|
||||||
|
runAsUser: null
|
||||||
|
fsGroup: null
|
||||||
|
seccompProfile:
|
||||||
|
type: RuntimeDefault
|
||||||
|
ingress:
|
||||||
|
enabled: true
|
||||||
|
annotations:
|
||||||
|
cert-manager.io/cluster-issuer: letsencrypt-prod
|
||||||
|
route.openshift.io/termination: edge
|
||||||
|
hosts:
|
||||||
|
- host: "{host}"
|
||||||
|
paths:
|
||||||
|
- path: /
|
||||||
|
pathType: Prefix
|
||||||
|
tls:
|
||||||
|
- hosts:
|
||||||
|
- "{host}"
|
||||||
|
secretName: "{host}-tls"
|
||||||
|
|
||||||
|
login:
|
||||||
|
enabled: true
|
||||||
|
podSecurityContext:
|
||||||
|
runAsNonRoot: true
|
||||||
|
runAsUser: null
|
||||||
|
fsGroup: null
|
||||||
|
seccompProfile:
|
||||||
|
type: RuntimeDefault
|
||||||
|
securityContext:
|
||||||
|
allowPrivilegeEscalation: false
|
||||||
|
capabilities:
|
||||||
|
drop:
|
||||||
|
- ALL
|
||||||
|
runAsNonRoot: true
|
||||||
|
runAsUser: null
|
||||||
|
fsGroup: null
|
||||||
|
seccompProfile:
|
||||||
|
type: RuntimeDefault
|
||||||
|
ingress:
|
||||||
|
enabled: true
|
||||||
|
annotations:
|
||||||
|
cert-manager.io/cluster-issuer: letsencrypt-prod
|
||||||
|
route.openshift.io/termination: edge
|
||||||
|
hosts:
|
||||||
|
- host: "{host}"
|
||||||
|
paths:
|
||||||
|
- path: /ui/v2/login
|
||||||
|
pathType: Prefix
|
||||||
|
tls:
|
||||||
|
- hosts:
|
||||||
|
- "{host}"
|
||||||
|
secretName: "{host}-tls""#,
|
||||||
|
zitadel_version = self.zitadel_version
|
||||||
|
);
|
||||||
|
|
||||||
|
trace!("[Zitadel] Helm values YAML:\n{values_yaml}");
|
||||||
|
|
||||||
|
// --- Step 5: Deploy Helm chart ------------------------------------
|
||||||
|
|
||||||
|
info!(
|
||||||
|
"[Zitadel] Deploying Helm chart 'zitadel/zitadel' as release 'zitadel' in namespace '{NAMESPACE}'"
|
||||||
|
);
|
||||||
|
|
||||||
|
let result = HelmChartScore {
|
||||||
|
namespace: Some(NonBlankString::from_str(NAMESPACE).unwrap()),
|
||||||
release_name: NonBlankString::from_str("zitadel").unwrap(),
|
release_name: NonBlankString::from_str("zitadel").unwrap(),
|
||||||
chart_name: NonBlankString::from_str("zitadel/zitadel").unwrap(),
|
chart_name: NonBlankString::from_str("zitadel/zitadel").unwrap(),
|
||||||
chart_version: None,
|
chart_version: None,
|
||||||
values_overrides: None,
|
values_overrides: None,
|
||||||
values_yaml,
|
values_yaml: Some(values_yaml),
|
||||||
create_namespace: true,
|
create_namespace: true,
|
||||||
install_only: false,
|
install_only: false,
|
||||||
repository: Some(HelmRepository::new(
|
repository: Some(HelmRepository::new(
|
||||||
@@ -46,6 +479,40 @@ impl<T: Topology + K8sclient + HelmCommand> Score<T> for ZitadelScore {
|
|||||||
true,
|
true,
|
||||||
)),
|
)),
|
||||||
}
|
}
|
||||||
.create_interpret()
|
.interpret(inventory, topology)
|
||||||
|
.await;
|
||||||
|
|
||||||
|
match &result {
|
||||||
|
Ok(_) => info!(
|
||||||
|
"[Zitadel] Helm chart deployed successfully\n\n\
|
||||||
|
===== ZITADEL DEPLOYMENT COMPLETE =====\n\
|
||||||
|
Login URL: https://{host}\n\
|
||||||
|
Username: admin@zitadel.{host}\n\
|
||||||
|
Password: {admin_password}\n\n\
|
||||||
|
IMPORTANT: The password is saved in ConfigMap 'zitadel-config-yaml'\n\
|
||||||
|
and must be changed on first login. Save the credentials in a\n\
|
||||||
|
secure location after changing them.\n\
|
||||||
|
========================================="
|
||||||
|
),
|
||||||
|
Err(e) => error!("[Zitadel] Helm chart deployment failed: {e}"),
|
||||||
|
}
|
||||||
|
|
||||||
|
result
|
||||||
|
}
|
||||||
|
|
||||||
|
fn get_name(&self) -> InterpretName {
|
||||||
|
InterpretName::Custom("Zitadel")
|
||||||
|
}
|
||||||
|
|
||||||
|
fn get_version(&self) -> Version {
|
||||||
|
todo!()
|
||||||
|
}
|
||||||
|
|
||||||
|
fn get_status(&self) -> InterpretStatus {
|
||||||
|
todo!()
|
||||||
|
}
|
||||||
|
|
||||||
|
fn get_children(&self) -> Vec<Id> {
|
||||||
|
vec![]
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
Reference in New Issue
Block a user