Compare commits

..

1 Commits

Author SHA1 Message Date
09457b89d8 Add disclaimer demo slide and discord qr code 2025-09-15 13:31:50 -04:00
9 changed files with 59 additions and 346 deletions

Binary file not shown.

After

Width:  |  Height:  |  Size: 537 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 6.5 KiB

File diff suppressed because one or more lines are too long

View File

@@ -2,6 +2,12 @@
theme: uncover
---
# Disclaimer :
<img src="./lego_bloc.png" width="400"/>
---
# Voici l'histoire de Petit Poisson
---
@@ -228,7 +234,7 @@ Demo time
---
# 🎼
### 🎼
Harmony : [https://git.nationtech.io/nationtech/harmony](https://git.nationtech.io/nationtech/harmony)
@@ -238,4 +244,5 @@ Harmony : [https://git.nationtech.io/nationtech/harmony](https://git.nationtech.
LinkedIn : [https://www.linkedin.com/in/jean-gabriel-gill-couture/](https://www.linkedin.com/in/jean-gabriel-gill-couture/)
Courriel : [jg@nationtech.io](mailto:jg@nationtech.io)
Discord : [https://discord.gg/DNR5sbSm4X](https://discord.gg/DNR5sbSm4X)
<img src="./qrcode_discord_nationtech.png" width="120"/>

View File

@@ -8,7 +8,6 @@ use kube::{
api::{Api, AttachParams, DeleteParams, ListParams, Patch, PatchParams, ResourceExt},
config::{KubeConfigOptions, Kubeconfig},
core::ErrorResponse,
error::DiscoveryError,
runtime::reflector::Lookup,
};
use kube::{api::DynamicObject, runtime::conditions};
@@ -22,8 +21,6 @@ use serde_json::{Value, json};
use similar::TextDiff;
use tokio::io::AsyncReadExt;
use crate::interpret::Outcome;
#[derive(new, Clone)]
pub struct K8sClient {
client: Client,
@@ -56,57 +53,6 @@ impl K8sClient {
})
}
pub async fn ensure_deployment(
&self,
resource_name: &str,
resource_namespace: &str,
) -> Result<Outcome, Error> {
match self
.get_deployment(resource_name, Some(&resource_namespace))
.await
{
Ok(Some(deployment)) => {
if let Some(status) = deployment.status {
let ready_count = status.ready_replicas.unwrap_or(0);
if ready_count >= 1 {
Ok(Outcome::success(format!(
"'{}' is ready with {} replica(s).",
resource_name, ready_count
)))
} else {
Err(Error::Discovery(DiscoveryError::MissingResource(format!(
"Deployment '{}' in namespace '{}' has 0 ready replicas",
resource_name, resource_namespace
))))
}
} else {
Err(Error::Api(ErrorResponse {
status: "Failure".to_string(),
message: format!(
"No status found for deployment '{}' in namespace '{}'",
resource_name, resource_namespace
),
reason: "MissingStatus".to_string(),
code: 404,
}))
}
}
Ok(None) => Err(Error::Discovery(DiscoveryError::MissingResource(format!(
"Deployment '{}' not found in namespace '{}'",
resource_name, resource_namespace
)))),
Err(e) => Err(Error::Api(ErrorResponse {
status: "Failure".to_string(),
message: format!(
"Failed to fetch deployment '{}' in namespace '{}': {}",
resource_name, resource_namespace, e
),
reason: "ApiError".to_string(),
code: 500,
})),
}
}
pub async fn get_resource_json_value(
&self,
name: &str,
@@ -144,25 +90,6 @@ impl K8sClient {
Ok(pods.get_opt(name).await?)
}
/// Applies a JSON merge patch to an arbitrary resource identified by its
/// GroupVersionKind.
///
/// When `namespace` is `None` the client's default namespace is used.
pub async fn patch_resource_by_merge(
    &self,
    name: &str,
    namespace: Option<&str>,
    gvk: &GroupVersionKind,
    patch: Value,
) -> Result<(), Error> {
    let api_resource = ApiResource::from_gvk(gvk);
    // Scope the dynamic API to the requested namespace, if any.
    let api: Api<DynamicObject> = match namespace {
        Some(ns) => Api::namespaced_with(self.client.clone(), ns, &api_resource),
        None => Api::default_namespaced_with(self.client.clone(), &api_resource),
    };
    api.patch(name, &PatchParams::default(), &Patch::Merge(&patch))
        .await?;
    Ok(())
}
pub async fn scale_deployment(
&self,
name: &str,

View File

@@ -160,9 +160,6 @@ global:
## Used for ingresses, certificates, SSO, notifications, etc.
domain: {domain}
securityContext:
runAsUser: null
# -- Runtime class name for all components
runtimeClassName: ""
@@ -474,13 +471,6 @@ redis:
# -- Redis name
name: redis
serviceAccount:
create: true
securityContext:
runAsUser: null
## Redis image
image:
# -- Redis repository

View File

@@ -1,3 +1,2 @@
mod helm;
pub mod update_default_okd_ingress_score;
pub use helm::*;

View File

@@ -1,223 +0,0 @@
use std::{
fs::File,
io::Read,
path::{Path, PathBuf},
sync::Arc,
};
use base64::{Engine, prelude::BASE64_STANDARD};
use fqdn::Path;
use harmony_types::id::Id;
use kube::api::GroupVersionKind;
use serde_json::json;
use crate::{
data::Version,
interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
inventory::Inventory,
score::Score,
topology::{K8sclient, Topology, k8s::K8sClient},
};
/// Score that reconfigures the default OKD ingress controller to trust a
/// custom CA and serve a custom TLS certificate.
///
/// NOTE(review): the original declared the path fields as bare `Path`, which
/// is an unsized type and cannot be stored by value — `PathBuf` is the owned
/// equivalent. `Clone` is derived because `create_interpret` clones the score.
#[derive(Clone)]
pub struct UpdateDefaultOkdIngressScore {
    // Name of the ingress operator deployment to verify before patching.
    operator_name: String,
    // Namespace the ingress operator (and TLS secret) live in.
    operator_namespace: String,
    // Name reported in outcomes and written into the cluster Proxy trustedCA.
    ca_name: String,
    // Filesystem locations of the TLS certificate, key, and CA bundle.
    path_to_tls_crt: PathBuf,
    path_to_tls_key: PathBuf,
    path_to_ca_cert: PathBuf,
}
/// Wires the score into the interpret machinery: the interpret owns a clone
/// of the score's configuration.
impl<T: Topology> Score<T> for UpdateDefaultOkdIngressScore {
    fn name(&self) -> String {
        "UpdateDefaultOkdIngressScore".to_string()
    }
    #[doc(hidden)]
    fn create_interpret(&self) -> Box<dyn Interpret<T>> {
        // NOTE(review): `self.clone()` requires `Clone` on the score, which
        // the struct as written does not derive — confirm it is added.
        Box::new(UpdateDefaultOkdIngressInterpret {
            score: self.clone(),
        })
    }
}
/// Interpret that executes an [`UpdateDefaultOkdIngressScore`] against a
/// cluster.
pub struct UpdateDefaultOkdIngressInterpret {
    // Owned copy of the score configuration this interpret acts on.
    score: UpdateDefaultOkdIngressScore,
}
#[async_trait]
impl<T: Topology + K8sclient> Interpret<T> for UpdateDefaultOkdIngressInterpret {
    /// Runs the full ingress reconfiguration sequence:
    /// 1. verify the ingress operator deployment is ready,
    /// 2. publish the CA bundle as a ConfigMap in `openshift-config`,
    /// 3. point the cluster Proxy at the trusted CA,
    /// 4. create the TLS secret in the operator namespace,
    /// 5. patch the default IngressController to use that secret.
    async fn execute(
        &self,
        inventory: &Inventory,
        topology: &T,
    ) -> Result<Outcome, InterpretError> {
        // The inventory is not consulted by this interpret.
        let _ = inventory;
        let client = topology.k8s_client().await?;
        // Kubernetes object names must be DNS-1123 subdomains; the original
        // name "ingress_ca_secret" (underscores) would be rejected by the API.
        let secret_name = "ingress-ca-secret";
        self.ensure_ingress_operator(
            &client,
            &self.score.operator_name,
            &self.score.operator_namespace,
        )
        .await?;
        // Paths are passed by reference — the helpers are expected to take
        // `&Path` (the original moved unsized `Path` fields out of `&self`).
        self.create_ca_cm(&client, &self.score.path_to_ca_cert, &self.score.ca_name)
            .await?;
        self.patch_proxy(&client, &self.score.ca_name).await?;
        self.create_tls_secret(
            &client,
            &self.score.path_to_tls_crt,
            &self.score.path_to_tls_key,
            &self.score.operator_namespace,
            secret_name,
        )
        .await?;
        // Propagate the final step's outcome; the original ended this call
        // with a `;` and fell off the end, returning `()` instead of the
        // declared `Result<Outcome, InterpretError>`.
        self.patch_ingress(&client, &self.score.operator_namespace, secret_name)
            .await
    }
    fn get_name(&self) -> InterpretName {
        InterpretName::Custom("UpdateDefaultOkdIngress")
    }
    fn get_version(&self) -> Version {
        todo!()
    }
    fn get_status(&self) -> InterpretStatus {
        todo!()
    }
    fn get_children(&self) -> Vec<Id> {
        todo!()
    }
}
impl UpdateDefaultOkdIngressInterpret {
async fn ensure_ingress_operator(
&self,
client: &Arc<K8sClient>,
operator_name: &str,
operator_namespace: &str,
) -> Result<Outcome, InterpretError> {
client
.ensure_deployment(operator_name, Some(operator_namespace))
.await?
}
fn open_path(&self, path: Path) -> Result<String, InterpretError> {
let mut file = match File::open(&path) {
Ok(file) => file,
Err(e) => InterpretError::new(format!("Could not open file {}", e)),
};
let s = String::new();
match file.read_to_string(&mut s) {
Ok(s) => Ok(s),
Err(e) => InterpretError::new(format!("Could not read file {}", e)),
}
}
async fn create_ca_cm(
&self,
client: &Arc<K8sClient>,
path_to_ca_cert: Path,
ca_name: &str,
) -> Result<Outcome, InterpretError> {
let ca_bundle = BASE64_STANDARD.encode(self.open_path(path_to_ca_cert).unwrap().as_bytes());
let cm = format!(
r"#
apiVersion: v1
kind: ConfigMap
metadata:
name: custom-ca
namespace: openshift-config
data:
ca-bundle.crt: {ca_bundle}
#"
);
client.apply_yaml(serde_yaml::to_value(&cm), Some("openshift-config")).await?;
Ok(Outcome::success(format!(
"successfully created cm : {} in openshift-config namespace",
ca_name
)))
}
async fn patch_proxy(
&self,
client: &Arc<K8sClient>,
ca_name: &str,
) -> Result<Outcome, InterpretError> {
let gvk = GroupVersionKind {
group: "config.openshift.io".to_string(),
version: "v1".to_string(),
kind: "Proxy".to_string(),
};
let patch = json!({
"spec": {
"trustedCA": {
"name": ca_name
}
}
});
client
.patch_resource_by_merge("cluster", None, &gvk, patch)
.await?;
Ok(Outcome::success(format!(
"successfully merged trusted ca to cluster proxy"
)))
}
async fn create_tls_secret(
&self,
client: &Arc<K8sClient>,
tls_crt: Path,
tls_key: Path,
operator_namespace: &str,
secret_name: &str,
) -> Result<Outcome, InterpretError> {
let base64_tls_crt = BASE64_STANDARD.encode(self.open_path(tls_crt).unwrap().as_bytes());
let base64_tls_key = BASE64_STANDARD.encode(self.open_path(tls_key).unwrap().as_bytes());
let secret = format!(
r#"
apiVersion: v1
kind: Secret
metadata:
name: secret-tls
namespace: {operator_namespace}
type: kubernetes.io/tls
data:
# values are base64 encoded, which obscures them but does NOT provide
# any useful level of confidentiality
# Replace the following values with your own base64-encoded certificate and key.
tls.crt: "{base64_tls_crt}"
tls.key: "{base64_tls_key}"
"#
);
client
.apply_yaml(serde_yaml::to_value(secret), Some(operator_namespace))
.await?;
Ok(Outcome::success(format!(
"successfully created tls secret trusted ca to cluster proxy"
)))
}
async fn patch_ingress(
&self,
client: &Arc<K8sClient>,
operator_namespace: &str,
secret_name: &str,
) -> Result<Outcome, InterpretError> {
let gvk = GroupVersionKind {
group: "operator.openshift.io".to_string(),
version: "v1".to_string(),
kind: "IngressController".to_string(),
};
let patch = json!(
{"spec":{"defaultCertificate": {"name": secret_name}}});
client
.patch_resource_by_merge("default", Some(operator_namespace), &gvk, patch)
.await?;
Ok(Outcome::success(format!("successfully pathed ingress operator to use secret {}", secret_name)))
}
}

View File

@@ -12,6 +12,9 @@ use std::process::Command;
use crate::modules::k8s::ingress::{K8sIngressScore, PathType};
use crate::modules::monitoring::kube_prometheus::crd::grafana_default_dashboard::build_default_dashboard;
use crate::modules::monitoring::kube_prometheus::crd::rhob_alertmanager_config::RHOBObservability;
use crate::modules::monitoring::kube_prometheus::crd::rhob_alertmanagers::{
Alertmanager, AlertmanagerSpec,
};
use crate::modules::monitoring::kube_prometheus::crd::rhob_grafana::{
Grafana, GrafanaDashboard, GrafanaDashboardSpec, GrafanaDatasource, GrafanaDatasourceConfig,
GrafanaDatasourceSpec, GrafanaSpec,
@@ -22,8 +25,13 @@ use crate::modules::monitoring::kube_prometheus::crd::rhob_monitoring_stack::{
use crate::modules::monitoring::kube_prometheus::crd::rhob_prometheus_rules::{
PrometheusRule, PrometheusRuleSpec, RuleGroup,
};
use crate::modules::monitoring::kube_prometheus::crd::rhob_prometheuses::LabelSelector;
use crate::modules::monitoring::kube_prometheus::crd::rhob_prometheuses::{
AlertmanagerEndpoints, LabelSelector, PrometheusSpec, PrometheusSpecAlerting,
};
use crate::modules::monitoring::kube_prometheus::crd::rhob_role::{
build_prom_role, build_prom_rolebinding, build_prom_service_account,
};
use crate::modules::monitoring::kube_prometheus::crd::rhob_service_monitor::{
ServiceMonitor, ServiceMonitorSpec,
};