Compare commits

...

1 Commit

Author SHA1 Message Date
a0c0905c3b wip: zitadel deployment 2026-03-06 10:56:48 -05:00
8 changed files with 426 additions and 16 deletions

12
Cargo.lock generated
View File

@@ -2293,6 +2293,18 @@ dependencies = [
"url",
]
[[package]]
name = "example-zitadel"
version = "0.1.0"
dependencies = [
"harmony",
"harmony_cli",
"harmony_macros",
"harmony_types",
"tokio",
"url",
]
[[package]]
name = "example_validate_ceph_cluster_health"
version = "0.1.0"

View File

@@ -103,6 +103,12 @@ pub struct DrainOptions {
pub timeout: Duration,
}
/// Strategy for writing a Kubernetes resource to the cluster.
///
/// NOTE(review): variant semantics are inferred from the names only — the
/// code that consumes this (`apply_with_strategy`) is still a `todo!`;
/// confirm once the handling is implemented.
pub enum WriteMode {
    /// Create the resource, or update it in place if it already exists (upsert).
    CreateOrUpdate,
    /// Create only; presumably errors if the resource already exists — TODO confirm.
    Create,
    /// Update only; presumably errors if the resource does not exist — TODO confirm.
    Update,
}
impl Default for DrainOptions {
fn default() -> Self {
Self {
@@ -834,6 +840,16 @@ impl K8sClient {
K: Resource + Clone + std::fmt::Debug + DeserializeOwned + serde::Serialize,
<K as kube::Resource>::DynamicType: Default,
{
self.apply_with_strategy(resource, namespace, WriteMode::CreateOrUpdate).await
}
pub async fn apply_with_strategy<K>(&self, resource: &K, namespace: Option<&str>, apply_strategy: WriteMode) -> Result<K, Error>
where
K: Resource + Clone + std::fmt::Debug + DeserializeOwned + serde::Serialize,
<K as kube::Resource>::DynamicType: Default,
{
todo!("Refactoring in progress: Handle the apply_strategy parameter and add utility functions like apply that set it for ease of use (create, update)");
debug!(
"Applying resource {:?} with ns {:?}",
resource.meta().name,

View File

@@ -1,7 +1,6 @@
use async_trait::async_trait;
use crate::{
interpret::Outcome,
inventory::Inventory,
modules::postgresql::{
K8sPostgreSQLScore,

View File

@@ -1,5 +1,5 @@
use async_trait::async_trait;
use k8s_openapi::{NamespaceResourceScope, ResourceScope};
use k8s_openapi::ResourceScope;
use kube::Resource;
use log::info;
use serde::{Serialize, de::DeserializeOwned};
@@ -109,7 +109,7 @@ where
topology
.k8s_client()
.await
.expect("Environment should provide enough information to instanciate a client")
.map_err(|e| InterpretError::new(format!("Failed to get k8s client : {e}")))
.apply_many(&self.score.resource, self.score.namespace.as_deref())
.await?;

View File

@@ -1,3 +1,5 @@
use std::collections::BTreeMap;
use kube::{CustomResource, api::ObjectMeta};
use serde::{Deserialize, Serialize};
@@ -16,6 +18,10 @@ pub struct ClusterSpec {
pub image_name: Option<String>,
pub storage: Storage,
pub bootstrap: Bootstrap,
/// This must be set to None if you want cnpg to generate a superuser secret
#[serde(skip_serializing_if = "Option::is_none")]
pub superuser_secret: Option<BTreeMap<String, String>>,
pub enable_superuser_access: bool,
}
impl Default for Cluster {
@@ -34,6 +40,8 @@ impl Default for ClusterSpec {
image_name: None,
storage: Storage::default(),
bootstrap: Bootstrap::default(),
superuser_secret: None,
enable_superuser_access: false,
}
}
}

View File

@@ -52,8 +52,8 @@ pub struct CloudNativePgOperatorScore {
pub source_namespace: String,
}
impl Default for CloudNativePgOperatorScore {
fn default() -> Self {
impl CloudNativePgOperatorScore {
fn default_openshift() -> Self {
Self {
namespace: "openshift-operators".to_string(),
channel: "stable-v1".to_string(),
@@ -68,7 +68,7 @@ impl CloudNativePgOperatorScore {
pub fn new(namespace: &str) -> Self {
Self {
namespace: namespace.to_string(),
..Default::default()
..Self::default_openshift()
}
}
}

View File

@@ -1,3 +1,5 @@
use std::collections::BTreeMap;
use serde::Serialize;
use crate::interpret::Interpret;
@@ -66,6 +68,11 @@ impl<T: Topology + K8sclient> Score<T> for K8sPostgreSQLScore {
owner: "app".to_string(),
},
},
// superuser_secret: Some(BTreeMap::from([(
// "name".to_string(),
// format!("{}-superuser", self.config.cluster_name.clone()),
// )])),
enable_superuser_access: true,
..ClusterSpec::default()
};

View File

@@ -1,43 +1,387 @@
use base64::{Engine, prelude::BASE64_STANDARD};
use rand::{thread_rng, Rng};
use rand::distributions::Alphanumeric;
use k8s_openapi::api::core::v1::Namespace;
use k8s_openapi::apimachinery::pkg::apis::meta::v1::ObjectMeta;
use k8s_openapi::{ByteString, api::core::v1::Secret};
use std::collections::BTreeMap;
use std::str::FromStr;
use async_trait::async_trait;
use harmony_macros::hurl;
use harmony_types::id::Id;
use harmony_types::storage::StorageSize;
use log::{debug, error, info, trace, warn};
use non_blank_string_rs::NonBlankString;
use serde::Serialize;
use crate::{
interpret::Interpret,
data::Version,
interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
inventory::Inventory,
modules::helm::chart::{HelmChartScore, HelmRepository},
modules::k8s::resource::K8sResourceScore,
modules::postgresql::capability::{PostgreSQL, PostgreSQLClusterRole, PostgreSQLConfig},
score::Score,
topology::{HelmCommand, K8sclient, Topology},
};
const NAMESPACE: &str = "zitadel";
const PG_CLUSTER_NAME: &str = "zitadel-pg";
const MASTERKEY_SECRET_NAME: &str = "zitadel-masterkey";
/// Opinionated Zitadel deployment score.
///
/// Deploys a PostgreSQL cluster (via the [`PostgreSQL`] trait) and the Zitadel
/// Helm chart into the same namespace. Intended as a central multi-tenant IdP
/// with SSO for OKD/OpenShift, OpenBao, Harbor, Grafana, Nextcloud, Ente
/// Photos, and others.
///
/// # Ingress annotations
/// No controller-specific ingress annotations are set. The Zitadel service
/// already carries the Traefik h2c annotation for k3s/k3d by default.
/// Add annotations via `values_overrides` depending on your distribution:
/// - NGINX: `nginx.ingress.kubernetes.io/backend-protocol: GRPC`
/// - OpenShift HAProxy: `haproxy.router.openshift.io/*` or use OpenShift Routes
/// - AWS ALB: set `ingress.controller: aws`
///
/// # Database credentials
/// CNPG creates a `<cluster>-superuser` secret with key `password`. Because
/// `envVarsSecret` injects secret keys verbatim as env var names and the CNPG
/// key (`password`) does not match ZITADEL's expected name
/// (`ZITADEL_DATABASE_POSTGRES_USER_PASSWORD`), individual `env` entries with
/// `valueFrom.secretKeyRef` are used instead. For environments with an
/// External Secrets Operator or similar, create a dedicated secret with the
/// correct ZITADEL env var names and switch to `envVarsSecret`.
#[derive(Debug, Serialize, Clone)]
pub struct ZitadelScore {
    /// External domain used for ingress access (e.g. `"auth.example.com"`).
    pub host: String,
}
impl<T: Topology + K8sclient + HelmCommand> Score<T> for ZitadelScore {
impl<T: Topology + K8sclient + HelmCommand + PostgreSQL> Score<T> for ZitadelScore {
fn name(&self) -> String {
"ZitadelScore".to_string()
}
#[doc(hidden)]
fn create_interpret(&self) -> Box<dyn Interpret<T>> {
// TODO exec pod commands to initialize secret store if not already done
Box::new(ZitadelInterpret {
host: self.host.clone(),
})
}
}
// ---------------------------------------------------------------------------
/// Interpret backing `ZitadelScore`: deploys a PostgreSQL cluster, a
/// masterkey secret, and the Zitadel Helm chart into the `zitadel` namespace.
#[derive(Debug, Clone)]
struct ZitadelInterpret {
    /// External domain used for ingress; copied from `ZitadelScore::host`.
    host: String,
}
#[async_trait]
impl<T: Topology + K8sclient + HelmCommand + PostgreSQL> Interpret<T> for ZitadelInterpret {
async fn execute(
&self,
inventory: &Inventory,
topology: &T,
) -> Result<Outcome, InterpretError> {
info!(
"[Zitadel] Starting full deployment — namespace: '{NAMESPACE}', host: '{}'",
self.host
);
info!("Creating namespace {NAMESPACE} if it does not exist");
K8sResourceScore::single(
Namespace {
metadata: ObjectMeta {
name: Some(NAMESPACE.to_string()),
..Default::default()
},
..Default::default()
},
None,
)
.interpret(inventory, topology)
.await?;
// --- Step 1: PostgreSQL -------------------------------------------
let pg_config = PostgreSQLConfig {
cluster_name: PG_CLUSTER_NAME.to_string(),
instances: 2,
storage_size: StorageSize::gi(10),
role: PostgreSQLClusterRole::Primary,
namespace: NAMESPACE.to_string(),
};
debug!(
"[Zitadel] Deploying PostgreSQL cluster '{}' — instances: {}, storage: 10Gi, namespace: '{}'",
pg_config.cluster_name, pg_config.instances, pg_config.namespace
);
topology.deploy(&pg_config).await.map_err(|e| {
let msg = format!(
"[Zitadel] PostgreSQL deployment failed for '{}': {e}",
pg_config.cluster_name
);
error!("{msg}");
InterpretError::new(msg)
})?;
info!(
"[Zitadel] PostgreSQL cluster '{}' deployed",
pg_config.cluster_name
);
// --- Step 2: Resolve internal DB endpoint -------------------------
debug!(
"[Zitadel] Resolving internal endpoint for cluster '{}'",
pg_config.cluster_name
);
let endpoint = topology.get_endpoint(&pg_config).await.map_err(|e| {
let msg = format!(
"[Zitadel] Failed to resolve endpoint for cluster '{}': {e}",
pg_config.cluster_name
);
error!("{msg}");
InterpretError::new(msg)
})?;
info!(
"[Zitadel] DB endpoint resolved — host: '{}', port: {}",
endpoint.host, endpoint.port
);
// The CNPG-managed superuser secret contains 'password', 'username',
// 'host', 'port', 'dbname', 'uri'. We reference 'password' directly
// via env.valueFrom.secretKeyRef because CNPG's key names do not
// match ZITADEL's required env var names.
let pg_user_secret = format!("{PG_CLUSTER_NAME}-app");
let pg_superuser_secret = format!("{PG_CLUSTER_NAME}-superuser");
let db_host = &endpoint.host;
let db_port = endpoint.port;
let host = &self.host;
let values_yaml = Some(format!(r#""#));
debug!(
"[Zitadel] DB credentials source — secret: '{pg_user_secret}', key: 'password'"
);
debug!(
"[Zitadel] DB credentials source — superuser secret: '{pg_superuser_secret}', key: 'password'"
);
todo!("This is not complete yet");
// --- Step 3: Create masterkey secret ------------------------------------
HelmChartScore {
namespace: Some(NonBlankString::from_str("zitadel").unwrap()),
debug!(
"[Zitadel] Creating masterkey secret '{}' in namespace '{}'",
MASTERKEY_SECRET_NAME, NAMESPACE
);
// Masterkey for symmetric encryption — must be exactly 32 ASCII bytes.
let masterkey: String = thread_rng()
.sample_iter(&Alphanumeric)
.take(32)
.map(char::from)
.collect();
let masterkey_bytes = BASE64_STANDARD.encode(&masterkey);
let mut masterkey_data: BTreeMap<String, ByteString> = BTreeMap::new();
masterkey_data.insert("masterkey".to_string(), ByteString(masterkey_bytes.into()));
let masterkey_secret = Secret {
metadata: ObjectMeta {
name: Some(MASTERKEY_SECRET_NAME.to_string()),
namespace: Some(NAMESPACE.to_string()),
..ObjectMeta::default()
},
data: Some(masterkey_data),
..Secret::default()
};
topology
.k8s_client()
.await
.map_err(|e| InterpretError::new(format!("Failed to get k8s client : {e}")))
.create(masterkey_secret)
.await?;
K8sResourceScore::single(masterkey_secret, Some(NAMESPACE.to_string()))
.interpret(inventory, topology)
.await
.map_err(|e| {
let msg = format!("[Zitadel] Failed to create masterkey secret: {e}");
error!("{msg}");
InterpretError::new(msg)
})?;
info!(
"[Zitadel] Masterkey secret '{}' created",
MASTERKEY_SECRET_NAME
);
// --- Step 4: Build Helm values ------------------------------------
warn!(
"[Zitadel] No ingress controller annotations are set. \
Add controller-specific annotations for your distribution: \
NGINX → 'nginx.ingress.kubernetes.io/backend-protocol: GRPC'; \
OpenShift HAProxy → 'haproxy.router.openshift.io/*' or use Routes; \
AWS ALB → set ingress.controller=aws."
);
let values_yaml = format!(
r#"zitadel:
masterkeySecretName: "{MASTERKEY_SECRET_NAME}"
configmapConfig:
ExternalDomain: "{host}"
ExternalSecure: true
TLS:
Enabled: false
Database:
Postgres:
Host: "{db_host}"
Port: {db_port}
Database: zitadel
MaxOpenConns: 20
MaxIdleConns: 10
User:
Username: postgres
SSL:
Mode: require
Admin:
Username: postgres
SSL:
Mode: require
# Directly import credentials from the postgres secret
# TODO : use a less privileged postgres user
env:
- name: ZITADEL_DATABASE_POSTGRES_USER_USERNAME
valueFrom:
secretKeyRef:
name: "{pg_superuser_secret}"
key: user
- name: ZITADEL_DATABASE_POSTGRES_USER_PASSWORD
valueFrom:
secretKeyRef:
name: "{pg_superuser_secret}"
key: password
- name: ZITADEL_DATABASE_POSTGRES_ADMIN_USERNAME
valueFrom:
secretKeyRef:
name: "{pg_superuser_secret}"
key: user
- name: ZITADEL_DATABASE_POSTGRES_ADMIN_PASSWORD
valueFrom:
secretKeyRef:
name: "{pg_superuser_secret}"
key: password
# Security context for OpenShift restricted PSA compliance
podSecurityContext:
runAsNonRoot: true
runAsUser: null
fsGroup: null
seccompProfile:
type: RuntimeDefault
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
runAsNonRoot: true
runAsUser: null
fsGroup: null
seccompProfile:
type: RuntimeDefault
# Init job security context (runs before main deployment)
initJob:
podSecurityContext:
runAsNonRoot: true
runAsUser: null
fsGroup: null
seccompProfile:
type: RuntimeDefault
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
runAsNonRoot: true
runAsUser: null
fsGroup: null
seccompProfile:
type: RuntimeDefault
# Setup job security context
setupJob:
podSecurityContext:
runAsNonRoot: true
runAsUser: null
fsGroup: null
seccompProfile:
type: RuntimeDefault
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
runAsNonRoot: true
runAsUser: null
fsGroup: null
seccompProfile:
type: RuntimeDefault
ingress:
enabled: true
annotations: {{}}
hosts:
- host: "{host}"
paths:
- path: /
pathType: Prefix
login:
enabled: true
podSecurityContext:
runAsNonRoot: true
runAsUser: null
fsGroup: null
seccompProfile:
type: RuntimeDefault
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
runAsNonRoot: true
runAsUser: null
fsGroup: null
seccompProfile:
type: RuntimeDefault
ingress:
enabled: true
annotations: {{}}
hosts:
- host: "{host}"
paths:
- path: /ui/v2/login
pathType: Prefix"#
);
trace!("[Zitadel] Helm values YAML:\n{values_yaml}");
// --- Step 5: Deploy Helm chart ------------------------------------
info!(
"[Zitadel] Deploying Helm chart 'zitadel/zitadel' as release 'zitadel' in namespace '{NAMESPACE}'"
);
let result = HelmChartScore {
namespace: Some(NonBlankString::from_str(NAMESPACE).unwrap()),
release_name: NonBlankString::from_str("zitadel").unwrap(),
chart_name: NonBlankString::from_str("zitadel/zitadel").unwrap(),
chart_version: None,
values_overrides: None,
values_yaml,
values_yaml: Some(values_yaml),
create_namespace: true,
install_only: false,
repository: Some(HelmRepository::new(
@@ -46,6 +390,30 @@ impl<T: Topology + K8sclient + HelmCommand> Score<T> for ZitadelScore {
true,
)),
}
.create_interpret()
.interpret(inventory, topology)
.await;
match &result {
Ok(_) => info!("[Zitadel] Helm chart deployed successfully"),
Err(e) => error!("[Zitadel] Helm chart deployment failed: {e}"),
}
result
}
/// Human-readable identifier for this interpret.
fn get_name(&self) -> InterpretName {
    InterpretName::Custom("Zitadel")
}
/// Version of this interpret.
///
/// NOTE(review): still a `todo!` stub — calling this panics at runtime.
/// Implement before any caller queries the version.
fn get_version(&self) -> Version {
    todo!()
}
/// Current status of this interpret.
///
/// NOTE(review): still a `todo!` stub — calling this panics at runtime.
/// Implement before any caller polls the status.
fn get_status(&self) -> InterpretStatus {
    todo!()
}
/// Ids of child interprets; this interpret has none.
fn get_children(&self) -> Vec<Id> {
    vec![]
}
}