diff --git a/Cargo.lock b/Cargo.lock
index 63c8897..a2d8c40 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -1070,6 +1070,21 @@ dependencies = [
  "url",
 ]
 
+[[package]]
+name = "example-tenant"
+version = "0.1.0"
+dependencies = [
+ "cidr",
+ "env_logger",
+ "harmony",
+ "harmony_cli",
+ "harmony_macros",
+ "harmony_types",
+ "log",
+ "tokio",
+ "url",
+]
+
 [[package]]
 name = "example-tui"
 version = "0.1.0"
@@ -1409,12 +1424,14 @@ dependencies = [
  "derive-new",
  "directories",
  "dockerfile_builder",
+ "dyn-clone",
  "email_address",
  "env_logger",
  "fqdn",
  "harmony_macros",
  "harmony_types",
  "helm-wrapper-rs",
+ "hex",
  "http 1.3.1",
  "inquire",
  "k3d-rs",
@@ -1426,6 +1443,7 @@ dependencies = [
  "non-blank-string-rs",
  "opnsense-config",
  "opnsense-config-xml",
+ "rand 0.9.1",
  "reqwest 0.11.27",
  "russh",
  "rust-ipmi",
@@ -1550,6 +1568,12 @@ version = "0.3.9"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "d231dfb89cfffdbc30e7fc41579ed6066ad03abda9e567ccafae602b97ec5024"
 
+[[package]]
+name = "hex"
+version = "0.4.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70"
+
 [[package]]
 name = "hex-literal"
 version = "0.4.1"
diff --git a/adr/core-abstractions/main_context_prompt.md b/adr/003-abstractions/main_context_prompt.md
similarity index 100%
rename from adr/core-abstractions/main_context_prompt.md
rename to adr/003-abstractions/main_context_prompt.md
diff --git a/adr/core-abstractions/topology/Cargo.toml b/adr/003-abstractions/topology/Cargo.toml
similarity index 100%
rename from adr/core-abstractions/topology/Cargo.toml
rename to adr/003-abstractions/topology/Cargo.toml
diff --git a/adr/core-abstractions/topology/src/main.rs b/adr/003-abstractions/topology/src/main.rs
similarity index 100%
rename from adr/core-abstractions/topology/src/main.rs
rename to adr/003-abstractions/topology/src/main.rs
diff --git a/adr/core-abstractions/topology/src/main_claude37_2.rs b/adr/003-abstractions/topology/src/main_claude37_2.rs
similarity index 100%
rename from adr/core-abstractions/topology/src/main_claude37_2.rs
rename to adr/003-abstractions/topology/src/main_claude37_2.rs
diff --git a/adr/core-abstractions/topology/src/main_claudev1.rs b/adr/003-abstractions/topology/src/main_claudev1.rs
similarity index 100%
rename from adr/core-abstractions/topology/src/main_claudev1.rs
rename to adr/003-abstractions/topology/src/main_claudev1.rs
diff --git a/adr/core-abstractions/topology/src/main_gemini25pro.rs b/adr/003-abstractions/topology/src/main_gemini25pro.rs
similarity index 100%
rename from adr/core-abstractions/topology/src/main_gemini25pro.rs
rename to adr/003-abstractions/topology/src/main_gemini25pro.rs
diff --git a/adr/core-abstractions/topology/src/main_geminifail.rs b/adr/003-abstractions/topology/src/main_geminifail.rs
similarity index 100%
rename from adr/core-abstractions/topology/src/main_geminifail.rs
rename to adr/003-abstractions/topology/src/main_geminifail.rs
diff --git a/adr/core-abstractions/topology/src/main_right.rs b/adr/003-abstractions/topology/src/main_right.rs
similarity index 100%
rename from adr/core-abstractions/topology/src/main_right.rs
rename to adr/003-abstractions/topology/src/main_right.rs
diff --git a/adr/core-abstractions/topology/src/main_v1.rs b/adr/003-abstractions/topology/src/main_v1.rs
similarity index 100%
rename from adr/core-abstractions/topology/src/main_v1.rs
rename to adr/003-abstractions/topology/src/main_v1.rs
diff --git a/adr/core-abstractions/topology2/Cargo.toml b/adr/003-abstractions/topology2/Cargo.toml
similarity index 100%
rename from adr/core-abstractions/topology2/Cargo.toml
rename to adr/003-abstractions/topology2/Cargo.toml
diff --git a/adr/core-abstractions/topology2/src/main.rs b/adr/003-abstractions/topology2/src/main.rs
similarity index 100%
rename from adr/core-abstractions/topology2/src/main.rs
rename to adr/003-abstractions/topology2/src/main.rs
diff --git a/adr/core-abstractions/topology2/src/main_capabilities.rs b/adr/003-abstractions/topology2/src/main_capabilities.rs
similarity index 100%
rename from adr/core-abstractions/topology2/src/main_capabilities.rs
rename to adr/003-abstractions/topology2/src/main_capabilities.rs
diff --git a/adr/core-abstractions/topology2/src/main_v1.rs b/adr/003-abstractions/topology2/src/main_v1.rs
similarity index 100%
rename from adr/core-abstractions/topology2/src/main_v1.rs
rename to adr/003-abstractions/topology2/src/main_v1.rs
diff --git a/adr/core-abstractions/topology2/src/main_v2.rs b/adr/003-abstractions/topology2/src/main_v2.rs
similarity index 100%
rename from adr/core-abstractions/topology2/src/main_v2.rs
rename to adr/003-abstractions/topology2/src/main_v2.rs
diff --git a/adr/core-abstractions/topology2/src/main_v4.rs b/adr/003-abstractions/topology2/src/main_v4.rs
similarity index 100%
rename from adr/core-abstractions/topology2/src/main_v4.rs
rename to adr/003-abstractions/topology2/src/main_v4.rs
diff --git a/adr/011-multi-tenant-cluster.md b/adr/011-multi-tenant-cluster.md
index 73cd824..88fb0ea 100644
--- a/adr/011-multi-tenant-cluster.md
+++ b/adr/011-multi-tenant-cluster.md
@@ -137,8 +137,9 @@ Our approach addresses both customer and team multi-tenancy requirements:
 ### Implementation Roadmap
 1. **Phase 1**: Implement VPN access and manual tenant provisioning
 2. **Phase 2**: Deploy TenantScore automation for namespace, RBAC, and NetworkPolicy management
-3. **Phase 3**: Integrate Keycloak for centralized identity management
-4. **Phase 4**: Add advanced monitoring and per-tenant observability
+3. **Phase 3**: Audit for privilege-escalation paths from pods and other weaknesses, and enforce security policies on pod runtimes
+4. **Phase 4**: Integrate Keycloak for centralized identity management
+5. **Phase 5**: Add advanced monitoring and per-tenant observability
 
 ### TenantScore Structure Preview
 ```rust
diff --git a/adr/011-tenant/NetworkPolicy.yaml b/adr/011-tenant/NetworkPolicy.yaml
new file mode 100644
index 0000000..5bb1c71
--- /dev/null
+++ b/adr/011-tenant/NetworkPolicy.yaml
@@ -0,0 +1,41 @@
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+  name: tenant-isolation-policy
+  namespace: testtenant
+spec:
+  podSelector: {} # Selects all pods in the namespace
+  policyTypes:
+    - Ingress
+    - Egress
+  ingress:
+    - from:
+        - podSelector: {} # Allow from all pods in the same namespace
+  egress:
+    - to:
+        - podSelector: {} # Allow to all pods in the same namespace
+    - to:
+        - podSelector: {}
+          namespaceSelector:
+            matchLabels:
+              kubernetes.io/metadata.name: openshift-dns # Target the openshift-dns namespace
+      # Note: opening only port 53 is not enough; we will have to dig deeper into this one eventually
+      # ports:
+      #   - protocol: UDP
+      #     port: 53
+      #   - protocol: TCP
+      #     port: 53
+    # Allow egress to the public internet only
+    - to:
+        - ipBlock:
+            cidr: 0.0.0.0/0
+            except:
+              - 10.0.0.0/8 # RFC1918
+              - 172.16.0.0/12 # RFC1918
+              - 192.168.0.0/16 # RFC1918
+              - 169.254.0.0/16 # Link-local
+              - 127.0.0.0/8 # Loopback
+              - 224.0.0.0/4 # Multicast
+              - 240.0.0.0/4 # Reserved
+              - 100.64.0.0/10 # Carrier-grade NAT
+              - 0.0.0.0/8 # Reserved
diff --git a/adr/011-tenant/TestDeployment.yaml b/adr/011-tenant/TestDeployment.yaml
new file mode 100644
index 0000000..a075ba8
--- /dev/null
+++ b/adr/011-tenant/TestDeployment.yaml
@@ -0,0 +1,95 @@
+apiVersion: v1
+kind: Namespace
+metadata:
+  name: testtenant
+---
+apiVersion: v1
+kind: Namespace
+metadata:
+  name: testtenant2
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: test-web
+  namespace: testtenant
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: test-web
+  template:
+    metadata:
+      labels:
+        app: test-web
+    spec:
+      containers:
+        - name: nginx
+          image: nginxinc/nginx-unprivileged
+          ports:
+            - containerPort: 8080
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: test-web
+  namespace: testtenant
+spec:
+  selector:
+    app: test-web
+  ports:
+    - port: 80
+      targetPort: 8080
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: test-client
+  namespace: testtenant
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: test-client
+  template:
+    metadata:
+      labels:
+        app: test-client
+    spec:
+      containers:
+        - name: curl
+          image: curlimages/curl:latest
+          command: ["/bin/sh", "-c", "sleep 3600"]
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: test-web
+  namespace: testtenant2
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: test-web
+  template:
+    metadata:
+      labels:
+        app: test-web
+    spec:
+      containers:
+        - name: nginx
+          image: nginxinc/nginx-unprivileged
+          ports:
+            - containerPort: 8080
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: test-web
+  namespace: testtenant2
+spec:
+  selector:
+    app: test-web
+  ports:
+    - port: 80
+      targetPort: 8080
"../../harmony_macros" } +log = { workspace = true } +env_logger = { workspace = true } +url = { workspace = true } diff --git a/examples/tenant/src/main.rs b/examples/tenant/src/main.rs new file mode 100644 index 0000000..a389ac6 --- /dev/null +++ b/examples/tenant/src/main.rs @@ -0,0 +1,41 @@ +use harmony::{ + data::Id, + inventory::Inventory, + maestro::Maestro, + modules::tenant::TenantScore, + topology::{K8sAnywhereTopology, tenant::TenantConfig}, +}; + +#[tokio::main] +async fn main() { + let tenant = TenantScore { + config: TenantConfig { + id: Id::default(), + name: "TestTenant".to_string(), + ..Default::default() + }, + }; + + let mut maestro = Maestro::::initialize( + Inventory::autoload(), + K8sAnywhereTopology::new(), + ) + .await + .unwrap(); + + maestro.register_all(vec![Box::new(tenant)]); + harmony_cli::init(maestro, None).await.unwrap(); +} + +// TODO write tests +// - Create Tenant with default config mostly, make sure namespace is created +// - deploy sample client/server app with nginx unprivileged and a service +// - exec in the client pod and validate the following +// - can reach internet +// - can reach server pod +// - can resolve dns queries to internet +// - can resolve dns queries to services +// - cannot reach services and pods in other namespaces +// - Create Tenant with specific cpu/ram/storage requests / limits and make sure they are enforced by trying to +// deploy a pod with lower requests/limits (accepted) and higher requests/limits (rejected) +// - Create TenantCredentials and make sure they give only access to the correct tenant diff --git a/harmony/Cargo.toml b/harmony/Cargo.toml index fcf69cf..b98ec49 100644 --- a/harmony/Cargo.toml +++ b/harmony/Cargo.toml @@ -6,6 +6,8 @@ readme.workspace = true license.workspace = true [dependencies] +rand = "0.9" +hex = "0.4" libredfish = "0.1.1" reqwest = { version = "0.11", features = ["blocking", "json"] } russh = "0.45.0" diff --git a/harmony/src/domain/data/id.rs b/harmony/src/domain/data/id.rs index e215eb4..2950324 100644 --- a/harmony/src/domain/data/id.rs +++ b/harmony/src/domain/data/id.rs @@ -1,5 +1,23 @@ +use rand::distr::Alphanumeric; +use rand::distr::SampleString; +use std::time::SystemTime; +use std::time::UNIX_EPOCH; + use serde::{Deserialize, Serialize}; +/// A unique identifier designed for ease of use. +/// +/// You can pass it any String to use and Id, or you can use the default format with `Id::default()` +/// +/// The default format looks like this +/// +/// `462d4c_g2COgai` +/// +/// The first part is the unix timesamp in hexadecimal which makes Id easily sorted by creation time. +/// Second part is a serie of 7 random characters. 
diff --git a/harmony/Cargo.toml b/harmony/Cargo.toml
index fcf69cf..b98ec49 100644
--- a/harmony/Cargo.toml
+++ b/harmony/Cargo.toml
@@ -6,6 +6,8 @@ readme.workspace = true
 license.workspace = true
 
 [dependencies]
+rand = "0.9"
+hex = "0.4"
 libredfish = "0.1.1"
 reqwest = { version = "0.11", features = ["blocking", "json"] }
 russh = "0.45.0"
diff --git a/harmony/src/domain/data/id.rs b/harmony/src/domain/data/id.rs
index e215eb4..2950324 100644
--- a/harmony/src/domain/data/id.rs
+++ b/harmony/src/domain/data/id.rs
@@ -1,5 +1,23 @@
+use rand::distr::Alphanumeric;
+use rand::distr::SampleString;
+use std::time::SystemTime;
+use std::time::UNIX_EPOCH;
+
 use serde::{Deserialize, Serialize};
 
+/// A unique identifier designed for ease of use.
+///
+/// You can pass it any String to use as an Id, or you can use the default format with `Id::default()`.
+///
+/// The default format looks like this:
+///
+/// `462d4c_g2COgai`
+///
+/// The first part is the Unix timestamp in hexadecimal (masked to 24 bits, so the prefix wraps roughly every 194 days), which keeps Ids sortable by creation time.
+/// The second part is a series of 7 random characters.
+///
+/// **It is not meant to be very secure or unique**; it is suitable for generating up to 10,000 items per
+/// second with a reasonable collision rate of 0.000014%, as calculated by this calculator: https://kevingal.com/apps/collision.html
 #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
 pub struct Id {
     value: String,
@@ -16,3 +34,20 @@ impl std::fmt::Display for Id {
         f.write_str(&self.value)
     }
 }
+
+impl Default for Id {
+    fn default() -> Self {
+        let start = SystemTime::now();
+        let since_the_epoch = start
+            .duration_since(UNIX_EPOCH)
+            .expect("Time went backwards");
+        let timestamp = since_the_epoch.as_secs();
+
+        let hex_timestamp = format!("{:x}", timestamp & 0xffffff);
+
+        let random_part: String = Alphanumeric.sample_string(&mut rand::rng(), 7);
+
+        let value = format!("{}_{}", hex_timestamp, random_part);
+        Self { value }
+    }
+}
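A standalone illustration of the ordering property the doc comment claims, and its limit (the helper below is hypothetical, not part of the diff):

```rust
// Same scheme as Id::default(): 24-bit hex timestamp, then a suffix that
// stands in for the random part.
fn sample_id(unix_secs: u64, suffix: &str) -> String {
    format!("{:x}_{}", unix_secs & 0xffffff, suffix)
}

fn main() {
    let earlier = sample_id(0x462d4c, "g2COgai");
    let later = sample_id(0x462d4d, "aaaaaaa");
    // Lexicographic order matches creation order as long as the masked
    // timestamps have the same number of hex digits (no zero-padding is done).
    assert!(earlier < later);
    println!("{earlier} < {later}");
}
```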
diff --git a/harmony/src/domain/topology/k8s.rs b/harmony/src/domain/topology/k8s.rs
index 08868e8..cfaae1f 100644
--- a/harmony/src/domain/topology/k8s.rs
+++ b/harmony/src/domain/topology/k8s.rs
@@ -1,11 +1,11 @@
 use derive_new::new;
-use k8s_openapi::NamespaceResourceScope;
+use k8s_openapi::{ClusterResourceScope, NamespaceResourceScope};
 use kube::{
     Api, Client, Config, Error, Resource,
     api::PostParams,
     config::{KubeConfigOptions, Kubeconfig},
 };
-use log::error;
+use log::{debug, error, trace};
 use serde::de::DeserializeOwned;
 
 #[derive(new)]
@@ -20,52 +20,35 @@ impl K8sClient {
         })
     }
 
-    pub async fn apply_all<
-        K: Resource
-            + std::fmt::Debug
-            + Sync
-            + DeserializeOwned
-            + Default
-            + serde::Serialize
-            + Clone,
-    >(
-        &self,
-        resource: &Vec<K>,
-    ) -> Result<Vec<K>, kube::Error>
+    pub async fn apply<K>(&self, resource: &K, ns: Option<&str>) -> Result<K, kube::Error>
     where
+        K: Resource + Clone + std::fmt::Debug + DeserializeOwned + serde::Serialize,
+        <K as Resource>::Scope: ApplyStrategy<K>,
         <K as Resource>::DynamicType: Default,
     {
-        let mut result = vec![];
-        for r in resource.iter() {
-            let api: Api<K> = Api::all(self.client.clone());
-            result.push(api.create(&PostParams::default(), &r).await?);
-        }
-        Ok(result)
+        debug!(
+            "Applying resource {:?} with ns {:?}",
+            resource.meta().name,
+            ns
+        );
+        trace!("{:#?}", serde_json::to_string(resource));
+
+        let api: Api<K> = <<K as Resource>::Scope as ApplyStrategy<K>>::get_api(&self.client, ns);
+        api.create(&PostParams::default(), &resource).await
     }
 
-    pub async fn apply_namespaced<K>(
-        &self,
-        resource: &Vec<K>,
-        ns: Option<&str>,
-    ) -> Result<Vec<K>, Error>
+    pub async fn apply_many<K>(&self, resource: &Vec<K>, ns: Option<&str>) -> Result<Vec<K>, Error>
     where
-        K: Resource
-            + Clone
-            + std::fmt::Debug
-            + DeserializeOwned
-            + serde::Serialize
-            + Default,
+        K: Resource + Clone + std::fmt::Debug + DeserializeOwned + serde::Serialize,
+        <K as Resource>::Scope: ApplyStrategy<K>,
         <K as Resource>::DynamicType: Default,
     {
-        let mut resources = Vec::new();
+        let mut result = Vec::new();
         for r in resource.iter() {
-            let api: Api<K> = match ns {
-                Some(ns) => Api::namespaced(self.client.clone(), ns),
-                None => Api::default_namespaced(self.client.clone()),
-            };
-            resources.push(api.create(&PostParams::default(), &r).await?);
+            result.push(self.apply(r, ns).await?);
         }
-        Ok(resources)
+
+        Ok(result)
     }
 
     pub(crate) async fn from_kubeconfig(path: &str) -> Option<K8sClient> {
@@ -86,3 +69,35 @@ impl K8sClient {
         ))
     }
 }
+
+pub trait ApplyStrategy<K: Resource> {
+    fn get_api(client: &Client, ns: Option<&str>) -> Api<K>;
+}
+
+/// Implementation for all resources that are cluster-scoped.
+/// It will always use `Api::all` and ignore the namespace parameter.
+impl<K> ApplyStrategy<K> for ClusterResourceScope
+where
+    K: Resource<Scope = ClusterResourceScope>,
+    <K as Resource>::DynamicType: Default,
+{
+    fn get_api(client: &Client, _ns: Option<&str>) -> Api<K> {
+        Api::all(client.clone())
+    }
+}
+
+/// Implementation for all resources that are namespace-scoped.
+/// It will use `Api::namespaced` if a namespace is provided, otherwise
+/// it falls back to the default namespace configured in your kubeconfig.
+impl<K> ApplyStrategy<K> for NamespaceResourceScope
+where
+    K: Resource<Scope = NamespaceResourceScope>,
+    <K as Resource>::DynamicType: Default,
+{
+    fn get_api(client: &Client, ns: Option<&str>) -> Api<K> {
+        match ns {
+            Some(ns) => Api::namespaced(client.clone(), ns),
+            None => Api::default_namespaced(client.clone()),
+        }
+    }
+}
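With scope dispatch in place, cluster-scoped and namespace-scoped resources go through the same `apply` call. A sketch of a hypothetical caller (the generic bounds above were reconstructed from a damaged source, so treat exact signatures as approximate):

```rust
use k8s_openapi::api::core::v1::{ConfigMap, Namespace};

// `client` is the K8sClient from this diff.
async fn demo(client: &K8sClient) -> Result<(), kube::Error> {
    let mut ns = Namespace::default();
    ns.metadata.name = Some("testtenant".to_string());
    // Namespace is cluster-scoped: dispatches to Api::all, the ns argument is ignored.
    client.apply(&ns, None).await?;

    let mut cm = ConfigMap::default();
    cm.metadata.name = Some("tenant-settings".to_string());
    // ConfigMap is namespace-scoped: dispatches to Api::namespaced.
    client.apply(&cm, Some("testtenant")).await?;
    Ok(())
}
```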
diff --git a/harmony/src/domain/topology/k8s_anywhere.rs b/harmony/src/domain/topology/k8s_anywhere.rs
index ef11f36..fd0685d 100644
--- a/harmony/src/domain/topology/k8s_anywhere.rs
+++ b/harmony/src/domain/topology/k8s_anywhere.rs
@@ -1,4 +1,4 @@
-use std::{process::Command, sync::Arc};
+use std::{io::Error, process::Command, sync::Arc};
 
 use async_trait::async_trait;
 use inquire::Confirm;
@@ -6,6 +6,7 @@ use log::{info, warn};
 use tokio::sync::OnceCell;
 
 use crate::{
+    data::Id,
     executors::ExecutorError,
     interpret::{InterpretError, Outcome},
     inventory::Inventory,
@@ -170,6 +171,22 @@ impl K8sAnywhereTopology {
         Ok(Some(state))
     }
 
+    async fn ensure_k8s_tenant_manager(&self) -> Result<(), String> {
+        if let Some(_) = self.tenant_manager.get() {
+            return Ok(());
+        }
+
+        self.tenant_manager
+            .get_or_try_init(async || -> Result<K8sTenantManager, Error> {
+                let k8s_client = self.k8s_client().await?;
+                Ok(K8sTenantManager::new(k8s_client))
+            })
+            .await
+            .unwrap();
+
+        Ok(())
+    }
+
     fn get_k8s_tenant_manager(&self) -> Result<&K8sTenantManager, ExecutorError> {
         match self.tenant_manager.get() {
             Some(t) => Ok(t),
@@ -217,6 +234,10 @@ impl Topology for K8sAnywhereTopology {
                 "No K8s client could be found or installed".to_string(),
             ))?;
 
+        self.ensure_k8s_tenant_manager()
+            .await
+            .map_err(|e| InterpretError::new(e))?;
+
         match self.is_helm_available() {
             Ok(()) => Ok(Outcome::success(format!(
                 "{} + helm available",
@@ -239,27 +260,27 @@ impl TenantManager for K8sAnywhereTopology {
 
     async fn update_tenant_resource_limits(
         &self,
-        tenant_name: &str,
+        tenant_id: &Id,
         new_limits: &ResourceLimits,
     ) -> Result<(), ExecutorError> {
         self.get_k8s_tenant_manager()?
-            .update_tenant_resource_limits(tenant_name, new_limits)
+            .update_tenant_resource_limits(tenant_id, new_limits)
             .await
     }
 
     async fn update_tenant_network_policy(
         &self,
-        tenant_name: &str,
+        tenant_id: &Id,
         new_policy: &TenantNetworkPolicy,
     ) -> Result<(), ExecutorError> {
         self.get_k8s_tenant_manager()?
-            .update_tenant_network_policy(tenant_name, new_policy)
+            .update_tenant_network_policy(tenant_id, new_policy)
             .await
     }
 
-    async fn deprovision_tenant(&self, tenant_name: &str) -> Result<(), ExecutorError> {
+    async fn deprovision_tenant(&self, tenant_id: &Id) -> Result<(), ExecutorError> {
         self.get_k8s_tenant_manager()?
-            .deprovision_tenant(tenant_name)
+            .deprovision_tenant(tenant_id)
             .await
     }
 }
{}", + e + )) + }) + } + + fn build_network_policy(&self, config: &TenantConfig) -> Result { + let network_policy = json!({ + "apiVersion": "networking.k8s.io/v1", + "kind": "NetworkPolicy", + "metadata": { + "name": format!("{}-network-policy", config.name), + }, + "spec": { + "podSelector": {}, + "egress": [ + { "to": [ {"podSelector": {}}]}, + { "to": + [ + { + "podSelector": {}, + "namespaceSelector": { + "matchLabels": { + "kubernetes.io/metadata.name":"openshift-dns" + } + } + }, + ] + }, + { "to": [ + { + "ipBlock": { + + "cidr": "0.0.0.0/0", + // See https://en.wikipedia.org/wiki/Reserved_IP_addresses + "except": [ + "10.0.0.0/8", + "172.16.0.0/12", + "192.168.0.0/16", + "192.0.0.0/24", + "192.0.2.0/24", + "192.88.99.0/24", + "192.18.0.0/15", + "198.51.100.0/24", + "169.254.0.0/16", + "203.0.113.0/24", + "127.0.0.0/8", + + // Not sure we should block this one as it is + // used for multicast. But better block more than less. + "224.0.0.0/4", + "240.0.0.0/4", + "100.64.0.0/10", + "233.252.0.0/24", + "0.0.0.0/8", + ], + } + } + ] + }, + ], + "ingress": [ + { "from": [ {"podSelector": {}}]} + ], + "policyTypes": [ + "Ingress", "Egress", + ] + } + }); + + serde_json::from_value(network_policy).map_err(|e| { + ExecutorError::ConfigurationError(format!( + "Could not build TenantManager NetworkPolicy. {}", + e + )) + }) + } +} + +#[async_trait] +impl TenantManager for K8sTenantManager { + async fn provision_tenant(&self, config: &TenantConfig) -> Result<(), ExecutorError> { + let namespace = self.build_namespace(config)?; + let resource_quota = self.build_resource_quota(config)?; + let network_policy = self.build_network_policy(config)?; + + self.ensure_constraints(&namespace)?; + self.apply_resource(namespace).await?; + self.apply_resource(resource_quota).await?; + self.apply_resource(network_policy).await?; + todo!(); } async fn update_tenant_resource_limits( &self, - tenant_name: &str, + tenant_id: &Id, new_limits: &ResourceLimits, ) -> Result<(), ExecutorError> { todo!() @@ -83,13 +209,13 @@ impl TenantManager for K8sTenantManager { async fn update_tenant_network_policy( &self, - tenant_name: &str, + tenant_id: &Id, new_policy: &TenantNetworkPolicy, ) -> Result<(), ExecutorError> { todo!() } - async fn deprovision_tenant(&self, tenant_name: &str) -> Result<(), ExecutorError> { + async fn deprovision_tenant(&self, tenant_id: &Id) -> Result<(), ExecutorError> { todo!() } } diff --git a/harmony/src/domain/topology/tenant/manager.rs b/harmony/src/domain/topology/tenant/manager.rs index 4166261..df042c8 100644 --- a/harmony/src/domain/topology/tenant/manager.rs +++ b/harmony/src/domain/topology/tenant/manager.rs @@ -16,31 +16,20 @@ pub trait TenantManager { async fn provision_tenant(&self, config: &TenantConfig) -> Result<(), ExecutorError>; /// Updates the resource limits for an existing tenant. - /// - /// # Arguments - /// * `tenant_name`: The logical name of the tenant to update. - /// * `new_limits`: The new set of resource limits to apply. async fn update_tenant_resource_limits( &self, - tenant_name: &str, + tenant_id: &Id, new_limits: &ResourceLimits, ) -> Result<(), ExecutorError>; /// Updates the high-level network isolation policy for an existing tenant. - /// - /// # Arguments - /// * `tenant_name`: The logical name of the tenant to update. - /// * `new_policy`: The new network policy to apply. 
diff --git a/harmony/src/domain/topology/tenant/manager.rs b/harmony/src/domain/topology/tenant/manager.rs
index 4166261..df042c8 100644
--- a/harmony/src/domain/topology/tenant/manager.rs
+++ b/harmony/src/domain/topology/tenant/manager.rs
@@ -16,31 +16,20 @@ pub trait TenantManager {
     async fn provision_tenant(&self, config: &TenantConfig) -> Result<(), ExecutorError>;
 
     /// Updates the resource limits for an existing tenant.
-    ///
-    /// # Arguments
-    /// * `tenant_name`: The logical name of the tenant to update.
-    /// * `new_limits`: The new set of resource limits to apply.
     async fn update_tenant_resource_limits(
         &self,
-        tenant_name: &str,
+        tenant_id: &Id,
         new_limits: &ResourceLimits,
     ) -> Result<(), ExecutorError>;
 
     /// Updates the high-level network isolation policy for an existing tenant.
-    ///
-    /// # Arguments
-    /// * `tenant_name`: The logical name of the tenant to update.
-    /// * `new_policy`: The new network policy to apply.
     async fn update_tenant_network_policy(
         &self,
-        tenant_name: &str,
+        tenant_id: &Id,
         new_policy: &TenantNetworkPolicy,
     ) -> Result<(), ExecutorError>;
 
     /// Decommissions an existing tenant, removing its isolated context and associated resources.
     /// This operation should be idempotent.
-    ///
-    /// # Arguments
-    /// * `tenant_name`: The logical name of the tenant to deprovision.
-    async fn deprovision_tenant(&self, tenant_name: &str) -> Result<(), ExecutorError>;
+    async fn deprovision_tenant(&self, tenant_id: &Id) -> Result<(), ExecutorError>;
 }
diff --git a/harmony/src/domain/topology/tenant/mod.rs b/harmony/src/domain/topology/tenant/mod.rs
index e1e93a2..4bbefef 100644
--- a/harmony/src/domain/topology/tenant/mod.rs
+++ b/harmony/src/domain/topology/tenant/mod.rs
@@ -27,6 +27,28 @@ pub struct TenantConfig {
     pub labels_or_tags: HashMap<String, String>,
 }
 
+impl Default for TenantConfig {
+    fn default() -> Self {
+        let id = Id::default();
+        Self {
+            name: format!("tenant_{id}"),
+            id,
+            resource_limits: ResourceLimits {
+                cpu_request_cores: 4.0,
+                cpu_limit_cores: 4.0,
+                memory_request_gb: 4.0,
+                memory_limit_gb: 4.0,
+                storage_total_gb: 20.0,
+            },
+            network_policy: TenantNetworkPolicy {
+                default_inter_tenant_ingress: InterTenantIngressPolicy::DenyAll,
+                default_internet_egress: InternetEgressPolicy::AllowAll,
+            },
+            labels_or_tags: HashMap::new(),
+        }
+    }
+}
+
 #[derive(Debug, Clone, PartialEq, Serialize, Deserialize, Default)]
 pub struct ResourceLimits {
     /// Requested/guaranteed CPU cores (e.g., 2.0).
diff --git a/harmony/src/modules/k8s/ingress.rs b/harmony/src/modules/k8s/ingress.rs
index 883d721..d07d82f 100644
--- a/harmony/src/modules/k8s/ingress.rs
+++ b/harmony/src/modules/k8s/ingress.rs
@@ -1,5 +1,6 @@
 use harmony_macros::ingress_path;
 use k8s_openapi::api::networking::v1::Ingress;
+use log::{debug, trace};
 use serde::Serialize;
 use serde_json::json;
 
@@ -56,22 +57,24 @@ impl Score for K8sIngressScore {
         let ingress = json!(
         {
             "metadata": {
-                "name": self.name
+                "name": self.name.to_string(),
             },
             "spec": {
                 "rules": [
-                    { "host": self.host,
+                    { "host": self.host.to_string(),
                       "http": {
                           "paths": [
                               {
                                   "path": path,
                                   "pathType": path_type.as_str(),
-                                  "backend": [
-                                      {
-                                          "service": self.backend_service,
-                                          "port": self.port
+                                  "backend": {
+                                      "service": {
+                                          "name": self.backend_service.to_string(),
+                                          "port": {
+                                              "number": self.port,
+                                          }
                                       }
-                                  ]
+                                  }
                               }
                           ]
                       }
                     }
                 ]
             }
         }
         );
 
+        trace!("Building ingress object from Value {ingress:#}");
         let ingress: Ingress = serde_json::from_value(ingress).unwrap();
+        debug!(
+            "Successfully built Ingress {:?}",
+            ingress.metadata.name
+        );
 
         Box::new(K8sResourceInterpret {
             score: K8sResourceScore::single(
                 ingress.clone(),
-                self.namespace
-                    .clone()
-                    .map(|f| f.as_c_str().to_str().unwrap().to_string()),
+                self.namespace.clone().map(|f| f.to_string()),
             ),
         })
     }
diff --git a/harmony/src/modules/k8s/resource.rs b/harmony/src/modules/k8s/resource.rs
index 6880292..3c0b2bf 100644
--- a/harmony/src/modules/k8s/resource.rs
+++ b/harmony/src/modules/k8s/resource.rs
@@ -1,6 +1,7 @@
 use async_trait::async_trait;
 use k8s_openapi::NamespaceResourceScope;
 use kube::Resource;
+use log::info;
 use serde::{Serialize, de::DeserializeOwned};
 
 use crate::{
@@ -75,11 +76,12 @@ where
         _inventory: &Inventory,
         topology: &T,
     ) -> Result<Outcome, InterpretError> {
+        info!("Applying {} resources", self.score.resource.len());
         topology
             .k8s_client()
             .await
             .expect("Environment should provide enough information to instanciate a client")
-            .apply_namespaced(&self.score.resource, self.score.namespace.as_deref())
+            .apply_many(&self.score.resource, self.score.namespace.as_deref())
             .await?;
 
         Ok(Outcome::success(
diff --git a/harmony/src/modules/tenant/mod.rs b/harmony/src/modules/tenant/mod.rs
index 5ee212c..72412ec 100644
--- a/harmony/src/modules/tenant/mod.rs
+++ b/harmony/src/modules/tenant/mod.rs
@@ -14,7 +14,7 @@ use crate::{
 
 #[derive(Debug, Serialize, Clone)]
 pub struct TenantScore {
-    config: TenantConfig,
+    pub config: TenantConfig,
 }
 
 impl Score for TenantScore {
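With `TenantScore.config` now public, callers can override the defaults field by field instead of going through a constructor. A sketch, assuming `ResourceLimits` is importable alongside `TenantConfig` at the paths the example above uses:

```rust
use harmony::modules::tenant::TenantScore;
use harmony::topology::tenant::{ResourceLimits, TenantConfig};

// Hypothetical: a smaller tenant that only tightens the resource limits;
// id, network policy, and labels come from TenantConfig::default().
fn small_tenant() -> TenantScore {
    TenantScore {
        config: TenantConfig {
            name: "small-tenant".to_string(),
            resource_limits: ResourceLimits {
                cpu_request_cores: 1.0,
                cpu_limit_cores: 2.0,
                memory_request_gb: 1.0,
                memory_limit_gb: 2.0,
                storage_total_gb: 5.0,
            },
            ..Default::default()
        },
    }
}
```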