Merge pull request 'TenantManager_impl_k8s_anywhere' (#47) from TenantManager_impl_k8s_anywhere into master
All checks were successful
Run Check Script / check (push) Successful in 1m56s

Reviewed-on: https://git.nationtech.io/NationTech/harmony/pulls/47
johnride 2025-06-09 18:07:32 +00:00
commit bf7a6d590c
31 changed files with 524 additions and 86 deletions

Cargo.lock generated

@@ -1070,6 +1070,21 @@ dependencies = [
  "url",
 ]

+[[package]]
+name = "example-tenant"
+version = "0.1.0"
+dependencies = [
+ "cidr",
+ "env_logger",
+ "harmony",
+ "harmony_cli",
+ "harmony_macros",
+ "harmony_types",
+ "log",
+ "tokio",
+ "url",
+]
+
 [[package]]
 name = "example-tui"
 version = "0.1.0"
@@ -1409,12 +1424,14 @@ dependencies = [
  "derive-new",
  "directories",
  "dockerfile_builder",
+ "dyn-clone",
  "email_address",
  "env_logger",
  "fqdn",
  "harmony_macros",
  "harmony_types",
  "helm-wrapper-rs",
+ "hex",
  "http 1.3.1",
  "inquire",
  "k3d-rs",
@@ -1426,6 +1443,7 @@ dependencies = [
  "non-blank-string-rs",
  "opnsense-config",
  "opnsense-config-xml",
+ "rand 0.9.1",
  "reqwest 0.11.27",
  "russh",
  "rust-ipmi",
@@ -1550,6 +1568,12 @@ version = "0.3.9"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "d231dfb89cfffdbc30e7fc41579ed6066ad03abda9e567ccafae602b97ec5024"

+[[package]]
+name = "hex"
+version = "0.4.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70"
+
 [[package]]
 name = "hex-literal"
 version = "0.4.1"


@@ -137,8 +137,9 @@ Our approach addresses both customer and team multi-tenancy requirements:
 ### Implementation Roadmap
 1. **Phase 1**: Implement VPN access and manual tenant provisioning
 2. **Phase 2**: Deploy TenantScore automation for namespace, RBAC, and NetworkPolicy management
-3. **Phase 3**: Integrate Keycloak for centralized identity management
-4. **Phase 4**: Add advanced monitoring and per-tenant observability
+3. **Phase 3**: Work on privilege escalation from pods, audit for weaknesses, enforce security policies on pod runtimes
+4. **Phase 4**: Integrate Keycloak for centralized identity management
+5. **Phase 5**: Add advanced monitoring and per-tenant observability
 ### TenantScore Structure Preview
 ```rust


@@ -0,0 +1,41 @@
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: tenant-isolation-policy
  namespace: testtenant
spec:
  podSelector: {} # Selects all pods in the namespace
  policyTypes:
    - Ingress
    - Egress
  ingress:
    - from:
        - podSelector: {} # Allow from all pods in the same namespace
  egress:
    - to:
        - podSelector: {} # Allow to all pods in the same namespace
    - to:
        - podSelector: {}
          namespaceSelector:
            matchLabels:
              kubernetes.io/metadata.name: openshift-dns # Target the openshift-dns namespace
      # Note: opening only port 53 is not enough; we will have to dig deeper into this one eventually
      # ports:
      #   - protocol: UDP
      #     port: 53
      #   - protocol: TCP
      #     port: 53
    # Allow egress to the public internet only
    - to:
        - ipBlock:
            cidr: 0.0.0.0/0
            except:
              - 10.0.0.0/8 # RFC1918
              - 172.16.0.0/12 # RFC1918
              - 192.168.0.0/16 # RFC1918
              - 169.254.0.0/16 # Link-local
              - 127.0.0.0/8 # Loopback
              - 224.0.0.0/4 # Multicast
              - 240.0.0.0/4 # Reserved
              - 100.64.0.0/10 # Carrier-grade NAT
              - 0.0.0.0/8 # Reserved


@@ -0,0 +1,95 @@
apiVersion: v1
kind: Namespace
metadata:
  name: testtenant
---
apiVersion: v1
kind: Namespace
metadata:
  name: testtenant2
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: test-web
  namespace: testtenant
spec:
  replicas: 1
  selector:
    matchLabels:
      app: test-web
  template:
    metadata:
      labels:
        app: test-web
    spec:
      containers:
        - name: nginx
          image: nginxinc/nginx-unprivileged
          ports:
            - containerPort: 80
---
apiVersion: v1
kind: Service
metadata:
  name: test-web
  namespace: testtenant
spec:
  selector:
    app: test-web
  ports:
    - port: 80
      targetPort: 8080
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: test-client
  namespace: testtenant
spec:
  replicas: 1
  selector:
    matchLabels:
      app: test-client
  template:
    metadata:
      labels:
        app: test-client
    spec:
      containers:
        - name: curl
          image: curlimages/curl:latest
          command: ["/bin/sh", "-c", "sleep 3600"]
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: test-web
  namespace: testtenant2
spec:
  replicas: 1
  selector:
    matchLabels:
      app: test-web
  template:
    metadata:
      labels:
        app: test-web
    spec:
      containers:
        - name: nginx
          image: nginxinc/nginx-unprivileged
          ports:
            - containerPort: 80
---
apiVersion: v1
kind: Service
metadata:
  name: test-web
  namespace: testtenant2
spec:
  selector:
    app: test-web
  ports:
    - port: 80
      targetPort: 8080


@@ -0,0 +1,18 @@
[package]
name = "example-tenant"
edition = "2024"
version.workspace = true
readme.workspace = true
license.workspace = true
publish = false
[dependencies]
harmony = { path = "../../harmony" }
harmony_cli = { path = "../../harmony_cli" }
harmony_types = { path = "../../harmony_types" }
cidr = { workspace = true }
tokio = { workspace = true }
harmony_macros = { path = "../../harmony_macros" }
log = { workspace = true }
env_logger = { workspace = true }
url = { workspace = true }


@@ -0,0 +1,41 @@
use harmony::{
    data::Id,
    inventory::Inventory,
    maestro::Maestro,
    modules::tenant::TenantScore,
    topology::{K8sAnywhereTopology, tenant::TenantConfig},
};

#[tokio::main]
async fn main() {
    let tenant = TenantScore {
        config: TenantConfig {
            id: Id::default(),
            name: "TestTenant".to_string(),
            ..Default::default()
        },
    };

    let mut maestro = Maestro::<K8sAnywhereTopology>::initialize(
        Inventory::autoload(),
        K8sAnywhereTopology::new(),
    )
    .await
    .unwrap();
    maestro.register_all(vec![Box::new(tenant)]);
    harmony_cli::init(maestro, None).await.unwrap();
}
// TODO write tests (a rough sketch of the connectivity checks follows below)
// - Create a Tenant with a mostly default config and make sure the namespace is created
// - Deploy a sample client/server app with nginx-unprivileged and a service
// - Exec into the client pod and validate the following:
//   - can reach the internet
//   - can reach the server pod
//   - can resolve DNS queries to the internet
//   - can resolve DNS queries to services
//   - cannot reach services and pods in other namespaces
// - Create a Tenant with specific cpu/ram/storage requests/limits and make sure they are enforced by trying to
//   deploy a pod with lower requests/limits (accepted) and higher requests/limits (rejected)
// - Create TenantCredentials and make sure they give access only to the correct tenant
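A minimal sketch of how the first few connectivity checks above could be scripted, assuming `kubectl` on PATH pointing at a cluster with the testtenant/testtenant2 fixtures from this PR deployed; `exec_in_client` is a hypothetical helper, not part of harmony:

```rust
use std::process::Command;

/// Runs `cmd` inside the test-client pod of `namespace`; true on exit code 0.
fn exec_in_client(namespace: &str, cmd: &[&str]) -> bool {
    Command::new("kubectl")
        .args(["exec", "-n", namespace, "deploy/test-client", "--"])
        .args(cmd)
        .status()
        .map(|status| status.success())
        .unwrap_or(false)
}

fn main() {
    // Same-namespace service should be reachable (intra-tenant traffic allowed).
    assert!(exec_in_client("testtenant", &["curl", "-sf", "http://test-web"]));
    // Public internet should be reachable (InternetEgressPolicy::AllowAll).
    assert!(exec_in_client("testtenant", &["curl", "-sf", "https://example.com"]));
    // Cross-namespace service should be blocked by the isolation policy.
    assert!(!exec_in_client(
        "testtenant",
        &["curl", "-sf", "-m", "5", "http://test-web.testtenant2"]
    ));
}
```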


@@ -6,6 +6,8 @@ readme.workspace = true
 license.workspace = true

 [dependencies]
+rand = "0.9"
+hex = "0.4"
 libredfish = "0.1.1"
 reqwest = { version = "0.11", features = ["blocking", "json"] }
 russh = "0.45.0"


@@ -1,5 +1,23 @@
+use rand::distr::Alphanumeric;
+use rand::distr::SampleString;
+use std::time::SystemTime;
+use std::time::UNIX_EPOCH;
+
 use serde::{Deserialize, Serialize};

+/// A unique identifier designed for ease of use.
+///
+/// You can pass it any String to use as an Id, or you can use the default format with `Id::default()`.
+///
+/// The default format looks like this:
+///
+/// `462d4c_g2COgai`
+///
+/// The first part is the unix timestamp in hexadecimal, which makes Ids easily sortable by creation time.
+/// The second part is a series of 7 random characters.
+///
+/// **It is not meant to be very secure or unique**: it is suitable for generating up to 10,000 items per
+/// second with a reasonable collision rate of 0.000014%, as calculated by this calculator: https://kevingal.com/apps/collision.html
 #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
 pub struct Id {
     value: String,
@@ -16,3 +34,20 @@ impl std::fmt::Display for Id {
         f.write_str(&self.value)
     }
 }
+
+impl Default for Id {
+    fn default() -> Self {
+        let start = SystemTime::now();
+        let since_the_epoch = start
+            .duration_since(UNIX_EPOCH)
+            .expect("Time went backwards");
+        let timestamp = since_the_epoch.as_secs();
+        let hex_timestamp = format!("{:x}", timestamp & 0xffffff);
+
+        let random_part: String = Alphanumeric.sample_string(&mut rand::rng(), 7);
+
+        let value = format!("{}_{}", hex_timestamp, random_part);
+        Self { value }
+    }
+}
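A hypothetical unit test (not part of this commit) could pin down the documented format; it assumes only the `Default` impl above:

```rust
#[cfg(test)]
mod tests {
    use super::Id;

    #[test]
    fn default_id_has_expected_shape() {
        // Expect "<hex timestamp>_<7 random alphanumerics>", e.g. "462d4c_g2COgai".
        let id = Id::default().to_string();
        let (ts, random) = id.split_once('_').expect("format is <hex>_<random>");
        assert!(ts.len() <= 6 && ts.chars().all(|c| c.is_ascii_hexdigit()));
        assert_eq!(random.len(), 7);
        assert!(random.chars().all(|c| c.is_ascii_alphanumeric()));
    }
}
```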


@@ -1,11 +1,11 @@
 use derive_new::new;
-use k8s_openapi::NamespaceResourceScope;
+use k8s_openapi::{ClusterResourceScope, NamespaceResourceScope};
 use kube::{
     Api, Client, Config, Error, Resource,
     api::PostParams,
     config::{KubeConfigOptions, Kubeconfig},
 };
-use log::error;
+use log::{debug, error, trace};
 use serde::de::DeserializeOwned;

 #[derive(new)]
@@ -20,52 +20,35 @@ impl K8sClient {
         })
     }

-    pub async fn apply_all<
-        K: Resource<Scope = NamespaceResourceScope>
-            + std::fmt::Debug
-            + Sync
-            + DeserializeOwned
-            + Default
-            + serde::Serialize
-            + Clone,
-    >(
-        &self,
-        resource: &Vec<K>,
-    ) -> Result<Vec<K>, kube::Error>
+    pub async fn apply<K>(&self, resource: &K, ns: Option<&str>) -> Result<K, Error>
     where
+        K: Resource + Clone + std::fmt::Debug + DeserializeOwned + serde::Serialize,
+        <K as Resource>::Scope: ApplyStrategy<K>,
         <K as kube::Resource>::DynamicType: Default,
     {
-        let mut result = vec![];
-        for r in resource.iter() {
-            let api: Api<K> = Api::all(self.client.clone());
-            result.push(api.create(&PostParams::default(), &r).await?);
-        }
-        Ok(result)
+        debug!(
+            "Applying resource {:?} with ns {:?}",
+            resource.meta().name,
+            ns
+        );
+        trace!("{:#?}", serde_json::to_string(resource));
+        let api: Api<K> = <<K as Resource>::Scope as ApplyStrategy<K>>::get_api(&self.client, ns);
+        api.create(&PostParams::default(), &resource).await
     }

-    pub async fn apply_namespaced<K>(
-        &self,
-        resource: &Vec<K>,
-        ns: Option<&str>,
-    ) -> Result<Vec<K>, Error>
+    pub async fn apply_many<K>(&self, resource: &Vec<K>, ns: Option<&str>) -> Result<Vec<K>, Error>
     where
-        K: Resource<Scope = NamespaceResourceScope>
-            + Clone
-            + std::fmt::Debug
-            + DeserializeOwned
-            + serde::Serialize
-            + Default,
+        K: Resource + Clone + std::fmt::Debug + DeserializeOwned + serde::Serialize,
+        <K as Resource>::Scope: ApplyStrategy<K>,
         <K as kube::Resource>::DynamicType: Default,
     {
-        let mut resources = Vec::new();
+        let mut result = Vec::new();
         for r in resource.iter() {
-            let api: Api<K> = match ns {
-                Some(ns) => Api::namespaced(self.client.clone(), ns),
-                None => Api::default_namespaced(self.client.clone()),
-            };
-            resources.push(api.create(&PostParams::default(), &r).await?);
+            result.push(self.apply(r, ns).await?);
         }
-        Ok(resources)
+        Ok(result)
     }

     pub(crate) async fn from_kubeconfig(path: &str) -> Option<K8sClient> {
@@ -86,3 +69,35 @@
         ))
     }
 }
+
+pub trait ApplyStrategy<K: Resource> {
+    fn get_api(client: &Client, ns: Option<&str>) -> Api<K>;
+}
+
+/// Implementation for all resources that are cluster-scoped.
+/// It will always use `Api::all` and ignore the namespace parameter.
+impl<K> ApplyStrategy<K> for ClusterResourceScope
+where
+    K: Resource<Scope = ClusterResourceScope>,
+    <K as kube::Resource>::DynamicType: Default,
+{
+    fn get_api(client: &Client, _ns: Option<&str>) -> Api<K> {
+        Api::all(client.clone())
+    }
+}
+
+/// Implementation for all resources that are namespace-scoped.
+/// It will use `Api::namespaced` if a namespace is provided, otherwise
+/// it falls back to the default namespace configured in your kubeconfig.
+impl<K> ApplyStrategy<K> for NamespaceResourceScope
+where
+    K: Resource<Scope = NamespaceResourceScope>,
+    <K as kube::Resource>::DynamicType: Default,
+{
+    fn get_api(client: &Client, ns: Option<&str>) -> Api<K> {
+        match ns {
+            Some(ns) => Api::namespaced(client.clone(), ns),
+            None => Api::default_namespaced(client.clone()),
+        }
+    }
+}
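To illustrate the dispatch, here is a minimal sketch (not part of this PR) of calling the new `apply` from inside the crate, assuming an already constructed `K8sClient`:

```rust
use k8s_openapi::api::core::v1::{Namespace, ResourceQuota};

async fn demo(client: &K8sClient) -> Result<(), kube::Error> {
    let mut ns = Namespace::default();
    ns.metadata.name = Some("demo-tenant".into());
    // Namespace is cluster-scoped: the ClusterResourceScope impl picks
    // Api::all, so the namespace argument is ignored.
    client.apply(&ns, None).await?;

    let mut quota = ResourceQuota::default();
    quota.metadata.name = Some("demo-quota".into());
    // ResourceQuota is namespace-scoped: the NamespaceResourceScope impl
    // picks Api::namespaced with the provided namespace.
    client.apply(&quota, Some("demo-tenant")).await?;
    Ok(())
}
```

Both calls go through the same entry point; the compiler selects the right `Api` constructor from each resource's `Scope` associated type.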


@@ -1,4 +1,4 @@
-use std::{process::Command, sync::Arc};
+use std::{io::Error, process::Command, sync::Arc};

 use async_trait::async_trait;
 use inquire::Confirm;
@@ -6,6 +6,7 @@ use log::{info, warn};
 use tokio::sync::OnceCell;

 use crate::{
+    data::Id,
     executors::ExecutorError,
     interpret::{InterpretError, Outcome},
     inventory::Inventory,
@@ -170,6 +171,22 @@ impl K8sAnywhereTopology {
         Ok(Some(state))
     }

+    async fn ensure_k8s_tenant_manager(&self) -> Result<(), String> {
+        if let Some(_) = self.tenant_manager.get() {
+            return Ok(());
+        }
+
+        self.tenant_manager
+            .get_or_try_init(async || -> Result<K8sTenantManager, String> {
+                let k8s_client = self.k8s_client().await?;
+                Ok(K8sTenantManager::new(k8s_client))
+            })
+            .await?;
+
+        Ok(())
+    }
+
     fn get_k8s_tenant_manager(&self) -> Result<&K8sTenantManager, ExecutorError> {
         match self.tenant_manager.get() {
             Some(t) => Ok(t),
@@ -217,6 +234,10 @@ impl Topology for K8sAnywhereTopology {
                 "No K8s client could be found or installed".to_string(),
             ))?;

+        self.ensure_k8s_tenant_manager()
+            .await
+            .map_err(|e| InterpretError::new(e))?;
+
         match self.is_helm_available() {
             Ok(()) => Ok(Outcome::success(format!(
                 "{} + helm available",
@@ -239,27 +260,27 @@ impl TenantManager for K8sAnywhereTopology {
     async fn update_tenant_resource_limits(
         &self,
-        tenant_name: &str,
+        tenant_id: &Id,
         new_limits: &ResourceLimits,
     ) -> Result<(), ExecutorError> {
         self.get_k8s_tenant_manager()?
-            .update_tenant_resource_limits(tenant_name, new_limits)
+            .update_tenant_resource_limits(tenant_id, new_limits)
             .await
     }

     async fn update_tenant_network_policy(
         &self,
-        tenant_name: &str,
+        tenant_id: &Id,
         new_policy: &TenantNetworkPolicy,
     ) -> Result<(), ExecutorError> {
         self.get_k8s_tenant_manager()?
-            .update_tenant_network_policy(tenant_name, new_policy)
+            .update_tenant_network_policy(tenant_id, new_policy)
             .await
     }

-    async fn deprovision_tenant(&self, tenant_name: &str) -> Result<(), ExecutorError> {
+    async fn deprovision_tenant(&self, tenant_id: &Id) -> Result<(), ExecutorError> {
         self.get_k8s_tenant_manager()?
-            .deprovision_tenant(tenant_name)
+            .deprovision_tenant(tenant_id)
             .await
     }
 }


@@ -1,9 +1,17 @@
 use std::sync::Arc;

-use crate::{executors::ExecutorError, topology::k8s::K8sClient};
+use crate::{data::Id, executors::ExecutorError, topology::k8s::K8sClient};
 use async_trait::async_trait;
 use derive_new::new;
-use k8s_openapi::api::core::v1::Namespace;
+use k8s_openapi::{
+    NamespaceResourceScope,
+    api::{
+        core::v1::{Namespace, ResourceQuota},
+        networking::v1::NetworkPolicy,
+    },
+};
+use kube::Resource;
+use serde::de::DeserializeOwned;
 use serde_json::json;

 use super::{ResourceLimits, TenantConfig, TenantManager, TenantNetworkPolicy};
@@ -13,9 +21,29 @@ pub struct K8sTenantManager {
     k8s_client: Arc<K8sClient>,
 }

-#[async_trait]
-impl TenantManager for K8sTenantManager {
-    async fn provision_tenant(&self, config: &TenantConfig) -> Result<(), ExecutorError> {
+impl K8sTenantManager {
+    fn get_namespace_name(&self, config: &TenantConfig) -> String {
+        config.name.clone()
+    }
+
+    fn ensure_constraints(&self, namespace: &Namespace) -> Result<(), ExecutorError> {
+        todo!("Validate that when tenant already exists (by id) that name has not changed");
+        todo!("Make sure other Tenant constraints are respected by this k8s implementation");
+    }
+
+    async fn apply_resource<
+        K: Resource + std::fmt::Debug + Sync + DeserializeOwned + Default + serde::Serialize + Clone,
+    >(
+        &self,
+        resource: K,
+    ) -> Result<K, ExecutorError>
+    where
+        <K as kube::Resource>::DynamicType: Default,
+    {
+        todo!("Apply tenant labels on resource and apply resource with k8s client properly")
+    }
+
+    fn build_namespace(&self, config: &TenantConfig) -> Result<Namespace, ExecutorError> {
         let namespace = json!(
             {
                 "apiVersion": "v1",
@@ -25,14 +53,19 @@ impl TenantManager for K8sTenantManager {
                     "harmony.nationtech.io/tenant.id": config.id,
                     "harmony.nationtech.io/tenant.name": config.name,
                 },
-                "name": config.name,
+                "name": self.get_namespace_name(config),
             },
         }
         );

-        todo!("Validate that when tenant already exists (by id) that name has not changed");
-
-        let namespace: Namespace = serde_json::from_value(namespace).unwrap();
+        serde_json::from_value(namespace).map_err(|e| {
+            ExecutorError::ConfigurationError(format!(
+                "Could not build TenantManager Namespace. {}",
+                e
+            ))
+        })
+    }

+    fn build_resource_quota(&self, config: &TenantConfig) -> Result<ResourceQuota, ExecutorError> {
         let resource_quota = json!(
             {
                 "apiVersion": "v1",
@@ -47,7 +80,7 @@
                     "harmony.nationtech.io/tenant.id": config.id,
                     "harmony.nationtech.io/tenant.name": config.name,
                 },
-                "namespace": config.name,
+                "namespace": self.get_namespace_name(config),
             },
             "spec": {
                 "hard": {
@@ -71,11 +104,104 @@
         }
         );

+        serde_json::from_value(resource_quota).map_err(|e| {
+            ExecutorError::ConfigurationError(format!(
+                "Could not build TenantManager ResourceQuota. {}",
+                e
+            ))
+        })
+    }
+
+    fn build_network_policy(&self, config: &TenantConfig) -> Result<NetworkPolicy, ExecutorError> {
+        let network_policy = json!({
+            "apiVersion": "networking.k8s.io/v1",
+            "kind": "NetworkPolicy",
+            "metadata": {
+                "name": format!("{}-network-policy", config.name),
+            },
+            "spec": {
+                "podSelector": {},
+                "egress": [
+                    { "to": [ {"podSelector": {}} ] },
+                    { "to": [
+                        {
+                            "podSelector": {},
+                            "namespaceSelector": {
+                                "matchLabels": {
+                                    "kubernetes.io/metadata.name": "openshift-dns"
+                                }
+                            }
+                        },
+                    ]},
+                    { "to": [
+                        {
+                            "ipBlock": {
+                                "cidr": "0.0.0.0/0",
+                                // See https://en.wikipedia.org/wiki/Reserved_IP_addresses
+                                "except": [
+                                    "10.0.0.0/8",
+                                    "172.16.0.0/12",
+                                    "192.168.0.0/16",
+                                    "192.0.0.0/24",
+                                    "192.0.2.0/24",
+                                    "192.88.99.0/24",
+                                    "198.18.0.0/15",
+                                    "198.51.100.0/24",
+                                    "169.254.0.0/16",
+                                    "203.0.113.0/24",
+                                    "127.0.0.0/8",
+                                    // Not sure we should block this one as it is
+                                    // used for multicast. But better to block more than less.
+                                    "224.0.0.0/4",
+                                    "240.0.0.0/4",
+                                    "100.64.0.0/10",
+                                    "233.252.0.0/24",
+                                    "0.0.0.0/8",
+                                ],
+                            }
+                        }
+                    ]},
+                ],
+                "ingress": [
+                    { "from": [ {"podSelector": {}} ] }
+                ],
+                "policyTypes": [
+                    "Ingress", "Egress",
+                ]
+            }
+        });
+
+        serde_json::from_value(network_policy).map_err(|e| {
+            ExecutorError::ConfigurationError(format!(
+                "Could not build TenantManager NetworkPolicy. {}",
+                e
+            ))
+        })
+    }
+}
+
+#[async_trait]
+impl TenantManager for K8sTenantManager {
+    async fn provision_tenant(&self, config: &TenantConfig) -> Result<(), ExecutorError> {
+        let namespace = self.build_namespace(config)?;
+        let resource_quota = self.build_resource_quota(config)?;
+        let network_policy = self.build_network_policy(config)?;
+
+        self.ensure_constraints(&namespace)?;
+
+        self.apply_resource(namespace).await?;
+        self.apply_resource(resource_quota).await?;
+        self.apply_resource(network_policy).await?;
+
+        todo!();
     }

     async fn update_tenant_resource_limits(
         &self,
-        tenant_name: &str,
+        tenant_id: &Id,
         new_limits: &ResourceLimits,
     ) -> Result<(), ExecutorError> {
         todo!()
@@ -83,13 +209,13 @@
     }

     async fn update_tenant_network_policy(
         &self,
-        tenant_name: &str,
+        tenant_id: &Id,
         new_policy: &TenantNetworkPolicy,
     ) -> Result<(), ExecutorError> {
         todo!()
     }

-    async fn deprovision_tenant(&self, tenant_name: &str) -> Result<(), ExecutorError> {
+    async fn deprovision_tenant(&self, tenant_id: &Id) -> Result<(), ExecutorError> {
         todo!()
     }
 }


@@ -16,31 +16,20 @@ pub trait TenantManager {
     async fn provision_tenant(&self, config: &TenantConfig) -> Result<(), ExecutorError>;

     /// Updates the resource limits for an existing tenant.
-    ///
-    /// # Arguments
-    /// * `tenant_name`: The logical name of the tenant to update.
-    /// * `new_limits`: The new set of resource limits to apply.
     async fn update_tenant_resource_limits(
         &self,
-        tenant_name: &str,
+        tenant_id: &Id,
         new_limits: &ResourceLimits,
     ) -> Result<(), ExecutorError>;

     /// Updates the high-level network isolation policy for an existing tenant.
-    ///
-    /// # Arguments
-    /// * `tenant_name`: The logical name of the tenant to update.
-    /// * `new_policy`: The new network policy to apply.
     async fn update_tenant_network_policy(
         &self,
-        tenant_name: &str,
+        tenant_id: &Id,
         new_policy: &TenantNetworkPolicy,
     ) -> Result<(), ExecutorError>;

     /// Decommissions an existing tenant, removing its isolated context and associated resources.
     /// This operation should be idempotent.
-    ///
-    /// # Arguments
-    /// * `tenant_name`: The logical name of the tenant to deprovision.
-    async fn deprovision_tenant(&self, tenant_name: &str) -> Result<(), ExecutorError>;
+    async fn deprovision_tenant(&self, tenant_id: &Id) -> Result<(), ExecutorError>;
 }


@@ -27,6 +27,28 @@ pub struct TenantConfig {
     pub labels_or_tags: HashMap<String, String>,
 }

+impl Default for TenantConfig {
+    fn default() -> Self {
+        let id = Id::default();
+        Self {
+            name: format!("tenant_{id}"),
+            id,
+            resource_limits: ResourceLimits {
+                cpu_request_cores: 4.0,
+                cpu_limit_cores: 4.0,
+                memory_request_gb: 4.0,
+                memory_limit_gb: 4.0,
+                storage_total_gb: 20.0,
+            },
+            network_policy: TenantNetworkPolicy {
+                default_inter_tenant_ingress: InterTenantIngressPolicy::DenyAll,
+                default_internet_egress: InternetEgressPolicy::AllowAll,
+            },
+            labels_or_tags: HashMap::new(),
+        }
+    }
+}
+
 #[derive(Debug, Clone, PartialEq, Serialize, Deserialize, Default)]
 pub struct ResourceLimits {
     /// Requested/guaranteed CPU cores (e.g., 2.0).


@@ -1,5 +1,6 @@
 use harmony_macros::ingress_path;
 use k8s_openapi::api::networking::v1::Ingress;
+use log::{debug, trace};
 use serde::Serialize;
 use serde_json::json;
@@ -56,22 +57,24 @@ impl<T: Topology + K8sclient> Score<T> for K8sIngressScore {
         let ingress = json!(
             {
                 "metadata": {
-                    "name": self.name
+                    "name": self.name.to_string(),
                 },
                 "spec": {
                     "rules": [
-                        { "host": self.host,
+                        { "host": self.host.to_string(),
                           "http": {
                             "paths": [
                                 {
                                     "path": path,
                                     "pathType": path_type.as_str(),
-                                    "backend": [
-                                        {
-                                            "service": self.backend_service,
-                                            "port": self.port
+                                    "backend": {
+                                        "service": {
+                                            "name": self.backend_service.to_string(),
+                                            "port": {
+                                                "number": self.port,
+                                            }
                                         }
-                                    ]
+                                    }
                                 }
                             ]
                           }
@@ -81,13 +84,16 @@
             }
         );

+        trace!("Building ingress object from Value {ingress:#}");
         let ingress: Ingress = serde_json::from_value(ingress).unwrap();
+        debug!(
+            "Successfully built Ingress for host {:?}",
+            ingress.metadata.name
+        );

         Box::new(K8sResourceInterpret {
             score: K8sResourceScore::single(
                 ingress.clone(),
-                self.namespace
-                    .clone()
-                    .map(|f| f.as_c_str().to_str().unwrap().to_string()),
+                self.namespace.clone().map(|f| f.to_string()),
             ),
         })
     }


@@ -1,6 +1,7 @@
 use async_trait::async_trait;
 use k8s_openapi::NamespaceResourceScope;
 use kube::Resource;
+use log::info;
 use serde::{Serialize, de::DeserializeOwned};

 use crate::{
@@ -75,11 +76,12 @@ where
         _inventory: &Inventory,
         topology: &T,
     ) -> Result<Outcome, InterpretError> {
+        info!("Applying {} resources", self.score.resource.len());
         topology
             .k8s_client()
             .await
             .expect("Environment should provide enough information to instantiate a client")
-            .apply_namespaced(&self.score.resource, self.score.namespace.as_deref())
+            .apply_many(&self.score.resource, self.score.namespace.as_deref())
             .await?;

         Ok(Outcome::success(


@@ -14,7 +14,7 @@ use crate::{
 #[derive(Debug, Serialize, Clone)]
 pub struct TenantScore {
-    config: TenantConfig,
+    pub config: TenantConfig,
 }

 impl<T: Topology + TenantManager> Score<T> for TenantScore {