Compare commits: feat/slack...feat/init_ (36 commits)
| SHA1 |
|---|
| 14fc4345c1 |
| 8e472e4c65 |
| ec17ccc246 |
| 5127f44ab3 |
| 2ff70db0b1 |
| e17ac1af83 |
| 31e59937dc |
| 12eb4ae31f |
| a2be9457b9 |
| 0d56fbc09d |
| 56dc1e93c1 |
| 691540fe64 |
| 7e3f1b1830 |
| b631e8ccbb |
| 60f2f31d6c |
| 045954f8d3 |
| 27f1a9dbdd |
| 7c809bf18a |
| 6490e5e82a |
| 5e51f7490c |
| 97fba07f4e |
| 624e4330bb |
| e7917843bc |
| 7cd541bdd8 |
| 270dd49567 |
| 0187300473 |
| bf16566b4e |
| 895fb02f4e |
| 88d6af9815 |
| 5aa9dc701f |
| f4ef895d2e |
| 6e7148a945 |
| 83453273c6 |
| 76ae5eb747 |
| 9c51040f3b |
| 19bd47a545 |
**`.gitea/workflows/check.yml`** (new file, +14)

```yaml
name: Run Check Script
on:
  push:
  pull_request:

jobs:
  check:
    runs-on: rust-cargo
    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Run check script
        run: bash check.sh
```
**`CONTRIBUTING.md`** (new file, +36)

```markdown
# Contributing to the Harmony project

## Write small PRs

Aim for the smallest piece of work that is mergeable.

Mergeable means that:

- it does not break the build
- it moves the codebase one step forward

PRs can be many things, they do not have to be complete features.

### What a PR **should** be

- Introduce a new trait: this will be the place to discuss the new trait addition, its design and implementation
- A new implementation of a trait: a new concrete implementation of the LoadBalancer trait
- A new CI check: something that improves quality, robustness, or CI performance
- Documentation improvements
- Refactoring
- A bugfix

### What a PR **should not** be

- Large. Anything over 200 lines (excluding generated lines) should have a very good reason to be this large.
- A mix of refactoring, bug fixes, and new features.
- Introducing multiple new features or ideas at once.
- Multiple new implementations of a trait/functionality at once.

The general idea is to keep PRs small and single purpose.

## Commit message formatting

We follow the conventional commits guidelines:

https://www.conventionalcommits.org/en/v1.0.0/
```
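Following those guidelines, a commit touching the tenant module introduced in this changeset might be described like this (hypothetical example):

```text
feat(tenant): provision namespace and resource quota per tenant

Adds a TenantScore that creates an isolated namespace with
ResourceQuota and NetworkPolicy objects derived from TenantConfig.
```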
**`Cargo.lock`** (generated, 24 lines changed)

```diff
@@ -1070,6 +1070,21 @@ dependencies = [
  "url",
 ]
 
+[[package]]
+name = "example-tenant"
+version = "0.1.0"
+dependencies = [
+ "cidr",
+ "env_logger",
+ "harmony",
+ "harmony_cli",
+ "harmony_macros",
+ "harmony_types",
+ "log",
+ "tokio",
+ "url",
+]
+
 [[package]]
 name = "example-tui"
 version = "0.1.0"
@@ -1409,12 +1424,14 @@ dependencies = [
  "derive-new",
  "directories",
  "dockerfile_builder",
+ "dyn-clone",
  "email_address",
  "env_logger",
  "fqdn",
  "harmony_macros",
  "harmony_types",
  "helm-wrapper-rs",
+ "hex",
  "http 1.3.1",
  "inquire",
  "k3d-rs",
@@ -1426,6 +1443,7 @@ dependencies = [
  "non-blank-string-rs",
  "opnsense-config",
  "opnsense-config-xml",
+ "rand 0.9.1",
  "reqwest 0.11.27",
  "russh",
  "rust-ipmi",
@@ -1550,6 +1568,12 @@ version = "0.3.9"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "d231dfb89cfffdbc30e7fc41579ed6066ad03abda9e567ccafae602b97ec5024"
 
+[[package]]
+name = "hex"
+version = "0.4.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70"
+
 [[package]]
 name = "hex-literal"
 version = "0.4.1"
```
```diff
@@ -1,6 +1,6 @@
 # Architecture Decision Record: <Title>
 
-Name: <Name>
+Initial Author: <Name>
 
 Initial Date: <Date>
```
```diff
@@ -1,6 +1,6 @@
 # Architecture Decision Record: Helm and Kustomize Handling
 
-Name: Taha Hawa
+Initial Author: Taha Hawa
 
 Initial Date: 2025-04-15
```
```diff
@@ -1,6 +1,6 @@
 # Architecture Decision Record: Monitoring and Alerting
 
-Proposed by: Willem Rolleman
+Initial Author : Willem Rolleman
 Date : April 28 2025
 
 ## Status
```
**`adr/011-multi-tenant-cluster.md`** (new file, +161)

````markdown
# Architecture Decision Record: Multi-Tenancy Strategy for Harmony Managed Clusters

Initial Author: Jean-Gabriel Gill-Couture

Initial Date: 2025-05-26

## Status

Proposed

## Context

Harmony manages production OKD/Kubernetes clusters that serve multiple clients with varying trust levels and operational requirements. We need a multi-tenancy strategy that provides:

1. **Strong isolation** between client workloads while maintaining operational simplicity
2. **Controlled API access** allowing clients self-service capabilities within defined boundaries
3. **Security-first approach** protecting both the cluster infrastructure and tenant data
4. **Harmony-native implementation** using our Score/Interpret pattern for automated tenant provisioning
5. **Scalable management** supporting both small trusted clients and larger enterprise customers

The official Kubernetes multi-tenancy documentation identifies two primary models: namespace-based isolation and virtual control planes per tenant. Given Harmony's focus on operational simplicity, provider-agnostic abstractions (ADR-003), and hexagonal architecture (ADR-002), we must choose an approach that balances security, usability, and maintainability.

Our clients represent a hybrid tenancy model:
- **Customer multi-tenancy**: Each client operates independently with no cross-tenant trust
- **Team multi-tenancy**: Individual clients may have multiple team members requiring coordinated access
- **API access requirement**: Unlike pure SaaS scenarios, clients need controlled Kubernetes API access for self-service operations

The official Kubernetes documentation on multi-tenancy heavily inspired this ADR: https://kubernetes.io/docs/concepts/security/multi-tenancy/

## Decision

Implement **namespace-based multi-tenancy** with the following architecture:

### 1. Network Security Model
- **Private cluster access**: Kubernetes API and OpenShift console accessible only via WireGuard VPN
- **No public exposure**: Control plane endpoints remain internal to prevent unauthorized access attempts
- **VPN-based authentication**: Initial access control through WireGuard client certificates

### 2. Tenant Isolation Strategy
- **Dedicated namespace per tenant**: Each client receives an isolated namespace with access limited only to the required resources and operations
- **Complete network isolation**: NetworkPolicies prevent cross-namespace communication while allowing full egress to the public internet
- **Resource governance**: ResourceQuotas and LimitRanges enforce CPU, memory, and storage consumption limits
- **Storage access control**: Clients can create PersistentVolumeClaims but cannot directly manipulate PersistentVolumes or access other tenants' storage

### 3. Access Control Framework
- **Principle of least privilege**: RBAC grants only the necessary permissions within the tenant namespace scope
- **Namespace-scoped**: Clients can create/modify/delete resources within their namespace
- **Cluster-level restrictions**: No access to cluster-wide resources, other namespaces, or sensitive cluster operations
- **Whitelisted operations**: Controlled self-service capabilities for ingress, secrets, configmaps, and workload management

### 4. Identity Management Evolution
- **Phase 1**: Manual provisioning of VPN access and Kubernetes ServiceAccounts/Users
- **Phase 2**: Migration to Keycloak-based identity management (aligning with ADR-006) for centralized authentication and lifecycle management

### 5. Harmony Integration
- **TenantScore implementation**: Declarative tenant provisioning using Harmony's Score/Interpret pattern
- **Topology abstraction**: Tenant configuration abstracted from underlying Kubernetes implementation details
- **Automated deployment**: Complete tenant setup automated through Harmony's orchestration capabilities

## Rationale

### Network Security Through VPN Access
- **Defense in depth**: The VPN requirement adds a critical security layer preventing unauthorized cluster access
- **Simplified firewall rules**: No need for complex public endpoint protections or rate limiting
- **Audit capability**: VPN access provides a clear audit trail of cluster connections
- **Aligns with enterprise practices**: Most enterprise customers already use VPN infrastructure

### Namespace Isolation vs Virtual Control Planes
Following official Kubernetes guidance, namespace isolation provides:
- **Lower resource overhead**: Virtual control planes require a dedicated etcd, API server, and controller manager per tenant
- **Operational simplicity**: A single control plane to maintain, upgrade, and monitor
- **Cross-tenant service integration**: Enables future controlled cross-tenant communication if required
- **Proven stability**: Namespace-based isolation is well-tested and widely deployed
- **Cost efficiency**: Significantly lower infrastructure costs compared to dedicated control planes

### Hybrid Tenancy Model Suitability
Our approach addresses both customer and team multi-tenancy requirements:
- **Customer isolation**: Strong network and RBAC boundaries prevent cross-tenant interference
- **Team collaboration**: Multiple team members can share namespace access through group-based RBAC
- **Self-service balance**: Controlled API access enables client autonomy without compromising security

### Harmony Architecture Alignment
- **Provider agnostic**: TenantScore abstracts multi-tenancy concepts, enabling future support for other Kubernetes distributions
- **Hexagonal architecture**: Tenant management becomes an infrastructure capability accessed through well-defined ports
- **Declarative automation**: Tenant lifecycle fully managed through Harmony's Score execution model

## Consequences

### Positive Consequences
- **Strong security posture**: VPN + namespace isolation provides robust tenant separation
- **Operational efficiency**: Single cluster management with automated tenant provisioning
- **Client autonomy**: Self-service capabilities reduce the operational support burden
- **Scalable architecture**: Can support hundreds of tenants per cluster without architectural changes
- **Future flexibility**: Foundation supports evolution to more sophisticated multi-tenancy models
- **Cost optimization**: Shared infrastructure maximizes resource utilization

### Negative Consequences
- **VPN operational overhead**: Requires VPN infrastructure management
- **Manual provisioning complexity**: Phase 1 manual user management creates an administrative burden
- **Network policy dependency**: Requires a CNI with NetworkPolicy support (OVN-Kubernetes provides this and is the OKD/OpenShift default)
- **Cluster-wide resource limitations**: Some advanced Kubernetes features require cluster-wide access
- **Single point of failure**: A cluster outage affects all tenants simultaneously

### Migration Challenges
- **Legacy client integration**: Existing clients may need VPN client setup and credential migration
- **Monitoring complexity**: Per-tenant observability requires careful metric and log segmentation
- **Backup considerations**: Tenant data backup must respect isolation boundaries

## Alternatives Considered

### Alternative 1: Virtual Control Plane Per Tenant
**Pros**: Complete control plane isolation, full Kubernetes API access per tenant
**Cons**: 3-5x higher resource usage, complex cross-tenant networking, operational complexity scales linearly with tenants

**Rejected**: Resource overhead incompatible with cost-effective multi-tenancy goals

### Alternative 2: Dedicated Clusters Per Tenant
**Pros**: Maximum isolation, independent upgrade cycles, simplified security model
**Cons**: Exponential operational complexity, prohibitive costs, resource waste

**Rejected**: Operational overhead makes this approach unsustainable for multiple clients

### Alternative 3: Public API with Advanced Authentication
**Pros**: No VPN requirement, potentially simpler client access
**Cons**: Larger attack surface, complex rate limiting and DDoS protection, increased security monitoring requirements

**Rejected**: Risk/benefit analysis favors VPN-based access control

### Alternative 4: Service Mesh Based Isolation
**Pros**: Fine-grained traffic control, encryption, advanced observability
**Cons**: Significant operational complexity, performance overhead, steep learning curve

**Rejected**: Complexity overhead outweighs benefits for current requirements; remains an option for future enhancement

## Additional Notes

### Implementation Roadmap
1. **Phase 1**: Implement VPN access and manual tenant provisioning
2. **Phase 2**: Deploy TenantScore automation for namespace, RBAC, and NetworkPolicy management
3. **Phase 3**: Work on privilege escalation from pods, audit for weaknesses, enforce security policies on pod runtimes
4. **Phase 4**: Integrate Keycloak for centralized identity management
5. **Phase 5**: Add advanced monitoring and per-tenant observability

### TenantScore Structure Preview
```rust
pub struct TenantScore {
    pub tenant_config: TenantConfig,
    pub resource_quotas: ResourceQuotaConfig,
    pub network_isolation: NetworkIsolationPolicy,
    pub storage_access: StorageAccessConfig,
    pub rbac_config: RBACConfig,
}
```

### Future Enhancements
- **Cross-tenant service mesh**: For approved inter-tenant communication
- **Advanced monitoring**: Per-tenant Prometheus/Grafana instances
- **Backup automation**: Tenant-scoped backup policies
- **Cost allocation**: Detailed per-tenant resource usage tracking

This ADR establishes the foundation for secure, scalable multi-tenancy in Harmony-managed clusters while maintaining operational simplicity and cost effectiveness. A follow-up ADR will detail the Tenant abstraction and user management mechanisms within the Harmony framework.
````
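To make the ADR's preview concrete, here is a minimal usage sketch based on the `TenantScore` and `Maestro` APIs introduced elsewhere in this changeset; note that the shipped `TenantScore` currently wraps a single `TenantConfig` rather than the per-concern fields previewed above:

```rust
use harmony::{
    inventory::Inventory,
    maestro::Maestro,
    modules::tenant::TenantScore,
    topology::{K8sAnywhereTopology, tenant::TenantConfig},
};

#[tokio::main]
async fn main() {
    // Declare the tenant; defaults supply quotas and a deny-all inter-tenant ingress policy.
    let tenant = TenantScore {
        config: TenantConfig {
            name: "client-alpha".to_string(), // illustrative tenant name
            ..Default::default()
        },
    };

    // Register the score with the orchestrator so its Interpret runs against the topology.
    let mut maestro = Maestro::<K8sAnywhereTopology>::initialize(
        Inventory::autoload(),
        K8sAnywhereTopology::new(),
    )
    .await
    .unwrap();
    maestro.register_all(vec![Box::new(tenant)]);
}
```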
**`adr/tenant/NetworkPolicy.yaml`** (new file, +41)

```yaml
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: tenant-isolation-policy
  namespace: testtenant
spec:
  podSelector: {} # Selects all pods in the namespace
  policyTypes:
    - Ingress
    - Egress
  ingress:
    - from:
        - podSelector: {} # Allow from all pods in the same namespace
  egress:
    - to:
        - podSelector: {} # Allow to all pods in the same namespace
    - to:
        - podSelector: {}
          namespaceSelector:
            matchLabels:
              kubernetes.io/metadata.name: openshift-dns # Target the openshift-dns namespace
      # Note, only opening port 53 is not enough, will have to dig deeper into this one eventually
      # ports:
      #   - protocol: UDP
      #     port: 53
      #   - protocol: TCP
      #     port: 53
    # Allow egress to public internet only
    - to:
        - ipBlock:
            cidr: 0.0.0.0/0
            except:
              - 10.0.0.0/8 # RFC1918
              - 172.16.0.0/12 # RFC1918
              - 192.168.0.0/16 # RFC1918
              - 169.254.0.0/16 # Link-local
              - 127.0.0.0/8 # Loopback
              - 224.0.0.0/4 # Multicast
              - 240.0.0.0/4 # Reserved
              - 100.64.0.0/10 # Carrier-grade NAT
              - 0.0.0.0/8 # Reserved
```
**`adr/tenant/TestDeployment.yaml`** (new file, +95)

```yaml
apiVersion: v1
kind: Namespace
metadata:
  name: testtenant
---
apiVersion: v1
kind: Namespace
metadata:
  name: testtenant2
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: test-web
  namespace: testtenant
spec:
  replicas: 1
  selector:
    matchLabels:
      app: test-web
  template:
    metadata:
      labels:
        app: test-web
    spec:
      containers:
        - name: nginx
          image: nginxinc/nginx-unprivileged
          ports:
            - containerPort: 80
---
apiVersion: v1
kind: Service
metadata:
  name: test-web
  namespace: testtenant
spec:
  selector:
    app: test-web
  ports:
    - port: 80
      targetPort: 8080
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: test-client
  namespace: testtenant
spec:
  replicas: 1
  selector:
    matchLabels:
      app: test-client
  template:
    metadata:
      labels:
        app: test-client
    spec:
      containers:
        - name: curl
          image: curlimages/curl:latest
          command: ["/bin/sh", "-c", "sleep 3600"]
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: test-web
  namespace: testtenant2
spec:
  replicas: 1
  selector:
    matchLabels:
      app: test-web
  template:
    metadata:
      labels:
        app: test-web
    spec:
      containers:
        - name: nginx
          image: nginxinc/nginx-unprivileged
          ports:
            - containerPort: 80
---
apiVersion: v1
kind: Service
metadata:
  name: test-web
  namespace: testtenant2
spec:
  selector:
    app: test-web
  ports:
    - port: 80
      targetPort: 8080
```
```diff
@@ -4,9 +4,7 @@ use harmony::{
     maestro::Maestro,
     modules::{
         lamp::{LAMPConfig, LAMPScore},
-        monitoring::monitoring_alerting::{
-            AlertChannel, MonitoringAlertingStackScore, WebhookServiceType,
-        },
+        monitoring::monitoring_alerting::{AlertChannel, MonitoringAlertingStackScore},
     },
     topology::{K8sAnywhereTopology, Url},
 };
@@ -50,10 +48,6 @@ async fn main() {
 
     let mut monitoring_stack_score = MonitoringAlertingStackScore::new();
     monitoring_stack_score.namespace = Some(lamp_stack.config.namespace.clone());
-    monitoring_stack_score.alert_channel = Some(AlertChannel::WebHookUrl {
-        url: url,
-        webhook_service_type: WebhookServiceType::Discord,
-    });
 
     maestro.register_all(vec![Box::new(lamp_stack), Box::new(monitoring_stack_score)]);
     // Here we bootstrap the CLI, this gives some nice features if you need them
```
**`examples/tenant/Cargo.toml`** (new file, +18)

```toml
[package]
name = "example-tenant"
edition = "2024"
version.workspace = true
readme.workspace = true
license.workspace = true
publish = false

[dependencies]
harmony = { path = "../../harmony" }
harmony_cli = { path = "../../harmony_cli" }
harmony_types = { path = "../../harmony_types" }
cidr = { workspace = true }
tokio = { workspace = true }
harmony_macros = { path = "../../harmony_macros" }
log = { workspace = true }
env_logger = { workspace = true }
url = { workspace = true }
```
**`examples/tenant/src/main.rs`** (new file, +41)

```rust
use harmony::{
    data::Id,
    inventory::Inventory,
    maestro::Maestro,
    modules::tenant::TenantScore,
    topology::{K8sAnywhereTopology, tenant::TenantConfig},
};

#[tokio::main]
async fn main() {
    let tenant = TenantScore {
        config: TenantConfig {
            id: Id::default(),
            name: "TestTenant".to_string(),
            ..Default::default()
        },
    };

    let mut maestro = Maestro::<K8sAnywhereTopology>::initialize(
        Inventory::autoload(),
        K8sAnywhereTopology::new(),
    )
    .await
    .unwrap();

    maestro.register_all(vec![Box::new(tenant)]);
    harmony_cli::init(maestro, None).await.unwrap();
}

// TODO write tests
// - Create Tenant with default config mostly, make sure namespace is created
// - deploy sample client/server app with nginx unprivileged and a service
// - exec in the client pod and validate the following
//   - can reach internet
//   - can reach server pod
//   - can resolve dns queries to internet
//   - can resolve dns queries to services
//   - cannot reach services and pods in other namespaces
// - Create Tenant with specific cpu/ram/storage requests / limits and make sure they are enforced by trying to
//   deploy a pod with lower requests/limits (accepted) and higher requests/limits (rejected)
// - Create TenantCredentials and make sure they give only access to the correct tenant
```
```diff
@@ -6,6 +6,8 @@ readme.workspace = true
 license.workspace = true
 
 [dependencies]
+rand = "0.9"
+hex = "0.4"
 libredfish = "0.1.1"
 reqwest = { version = "0.11", features = ["blocking", "json"] }
 russh = "0.45.0"
@@ -49,3 +51,4 @@ fqdn = { version = "0.4.6", features = [
     "serde",
 ] }
 temp-dir = "0.1.14"
+dyn-clone = "1.0.19"
```
```diff
@@ -1,6 +1,24 @@
+use rand::distr::Alphanumeric;
+use rand::distr::SampleString;
+use std::time::SystemTime;
+use std::time::UNIX_EPOCH;
+
 use serde::{Deserialize, Serialize};
 
-#[derive(Debug, Clone, Serialize, Deserialize)]
+/// A unique identifier designed for ease of use.
+///
+/// You can pass it any String to use as an Id, or you can use the default format with `Id::default()`
+///
+/// The default format looks like this
+///
+/// `462d4c_g2COgai`
+///
+/// The first part is the unix timestamp in hexadecimal, which makes Ids easily sorted by creation time.
+/// The second part is a series of 7 random characters.
+///
+/// **It is not meant to be very secure or unique**, it is suitable for generating up to 10,000 items per
+/// second with a reasonable collision rate of 0.000014% as calculated by this calculator: https://kevingal.com/apps/collision.html
+#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
 pub struct Id {
     value: String,
 }
@@ -10,3 +28,26 @@ impl Id {
         Self { value }
     }
 }
+
+impl std::fmt::Display for Id {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        f.write_str(&self.value)
+    }
+}
+
+impl Default for Id {
+    fn default() -> Self {
+        let start = SystemTime::now();
+        let since_the_epoch = start
+            .duration_since(UNIX_EPOCH)
+            .expect("Time went backwards");
+        let timestamp = since_the_epoch.as_secs();
+
+        let hex_timestamp = format!("{:x}", timestamp & 0xffffff);
+
+        let random_part: String = Alphanumeric.sample_string(&mut rand::rng(), 7);
+
+        let value = format!("{}_{}", hex_timestamp, random_part);
+        Self { value }
+    }
+}
```
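For illustration, the default format can be reproduced by hand. This is a hypothetical standalone sketch of the scheme described in the doc comment; the timestamp and the random suffix are assumed values, not output from the real implementation:

```rust
fn main() {
    // Assumed example unix time; only the low 24 bits feed the prefix.
    let timestamp: u64 = 1_748_000_000;
    let hex_prefix = format!("{:x}", timestamp & 0xffffff);
    // The real implementation appends 7 random alphanumeric characters.
    println!("{hex_prefix}_g2COgai"); // prints "305d00_g2COgai" for this assumed timestamp
}
```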
```diff
@@ -20,6 +20,7 @@ pub enum InterpretName {
     Panic,
     OPNSense,
     K3dInstallation,
+    TenantInterpret,
 }
 
 impl std::fmt::Display for InterpretName {
@@ -35,6 +36,7 @@ impl std::fmt::Display for InterpretName {
             InterpretName::Panic => f.write_str("Panic"),
             InterpretName::OPNSense => f.write_str("OPNSense"),
             InterpretName::K3dInstallation => f.write_str("K3dInstallation"),
+            InterpretName::TenantInterpret => f.write_str("Tenant"),
         }
     }
 }
```
```diff
@@ -1,4 +1,4 @@
-use std::{process::Command, sync::Arc};
+use std::{io::Error, process::Command, sync::Arc};
 
 use async_trait::async_trait;
 use inquire::Confirm;
@@ -6,6 +6,7 @@ use log::{info, warn};
 use tokio::sync::OnceCell;
 
 use crate::{
+    executors::ExecutorError,
     interpret::{InterpretError, Outcome},
     inventory::Inventory,
     maestro::Maestro,
@@ -13,7 +14,13 @@ use crate::{
     topology::LocalhostTopology,
 };
 
-use super::{HelmCommand, K8sclient, Topology, k8s::K8sClient};
+use super::{
+    HelmCommand, K8sclient, Topology,
+    k8s::K8sClient,
+    tenant::{
+        ResourceLimits, TenantConfig, TenantManager, TenantNetworkPolicy, k8s::K8sTenantManager,
+    },
+};
 
 struct K8sState {
     client: Arc<K8sClient>,
@@ -21,6 +28,7 @@ struct K8sState {
     message: String,
 }
 
+#[derive(Debug)]
 enum K8sSource {
     LocalK3d,
     Kubeconfig,
@@ -28,6 +36,7 @@ enum K8sSource {
 
 pub struct K8sAnywhereTopology {
     k8s_state: OnceCell<Option<K8sState>>,
+    tenant_manager: OnceCell<K8sTenantManager>,
 }
 
 #[async_trait]
@@ -51,6 +60,7 @@ impl K8sAnywhereTopology {
     pub fn new() -> Self {
         Self {
             k8s_state: OnceCell::new(),
+            tenant_manager: OnceCell::new(),
         }
     }
 
@@ -159,6 +169,31 @@ impl K8sAnywhereTopology {
 
         Ok(Some(state))
     }
+
+    async fn ensure_k8s_tenant_manager(&self) -> Result<(), String> {
+        if let Some(_) = self.tenant_manager.get() {
+            return Ok(());
+        }
+
+        self.tenant_manager
+            .get_or_try_init(async || -> Result<K8sTenantManager, String> {
+                let k8s_client = self.k8s_client().await?;
+                Ok(K8sTenantManager::new(k8s_client))
+            })
+            .await
+            .unwrap();
+
+        Ok(())
+    }
+
+    fn get_k8s_tenant_manager(&self) -> Result<&K8sTenantManager, ExecutorError> {
+        match self.tenant_manager.get() {
+            Some(t) => Ok(t),
+            None => Err(ExecutorError::UnexpectedError(
+                "K8sTenantManager not available".to_string(),
+            )),
+        }
+    }
 }
 
 struct K8sAnywhereConfig {
@@ -198,6 +233,10 @@ impl Topology for K8sAnywhereTopology {
             "No K8s client could be found or installed".to_string(),
         ))?;
 
+        self.ensure_k8s_tenant_manager()
+            .await
+            .map_err(|e| InterpretError::new(e))?;
+
         match self.is_helm_available() {
             Ok(()) => Ok(Outcome::success(format!(
                 "{} + helm available",
@@ -209,3 +248,38 @@ impl Topology for K8sAnywhereTopology {
 }
 
 impl HelmCommand for K8sAnywhereTopology {}
+
+#[async_trait]
+impl TenantManager for K8sAnywhereTopology {
+    async fn provision_tenant(&self, config: &TenantConfig) -> Result<(), ExecutorError> {
+        self.get_k8s_tenant_manager()?
+            .provision_tenant(config)
+            .await
+    }
+
+    async fn update_tenant_resource_limits(
+        &self,
+        tenant_name: &str,
+        new_limits: &ResourceLimits,
+    ) -> Result<(), ExecutorError> {
+        self.get_k8s_tenant_manager()?
+            .update_tenant_resource_limits(tenant_name, new_limits)
+            .await
+    }
+
+    async fn update_tenant_network_policy(
+        &self,
+        tenant_name: &str,
+        new_policy: &TenantNetworkPolicy,
+    ) -> Result<(), ExecutorError> {
+        self.get_k8s_tenant_manager()?
+            .update_tenant_network_policy(tenant_name, new_policy)
+            .await
+    }
+
+    async fn deprovision_tenant(&self, tenant_name: &str) -> Result<(), ExecutorError> {
+        self.get_k8s_tenant_manager()?
+            .deprovision_tenant(tenant_name)
+            .await
+    }
+}
```
```diff
@@ -7,6 +7,12 @@ use serde::Serialize;
 use super::{IpAddress, LogicalHost};
 use crate::executors::ExecutorError;
 
+impl std::fmt::Debug for dyn LoadBalancer {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        f.write_fmt(format_args!("LoadBalancer {}", self.get_ip()))
+    }
+}
+
 #[async_trait]
 pub trait LoadBalancer: Send + Sync {
     fn get_ip(&self) -> IpAddress;
@@ -32,11 +38,6 @@ pub trait LoadBalancer: Send + Sync {
     }
 }
 
-impl std::fmt::Debug for dyn LoadBalancer {
-    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
-        f.write_fmt(format_args!("LoadBalancer {}", self.get_ip()))
-    }
-}
 #[derive(Debug, PartialEq, Clone, Serialize)]
 pub struct LoadBalancerService {
     pub backend_servers: Vec<BackendServer>,
```
```diff
@@ -3,6 +3,8 @@ mod host_binding;
 mod http;
 mod k8s_anywhere;
 mod localhost;
+pub mod oberservability;
+pub mod tenant;
 pub use k8s_anywhere::*;
 pub use localhost::*;
 pub mod k8s;
```
**`harmony/src/domain/topology/oberservability/mod.rs`** (new file, +1)

```rust
pub mod monitoring;
```
**`harmony/src/domain/topology/oberservability/monitoring.rs`** (new file, +31)

```rust
use async_trait::async_trait;

use std::fmt::Debug;
use url::Url;

use crate::interpret::InterpretError;

use crate::{interpret::Outcome, topology::Topology};

/// Represents an entity responsible for collecting and organizing observability data
/// from various telemetry sources.
///
/// A `Monitor` abstracts the logic required to scrape, aggregate, and structure
/// monitoring data, enabling consistent processing regardless of the underlying data source.
#[async_trait]
pub trait Monitor<T: Topology>: Debug + Send + Sync {
    async fn deploy_monitor(
        &self,
        topology: &T,
        alert_receivers: Vec<AlertReceiver>,
    ) -> Result<Outcome, InterpretError>;

    async fn delete_monitor(
        &self,
        topology: &T,
        alert_receivers: Vec<AlertReceiver>,
    ) -> Result<Outcome, InterpretError>;
}

pub struct AlertReceiver {
    pub receiver_id: String,
}
```
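As an illustration of this contract, a hypothetical `Monitor` that only reports success might look like this (illustrative only; it assumes the trait and types above are in scope and deploys nothing):

```rust
use async_trait::async_trait;

use crate::interpret::{InterpretError, Outcome};
use crate::topology::Topology;

/// Illustrative monitor that reports success without deploying anything.
#[derive(Debug)]
struct NoopMonitor;

#[async_trait]
impl<T: Topology> Monitor<T> for NoopMonitor {
    async fn deploy_monitor(
        &self,
        _topology: &T,
        alert_receivers: Vec<AlertReceiver>,
    ) -> Result<Outcome, InterpretError> {
        Ok(Outcome::success(format!(
            "would deploy monitor with {} alert receiver(s)",
            alert_receivers.len()
        )))
    }

    async fn delete_monitor(
        &self,
        _topology: &T,
        _alert_receivers: Vec<AlertReceiver>,
    ) -> Result<Outcome, InterpretError> {
        Ok(Outcome::success("would delete monitor".to_string()))
    }
}
```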
**`harmony/src/domain/topology/tenant/k8s.rs`** (new file, +110)

```rust
use std::sync::Arc;

use crate::{executors::ExecutorError, topology::k8s::K8sClient};
use async_trait::async_trait;
use derive_new::new;
use k8s_openapi::api::core::v1::Namespace;
use serde_json::json;

use super::{ResourceLimits, TenantConfig, TenantManager, TenantNetworkPolicy};

#[derive(new)]
pub struct K8sTenantManager {
    k8s_client: Arc<K8sClient>,
}

#[async_trait]
impl TenantManager for K8sTenantManager {
    async fn provision_tenant(&self, config: &TenantConfig) -> Result<(), ExecutorError> {
        let namespace = json!(
            {
                "apiVersion": "v1",
                "kind": "Namespace",
                "metadata": {
                    "labels": {
                        "harmony.nationtech.io/tenant.id": config.id,
                        "harmony.nationtech.io/tenant.name": config.name,
                    },
                    "name": config.name,
                },
            }
        );
        todo!("Validate that when tenant already exists (by id) that name has not changed");

        let namespace: Namespace = serde_json::from_value(namespace).unwrap();

        let resource_quota = json!(
            {
                "apiVersion": "v1",
                "kind": "List",
                "items": [
                    {
                        "apiVersion": "v1",
                        "kind": "ResourceQuota",
                        "metadata": {
                            "name": config.name,
                            "labels": {
                                "harmony.nationtech.io/tenant.id": config.id,
                                "harmony.nationtech.io/tenant.name": config.name,
                            },
                            "namespace": config.name,
                        },
                        "spec": {
                            "hard": {
                                "limits.cpu": format!("{:.0}", config.resource_limits.cpu_limit_cores),
                                "limits.memory": format!("{:.3}Gi", config.resource_limits.memory_limit_gb),
                                "requests.cpu": format!("{:.0}", config.resource_limits.cpu_request_cores),
                                "requests.memory": format!("{:.3}Gi", config.resource_limits.memory_request_gb),
                                "requests.storage": format!("{:.3}", config.resource_limits.storage_total_gb),
                                "pods": "20",
                                "services": "10",
                                "configmaps": "30",
                                "secrets": "30",
                                "persistentvolumeclaims": "15",
                                "services.loadbalancers": "2",
                                "services.nodeports": "5",
                            }
                        }
                    }
                ]
            }
        );

        let network_policy = json!({
            "apiVersion": "networking.k8s.io/v1",
            "kind": "NetworkPolicy",
            "metadata": {
                "name": format!("{}-network-policy", config.name),
            },
            "spec": {
                "podSelector": {},
                "egress": [],
                "ingress": [],
                "policyTypes": []
            }
        });
    }

    async fn update_tenant_resource_limits(
        &self,
        tenant_name: &str,
        new_limits: &ResourceLimits,
    ) -> Result<(), ExecutorError> {
        todo!()
    }

    async fn update_tenant_network_policy(
        &self,
        tenant_name: &str,
        new_policy: &TenantNetworkPolicy,
    ) -> Result<(), ExecutorError> {
        todo!()
    }

    async fn deprovision_tenant(&self, tenant_name: &str) -> Result<(), ExecutorError> {
        todo!()
    }
}
```
**`harmony/src/domain/topology/tenant/manager.rs`** (new file, +46)

```rust
use super::*;
use async_trait::async_trait;

use crate::executors::ExecutorError;

#[async_trait]
pub trait TenantManager {
    /// Provisions a new tenant based on the provided configuration.
    /// This operation should be idempotent; if a tenant with the same `config.name`
    /// already exists and matches the config, it will succeed without changes.
    /// If it exists but differs, it will be updated, or an error will be returned
    /// if the update action is not supported.
    ///
    /// # Arguments
    /// * `config`: The desired configuration for the new tenant.
    async fn provision_tenant(&self, config: &TenantConfig) -> Result<(), ExecutorError>;

    /// Updates the resource limits for an existing tenant.
    ///
    /// # Arguments
    /// * `tenant_name`: The logical name of the tenant to update.
    /// * `new_limits`: The new set of resource limits to apply.
    async fn update_tenant_resource_limits(
        &self,
        tenant_name: &str,
        new_limits: &ResourceLimits,
    ) -> Result<(), ExecutorError>;

    /// Updates the high-level network isolation policy for an existing tenant.
    ///
    /// # Arguments
    /// * `tenant_name`: The logical name of the tenant to update.
    /// * `new_policy`: The new network policy to apply.
    async fn update_tenant_network_policy(
        &self,
        tenant_name: &str,
        new_policy: &TenantNetworkPolicy,
    ) -> Result<(), ExecutorError>;

    /// Decommissions an existing tenant, removing its isolated context and associated resources.
    /// This operation should be idempotent.
    ///
    /// # Arguments
    /// * `tenant_name`: The logical name of the tenant to deprovision.
    async fn deprovision_tenant(&self, tenant_name: &str) -> Result<(), ExecutorError>;
}
```
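To illustrate the contract, a hypothetical no-op implementation (not part of this changeset; the kind of thing a test double might use, assuming the trait and types above) could look like this:

```rust
use async_trait::async_trait;

/// Hypothetical no-op TenantManager: satisfies the trait without touching any cluster.
struct NoopTenantManager;

#[async_trait]
impl TenantManager for NoopTenantManager {
    async fn provision_tenant(&self, config: &TenantConfig) -> Result<(), ExecutorError> {
        // Idempotent by construction: provisioning the same config twice is a no-op.
        log::info!("would provision tenant {}", config.name);
        Ok(())
    }

    async fn update_tenant_resource_limits(
        &self,
        tenant_name: &str,
        _new_limits: &ResourceLimits,
    ) -> Result<(), ExecutorError> {
        log::info!("would update resource limits for {tenant_name}");
        Ok(())
    }

    async fn update_tenant_network_policy(
        &self,
        tenant_name: &str,
        _new_policy: &TenantNetworkPolicy,
    ) -> Result<(), ExecutorError> {
        log::info!("would update network policy for {tenant_name}");
        Ok(())
    }

    async fn deprovision_tenant(&self, tenant_name: &str) -> Result<(), ExecutorError> {
        log::info!("would deprovision {tenant_name}");
        Ok(())
    }
}
```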
**`harmony/src/domain/topology/tenant/mod.rs`** (new file, +89)

```rust
pub mod k8s;
mod manager;
pub use manager::*;
use serde::{Deserialize, Serialize};

use std::collections::HashMap;

use crate::data::Id;

#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] // Assuming serde for Scores
pub struct TenantConfig {
    /// This will be used as the primary unique identifier for management operations and will never
    /// change for the entire lifetime of the tenant
    pub id: Id,

    /// A human-readable name for the tenant (e.g., "client-alpha", "project-phoenix").
    pub name: String,

    /// Desired resource allocations and limits for the tenant.
    pub resource_limits: ResourceLimits,

    /// High-level network isolation policies for the tenant.
    pub network_policy: TenantNetworkPolicy,

    /// Key-value pairs for provider-specific tagging, labeling, or metadata.
    /// Useful for billing, organization, or filtering within the provider's console.
    pub labels_or_tags: HashMap<String, String>,
}

impl Default for TenantConfig {
    fn default() -> Self {
        let id = Id::default();
        Self {
            name: format!("tenant_{id}"),
            id,
            resource_limits: ResourceLimits {
                cpu_request_cores: 4.0,
                cpu_limit_cores: 4.0,
                memory_request_gb: 4.0,
                memory_limit_gb: 4.0,
                storage_total_gb: 20.0,
            },
            network_policy: TenantNetworkPolicy {
                default_inter_tenant_ingress: InterTenantIngressPolicy::DenyAll,
                default_internet_egress: InternetEgressPolicy::AllowAll,
            },
            labels_or_tags: HashMap::new(),
        }
    }
}

#[derive(Debug, Clone, PartialEq, Serialize, Deserialize, Default)]
pub struct ResourceLimits {
    /// Requested/guaranteed CPU cores (e.g., 2.0).
    pub cpu_request_cores: f32,
    /// Maximum CPU cores the tenant can burst to (e.g., 4.0).
    pub cpu_limit_cores: f32,

    /// Requested/guaranteed memory in gigabytes (e.g., 8.0).
    pub memory_request_gb: f32,
    /// Maximum memory in gigabytes the tenant can burst to (e.g., 16.0).
    pub memory_limit_gb: f32,

    /// Total persistent storage allocation in gigabytes across all volumes.
    pub storage_total_gb: f32,
}

#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct TenantNetworkPolicy {
    /// Policy for ingress traffic originating from other tenants within the same Harmony-managed environment.
    pub default_inter_tenant_ingress: InterTenantIngressPolicy,

    /// Policy for egress traffic destined for the public internet.
    pub default_internet_egress: InternetEgressPolicy,
}

#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub enum InterTenantIngressPolicy {
    /// Deny all traffic from other tenants by default.
    DenyAll,
}

#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub enum InternetEgressPolicy {
    /// Allow all outbound traffic to the internet.
    AllowAll,
    /// Deny all outbound traffic to the internet by default.
    DenyAll,
}
```
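A minimal sketch of overriding these defaults for a larger tenant; the field values and the `billing` label are illustrative only:

```rust
use std::collections::HashMap;

// Builds on the TenantConfig and ResourceLimits types above; values are illustrative.
fn large_tenant() -> TenantConfig {
    TenantConfig {
        name: "client-alpha".to_string(),
        resource_limits: ResourceLimits {
            cpu_request_cores: 8.0,
            cpu_limit_cores: 16.0,
            memory_request_gb: 16.0,
            memory_limit_gb: 32.0,
            storage_total_gb: 200.0,
        },
        labels_or_tags: HashMap::from([("billing".to_string(), "client-alpha".to_string())]),
        // Keep the generated id and the default deny-all/allow-all network policy.
        ..TenantConfig::default()
    }
}
```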
```diff
@@ -23,7 +23,7 @@ pub struct HelmRepository {
     force_update: bool,
 }
 impl HelmRepository {
-    pub(crate) fn new(name: String, url: Url, force_update: bool) -> Self {
+    pub fn new(name: String, url: Url, force_update: bool) -> Self {
         Self {
             name,
             url,
```
```diff
@@ -104,7 +104,10 @@ impl HelmChartInterpret {
 
 fn run_helm_command(args: &[&str]) -> Result<Output, InterpretError> {
     let command_str = format!("helm {}", args.join(" "));
-    debug!("Got KUBECONFIG: `{}`", std::env::var("KUBECONFIG").unwrap());
+    debug!(
+        "Got KUBECONFIG: `{}`",
+        std::env::var("KUBECONFIG").unwrap_or("".to_string())
+    );
     debug!("Running Helm command: `{}`", command_str);
 
     let output = Command::new("helm")
```
```diff
@@ -1,12 +1,9 @@
 use async_trait::async_trait;
 use log::debug;
-use non_blank_string_rs::NonBlankString;
 use serde::Serialize;
 use std::collections::HashMap;
-use std::env::temp_dir;
-use std::ffi::OsStr;
 use std::io::ErrorKind;
-use std::path::{Path, PathBuf};
+use std::path::PathBuf;
 use std::process::{Command, Output};
 use temp_dir::{self, TempDir};
 use temp_file::TempFile;
```
```diff
@@ -12,4 +12,5 @@ pub mod load_balancer;
 pub mod monitoring;
 pub mod okd;
 pub mod opnsense;
+pub mod tenant;
 pub mod tftp;
```
```diff
@@ -2,7 +2,6 @@ use serde::Serialize;
 
 use super::monitoring_alerting::AlertChannel;
 
-
 #[derive(Debug, Clone, Serialize)]
 pub struct KubePrometheusConfig {
     pub namespace: String,
```
```diff
@@ -1,32 +1,29 @@
 use std::str::FromStr;
 
 use non_blank_string_rs::NonBlankString;
+use url::Url;
 
 use crate::modules::helm::chart::HelmChartScore;
 
-use super::{config::KubePrometheusConfig, monitoring_alerting::AlertChannel};
-
-fn get_discord_alert_manager_score(config: &KubePrometheusConfig) -> Option<HelmChartScore> {
-    let (url, name) = config.alert_channel.iter().find_map(|channel| {
-        if let AlertChannel::Discord { webhook_url, name } = channel {
-            Some((webhook_url, name))
-        } else {
-            None
-        }
-    })?;
-
+pub fn discord_alert_manager_score(
+    webhook_url: Url,
+    namespace: String,
+    name: String,
+) -> HelmChartScore {
     let values = format!(
         r#"
 environment:
   - name: "DISCORD_WEBHOOK"
-    value: "{url}"
+    value: "{webhook_url}"
 "#,
     );
 
-    Some(HelmChartScore {
-        namespace: Some(NonBlankString::from_str(&config.namespace).unwrap()),
+    HelmChartScore {
+        namespace: Some(NonBlankString::from_str(&namespace).unwrap()),
         release_name: NonBlankString::from_str(&name).unwrap(),
-        chart_name: NonBlankString::from_str("oci://hub.nationtech.io/library/alertmanager-discord")
+        chart_name: NonBlankString::from_str(
+            "oci://hub.nationtech.io/library/alertmanager-discord",
+        )
         .unwrap(),
         chart_version: None,
         values_overrides: None,
@@ -34,13 +31,5 @@ environment:
         create_namespace: true,
         install_only: true,
         repository: None,
-    })
-}
-
-pub fn discord_alert_manager_score(config: &KubePrometheusConfig) -> HelmChartScore {
-    if let Some(chart) = get_discord_alert_manager_score(config) {
-        chart
-    } else {
-        panic!("Expected discord alert manager helm chart");
-    }
+    }
 }
```
**`harmony/src/modules/monitoring/discord_webhook_sender.rs`** (new file, +55)

```rust
use async_trait::async_trait;
use serde_json::Value;
use url::Url;

use crate::{
    interpret::{InterpretError, Outcome},
    topology::K8sAnywhereTopology,
};

#[derive(Debug, Clone)]
pub struct DiscordWebhookConfig {
    pub webhook_url: Url,
    pub name: String,
    pub send_resolved_notifications: bool,
}

pub trait DiscordWebhookReceiver {
    fn deploy_discord_webhook_receiver(
        &self,
        _notification_adapter_id: &str,
    ) -> Result<Outcome, InterpretError>;

    fn delete_discord_webhook_receiver(
        &self,
        _notification_adapter_id: &str,
    ) -> Result<Outcome, InterpretError>;
}

// trait used to generate alert manager config values impl<T: Topology + AlertManagerConfig> Monitor for KubePrometheus
pub trait AlertManagerConfig<T> {
    fn get_alert_manager_config(&self) -> Result<Value, InterpretError>;
}

#[async_trait]
impl<T: DiscordWebhookReceiver> AlertManagerConfig<T> for DiscordWebhookConfig {
    fn get_alert_manager_config(&self) -> Result<Value, InterpretError> {
        todo!()
    }
}

#[async_trait]
impl DiscordWebhookReceiver for K8sAnywhereTopology {
    fn deploy_discord_webhook_receiver(
        &self,
        _notification_adapter_id: &str,
    ) -> Result<Outcome, InterpretError> {
        todo!()
    }

    fn delete_discord_webhook_receiver(
        &self,
        _notification_adapter_id: &str,
    ) -> Result<Outcome, InterpretError> {
        todo!()
    }
}
```
```diff
@@ -1,4 +1,5 @@
+mod config;
+mod discord_alert_manager;
+pub mod discord_webhook_sender;
 mod kube_prometheus;
 pub mod monitoring_alerting;
-mod discord_alert_manager;
-mod config;
```
```diff
@@ -96,28 +96,28 @@ impl MonitoringAlertingStackInterpret {
         topology: &T,
         config: &KubePrometheusConfig,
     ) -> Result<Outcome, InterpretError> {
-        let mut outcomes = vec![];
-
-        for channel in &self.score.alert_channel {
-            let outcome = match channel {
-                AlertChannel::Discord { .. } => {
-                    discord_alert_manager_score(config)
-                        .create_interpret()
-                        .execute(inventory, topology)
-                        .await
-                }
-                AlertChannel::Slack { .. } => Ok(Outcome::success(
-                    "No extra configs for slack alerting".to_string(),
-                )),
-                AlertChannel::Smpt { .. } => {
-                    todo!()
-                }
-            };
-            outcomes.push(outcome);
-        }
-        for result in outcomes {
-            result?;
-        }
+        //let mut outcomes = vec![];
+
+        //for channel in &self.score.alert_channel {
+        //    let outcome = match channel {
+        //        AlertChannel::Discord { .. } => {
+        //            discord_alert_manager_score(config)
+        //                .create_interpret()
+        //                .execute(inventory, topology)
+        //                .await
+        //        }
+        //        AlertChannel::Slack { .. } => Ok(Outcome::success(
+        //            "No extra configs for slack alerting".to_string(),
+        //        )),
+        //        AlertChannel::Smpt { .. } => {
+        //            todo!()
+        //        }
+        //    };
+        //    outcomes.push(outcome);
+        //}
+        //for result in outcomes {
+        //    result?;
+        //}
 
         Ok(Outcome::success("All alert channels deployed".to_string()))
     }
```
**`harmony/src/modules/tenant/mod.rs`** (new file, +67)

```rust
use async_trait::async_trait;
use serde::Serialize;

use crate::{
    data::{Id, Version},
    interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
    inventory::Inventory,
    score::Score,
    topology::{
        Topology,
        tenant::{TenantConfig, TenantManager},
    },
};

#[derive(Debug, Serialize, Clone)]
pub struct TenantScore {
    pub config: TenantConfig,
}

impl<T: Topology + TenantManager> Score<T> for TenantScore {
    fn create_interpret(&self) -> Box<dyn crate::interpret::Interpret<T>> {
        Box::new(TenantInterpret {
            tenant_config: self.config.clone(),
        })
    }

    fn name(&self) -> String {
        format!("{} TenantScore", self.config.name)
    }
}

#[derive(Debug)]
pub struct TenantInterpret {
    tenant_config: TenantConfig,
}

#[async_trait]
impl<T: Topology + TenantManager> Interpret<T> for TenantInterpret {
    async fn execute(
        &self,
        _inventory: &Inventory,
        topology: &T,
    ) -> Result<Outcome, InterpretError> {
        topology.provision_tenant(&self.tenant_config).await?;

        Ok(Outcome::success(format!(
            "Successfully provisioned tenant {} with id {}",
            self.tenant_config.name, self.tenant_config.id
        )))
    }

    fn get_name(&self) -> InterpretName {
        InterpretName::TenantInterpret
    }

    fn get_version(&self) -> Version {
        todo!()
    }

    fn get_status(&self) -> InterpretStatus {
        todo!()
    }

    fn get_children(&self) -> Vec<Id> {
        todo!()
    }
}
```