Compare commits
1 commit
feat/webho...runtime-pr

| Author | SHA1 | Date |
|---|---|---|
|  | 2f7c4924c1 |  |
@@ -1,5 +0,0 @@
[target.x86_64-pc-windows-msvc]
rustflags = ["-C", "link-arg=/STACK:8000000"]

[target.x86_64-pc-windows-gnu]
rustflags = ["-C", "link-arg=-Wl,--stack,8000000"]
@@ -1,2 +0,0 @@
target/
Dockerfile
@@ -1,18 +0,0 @@
name: Run Check Script
on:
  push:
    branches:
      - master
  pull_request:

jobs:
  check:
    runs-on: docker
    container:
      image: hub.nationtech.io/harmony/harmony_composer:latest@sha256:eb0406fcb95c63df9b7c4b19bc50ad7914dd8232ce98e9c9abef628e07c69386
    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Run check script
        run: bash check.sh
@@ -1,95 +0,0 @@
|
||||
name: Compile and package harmony_composer
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- master
|
||||
|
||||
jobs:
|
||||
package_harmony_composer:
|
||||
container:
|
||||
image: hub.nationtech.io/harmony/harmony_composer:latest@sha256:eb0406fcb95c63df9b7c4b19bc50ad7914dd8232ce98e9c9abef628e07c69386
|
||||
runs-on: dind
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Build for Linux x86_64
|
||||
run: cargo build --release --bin harmony_composer --target x86_64-unknown-linux-gnu
|
||||
|
||||
- name: Build for Windows x86_64 GNU
|
||||
run: cargo build --release --bin harmony_composer --target x86_64-pc-windows-gnu
|
||||
|
||||
- name: Setup log into hub.nationtech.io
|
||||
uses: docker/login-action@v3
|
||||
with:
|
||||
registry: hub.nationtech.io
|
||||
username: ${{ secrets.HUB_BOT_USER }}
|
||||
password: ${{ secrets.HUB_BOT_PASSWORD }}
|
||||
|
||||
# TODO: build ARM images and MacOS binaries (or other targets) too
|
||||
|
||||
- name: Update snapshot-latest tag
|
||||
run: |
|
||||
git config user.name "Gitea CI"
|
||||
git config user.email "ci@nationtech.io"
|
||||
git tag -f snapshot-latest
|
||||
git push origin snapshot-latest --force
|
||||
|
||||
- name: Install jq
|
||||
run: apt install -y jq # The current image includes apt lists so we don't have to apt update and rm /var/lib/apt... every time. But if the image is optimized it won't work anymore
|
||||
|
||||
- name: Create or update release
|
||||
run: |
|
||||
# First, check if release exists and delete it if it does
|
||||
RELEASE_ID=$(curl -s -X GET \
|
||||
-H "Authorization: token ${{ secrets.GITEATOKEN }}" \
|
||||
"https://git.nationtech.io/api/v1/repos/nationtech/harmony/releases/tags/snapshot-latest" \
|
||||
| jq -r '.id // empty')
|
||||
|
||||
if [ -n "$RELEASE_ID" ]; then
|
||||
# Delete existing release
|
||||
curl -X DELETE \
|
||||
-H "Authorization: token ${{ secrets.GITEATOKEN }}" \
|
||||
"https://git.nationtech.io/api/v1/repos/nationtech/harmony/releases/$RELEASE_ID"
|
||||
fi
|
||||
|
||||
# Create new release
|
||||
RESPONSE=$(curl -X POST \
|
||||
-H "Authorization: token ${{ secrets.GITEATOKEN }}" \
|
||||
-H "Content-Type: application/json" \
|
||||
-d '{
|
||||
"tag_name": "snapshot-latest",
|
||||
"name": "Latest Snapshot",
|
||||
"body": "Automated snapshot build from master branch",
|
||||
"draft": false,
|
||||
"prerelease": true
|
||||
}' \
|
||||
"https://git.nationtech.io/api/v1/repos/nationtech/harmony/releases")
|
||||
|
||||
echo "RELEASE_ID=$(echo $RESPONSE | jq -r '.id')" >> $GITHUB_ENV
|
||||
|
||||
- name: Upload Linux binary
|
||||
run: |
|
||||
curl -X POST \
|
||||
-H "Authorization: token ${{ secrets.GITEATOKEN }}" \
|
||||
-H "Content-Type: application/octet-stream" \
|
||||
--data-binary "@target/x86_64-unknown-linux-gnu/release/harmony_composer" \
|
||||
"https://git.nationtech.io/api/v1/repos/nationtech/harmony/releases/${{ env.RELEASE_ID }}/assets?name=harmony_composer"
|
||||
|
||||
- name: Upload Windows binary
|
||||
run: |
|
||||
curl -X POST \
|
||||
-H "Authorization: token ${{ secrets.GITEATOKEN }}" \
|
||||
-H "Content-Type: application/octet-stream" \
|
||||
--data-binary "@target/x86_64-pc-windows-gnu/release/harmony_composer.exe" \
|
||||
"https://git.nationtech.io/api/v1/repos/nationtech/harmony/releases/${{ env.RELEASE_ID }}/assets?name=harmony_composer.exe"
|
||||
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v3
|
||||
|
||||
- name: Build and push
|
||||
uses: docker/build-push-action@v6
|
||||
with:
|
||||
context: .
|
||||
push: true
|
||||
tags: hub.nationtech.io/harmony/harmony_composer:latest
|
||||
@@ -1,36 +0,0 @@
# Contributing to the Harmony project

## Write small PRs

Aim for the smallest piece of work that is mergeable.

Mergeable means that:

- it does not break the build
- it moves the codebase one step forward

PRs can be many things; they do not have to be complete features.

### What a PR **should** be

- Introduce a new trait: this is the place to discuss the new trait, its design and its implementation
- A new implementation of a trait: for example, a new concrete implementation of the LoadBalancer trait
- A new CI check: something that improves quality, robustness, or CI performance
- Documentation improvements
- Refactoring
- Bugfix

### What a PR **should not** be

- Large. Anything over 200 lines (excluding generated lines) should have a very good reason to be this large.
- A mix of refactoring, bug fixes, and new features.
- Introducing multiple new features or ideas at once.
- Multiple new implementations of a trait or functionality at once

The general idea is to keep PRs small and single-purpose.

## Commit message formatting

We follow the Conventional Commits guidelines:

https://www.conventionalcommits.org/en/v1.0.0/
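For example, commit titles such as `feat(monitoring): add Discord webhook alert receiver` or `fix(cli): handle empty filter queries` follow the `type(scope): description` pattern (the scopes here are purely illustrative).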
Cargo.lock (generated, 740 lines changed): diff suppressed because it is too large.

Cargo.toml (12 lines changed)
@@ -11,7 +11,6 @@ members = [
|
||||
"opnsense-config-xml",
|
||||
"harmony_cli",
|
||||
"k3d",
|
||||
"harmony_composer",
|
||||
]
|
||||
|
||||
[workspace.package]
|
||||
@@ -24,13 +23,8 @@ log = "0.4.22"
|
||||
env_logger = "0.11.5"
|
||||
derive-new = "0.7.0"
|
||||
async-trait = "0.1.82"
|
||||
tokio = { version = "1.40.0", features = [
|
||||
"io-std",
|
||||
"fs",
|
||||
"macros",
|
||||
"rt-multi-thread",
|
||||
] }
|
||||
cidr = { features = ["serde"], version = "0.2" }
|
||||
tokio = { version = "1.40.0", features = ["io-std", "fs", "macros", "rt-multi-thread"] }
|
||||
cidr = "0.2.3"
|
||||
russh = "0.45.0"
|
||||
russh-keys = "0.45.0"
|
||||
rand = "0.8.5"
|
||||
@@ -41,8 +35,6 @@ serde_yaml = "0.9.34"
|
||||
serde-value = "0.7.0"
|
||||
http = "1.2.0"
|
||||
inquire = "0.7.5"
|
||||
convert_case = "0.8.0"
|
||||
chrono = "0.4"
|
||||
|
||||
[workspace.dependencies.uuid]
|
||||
version = "1.11.0"
|
||||
|
||||
Dockerfile (25 lines)
@@ -1,25 +0,0 @@
FROM docker.io/rust:1.87.0 AS build

WORKDIR /app

COPY . .

RUN cargo build --release --bin harmony_composer

FROM docker.io/rust:1.87.0

WORKDIR /app

RUN rustup target add x86_64-pc-windows-gnu
RUN rustup target add x86_64-unknown-linux-gnu
RUN rustup component add rustfmt

RUN apt update

# TODO: Consider adding more supported targets
# nodejs for checkout action, docker for building containers, mingw for cross-compiling for windows
RUN apt install -y nodejs docker.io mingw-w64

COPY --from=build /app/target/release/harmony_composer .

ENTRYPOINT ["/app/harmony_composer"]
README.md (162 lines changed)
@@ -1,151 +1,33 @@
|
||||
# Harmony : Open-source infrastructure orchestration that treats your platform like first-class code.
|
||||
*By [NationTech](https://nationtech.io)*
|
||||
# Harmony : Open Infrastructure Orchestration
|
||||
|
||||
[](https://git.nationtech.io/nationtech/harmony)
|
||||
[](LICENSE)
|
||||
## Quick demo
|
||||
|
||||
### Unify
|
||||
`cargo run -p example-tui`
|
||||
|
||||
- **Project Scaffolding**
|
||||
- **Infrastructure Provisioning**
|
||||
- **Application Deployment**
|
||||
- **Day-2 operations**
|
||||
This will launch Harmony's minimalist terminal ui which embeds a few demo scores.
|
||||
|
||||
All in **one strongly-typed Rust codebase**.
|
||||
Usage instructions will be displayed at the bottom of the TUI.
|
||||
|
||||
### Deploy anywhere
|
||||
`cargo run --bin example-cli -- --help`
|
||||
|
||||
From a **developer laptop** to a **global production cluster**, a single **source of truth** drives the **full software lifecycle.**
|
||||
This is the harmony CLI, a minimal implementation
|
||||
|
||||
---
|
||||
The current help text:
|
||||
|
||||
## 1 · The Harmony Philosophy
|
||||
````
|
||||
Usage: example-cli [OPTIONS]
|
||||
|
||||
Infrastructure is essential, but it shouldn’t be your core business. Harmony is built on three guiding principles that make modern platforms reliable, repeatable, and easy to reason about.
|
||||
Options:
|
||||
-y, --yes Run score(s) or not
|
||||
-f, --filter <FILTER> Filter query
|
||||
-i, --interactive Run interactive TUI or not
|
||||
-a, --all Run all or nth, defaults to all
|
||||
-n, --number <NUMBER> Run nth matching, zero indexed [default: 0]
|
||||
-l, --list list scores, will also be affected by run filter
|
||||
-h, --help Print help
|
||||
-V, --version Print version
```
|
||||
|
||||
| Principle | What it means for you |
|
||||
|-----------|-----------------------|
|
||||
| **Infrastructure as Resilient Code** | Replace sprawling YAML and bash scripts with type-safe Rust. Test, refactor, and version your platform just like application code. |
|
||||
| **Prove It Works — Before You Deploy** | Harmony uses the compiler to verify that your application’s needs match the target environment’s capabilities at **compile-time**, eliminating an entire class of runtime outages. |
|
||||
| **One Unified Model** | Software and infrastructure are a single system. Harmony models them together, enabling deep automation—from bare-metal servers to Kubernetes workloads—with zero context switching. |
|
||||
## Core architecture
|
||||
|
||||
These principles surface as simple, ergonomic Rust APIs that let teams focus on their product while trusting the platform underneath.
|
||||
|
||||
---
|
||||
|
||||
## 2 · Quick Start
|
||||
|
||||
The snippet below spins up a complete **production-grade LAMP stack** with monitoring. Swap it for your own scores to deploy anything from microservices to machine-learning pipelines.
|
||||
|
||||
```rust
|
||||
use harmony::{
|
||||
data::Version,
|
||||
inventory::Inventory,
|
||||
maestro::Maestro,
|
||||
modules::{
|
||||
lamp::{LAMPConfig, LAMPScore},
|
||||
monitoring::monitoring_alerting::MonitoringAlertingStackScore,
|
||||
},
|
||||
topology::{K8sAnywhereTopology, Url},
|
||||
};
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() {
|
||||
// 1. Describe what you want
|
||||
let lamp_stack = LAMPScore {
|
||||
name: "harmony-lamp-demo".into(),
|
||||
domain: Url::Url(url::Url::parse("https://lampdemo.example.com").unwrap()),
|
||||
php_version: Version::from("8.3.0").unwrap(),
|
||||
config: LAMPConfig {
|
||||
project_root: "./php".into(),
|
||||
database_size: "4Gi".into(),
|
||||
..Default::default()
|
||||
},
|
||||
};
|
||||
|
||||
// 2. Pick where it should run
|
||||
let mut maestro = Maestro::<K8sAnywhereTopology>::initialize(
|
||||
Inventory::autoload(), // auto-detect hardware / kube-config
|
||||
K8sAnywhereTopology::from_env(), // local k3d, CI, staging, prod…
|
||||
)
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
// 3. Enhance with extra scores (monitoring, CI/CD, …)
|
||||
let mut monitoring = MonitoringAlertingStackScore::new();
|
||||
monitoring.namespace = Some(lamp_stack.config.namespace.clone());
|
||||
|
||||
maestro.register_all(vec![Box::new(lamp_stack), Box::new(monitoring)]);
|
||||
|
||||
// 4. Launch an interactive CLI / TUI
|
||||
harmony_cli::init(maestro, None).await.unwrap();
|
||||
}
|
||||
```
|
||||
|
||||
Run it:
|
||||
|
||||
```bash
|
||||
cargo run
|
||||
```
|
||||
|
||||
Harmony analyses the code, shows an execution plan in a TUI, and applies it once you confirm. Same code, same binary—every environment.
|
||||
|
||||
---
|
||||
|
||||
## 3 · Core Concepts
|
||||
|
||||
| Term | One-liner |
|
||||
|------|-----------|
|
||||
| **Score<T>** | Declarative description of the desired state (e.g., `LAMPScore`). |
|
||||
| **Interpret<T>** | Imperative logic that realises a `Score` on a specific environment. |
|
||||
| **Topology** | An environment (local k3d, AWS, bare-metal) exposing verified *Capabilities* (Kubernetes, DNS, …). |
|
||||
| **Maestro** | Orchestrator that compiles Scores + Topology, ensuring all capabilities line up **at compile-time**. |
|
||||
| **Inventory** | Optional catalogue of physical assets for bare-metal and edge deployments. |
|
||||
|
||||
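To make the relationship between these concepts concrete, here is a small, self-contained sketch. The trait shapes and names below are simplified illustrations, not Harmony's actual signatures (the real traits are async and tied to Topology capabilities):

```rust
// Simplified illustration of the Score / Interpret / Topology split.
// These types are invented for explanation and are not part of Harmony's API.

// A Topology represents an environment with known capabilities.
trait Topology {
    fn name(&self) -> &str;
}

struct LocalK3d;
impl Topology for LocalK3d {
    fn name(&self) -> &str {
        "local-k3d"
    }
}

// A Score is a declarative description of desired state.
struct StaticSiteScore {
    domain: String,
    replicas: u32,
}

// An Interpret realises a Score on a specific environment.
trait Interpret<T: Topology> {
    fn execute(&self, topology: &T) -> Result<(), String>;
}

impl Interpret<LocalK3d> for StaticSiteScore {
    fn execute(&self, topology: &LocalK3d) -> Result<(), String> {
        println!(
            "Deploying {} replica(s) of {} on {}",
            self.replicas, self.domain, topology.name()
        );
        Ok(())
    }
}

fn main() {
    let score = StaticSiteScore {
        domain: "demo.example.com".into(),
        replicas: 2,
    };
    score.execute(&LocalK3d).unwrap();
}
```

Because the Interpret implementation only exists for topologies that provide what the score needs, a mismatch between a score and an environment surfaces as a compile error rather than a runtime failure.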
A visual overview is in the diagram below.
|
||||
|
||||
[Harmony Core Architecture](docs/diagrams/Harmony_Core_Architecture.drawio.svg)
|
||||
|
||||
---
|
||||
|
||||
## 4 · Install
|
||||
|
||||
Prerequisites:
|
||||
|
||||
* Rust
|
||||
* Docker (if you deploy locally)
|
||||
* `kubectl` / `helm` for Kubernetes-based topologies
|
||||
|
||||
```bash
|
||||
git clone https://git.nationtech.io/nationtech/harmony
|
||||
cd harmony
|
||||
cargo build --release # builds the CLI, TUI and libraries
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 5 · Learning More
|
||||
|
||||
* **Architectural Decision Records** – dive into the rationale
|
||||
- [ADR-001 · Why Rust](adr/001-rust.md)
|
||||
- [ADR-003 · Infrastructure Abstractions](adr/003-infrastructure-abstractions.md)
|
||||
- [ADR-006 · Secret Management](adr/006-secret-management.md)
|
||||
- [ADR-011 · Multi-Tenant Cluster](adr/011-multi-tenant-cluster.md)
|
||||
|
||||
* **Extending Harmony** – write new Scores / Interprets, add hardware like OPNsense firewalls, or embed Harmony in your own tooling (`/docs`).
|
||||
|
||||
* **Community** – discussions and roadmap live in [GitLab issues](https://git.nationtech.io/nationtech/harmony/-/issues). PRs, ideas, and feedback are welcome!
|
||||
|
||||
---
|
||||
|
||||
## 6 · License
|
||||
|
||||
Harmony is released under the **GNU AGPL v3**.
|
||||
|
||||
> We choose a strong copyleft license to ensure the project—and every improvement to it—remains open and benefits the entire community. Fork it, enhance it, even out-innovate us; just keep it open.
|
||||
|
||||
See [LICENSE](LICENSE) for the full text.
|
||||
|
||||
---
|
||||
|
||||
*Made with ❤️ & 🦀 by the NationTech and the Harmony community*
|
||||

|
||||
````
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
# Architecture Decision Record: \<Title\>
|
||||
|
||||
Initial Author: \<Name\>
|
||||
Name: \<Name\>
|
||||
|
||||
Initial Date: \<Date\>
|
||||
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
# Architecture Decision Record: Helm and Kustomize Handling
|
||||
|
||||
Initial Author: Taha Hawa
|
||||
Name: Taha Hawa
|
||||
|
||||
Initial Date: 2025-04-15
|
||||
|
||||
|
||||
@@ -1,73 +0,0 @@
|
||||
pub trait MonitoringSystem {}
|
||||
|
||||
// 1. Modified AlertReceiver trait:
|
||||
// - Removed the problematic `clone` method.
|
||||
// - Added `box_clone` which returns a Box<dyn AlertReceiver>.
|
||||
pub trait AlertReceiver {
|
||||
type M: MonitoringSystem;
|
||||
fn install(&self, sender: &Self::M) -> Result<(), String>;
|
||||
// This method allows concrete types to clone themselves into a Box<dyn AlertReceiver>
|
||||
fn box_clone(&self) -> Box<dyn AlertReceiver<M = Self::M>>;
|
||||
}
|
||||
#[derive(Clone)]
|
||||
struct Prometheus{}
|
||||
impl MonitoringSystem for Prometheus {}
|
||||
|
||||
#[derive(Clone)] // Keep derive(Clone) for DiscordWebhook itself
|
||||
struct DiscordWebhook{}
|
||||
|
||||
impl AlertReceiver for DiscordWebhook {
|
||||
type M = Prometheus;
|
||||
fn install(&self, sender: &Self::M) -> Result<(), String> {
|
||||
// Placeholder for actual installation logic
|
||||
println!("DiscordWebhook installed for Prometheus monitoring.");
|
||||
Ok(())
|
||||
}
|
||||
// 2. Implement `box_clone` for DiscordWebhook:
|
||||
// This uses the derived `Clone` for DiscordWebhook to create a new boxed instance.
|
||||
fn box_clone(&self) -> Box<dyn AlertReceiver<M = Self::M>> {
|
||||
Box::new(self.clone())
|
||||
}
|
||||
}
|
||||
|
||||
// 3. Implement `std::clone::Clone` for `Box<dyn AlertReceiver<M= M>>`:
|
||||
// This allows `Box<dyn AlertReceiver>` to be cloned.
|
||||
// The `+ 'static` lifetime bound is often necessary for trait objects stored in collections,
|
||||
// ensuring they live long enough.
|
||||
impl<M: MonitoringSystem + 'static> Clone for Box<dyn AlertReceiver<M= M>> {
|
||||
fn clone(&self) -> Self {
|
||||
self.box_clone() // Call the custom `box_clone` method
|
||||
}
|
||||
}
|
||||
|
||||
// MonitoringConfig can now derive Clone because its `receivers` field
|
||||
// (Vec<Box<dyn AlertReceiver<M = M>>>) is now cloneable.
|
||||
#[derive(Clone)]
|
||||
struct MonitoringConfig <M: MonitoringSystem + 'static>{
|
||||
receivers: Vec<Box<dyn AlertReceiver<M = M>>>
|
||||
}
|
||||
|
||||
// Example usage to demonstrate compilation and functionality
|
||||
fn main() {
|
||||
let prometheus_instance = Prometheus{};
|
||||
let discord_webhook_instance = DiscordWebhook{};
|
||||
|
||||
let mut config = MonitoringConfig {
|
||||
receivers: Vec::new()
|
||||
};
|
||||
|
||||
// Create a boxed alert receiver
|
||||
let boxed_receiver: Box<dyn AlertReceiver<M = Prometheus>> = Box::new(discord_webhook_instance);
|
||||
config.receivers.push(boxed_receiver);
|
||||
|
||||
// Clone the config, which will now correctly clone the boxed receiver
|
||||
let cloned_config = config.clone();
|
||||
|
||||
println!("Original config has {} receivers.", config.receivers.len());
|
||||
println!("Cloned config has {} receivers.", cloned_config.receivers.len());
|
||||
|
||||
// Example of using the installed receiver
|
||||
if let Some(receiver) = config.receivers.get(0) {
|
||||
let _ = receiver.install(&prometheus_instance);
|
||||
}
|
||||
}
|
||||
@@ -1,7 +1,7 @@
|
||||
# Architecture Decision Record: Monitoring and Alerting
|
||||
|
||||
Initial Author : Willem Rolleman
|
||||
Date : April 28 2025
|
||||
Proposed by: Willem Rolleman
|
||||
Date: April 28 2025
|
||||
|
||||
## Status
|
||||
|
||||
|
||||
@@ -1,161 +0,0 @@
|
||||
# Architecture Decision Record: Multi-Tenancy Strategy for Harmony Managed Clusters
|
||||
|
||||
Initial Author: Jean-Gabriel Gill-Couture
|
||||
|
||||
Initial Date: 2025-05-26
|
||||
|
||||
## Status
|
||||
|
||||
Proposed
|
||||
|
||||
## Context
|
||||
|
||||
Harmony manages production OKD/Kubernetes clusters that serve multiple clients with varying trust levels and operational requirements. We need a multi-tenancy strategy that provides:
|
||||
|
||||
1. **Strong isolation** between client workloads while maintaining operational simplicity
|
||||
2. **Controlled API access** allowing clients self-service capabilities within defined boundaries
|
||||
3. **Security-first approach** protecting both the cluster infrastructure and tenant data
|
||||
4. **Harmony-native implementation** using our Score/Interpret pattern for automated tenant provisioning
|
||||
5. **Scalable management** supporting both small trusted clients and larger enterprise customers
|
||||
|
||||
The official Kubernetes multi-tenancy documentation identifies two primary models: namespace-based isolation and virtual control planes per tenant. Given Harmony's focus on operational simplicity, provider-agnostic abstractions (ADR-003), and hexagonal architecture (ADR-002), we must choose an approach that balances security, usability, and maintainability.
|
||||
|
||||
Our clients represent a hybrid tenancy model:
|
||||
- **Customer multi-tenancy**: Each client operates independently with no cross-tenant trust
|
||||
- **Team multi-tenancy**: Individual clients may have multiple team members requiring coordinated access
|
||||
- **API access requirement**: Unlike pure SaaS scenarios, clients need controlled Kubernetes API access for self-service operations
|
||||
|
||||
The official Kubernetes documentation on multi-tenancy heavily inspired this ADR: https://kubernetes.io/docs/concepts/security/multi-tenancy/
|
||||
|
||||
## Decision
|
||||
|
||||
Implement **namespace-based multi-tenancy** with the following architecture:
|
||||
|
||||
### 1. Network Security Model
|
||||
- **Private cluster access**: Kubernetes API and OpenShift console accessible only via WireGuard VPN
|
||||
- **No public exposure**: Control plane endpoints remain internal to prevent unauthorized access attempts
|
||||
- **VPN-based authentication**: Initial access control through WireGuard client certificates
|
||||
|
||||
### 2. Tenant Isolation Strategy
|
||||
- **Dedicated namespace per tenant**: Each client receives an isolated namespace with access limited only to the required resources and operations
|
||||
- **Complete network isolation**: NetworkPolicies prevent cross-namespace communication while allowing full egress to public internet
|
||||
- **Resource governance**: ResourceQuotas and LimitRanges enforce CPU, memory, and storage consumption limits
|
||||
- **Storage access control**: Clients can create PersistentVolumeClaims but cannot directly manipulate PersistentVolumes or access other tenants' storage
|
||||
|
||||
### 3. Access Control Framework
|
||||
- **Principle of Least Privilege**: RBAC grants only necessary permissions within tenant namespace scope
|
||||
- **Namespace-scoped**: Clients can create/modify/delete resources within their namespace
|
||||
- **Cluster-level restrictions**: No access to cluster-wide resources, other namespaces, or sensitive cluster operations
|
||||
- **Whitelisted operations**: Controlled self-service capabilities for ingress, secrets, configmaps, and workload management
|
||||
|
||||
### 4. Identity Management Evolution
|
||||
- **Phase 1**: Manual provisioning of VPN access and Kubernetes ServiceAccounts/Users
|
||||
- **Phase 2**: Migration to Keycloak-based identity management (aligning with ADR-006) for centralized authentication and lifecycle management
|
||||
|
||||
### 5. Harmony Integration
|
||||
- **TenantScore implementation**: Declarative tenant provisioning using Harmony's Score/Interpret pattern
|
||||
- **Topology abstraction**: Tenant configuration abstracted from underlying Kubernetes implementation details
|
||||
- **Automated deployment**: Complete tenant setup automated through Harmony's orchestration capabilities
|
||||
|
||||
## Rationale
|
||||
|
||||
### Network Security Through VPN Access
|
||||
- **Defense in depth**: VPN requirement adds critical security layer preventing unauthorized cluster access
|
||||
- **Simplified firewall rules**: No need for complex public endpoint protections or rate limiting
|
||||
- **Audit capability**: VPN access provides clear audit trail of cluster connections
|
||||
- **Aligns with enterprise practices**: Most enterprise customers already use VPN infrastructure
|
||||
|
||||
### Namespace Isolation vs Virtual Control Planes
|
||||
Following Kubernetes official guidance, namespace isolation provides:
|
||||
- **Lower resource overhead**: Virtual control planes require dedicated etcd, API server, and controller manager per tenant
|
||||
- **Operational simplicity**: Single control plane to maintain, upgrade, and monitor
|
||||
- **Cross-tenant service integration**: Enables future controlled cross-tenant communication if required
|
||||
- **Proven stability**: Namespace-based isolation is well-tested and widely deployed
|
||||
- **Cost efficiency**: Significantly lower infrastructure costs compared to dedicated control planes
|
||||
|
||||
### Hybrid Tenancy Model Suitability
|
||||
Our approach addresses both customer and team multi-tenancy requirements:
|
||||
- **Customer isolation**: Strong network and RBAC boundaries prevent cross-tenant interference
|
||||
- **Team collaboration**: Multiple team members can share namespace access through group-based RBAC
|
||||
- **Self-service balance**: Controlled API access enables client autonomy without compromising security
|
||||
|
||||
### Harmony Architecture Alignment
|
||||
- **Provider agnostic**: TenantScore abstracts multi-tenancy concepts, enabling future support for other Kubernetes distributions
|
||||
- **Hexagonal architecture**: Tenant management becomes an infrastructure capability accessed through well-defined ports
|
||||
- **Declarative automation**: Tenant lifecycle fully managed through Harmony's Score execution model
|
||||
|
||||
## Consequences
|
||||
|
||||
### Positive Consequences
|
||||
- **Strong security posture**: VPN + namespace isolation provides robust tenant separation
|
||||
- **Operational efficiency**: Single cluster management with automated tenant provisioning
|
||||
- **Client autonomy**: Self-service capabilities reduce operational support burden
|
||||
- **Scalable architecture**: Can support hundreds of tenants per cluster without architectural changes
|
||||
- **Future flexibility**: Foundation supports evolution to more sophisticated multi-tenancy models
|
||||
- **Cost optimization**: Shared infrastructure maximizes resource utilization
|
||||
|
||||
### Negative Consequences
|
||||
- **VPN operational overhead**: Requires VPN infrastructure management
|
||||
- **Manual provisioning complexity**: Phase 1 manual user management creates administrative burden
|
||||
- **Network policy dependency**: Requires CNI with NetworkPolicy support (OVN-Kubernetes provides this and is the OKD/Openshift default)
|
||||
- **Cluster-wide resource limitations**: Some advanced Kubernetes features require cluster-wide access
|
||||
- **Single point of failure**: Cluster outage affects all tenants simultaneously
|
||||
|
||||
### Migration Challenges
|
||||
- **Legacy client integration**: Existing clients may need VPN client setup and credential migration
|
||||
- **Monitoring complexity**: Per-tenant observability requires careful metric and log segmentation
|
||||
- **Backup considerations**: Tenant data backup must respect isolation boundaries
|
||||
|
||||
## Alternatives Considered
|
||||
|
||||
### Alternative 1: Virtual Control Plane Per Tenant
|
||||
**Pros**: Complete control plane isolation, full Kubernetes API access per tenant
|
||||
**Cons**: 3-5x higher resource usage, complex cross-tenant networking, operational complexity scales linearly with tenants
|
||||
|
||||
**Rejected**: Resource overhead incompatible with cost-effective multi-tenancy goals
|
||||
|
||||
### Alternative 2: Dedicated Clusters Per Tenant
|
||||
**Pros**: Maximum isolation, independent upgrade cycles, simplified security model
|
||||
**Cons**: Exponential operational complexity, prohibitive costs, resource waste
|
||||
|
||||
**Rejected**: Operational overhead makes this approach unsustainable for multiple clients
|
||||
|
||||
### Alternative 3: Public API with Advanced Authentication
|
||||
**Pros**: No VPN requirement, potentially simpler client access
|
||||
**Cons**: Larger attack surface, complex rate limiting and DDoS protection, increased security monitoring requirements
|
||||
|
||||
**Rejected**: Risk/benefit analysis favors VPN-based access control
|
||||
|
||||
### Alternative 4: Service Mesh Based Isolation
|
||||
**Pros**: Fine-grained traffic control, encryption, advanced observability
|
||||
**Cons**: Significant operational complexity, performance overhead, steep learning curve
|
||||
|
||||
**Rejected**: Complexity overhead outweighs benefits for current requirements; remains option for future enhancement
|
||||
|
||||
## Additional Notes
|
||||
|
||||
### Implementation Roadmap
|
||||
1. **Phase 1**: Implement VPN access and manual tenant provisioning
2. **Phase 2**: Deploy TenantScore automation for namespace, RBAC, and NetworkPolicy management
3. **Phase 3**: Harden pod runtimes: prevent privilege escalation from pods, audit for weaknesses, and enforce security policies
4. **Phase 4**: Integrate Keycloak for centralized identity management
5. **Phase 5**: Add advanced monitoring and per-tenant observability
|
||||
|
||||
### TenantScore Structure Preview
|
||||
```rust
|
||||
pub struct TenantScore {
|
||||
pub tenant_config: TenantConfig,
|
||||
pub resource_quotas: ResourceQuotaConfig,
|
||||
pub network_isolation: NetworkIsolationPolicy,
|
||||
pub storage_access: StorageAccessConfig,
|
||||
pub rbac_config: RBACConfig,
|
||||
}
|
||||
```
|
||||
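To give a feel for how this structure might be used, here is a self-contained sketch with toy stand-ins for the configuration types (all field shapes and values below are hypothetical, chosen only for illustration):

```rust
// Hypothetical, self-contained sketch: toy versions of the proposed config
// types, purely to show what declaring a tenant could look like. None of this
// is existing Harmony API.
#[derive(Debug)]
struct TenantConfig { name: String, namespace: String }
#[derive(Debug)]
struct ResourceQuotaConfig { cpu: String, memory: String, storage: String }
#[derive(Debug)]
struct NetworkIsolationPolicy { deny_cross_namespace: bool }
#[derive(Debug)]
struct StorageAccessConfig { pvc_only: bool }
#[derive(Debug)]
struct RBACConfig { namespace_admins: Vec<String> }

#[derive(Debug)]
struct TenantScore {
    tenant_config: TenantConfig,
    resource_quotas: ResourceQuotaConfig,
    network_isolation: NetworkIsolationPolicy,
    storage_access: StorageAccessConfig,
    rbac_config: RBACConfig,
}

fn main() {
    let tenant = TenantScore {
        tenant_config: TenantConfig { name: "acme".into(), namespace: "acme".into() },
        resource_quotas: ResourceQuotaConfig {
            cpu: "8".into(),
            memory: "16Gi".into(),
            storage: "100Gi".into(),
        },
        network_isolation: NetworkIsolationPolicy { deny_cross_namespace: true },
        storage_access: StorageAccessConfig { pvc_only: true },
        rbac_config: RBACConfig { namespace_admins: vec!["alice@example.com".into()] },
    };
    println!("{tenant:?}");
}
```

In practice such a score would be registered with Maestro like any other Score and realised by a Kubernetes-capable Topology.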
|
||||
### Future Enhancements
|
||||
- **Cross-tenant service mesh**: For approved inter-tenant communication
|
||||
- **Advanced monitoring**: Per-tenant Prometheus/Grafana instances
|
||||
- **Backup automation**: Tenant-scoped backup policies
|
||||
- **Cost allocation**: Detailed per-tenant resource usage tracking
|
||||
|
||||
This ADR establishes the foundation for secure, scalable multi-tenancy in Harmony-managed clusters while maintaining operational simplicity and cost effectiveness. A follow-up ADR will detail the Tenant abstraction and user management mechanisms within the Harmony framework.
|
||||
@@ -1,41 +0,0 @@
|
||||
apiVersion: networking.k8s.io/v1
|
||||
kind: NetworkPolicy
|
||||
metadata:
|
||||
name: tenant-isolation-policy
|
||||
namespace: testtenant
|
||||
spec:
|
||||
podSelector: {} # Selects all pods in the namespace
|
||||
policyTypes:
|
||||
- Ingress
|
||||
- Egress
|
||||
ingress:
|
||||
- from:
|
||||
- podSelector: {} # Allow from all pods in the same namespace
|
||||
egress:
|
||||
- to:
|
||||
- podSelector: {} # Allow to all pods in the same namespace
|
||||
- to:
|
||||
- podSelector: {}
|
||||
namespaceSelector:
|
||||
matchLabels:
|
||||
kubernetes.io/metadata.name: openshift-dns # Target the openshift-dns namespace
|
||||
# Note, only opening port 53 is not enough, will have to dig deeper into this one eventually
|
||||
# ports:
|
||||
# - protocol: UDP
|
||||
# port: 53
|
||||
# - protocol: TCP
|
||||
# port: 53
|
||||
# Allow egress to public internet only
|
||||
- to:
|
||||
- ipBlock:
|
||||
cidr: 0.0.0.0/0
|
||||
except:
|
||||
- 10.0.0.0/8 # RFC1918
|
||||
- 172.16.0.0/12 # RFC1918
|
||||
- 192.168.0.0/16 # RFC1918
|
||||
- 169.254.0.0/16 # Link-local
|
||||
- 127.0.0.0/8 # Loopback
|
||||
- 224.0.0.0/4 # Multicast
|
||||
- 240.0.0.0/4 # Reserved
|
||||
- 100.64.0.0/10 # Carrier-grade NAT
|
||||
- 0.0.0.0/8 # Reserved
|
||||
@@ -1,95 +0,0 @@
|
||||
apiVersion: v1
|
||||
kind: Namespace
|
||||
metadata:
|
||||
name: testtenant
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Namespace
|
||||
metadata:
|
||||
name: testtenant2
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: test-web
|
||||
namespace: testtenant
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
app: test-web
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: test-web
|
||||
spec:
|
||||
containers:
|
||||
- name: nginx
|
||||
image: nginxinc/nginx-unprivileged
|
||||
ports:
|
||||
- containerPort: 80
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: test-web
|
||||
namespace: testtenant
|
||||
spec:
|
||||
selector:
|
||||
app: test-web
|
||||
ports:
|
||||
- port: 80
|
||||
targetPort: 8080
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: test-client
|
||||
namespace: testtenant
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
app: test-client
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: test-client
|
||||
spec:
|
||||
containers:
|
||||
- name: curl
|
||||
image: curlimages/curl:latest
|
||||
command: ["/bin/sh", "-c", "sleep 3600"]
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: test-web
|
||||
namespace: testtenant2
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
app: test-web
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: test-web
|
||||
spec:
|
||||
containers:
|
||||
- name: nginx
|
||||
image: nginxinc/nginx-unprivileged
|
||||
ports:
|
||||
- containerPort: 80
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: test-web
|
||||
namespace: testtenant2
|
||||
spec:
|
||||
selector:
|
||||
app: test-web
|
||||
ports:
|
||||
- port: 80
|
||||
targetPort: 8080
|
||||
@@ -1,63 +0,0 @@
|
||||
# Architecture Decision Record: \<Title\>
|
||||
|
||||
Initial Author: Jean-Gabriel Gill-Couture
|
||||
|
||||
Initial Date: 2025-06-04
|
||||
|
||||
Last Updated Date: 2025-06-04
|
||||
|
||||
## Status
|
||||
|
||||
Proposed
|
||||
|
||||
## Context
|
||||
|
||||
As Harmony's goal is to make software delivery easier, we must provide an easy way for developers to express their app's semantics and dependencies with great abstractions, in a similar fashion to what the score.dev project is doing.
|
||||
|
||||
Thus, we started working on ways to package common types of applications such as LAMP, which we started working on with `LAMPScore`.
|
||||
|
||||
Now it is time for the next step: we want to pave the way towards complete lifecycle automation. To do this, we will start with a way to execute Harmony's modules easily from anywhere, starting locally and in CI environments.
|
||||
|
||||
## Decision
|
||||
|
||||
To achieve easy, portable execution of Harmony, we will follow this architecture:
|
||||
|
||||
- Host a basic harmony release that is compiled with the CLI by our gitea/github server
|
||||
- This binary will do the following: check whether there is a `harmony` folder in the current path
|
||||
- If yes
|
||||
- Check if cargo is available locally and compile the harmony binary, or compile it using a Rust docker container; if neither cargo nor a container runtime is available, output a message explaining the situation
|
||||
- Run the newly compiled binary, ideally handing the process off like `exec` does so that terminal interaction, signals, exit codes, and process handling go straight to it (some research is still needed on possible side effects; a sketch of this flow follows the list below)
|
||||
- If not
|
||||
- Suggest initializing a project by auto detecting what the project looks like
|
||||
- When the project type cannot be auto-detected, provide links to Harmony's documentation on how to set up a project and to the examples folder, and ask the user whether they want to initialize an empty Harmony project in the current folder
|
||||
- harmony/Cargo.toml with dependencies set
|
||||
- harmony/src/main.rs with an example LAMPScore setup and ready to run
|
||||
- This same binary can be used in a CI environment to run the target project's Harmony module. By default, we provide these opinionated steps :
|
||||
1. **An empty check step.** The purpose of this step is to run all tests and checks against the codebase. For complex projects this could involve an elaborate pipeline of test environment setup and execution, but that is out of scope for now and is not handled by Harmony. For projects with automatic setup, we can fill this step with something like `cargo fmt --check; cargo test; cargo build`, but Harmony is not directly involved in executing this step.
|
||||
2. **Package and publish.** Once all checks have passed, the production ready container is built and pushed to a registry. This is done by Harmony.
|
||||
3. **Deploy to staging automatically.**
|
||||
4. **Run a sanity check on staging.** As Harmony is responsible for deploying, Harmony should have all the knowledge of how to perform a sanity check on the staging environment. This will, most of the time, be a simple verification of the kubernetes health of all deployed components, and a poke on the public endpoint when there is one.
|
||||
5. **Deploy to production automatically.** Many projects will require manual approval here; this can easily be added in the CI afterwards, but our opinion is that automatic deployment should be the default starting point.
|
||||
6. **Run a sanity check on production.** Same check as staging, but on production.
|
||||
|
||||
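As a rough illustration of the local flow described above (detect cargo, otherwise fall back to a container build, then hand the process over to the compiled binary), here is a minimal Rust sketch. Paths, arguments, and the container image are assumptions, and the `exec` handoff is Unix-only:

```rust
// Hypothetical sketch of the harmony_composer local flow described in this ADR.
// Paths, arguments, and the container image are illustrative assumptions.
use std::os::unix::process::CommandExt;
use std::process::Command;

fn cargo_available() -> bool {
    Command::new("cargo")
        .arg("--version")
        .output()
        .map(|out| out.status.success())
        .unwrap_or(false)
}

fn build_harmony() -> Result<(), String> {
    let status = if cargo_available() {
        Command::new("cargo")
            .args(["build", "--release"])
            .current_dir("harmony")
            .status()
    } else {
        // Fall back to a containerized build when cargo is not installed.
        let cwd = std::env::current_dir().map_err(|e| e.to_string())?;
        let mount = format!("{}/harmony:/app", cwd.display());
        Command::new("docker")
            .args(["run", "--rm", "-v"])
            .arg(&mount)
            .args(["-w", "/app", "docker.io/rust:1.87.0", "cargo", "build", "--release"])
            .status()
    };
    match status {
        Ok(s) if s.success() => Ok(()),
        _ => Err("could not build the harmony binary with cargo or a container runtime".into()),
    }
}

fn main() {
    build_harmony().expect("failed to build the harmony binary");
    // exec() replaces the current process image, so signals, exit codes and
    // terminal interaction go straight to the compiled binary. It only
    // returns if the handoff failed.
    let err = Command::new("./harmony/target/release/harmony")
        .args(std::env::args().skip(1))
        .exec();
    eprintln!("failed to exec harmony binary: {err}");
}
```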
*Note on providing a base pipeline:* Having a complete pipeline set up automatically will encourage development teams to build upon it by adding tests where they belong. The goal here is to provide an opinionated solution that works for most small and large projects. Of course, many organizations will need to add steps such as deploying to sandbox environments, requiring more advanced approvals, or more complex publication and coordination with other projects. But this encompasses the basics required to build and deploy software reliably at any scale.
|
||||
|
||||
### Environment setup
|
||||
|
||||
TBD : For now, environments (tenants) will be set up and configured manually. Harmony will rely on the kubeconfig provided in the environment where it is running to deploy in the namespace.
|
||||
|
||||
For the CD tool such as Argo or Flux they will be activated by default by Harmony when using application level Scores such as LAMPScore in a similar way that the container is automatically built. Then, CI deployment steps will be notifying the CD tool using its API of the new release to deploy.
|
||||
|
||||
## Rationale
|
||||
|
||||
Reasoning behind the decision
|
||||
|
||||
## Consequences
|
||||
|
||||
Pros/Cons of chosen solution
|
||||
|
||||
## Alternatives considered
|
||||
|
||||
Pros/Cons of various proposed solutions considered
|
||||
|
||||
## Additional Notes
|
||||
@@ -1 +0,0 @@
slitaz/* filter=lfs diff=lfs merge=lfs -text
@@ -1,6 +0,0 @@
#!ipxe

set base-url http://192.168.33.1:8080
set hostfile ${base-url}/byMAC/01-${mac:hexhyp}.ipxe

chain ${hostfile} || chain ${base-url}/default.ipxe
@@ -1,35 +0,0 @@
|
||||
#!ipxe
|
||||
menu PXE Boot Menu - [${mac}]
|
||||
item okdinstallation Install OKD
|
||||
item slitaz Boot to Slitaz - old linux for debugging
|
||||
choose selected
|
||||
|
||||
goto ${selected}
|
||||
|
||||
:local
|
||||
exit
|
||||
|
||||
#################################
|
||||
# okdinstallation
|
||||
#################################
|
||||
:okdinstallation
|
||||
set base-url http://192.168.33.1:8080
|
||||
set kernel-image fcos/fedora-coreos-39.20231101.3.0-live-kernel-x86_64
|
||||
set live-rootfs fcos/fedora-coreos-39.20231101.3.0-live-rootfs.x86_64.img
|
||||
set live-initramfs fcos/fedora-coreos-39.20231101.3.0-live-initramfs.x86_64.img
|
||||
set install-disk /dev/nvme0n1
|
||||
set ignition-file ncd0/master.ign
|
||||
|
||||
kernel ${base-url}/${kernel-image} initrd=main coreos.live.rootfs_url=${base-url}/${live-rootfs} coreos.inst.install_dev=${install-disk} coreos.inst.ignition_url=${base-url}/${ignition-file} ip=enp1s0:dhcp
|
||||
initrd --name main ${base-url}/${live-initramfs}
|
||||
boot
|
||||
|
||||
#################################
|
||||
# slitaz
|
||||
#################################
|
||||
:slitaz
|
||||
set server_ip 192.168.33.1:8080
|
||||
set base_url http://${server_ip}/slitaz
|
||||
kernel ${base_url}/vmlinuz-2.6.37-slitaz rw root=/dev/null vga=788 initrd=rootfs.gz
|
||||
initrd ${base_url}/rootfs.gz
|
||||
boot
|
||||
@@ -1,35 +0,0 @@
|
||||
#!ipxe
|
||||
menu PXE Boot Menu - [${mac}]
|
||||
item okdinstallation Install OKD
|
||||
item slitaz Boot to Slitaz - old linux for debugging
|
||||
choose selected
|
||||
|
||||
goto ${selected}
|
||||
|
||||
:local
|
||||
exit
|
||||
|
||||
#################################
|
||||
# okdinstallation
|
||||
#################################
|
||||
:okdinstallation
|
||||
set base-url http://192.168.33.1:8080
|
||||
set kernel-image fcos/fedora-coreos-39.20231101.3.0-live-kernel-x86_64
|
||||
set live-rootfs fcos/fedora-coreos-39.20231101.3.0-live-rootfs.x86_64.img
|
||||
set live-initramfs fcos/fedora-coreos-39.20231101.3.0-live-initramfs.x86_64.img
|
||||
set install-disk /dev/nvme0n1
|
||||
set ignition-file ncd0/master.ign
|
||||
|
||||
kernel ${base-url}/${kernel-image} initrd=main coreos.live.rootfs_url=${base-url}/${live-rootfs} coreos.inst.install_dev=${install-disk} coreos.inst.ignition_url=${base-url}/${ignition-file} ip=enp1s0:dhcp
|
||||
initrd --name main ${base-url}/${live-initramfs}
|
||||
boot
|
||||
|
||||
#################################
|
||||
# slitaz
|
||||
#################################
|
||||
:slitaz
|
||||
set server_ip 192.168.33.1:8080
|
||||
set base_url http://${server_ip}/slitaz
|
||||
kernel ${base_url}/vmlinuz-2.6.37-slitaz rw root=/dev/null vga=788 initrd=rootfs.gz
|
||||
initrd ${base_url}/rootfs.gz
|
||||
boot
|
||||
@@ -1,35 +0,0 @@
|
||||
#!ipxe
|
||||
menu PXE Boot Menu - [${mac}]
|
||||
item okdinstallation Install OKD
|
||||
item slitaz Slitaz - an old linux image for debugging
|
||||
choose selected
|
||||
|
||||
goto ${selected}
|
||||
|
||||
:local
|
||||
exit
|
||||
|
||||
#################################
|
||||
# okdinstallation
|
||||
#################################
|
||||
:okdinstallation
|
||||
set base-url http://192.168.33.1:8080
|
||||
set kernel-image fcos/fedora-coreos-39.20231101.3.0-live-kernel-x86_64
|
||||
set live-rootfs fcos/fedora-coreos-39.20231101.3.0-live-rootfs.x86_64.img
|
||||
set live-initramfs fcos/fedora-coreos-39.20231101.3.0-live-initramfs.x86_64.img
|
||||
set install-disk /dev/sda
|
||||
set ignition-file ncd0/worker.ign
|
||||
|
||||
kernel ${base-url}/${kernel-image} initrd=main coreos.live.rootfs_url=${base-url}/${live-rootfs} coreos.inst.install_dev=${install-disk} coreos.inst.ignition_url=${base-url}/${ignition-file} ip=enp1s0:dhcp
|
||||
initrd --name main ${base-url}/${live-initramfs}
|
||||
boot
|
||||
|
||||
#################################
|
||||
# slitaz
|
||||
#################################
|
||||
:slitaz
|
||||
set server_ip 192.168.33.1:8080
|
||||
set base_url http://${server_ip}/slitaz
|
||||
kernel ${base_url}/vmlinuz-2.6.37-slitaz rw root=/dev/null vga=788 initrd=rootfs.gz
|
||||
initrd ${base_url}/rootfs.gz
|
||||
boot
|
||||
@@ -1,35 +0,0 @@
|
||||
#!ipxe
|
||||
menu PXE Boot Menu - [${mac}]
|
||||
item okdinstallation Install OKD
|
||||
item slitaz Boot to Slitaz - old linux for debugging
|
||||
choose selected
|
||||
|
||||
goto ${selected}
|
||||
|
||||
:local
|
||||
exit
|
||||
|
||||
#################################
|
||||
# okdinstallation
|
||||
#################################
|
||||
:okdinstallation
|
||||
set base-url http://192.168.33.1:8080
|
||||
set kernel-image fcos/fedora-coreos-39.20231101.3.0-live-kernel-x86_64
|
||||
set live-rootfs fcos/fedora-coreos-39.20231101.3.0-live-rootfs.x86_64.img
|
||||
set live-initramfs fcos/fedora-coreos-39.20231101.3.0-live-initramfs.x86_64.img
|
||||
set install-disk /dev/nvme0n1
|
||||
set ignition-file ncd0/master.ign
|
||||
|
||||
kernel ${base-url}/${kernel-image} initrd=main coreos.live.rootfs_url=${base-url}/${live-rootfs} coreos.inst.install_dev=${install-disk} coreos.inst.ignition_url=${base-url}/${ignition-file} ip=enp1s0:dhcp
|
||||
initrd --name main ${base-url}/${live-initramfs}
|
||||
boot
|
||||
|
||||
#################################
|
||||
# slitaz
|
||||
#################################
|
||||
:slitaz
|
||||
set server_ip 192.168.33.1:8080
|
||||
set base_url http://${server_ip}/slitaz
|
||||
kernel ${base_url}/vmlinuz-2.6.37-slitaz rw root=/dev/null vga=788 initrd=rootfs.gz
|
||||
initrd ${base_url}/rootfs.gz
|
||||
boot
|
||||
@@ -1,35 +0,0 @@
|
||||
#!ipxe
|
||||
menu PXE Boot Menu - [${mac}]
|
||||
item okdinstallation Install OKD
|
||||
item slitaz Slitaz - an old linux image for debugging
|
||||
choose selected
|
||||
|
||||
goto ${selected}
|
||||
|
||||
:local
|
||||
exit
|
||||
|
||||
#################################
|
||||
# okdinstallation
|
||||
#################################
|
||||
:okdinstallation
|
||||
set base-url http://192.168.33.1:8080
|
||||
set kernel-image fcos/fedora-coreos-39.20231101.3.0-live-kernel-x86_64
|
||||
set live-rootfs fcos/fedora-coreos-39.20231101.3.0-live-rootfs.x86_64.img
|
||||
set live-initramfs fcos/fedora-coreos-39.20231101.3.0-live-initramfs.x86_64.img
|
||||
set install-disk /dev/sda
|
||||
set ignition-file ncd0/worker.ign
|
||||
|
||||
kernel ${base-url}/${kernel-image} initrd=main coreos.live.rootfs_url=${base-url}/${live-rootfs} coreos.inst.install_dev=${install-disk} coreos.inst.ignition_url=${base-url}/${ignition-file} ip=enp1s0:dhcp
|
||||
initrd --name main ${base-url}/${live-initramfs}
|
||||
boot
|
||||
|
||||
#################################
|
||||
# slitaz
|
||||
#################################
|
||||
:slitaz
|
||||
set server_ip 192.168.33.1:8080
|
||||
set base_url http://${server_ip}/slitaz
|
||||
kernel ${base_url}/vmlinuz-2.6.37-slitaz rw root=/dev/null vga=788 initrd=rootfs.gz
|
||||
initrd ${base_url}/rootfs.gz
|
||||
boot
|
||||
@@ -1,37 +0,0 @@
|
||||
#!ipxe
|
||||
menu PXE Boot Menu - [${mac}]
|
||||
item okdinstallation Install OKD
|
||||
item slitaz Slitaz - an old linux image for debugging
|
||||
choose selected
|
||||
|
||||
goto ${selected}
|
||||
|
||||
:local
|
||||
exit
|
||||
# This is the bootstrap node
|
||||
# it will become wk2
|
||||
|
||||
#################################
|
||||
# okdinstallation
|
||||
#################################
|
||||
:okdinstallation
|
||||
set base-url http://192.168.33.1:8080
|
||||
set kernel-image fcos/fedora-coreos-39.20231101.3.0-live-kernel-x86_64
|
||||
set live-rootfs fcos/fedora-coreos-39.20231101.3.0-live-rootfs.x86_64.img
|
||||
set live-initramfs fcos/fedora-coreos-39.20231101.3.0-live-initramfs.x86_64.img
|
||||
set install-disk /dev/sda
|
||||
set ignition-file ncd0/worker.ign
|
||||
|
||||
kernel ${base-url}/${kernel-image} initrd=main coreos.live.rootfs_url=${base-url}/${live-rootfs} coreos.inst.install_dev=${install-disk} coreos.inst.ignition_url=${base-url}/${ignition-file} ip=enp1s0:dhcp
|
||||
initrd --name main ${base-url}/${live-initramfs}
|
||||
boot
|
||||
|
||||
#################################
|
||||
# slitaz
|
||||
#################################
|
||||
:slitaz
|
||||
set server_ip 192.168.33.1:8080
|
||||
set base_url http://${server_ip}/slitaz
|
||||
kernel ${base_url}/vmlinuz-2.6.37-slitaz rw root=/dev/null vga=788 initrd=rootfs.gz
|
||||
initrd ${base_url}/rootfs.gz
|
||||
boot
|
||||
@@ -1,71 +0,0 @@
|
||||
#!ipxe
|
||||
menu PXE Boot Menu - [${mac}]
|
||||
item local Boot from Hard Disk
|
||||
item slitaz Boot slitaz live environment [tux|root:root]
|
||||
#item ubuntu-server Ubuntu 24.04.1 live server
|
||||
#item ubuntu-desktop Ubuntu 24.04.1 desktop
|
||||
#item systemrescue System Rescue 11.03
|
||||
item memtest memtest
|
||||
#choose --default local --timeout 5000 selected
|
||||
choose selected
|
||||
|
||||
goto ${selected}
|
||||
|
||||
:local
|
||||
exit
|
||||
|
||||
#################################
|
||||
# slitaz
|
||||
#################################
|
||||
:slitaz
|
||||
set server_ip 192.168.33.1:8080
|
||||
set base_url http://${server_ip}/slitaz
|
||||
kernel ${base_url}/vmlinuz-2.6.37-slitaz rw root=/dev/null vga=788 initrd=rootfs.gz
|
||||
initrd ${base_url}/rootfs.gz
|
||||
boot
|
||||
|
||||
#################################
|
||||
# Ubuntu Server
|
||||
#################################
|
||||
:ubuntu-server
|
||||
set server_ip 192.168.33.1:8080
|
||||
set base_url http://${server_ip}/ubuntu/live-server-24.04.1
|
||||
|
||||
kernel ${base_url}/vmlinuz ip=dhcp url=${base_url}/ubuntu-24.04.1-live-server-amd64.iso autoinstall ds=nocloud
|
||||
initrd ${base_url}/initrd
|
||||
boot
|
||||
|
||||
#################################
|
||||
# Ubuntu Desktop
|
||||
#################################
|
||||
:ubuntu-desktop
|
||||
set server_ip 192.168.33.1:8080
|
||||
set base_url http://${server_ip}/ubuntu/desktop-24.04.1
|
||||
|
||||
kernel ${base_url}/vmlinuz ip=dhcp url=${base_url}/ubuntu-24.04.1-desktop-amd64.iso autoinstall ds=nocloud
|
||||
initrd ${base_url}/initrd
|
||||
boot
|
||||
|
||||
#################################
|
||||
# System Rescue
|
||||
#################################
|
||||
:systemrescue
|
||||
set base-url http://192.168.33.1:8080/systemrescue
|
||||
|
||||
kernel ${base-url}/vmlinuz initrd=sysresccd.img boot=systemrescue docache
|
||||
initrd ${base-url}/sysresccd.img
|
||||
boot
|
||||
|
||||
#################################
|
||||
# MemTest86 (BIOS/UEFI)
|
||||
#################################
|
||||
:memtest
|
||||
iseq ${platform} efi && goto memtest_efi || goto memtest_bios
|
||||
|
||||
:memtest_efi
|
||||
kernel http://192.168.33.1:8080/memtest/memtest64.efi
|
||||
boot
|
||||
|
||||
:memtest_bios
|
||||
kernel http://192.168.33.1:8080/memtest/memtest64.bin
|
||||
boot
|
||||
data/watchguard/pxe-http-files/paul (new file, 1 line)
@@ -0,0 +1 @@
hey i am paul
BIN data/watchguard/pxe-http-files/slitaz/rootfs.gz (stored with Git LFS, binary file not shown)
BIN data/watchguard/pxe-http-files/slitaz/vmlinuz-2.6.37-slitaz (stored with Git LFS, binary file not shown)
@@ -1 +0,0 @@
Not much here yet, see the `adr` folder for now. More to come in time!
@@ -1,13 +0,0 @@
|
||||
## Conceptual metaphor : The Cyborg and the Central Nervous System
|
||||
|
||||
At the heart of Harmony lies a core belief: in modern, decentralized systems, **software and infrastructure are not separate entities.** They are a single, symbiotic organism—a cyborg.
|
||||
|
||||
The software is the electronics, the "mind"; the infrastructure is the biological host, the "body". They live or die, thrive or sink together.
|
||||
|
||||
Traditional approaches attempt to manage this complex organism with fragmented tools: static YAML for configuration, brittle scripts for automation, and separate Infrastructure as Code (IaC) for provisioning. This creates a disjointed system that struggles to scale or heal itself, making it inadequate for the demands of fully automated, enterprise-grade clusters.
|
||||
|
||||
Harmony's goal is to provide the **central nervous system for this cyborg**. We aim to achieve the full automation of complex, decentralized clouds by managing this integrated entity holistically.
|
||||
|
||||
To achieve this, a tool must be both robust and powerful. It must manage the entire lifecycle—deployment, upgrades, failure recovery, and decommissioning—with precision. This requires full control over application packaging and a deep, intrinsic integration between the software and the infrastructure it inhabits.
|
||||
|
||||
This is why Harmony uses a powerful, living language like Rust. It replaces static, lifeless configuration files with a dynamic, breathing codebase. It allows us to express the complex relationships and behaviors of a modern distributed system, enabling the creation of truly automated, resilient, and powerful platforms that can thrive.
|
||||
@@ -8,7 +8,7 @@ publish = false
|
||||
|
||||
[dependencies]
|
||||
harmony = { path = "../../harmony" }
|
||||
harmony_cli = { path = "../../harmony_cli" }
|
||||
harmony_tui = { path = "../../harmony_tui" }
|
||||
harmony_types = { path = "../../harmony_types" }
|
||||
cidr = { workspace = true }
|
||||
tokio = { workspace = true }
|
||||
|
||||
@@ -1,85 +1,3 @@
|
||||
<?php
|
||||
|
||||
ini_set('display_errors', 1);
|
||||
error_reporting(E_ALL);
|
||||
|
||||
$host = getenv('MYSQL_HOST') ?: '';
|
||||
$user = getenv('MYSQL_USER') ?: 'root';
|
||||
$pass = getenv('MYSQL_PASSWORD') ?: '';
|
||||
$db = 'testfill';
|
||||
$charset = 'utf8mb4';
|
||||
|
||||
$dsn = "mysql:host=$host;charset=$charset";
|
||||
$options = [
|
||||
PDO::ATTR_ERRMODE => PDO::ERRMODE_EXCEPTION,
|
||||
PDO::ATTR_DEFAULT_FETCH_MODE => PDO::FETCH_ASSOC,
|
||||
];
|
||||
|
||||
try {
|
||||
$pdo = new PDO($dsn, $user, $pass, $options);
|
||||
$pdo->exec("CREATE DATABASE IF NOT EXISTS `$db`");
|
||||
$pdo->exec("USE `$db`");
|
||||
$pdo->exec("
|
||||
CREATE TABLE IF NOT EXISTS filler (
|
||||
id INT AUTO_INCREMENT PRIMARY KEY,
|
||||
data LONGBLOB
|
||||
)
|
||||
");
|
||||
} catch (\PDOException $e) {
|
||||
die("❌ DB connection failed: " . $e->getMessage());
|
||||
}
|
||||
|
||||
function getDbStats($pdo, $db) {
|
||||
$stmt = $pdo->query("
|
||||
SELECT
|
||||
ROUND(SUM(data_length + index_length) / 1024 / 1024 / 1024, 2) AS total_size_gb,
|
||||
SUM(table_rows) AS total_rows
|
||||
FROM information_schema.tables
|
||||
WHERE table_schema = '$db'
|
||||
");
|
||||
$result = $stmt->fetch();
|
||||
$sizeGb = $result['total_size_gb'] ?? '0';
|
||||
$rows = $result['total_rows'] ?? '0';
|
||||
$avgMb = ($rows > 0) ? round(($sizeGb * 1024) / $rows, 2) : 0;
|
||||
return [$sizeGb, $rows, $avgMb];
|
||||
}
|
||||
|
||||
list($dbSize, $rowCount, $avgRowMb) = getDbStats($pdo, $db);
|
||||
|
||||
$message = '';
|
||||
|
||||
if ($_SERVER['REQUEST_METHOD'] === 'POST' && isset($_POST['fill'])) {
|
||||
$iterations = 1024;
|
||||
$data = str_repeat(random_bytes(1024), 1024); // 1MB
|
||||
$stmt = $pdo->prepare("INSERT INTO filler (data) VALUES (:data)");
|
||||
|
||||
for ($i = 0; $i < $iterations; $i++) {
|
||||
$stmt->execute([':data' => $data]);
|
||||
}
|
||||
|
||||
list($dbSize, $rowCount, $avgRowMb) = getDbStats($pdo, $db);
|
||||
|
||||
$message = "<p style='color: green;'>✅ 1GB inserted into MariaDB successfully.</p>";
|
||||
}
|
||||
print_r("Hello this is from PHP")
|
||||
?>
|
||||
|
||||
<!DOCTYPE html>
|
||||
<html>
|
||||
<head>
|
||||
<title>MariaDB Filler</title>
|
||||
</head>
|
||||
<body>
|
||||
<h1>MariaDB Storage Filler</h1>
|
||||
<?= $message ?>
|
||||
<ul>
|
||||
<li><strong>📦 MariaDB Used Size:</strong> <?= $dbSize ?> GB</li>
|
||||
<li><strong>📊 Total Rows:</strong> <?= $rowCount ?></li>
|
||||
<li><strong>📐 Average Row Size:</strong> <?= $avgRowMb ?> MB</li>
|
||||
</ul>
|
||||
|
||||
<form method="post">
|
||||
<button name="fill" value="1" type="submit">Insert 1GB into DB</button>
|
||||
</form>
|
||||
</body>
|
||||
</html>
|
||||
|
||||
|
||||
@@ -2,56 +2,34 @@ use harmony::{
    data::Version,
    inventory::Inventory,
    maestro::Maestro,
    modules::lamp::{LAMPConfig, LAMPScore},
    modules::lamp::{LAMPConfig, LAMPProfile, LAMPScore},
    topology::{K8sAnywhereTopology, Url},
};
use std::collections::HashMap;

#[tokio::main]
async fn main() {
    // This here is the whole configuration to
    // - set up a local K3D cluster
    // - Build a docker image with the PHP project built in and production-grade settings
    // - Deploy a mariadb database using a production grade helm chart
    // - Deploy the new container using a kubernetes deployment
    // - Configure networking between the PHP container and the database
    // - Provision a public route and an SSL certificate automatically in production environments
    //
    // Enjoy :)
    // let _ = env_logger::Builder::from_default_env().filter_level(log::LevelFilter::Info).try_init();
    let lamp_stack = LAMPScore {
        name: "harmony-lamp-demo".to_string(),
        domain: Url::Url(url::Url::parse("https://lampdemo.harmony.nationtech.io").unwrap()),
        php_version: Version::from("8.4.4").unwrap(),
        // This config can be extended as needed for more complicated configurations
        config: LAMPConfig {
            project_root: "./php".into(),
            database_size: format!("4Gi").into(),
            ..Default::default()
        },
        profiles: HashMap::from([
            ("dev", LAMPProfile { ssl_enabled: false }),
            ("prod", LAMPProfile { ssl_enabled: true }),
        ]),
    };

    //let monitoring = MonitoringAlertingScore {
    //    alert_receivers: vec![Box::new(DiscordWebhook {
    //        url: Url::Url(url::Url::parse("https://discord.idonotexist.com").unwrap()),
    //        // TODO write url macro
    //        // url: url!("https://discord.idonotexist.com"),
    //    })],
    //    alert_rules: vec![],
    //    scrape_targets: vec![],
    //};

    // You can choose the type of Topology you want, we suggest starting with the
    // K8sAnywhereTopology as it is the most automatic one that enables you to easily deploy
    // locally, to a development environment from a CI, to staging, and to production with settings
    // that automatically adapt to each environment grade.
    let mut maestro = Maestro::<K8sAnywhereTopology>::initialize(
        Inventory::autoload(),
        K8sAnywhereTopology::from_env(),
        K8sAnywhereTopology::new(),
    )
    .await
    .unwrap();

    maestro.register_all(vec![Box::new(lamp_stack)]);
    // Here we bootstrap the CLI, this gives some nice features if you need them
    harmony_cli::init(maestro, None).await.unwrap();
    harmony_tui::init(maestro).await.unwrap();
}
// That's it, end of the infra as code.
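To try the LAMP example locally, running the binary from the workspace is usually enough, since K8sAnywhereTopology adapts to the environment it runs in. A minimal sketch, assuming the example is packaged as a crate named example-lamp (the package name is an assumption):

#!/bin/bash
# Sketch: run the LAMP example against a local cluster.
# "example-lamp" is an assumed package name; use the actual crate name from the workspace.
cargo run -p example-lamp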
@@ -1,12 +0,0 @@
[package]
name = "example-monitoring"
edition = "2024"
version.workspace = true
readme.workspace = true
license.workspace = true

[dependencies]
harmony = { version = "0.1.0", path = "../../harmony" }
harmony_cli = { version = "0.1.0", path = "../../harmony_cli" }
tokio.workspace = true
url.workspace = true
@@ -1,57 +0,0 @@
use harmony::{
    inventory::Inventory,
    maestro::Maestro,
    modules::{
        monitoring::{
            alert_channel::discord_alert_channel::DiscordWebhook,
            alert_rule::prometheus_alert_rule::AlertManagerRuleGroup,
            kube_prometheus::helm_prometheus_alert_score::HelmPrometheusAlertingScore,
        },
        prometheus::alerts::{
            infra::dell_server::{
                alert_global_storage_status_critical, alert_global_storage_status_non_recoverable,
                global_storage_status_degraded_non_critical,
            },
            k8s::pvc::high_pvc_fill_rate_over_two_days,
        },
    },
    topology::{K8sAnywhereTopology, Url},
};

#[tokio::main]
async fn main() {
    let discord_receiver = DiscordWebhook {
        name: "test-discord".to_string(),
        url: Url::Url(url::Url::parse("https://discord.doesnt.exist.com").unwrap()),
    };

    let high_pvc_fill_rate_over_two_days_alert = high_pvc_fill_rate_over_two_days();
    let dell_system_storage_degraded = global_storage_status_degraded_non_critical();
    let alert_global_storage_status_critical = alert_global_storage_status_critical();
    let alert_global_storage_status_non_recoverable = alert_global_storage_status_non_recoverable();

    let additional_rules =
        AlertManagerRuleGroup::new("pvc-alerts", vec![high_pvc_fill_rate_over_two_days_alert]);
    let additional_rules2 = AlertManagerRuleGroup::new(
        "dell-server-alerts",
        vec![
            dell_system_storage_degraded,
            alert_global_storage_status_critical,
            alert_global_storage_status_non_recoverable,
        ],
    );

    let alerting_score = HelmPrometheusAlertingScore {
        receivers: vec![Box::new(discord_receiver)],
        rules: vec![Box::new(additional_rules), Box::new(additional_rules2)],
    };
    let mut maestro = Maestro::<K8sAnywhereTopology>::initialize(
        Inventory::autoload(),
        K8sAnywhereTopology::from_env(),
    )
    .await
    .unwrap();

    maestro.register_all(vec![Box::new(alerting_score)]);
    harmony_cli::init(maestro, None).await.unwrap();
}
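A minimal sketch of how this (now removed) monitoring example could be exercised, assuming a cluster that already has the prometheus-operator CRDs installed; the package name comes from the Cargo.toml above, everything else is an assumption:

#!/bin/bash
# Build and run the monitoring example.
cargo run -p example-monitoring
# The alerting score should materialize as PrometheusRule objects; list them to confirm.
kubectl get prometheusrules --all-namespaces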
@@ -1,4 +0,0 @@
#!/bin/bash

helm install --create-namespace --namespace rook-ceph rook-ceph-cluster \
--set operatorNamespace=rook-ceph rook-release/rook-ceph-cluster -f values.yaml
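After the cluster chart is installed, a quick health check is to wait for the CephCluster resource to report a healthy phase and then query Ceph itself through the toolbox (enabled via toolbox.enabled: true in the values below). A sketch, assuming the standard rook-ceph-tools deployment name:

#!/bin/bash
# Check that the CephCluster CR exists and report its phase/health.
kubectl -n rook-ceph get cephcluster
# The toolbox deployment exposes the ceph CLI; "rook-ceph-tools" is the usual name.
kubectl -n rook-ceph exec deploy/rook-ceph-tools -- ceph status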
@@ -1,721 +0,0 @@
|
||||
# Default values for a single rook-ceph cluster
|
||||
# This is a YAML-formatted file.
|
||||
# Declare variables to be passed into your templates.
|
||||
|
||||
# -- Namespace of the main rook operator
|
||||
operatorNamespace: rook-ceph
|
||||
|
||||
# -- The metadata.name of the CephCluster CR
|
||||
# @default -- The same as the namespace
|
||||
clusterName:
|
||||
|
||||
# -- Optional override of the target kubernetes version
|
||||
kubeVersion:
|
||||
|
||||
# -- Cluster ceph.conf override
|
||||
configOverride:
|
||||
# configOverride: |
|
||||
# [global]
|
||||
# mon_allow_pool_delete = true
|
||||
# osd_pool_default_size = 3
|
||||
# osd_pool_default_min_size = 2
|
||||
|
||||
# Installs a debugging toolbox deployment
|
||||
toolbox:
|
||||
# -- Enable Ceph debugging pod deployment. See [toolbox](../Troubleshooting/ceph-toolbox.md)
|
||||
enabled: true
|
||||
# -- Toolbox image, defaults to the image used by the Ceph cluster
|
||||
image: #quay.io/ceph/ceph:v19.2.2
|
||||
# -- Toolbox tolerations
|
||||
tolerations: []
|
||||
# -- Toolbox affinity
|
||||
affinity: {}
|
||||
# -- Toolbox container security context
|
||||
containerSecurityContext:
|
||||
runAsNonRoot: true
|
||||
runAsUser: 2016
|
||||
runAsGroup: 2016
|
||||
capabilities:
|
||||
drop: ["ALL"]
|
||||
# -- Toolbox resources
|
||||
resources:
|
||||
limits:
|
||||
memory: "1Gi"
|
||||
requests:
|
||||
cpu: "100m"
|
||||
memory: "128Mi"
|
||||
# -- Set the priority class for the toolbox if desired
|
||||
priorityClassName:
|
||||
|
||||
monitoring:
|
||||
# -- Enable Prometheus integration, will also create necessary RBAC rules to allow Operator to create ServiceMonitors.
|
||||
# Monitoring requires Prometheus to be pre-installed
|
||||
enabled: false
|
||||
# -- Whether to disable the metrics reported by Ceph. If false, the prometheus mgr module and Ceph exporter are enabled
|
||||
metricsDisabled: false
|
||||
# -- Whether to create the Prometheus rules for Ceph alerts
|
||||
createPrometheusRules: false
|
||||
# -- The namespace in which to create the prometheus rules, if different from the rook cluster namespace.
|
||||
# If you have multiple rook-ceph clusters in the same k8s cluster, choose the same namespace (ideally, namespace with prometheus
|
||||
# deployed) to set rulesNamespaceOverride for all the clusters. Otherwise, you will get duplicate alerts with multiple alert definitions.
|
||||
rulesNamespaceOverride:
|
||||
# Monitoring settings for external clusters:
|
||||
# externalMgrEndpoints: <list of endpoints>
|
||||
# externalMgrPrometheusPort: <port>
|
||||
# Scrape interval for prometheus
|
||||
# interval: 10s
|
||||
# allow adding custom labels and annotations to the prometheus rule
|
||||
prometheusRule:
|
||||
# -- Labels applied to PrometheusRule
|
||||
labels: {}
|
||||
# -- Annotations applied to PrometheusRule
|
||||
annotations: {}
|
||||
|
||||
# -- Create & use PSP resources. Set this to the same value as the rook-ceph chart.
|
||||
pspEnable: false
|
||||
|
||||
# imagePullSecrets option allow to pull docker images from private docker registry. Option will be passed to all service accounts.
|
||||
# imagePullSecrets:
|
||||
# - name: my-registry-secret
|
||||
|
||||
# All values below are taken from the CephCluster CRD
|
||||
# -- Cluster configuration.
|
||||
# @default -- See [below](#ceph-cluster-spec)
|
||||
cephClusterSpec:
|
||||
# This cluster spec example is for a converged cluster where all the Ceph daemons are running locally,
|
||||
# as in the host-based example (cluster.yaml). For a different configuration such as a
|
||||
# PVC-based cluster (cluster-on-pvc.yaml), external cluster (cluster-external.yaml),
|
||||
# or stretch cluster (cluster-stretched.yaml), replace this entire `cephClusterSpec`
|
||||
# with the specs from those examples.
|
||||
|
||||
# For more details, check https://rook.io/docs/rook/v1.10/CRDs/Cluster/ceph-cluster-crd/
|
||||
cephVersion:
|
||||
# The container image used to launch the Ceph daemon pods (mon, mgr, osd, mds, rgw).
|
||||
# v18 is Reef, v19 is Squid
|
||||
# RECOMMENDATION: In production, use a specific version tag instead of the general v18 flag, which pulls the latest release and could result in different
|
||||
# versions running within the cluster. See tags available at https://hub.docker.com/r/ceph/ceph/tags/.
|
||||
# If you want to be more precise, you can always use a timestamp tag such as quay.io/ceph/ceph:v19.2.2-20250409
|
||||
# This tag might not contain a new Ceph version, just security fixes from the underlying operating system, which will reduce vulnerabilities
|
||||
image: quay.io/ceph/ceph:v19.2.2
|
||||
# Whether to allow unsupported versions of Ceph. Currently Reef and Squid are supported.
|
||||
# Future versions such as Tentacle (v20) would require this to be set to `true`.
|
||||
# Do not set to true in production.
|
||||
allowUnsupported: false
|
||||
|
||||
# The path on the host where configuration files will be persisted. Must be specified. If there are multiple clusters, the directory must be unique for each cluster.
|
||||
# Important: if you reinstall the cluster, make sure you delete this directory from each host or else the mons will fail to start on the new cluster.
|
||||
# In Minikube, the '/data' directory is configured to persist across reboots. Use "/data/rook" in Minikube environment.
|
||||
dataDirHostPath: /var/lib/rook
|
||||
|
||||
# Whether or not upgrade should continue even if a check fails
|
||||
# This means Ceph's status could be degraded and we don't recommend upgrading but you might decide otherwise
|
||||
# Use at your OWN risk
|
||||
# To understand Rook's upgrade process of Ceph, read https://rook.io/docs/rook/v1.10/Upgrade/ceph-upgrade/
|
||||
skipUpgradeChecks: false
|
||||
|
||||
# Whether or not to continue if PGs are not clean during an upgrade
|
||||
continueUpgradeAfterChecksEvenIfNotHealthy: false
|
||||
|
||||
# WaitTimeoutForHealthyOSDInMinutes defines the time (in minutes) the operator would wait before an OSD can be stopped for upgrade or restart.
|
||||
# If the timeout exceeds and OSD is not ok to stop, then the operator would skip upgrade for the current OSD and proceed with the next one
|
||||
# if `continueUpgradeAfterChecksEvenIfNotHealthy` is `false`. If `continueUpgradeAfterChecksEvenIfNotHealthy` is `true`, then operator would
|
||||
# continue with the upgrade of an OSD even if it's not ok to stop after the timeout. This timeout won't be applied if `skipUpgradeChecks` is `true`.
|
||||
# The default wait timeout is 10 minutes.
|
||||
waitTimeoutForHealthyOSDInMinutes: 10
|
||||
|
||||
# Whether or not to require that PGs are clean before an OSD upgrade. If set to `true`, the OSD upgrade process won't start until PGs are healthy.
|
||||
# This configuration will be ignored if `skipUpgradeChecks` is `true`.
|
||||
# Default is false.
|
||||
upgradeOSDRequiresHealthyPGs: false
|
||||
|
||||
mon:
|
||||
# Set the number of mons to be started. Generally recommended to be 3.
|
||||
# For highest availability, an odd number of mons should be specified.
|
||||
count: 3
|
||||
# The mons should be on unique nodes. For production, at least 3 nodes are recommended for this reason.
|
||||
# Mons should only be allowed on the same node for test environments where data loss is acceptable.
|
||||
allowMultiplePerNode: false
|
||||
|
||||
mgr:
|
||||
# When higher availability of the mgr is needed, increase the count to 2.
|
||||
# In that case, one mgr will be active and one in standby. When Ceph updates which
|
||||
# mgr is active, Rook will update the mgr services to match the active mgr.
|
||||
count: 2
|
||||
allowMultiplePerNode: false
|
||||
modules:
|
||||
# List of modules to optionally enable or disable.
|
||||
# Note the "dashboard" and "monitoring" modules are already configured by other settings in the cluster CR.
|
||||
# - name: rook
|
||||
# enabled: true
|
||||
|
||||
# enable the ceph dashboard for viewing cluster status
|
||||
dashboard:
|
||||
enabled: true
|
||||
# serve the dashboard under a subpath (useful when you are accessing the dashboard via a reverse proxy)
|
||||
# urlPrefix: /ceph-dashboard
|
||||
# serve the dashboard at the given port.
|
||||
# port: 8443
|
||||
# Serve the dashboard using SSL (if using ingress to expose the dashboard and `ssl: true` you need to set
|
||||
# the corresponding "backend protocol" annotation(s) for your ingress controller of choice)
|
||||
ssl: true
|
||||
|
||||
# Network configuration, see: https://github.com/rook/rook/blob/master/Documentation/CRDs/Cluster/ceph-cluster-crd.md#network-configuration-settings
|
||||
network:
|
||||
connections:
|
||||
# Whether to encrypt the data in transit across the wire to prevent eavesdropping the data on the network.
|
||||
# The default is false. When encryption is enabled, all communication between clients and Ceph daemons, or between Ceph daemons will be encrypted.
|
||||
# When encryption is not enabled, clients still establish a strong initial authentication and data integrity is still validated with a crc check.
|
||||
# IMPORTANT: Encryption requires the 5.11 kernel for the latest nbd and cephfs drivers. Alternatively for testing only,
|
||||
# you can set the "mounter: rbd-nbd" in the rbd storage class, or "mounter: fuse" in the cephfs storage class.
|
||||
# The nbd and fuse drivers are *not* recommended in production since restarting the csi driver pod will disconnect the volumes.
|
||||
encryption:
|
||||
enabled: false
|
||||
# Whether to compress the data in transit across the wire. The default is false.
|
||||
# The kernel requirements above for encryption also apply to compression.
|
||||
compression:
|
||||
enabled: false
|
||||
# Whether to require communication over msgr2. If true, the msgr v1 port (6789) will be disabled
|
||||
# and clients will be required to connect to the Ceph cluster with the v2 port (3300).
|
||||
# Requires a kernel that supports msgr v2 (kernel 5.11 or CentOS 8.4 or newer).
|
||||
requireMsgr2: false
|
||||
# # enable host networking
|
||||
# provider: host
|
||||
# # EXPERIMENTAL: enable the Multus network provider
|
||||
# provider: multus
|
||||
# selectors:
|
||||
# # The selector keys are required to be `public` and `cluster`.
|
||||
# # Based on the configuration, the operator will do the following:
|
||||
# # 1. if only the `public` selector key is specified both public_network and cluster_network Ceph settings will listen on that interface
|
||||
# # 2. if both `public` and `cluster` selector keys are specified the first one will point to 'public_network' flag and the second one to 'cluster_network'
|
||||
# #
|
||||
# # In order to work, each selector value must match a NetworkAttachmentDefinition object in Multus
|
||||
# #
|
||||
# # public: public-conf --> NetworkAttachmentDefinition object name in Multus
|
||||
# # cluster: cluster-conf --> NetworkAttachmentDefinition object name in Multus
|
||||
# # Provide internet protocol version. IPv6, IPv4 or empty string are valid options. Empty string would mean IPv4
|
||||
# ipFamily: "IPv6"
|
||||
# # Ceph daemons to listen on both IPv4 and IPv6 networks
|
||||
# dualStack: false
|
||||
|
||||
# enable the crash collector for ceph daemon crash collection
|
||||
crashCollector:
|
||||
disable: false
|
||||
# Uncomment daysToRetain to prune ceph crash entries older than the
|
||||
# specified number of days.
|
||||
# daysToRetain: 30
|
||||
|
||||
# enable log collector, daemons will log on files and rotate
|
||||
logCollector:
|
||||
enabled: true
|
||||
periodicity: daily # one of: hourly, daily, weekly, monthly
|
||||
maxLogSize: 500M # SUFFIX may be 'M' or 'G'. Must be at least 1M.
|
||||
|
||||
# automate [data cleanup process](https://github.com/rook/rook/blob/master/Documentation/Storage-Configuration/ceph-teardown.md#delete-the-data-on-hosts) in cluster destruction.
|
||||
cleanupPolicy:
|
||||
# Since cluster cleanup is destructive to data, confirmation is required.
|
||||
# To destroy all Rook data on hosts during uninstall, confirmation must be set to "yes-really-destroy-data".
|
||||
# This value should only be set when the cluster is about to be deleted. After the confirmation is set,
|
||||
# Rook will immediately stop configuring the cluster and only wait for the delete command.
|
||||
# If the empty string is set, Rook will not destroy any data on hosts during uninstall.
|
||||
confirmation: ""
|
||||
# sanitizeDisks represents settings for sanitizing OSD disks on cluster deletion
|
||||
sanitizeDisks:
|
||||
# method indicates if the entire disk should be sanitized or simply ceph's metadata
|
||||
# in both cases, re-install is possible
|
||||
# possible choices are 'complete' or 'quick' (default)
|
||||
method: quick
|
||||
# dataSource indicate where to get random bytes from to write on the disk
|
||||
# possible choices are 'zero' (default) or 'random'
|
||||
# using random sources will consume entropy from the system and will take much more time than the zero source
|
||||
dataSource: zero
|
||||
# iteration overwrite N times instead of the default (1)
|
||||
# takes an integer value
|
||||
iteration: 1
|
||||
# allowUninstallWithVolumes defines how the uninstall should be performed
|
||||
# If set to true, cephCluster deletion does not wait for the PVs to be deleted.
|
||||
allowUninstallWithVolumes: false
|
||||
|
||||
# To control where various services will be scheduled by kubernetes, use the placement configuration sections below.
|
||||
# The example under 'all' would have all services scheduled on kubernetes nodes labeled with 'role=storage-node' and
|
||||
# tolerate taints with a key of 'storage-node'.
|
||||
# placement:
|
||||
# all:
|
||||
# nodeAffinity:
|
||||
# requiredDuringSchedulingIgnoredDuringExecution:
|
||||
# nodeSelectorTerms:
|
||||
# - matchExpressions:
|
||||
# - key: role
|
||||
# operator: In
|
||||
# values:
|
||||
# - storage-node
|
||||
# podAffinity:
|
||||
# podAntiAffinity:
|
||||
# topologySpreadConstraints:
|
||||
# tolerations:
|
||||
# - key: storage-node
|
||||
# operator: Exists
|
||||
# # The above placement information can also be specified for mon, osd, and mgr components
|
||||
# mon:
|
||||
# # Monitor deployments may contain an anti-affinity rule for avoiding monitor
|
||||
# # collocation on the same node. This is a required rule when host network is used
|
||||
# # or when AllowMultiplePerNode is false. Otherwise this anti-affinity rule is a
|
||||
# # preferred rule with weight: 50.
|
||||
# osd:
|
||||
# mgr:
|
||||
# cleanup:
|
||||
|
||||
# annotations:
|
||||
# all:
|
||||
# mon:
|
||||
# osd:
|
||||
# cleanup:
|
||||
# prepareosd:
|
||||
# # If no mgr annotations are set, prometheus scrape annotations will be set by default.
|
||||
# mgr:
|
||||
# dashboard:
|
||||
|
||||
# labels:
|
||||
# all:
|
||||
# mon:
|
||||
# osd:
|
||||
# cleanup:
|
||||
# mgr:
|
||||
# prepareosd:
|
||||
# # monitoring is a list of key-value pairs. It is injected into all the monitoring resources created by operator.
|
||||
# # These labels can be passed as LabelSelector to Prometheus
|
||||
# monitoring:
|
||||
# dashboard:
|
||||
|
||||
resources:
|
||||
mgr:
|
||||
limits:
|
||||
memory: "1Gi"
|
||||
requests:
|
||||
cpu: "500m"
|
||||
memory: "512Mi"
|
||||
mon:
|
||||
limits:
|
||||
memory: "2Gi"
|
||||
requests:
|
||||
cpu: "1000m"
|
||||
memory: "1Gi"
|
||||
osd:
|
||||
limits:
|
||||
memory: "4Gi"
|
||||
requests:
|
||||
cpu: "1000m"
|
||||
memory: "4Gi"
|
||||
prepareosd:
|
||||
# limits: It is not recommended to set limits on the OSD prepare job
|
||||
# since it's a one-time burst for memory that must be allowed to
|
||||
# complete without an OOM kill. Note however that if a k8s
|
||||
# limitRange guardrail is defined external to Rook, the lack of
|
||||
# a limit here may result in a sync failure, in which case a
|
||||
# limit should be added. 1200Mi may suffice for up to 15Ti
|
||||
# OSDs ; for larger devices 2Gi may be required.
|
||||
# cf. https://github.com/rook/rook/pull/11103
|
||||
requests:
|
||||
cpu: "500m"
|
||||
memory: "50Mi"
|
||||
mgr-sidecar:
|
||||
limits:
|
||||
memory: "100Mi"
|
||||
requests:
|
||||
cpu: "100m"
|
||||
memory: "40Mi"
|
||||
crashcollector:
|
||||
limits:
|
||||
memory: "60Mi"
|
||||
requests:
|
||||
cpu: "100m"
|
||||
memory: "60Mi"
|
||||
logcollector:
|
||||
limits:
|
||||
memory: "1Gi"
|
||||
requests:
|
||||
cpu: "100m"
|
||||
memory: "100Mi"
|
||||
cleanup:
|
||||
limits:
|
||||
memory: "1Gi"
|
||||
requests:
|
||||
cpu: "500m"
|
||||
memory: "100Mi"
|
||||
exporter:
|
||||
limits:
|
||||
memory: "128Mi"
|
||||
requests:
|
||||
cpu: "50m"
|
||||
memory: "50Mi"
|
||||
|
||||
# The option to automatically remove OSDs that are out and are safe to destroy.
|
||||
removeOSDsIfOutAndSafeToRemove: false
|
||||
|
||||
# priority classes to apply to ceph resources
|
||||
priorityClassNames:
|
||||
mon: system-node-critical
|
||||
osd: system-node-critical
|
||||
mgr: system-cluster-critical
|
||||
|
||||
storage: # cluster level storage configuration and selection
|
||||
useAllNodes: true
|
||||
useAllDevices: true
|
||||
# deviceFilter:
|
||||
# config:
|
||||
# crushRoot: "custom-root" # specify a non-default root label for the CRUSH map
|
||||
# metadataDevice: "md0" # specify a non-rotational storage so ceph-volume will use it as block db device of bluestore.
|
||||
# databaseSizeMB: "1024" # uncomment if the disks are smaller than 100 GB
|
||||
# osdsPerDevice: "1" # this value can be overridden at the node or device level
|
||||
# encryptedDevice: "true" # the default value for this option is "false"
|
||||
# # Individual nodes and their config can be specified as well, but 'useAllNodes' above must be set to false. Then, only the named
|
||||
# # nodes below will be used as storage resources. Each node's 'name' field should match their 'kubernetes.io/hostname' label.
|
||||
# nodes:
|
||||
# - name: "172.17.4.201"
|
||||
# devices: # specific devices to use for storage can be specified for each node
|
||||
# - name: "sdb"
|
||||
# - name: "nvme01" # multiple osds can be created on high performance devices
|
||||
# config:
|
||||
# osdsPerDevice: "5"
|
||||
# - name: "/dev/disk/by-id/ata-ST4000DM004-XXXX" # devices can be specified using full udev paths
|
||||
# config: # configuration can be specified at the node level which overrides the cluster level config
|
||||
# - name: "172.17.4.301"
|
||||
# deviceFilter: "^sd."
|
||||
|
||||
# The section for configuring management of daemon disruptions during upgrade or fencing.
|
||||
disruptionManagement:
|
||||
# If true, the operator will create and manage PodDisruptionBudgets for OSD, Mon, RGW, and MDS daemons. OSD PDBs are managed dynamically
|
||||
# via the strategy outlined in the [design](https://github.com/rook/rook/blob/master/design/ceph/ceph-managed-disruptionbudgets.md). The operator will
|
||||
# block eviction of OSDs by default and unblock them safely when drains are detected.
|
||||
managePodBudgets: true
|
||||
# A duration in minutes that determines how long an entire failureDomain like `region/zone/host` will be held in `noout` (in addition to the
|
||||
# default DOWN/OUT interval) when it is draining. This is only relevant when `managePodBudgets` is `true`. The default value is `30` minutes.
|
||||
osdMaintenanceTimeout: 30
|
||||
|
||||
# Configure the healthcheck and liveness probes for ceph pods.
|
||||
# Valid values for daemons are 'mon', 'osd', 'status'
|
||||
healthCheck:
|
||||
daemonHealth:
|
||||
mon:
|
||||
disabled: false
|
||||
interval: 45s
|
||||
osd:
|
||||
disabled: false
|
||||
interval: 60s
|
||||
status:
|
||||
disabled: false
|
||||
interval: 60s
|
||||
# Change pod liveness probe, it works for all mon, mgr, and osd pods.
|
||||
livenessProbe:
|
||||
mon:
|
||||
disabled: false
|
||||
mgr:
|
||||
disabled: false
|
||||
osd:
|
||||
disabled: false
|
||||
|
||||
ingress:
|
||||
# -- Enable an ingress for the ceph-dashboard
|
||||
dashboard:
|
||||
# {}
|
||||
# labels:
|
||||
# external-dns/private: "true"
|
||||
annotations:
|
||||
"route.openshift.io/termination": "passthrough"
|
||||
# external-dns.alpha.kubernetes.io/hostname: dashboard.example.com
|
||||
# nginx.ingress.kubernetes.io/rewrite-target: /ceph-dashboard/$2
|
||||
# If the dashboard has ssl: true the following will make sure the NGINX Ingress controller can expose the dashboard correctly
|
||||
# nginx.ingress.kubernetes.io/backend-protocol: "HTTPS"
|
||||
# nginx.ingress.kubernetes.io/server-snippet: |
|
||||
# proxy_ssl_verify off;
|
||||
host:
|
||||
name: ceph.apps.ncd0.harmony.mcd
|
||||
path: null # TODO the chart does not allow removing the path, and it causes OpenShift to fail creating a route, because path is not supported with termination mode passthrough
|
||||
pathType: ImplementationSpecific
|
||||
tls:
|
||||
- {}
|
||||
# secretName: testsecret-tls
|
||||
# Note: Only one of ingress class annotation or the `ingressClassName:` can be used at a time
|
||||
# to set the ingress class
|
||||
# ingressClassName: openshift-default
|
||||
# labels:
|
||||
# external-dns/private: "true"
|
||||
# annotations:
|
||||
# external-dns.alpha.kubernetes.io/hostname: dashboard.example.com
|
||||
# nginx.ingress.kubernetes.io/rewrite-target: /ceph-dashboard/$2
|
||||
# If the dashboard has ssl: true the following will make sure the NGINX Ingress controller can expose the dashboard correctly
|
||||
# nginx.ingress.kubernetes.io/backend-protocol: "HTTPS"
|
||||
# nginx.ingress.kubernetes.io/server-snippet: |
|
||||
# proxy_ssl_verify off;
|
||||
# host:
|
||||
# name: dashboard.example.com
|
||||
# path: "/ceph-dashboard(/|$)(.*)"
|
||||
# pathType: Prefix
|
||||
# tls:
|
||||
# - hosts:
|
||||
# - dashboard.example.com
|
||||
# secretName: testsecret-tls
|
||||
## Note: Only one of ingress class annotation or the `ingressClassName:` can be used at a time
|
||||
## to set the ingress class
|
||||
# ingressClassName: nginx
|
||||
|
||||
# -- A list of CephBlockPool configurations to deploy
|
||||
# @default -- See [below](#ceph-block-pools)
|
||||
cephBlockPools:
|
||||
- name: ceph-blockpool
|
||||
# see https://github.com/rook/rook/blob/master/Documentation/CRDs/Block-Storage/ceph-block-pool-crd.md#spec for available configuration
|
||||
spec:
|
||||
failureDomain: host
|
||||
replicated:
|
||||
size: 3
|
||||
# Enables collecting RBD per-image IO statistics by enabling dynamic OSD performance counters. Defaults to false.
|
||||
# For reference: https://docs.ceph.com/docs/latest/mgr/prometheus/#rbd-io-statistics
|
||||
# enableRBDStats: true
|
||||
storageClass:
|
||||
enabled: true
|
||||
name: ceph-block
|
||||
annotations: {}
|
||||
labels: {}
|
||||
isDefault: true
|
||||
reclaimPolicy: Delete
|
||||
allowVolumeExpansion: true
|
||||
volumeBindingMode: "Immediate"
|
||||
mountOptions: []
|
||||
# see https://kubernetes.io/docs/concepts/storage/storage-classes/#allowed-topologies
|
||||
allowedTopologies: []
|
||||
# - matchLabelExpressions:
|
||||
# - key: rook-ceph-role
|
||||
# values:
|
||||
# - storage-node
|
||||
# see https://github.com/rook/rook/blob/master/Documentation/Storage-Configuration/Block-Storage-RBD/block-storage.md#provision-storage for available configuration
|
||||
parameters:
|
||||
# (optional) mapOptions is a comma-separated list of map options.
|
||||
# For krbd options refer
|
||||
# https://docs.ceph.com/docs/latest/man/8/rbd/#kernel-rbd-krbd-options
|
||||
# For nbd options refer
|
||||
# https://docs.ceph.com/docs/latest/man/8/rbd-nbd/#options
|
||||
# mapOptions: lock_on_read,queue_depth=1024
|
||||
|
||||
# (optional) unmapOptions is a comma-separated list of unmap options.
|
||||
# For krbd options refer
|
||||
# https://docs.ceph.com/docs/latest/man/8/rbd/#kernel-rbd-krbd-options
|
||||
# For nbd options refer
|
||||
# https://docs.ceph.com/docs/latest/man/8/rbd-nbd/#options
|
||||
# unmapOptions: force
|
||||
|
||||
# RBD image format. Defaults to "2".
|
||||
imageFormat: "2"
|
||||
|
||||
# RBD image features, equivalent to OR'd bitfield value: 63
|
||||
# Available for imageFormat: "2". Older releases of CSI RBD
|
||||
# support only the `layering` feature. The Linux kernel (KRBD) supports the
|
||||
# full feature complement as of 5.4
|
||||
imageFeatures: layering
|
||||
|
||||
# These secrets contain Ceph admin credentials.
|
||||
csi.storage.k8s.io/provisioner-secret-name: rook-csi-rbd-provisioner
|
||||
csi.storage.k8s.io/provisioner-secret-namespace: "{{ .Release.Namespace }}"
|
||||
csi.storage.k8s.io/controller-expand-secret-name: rook-csi-rbd-provisioner
|
||||
csi.storage.k8s.io/controller-expand-secret-namespace: "{{ .Release.Namespace }}"
|
||||
csi.storage.k8s.io/node-stage-secret-name: rook-csi-rbd-node
|
||||
csi.storage.k8s.io/node-stage-secret-namespace: "{{ .Release.Namespace }}"
|
||||
# Specify the filesystem type of the volume. If not specified, csi-provisioner
|
||||
# will set default as `ext4`. Note that `xfs` is not recommended due to potential deadlock
|
||||
# in hyperconverged settings where the volume is mounted on the same node as the osds.
|
||||
csi.storage.k8s.io/fstype: ext4
|
||||
|
||||
# -- A list of CephFileSystem configurations to deploy
|
||||
# @default -- See [below](#ceph-file-systems)
|
||||
cephFileSystems:
|
||||
- name: ceph-filesystem
|
||||
# see https://github.com/rook/rook/blob/master/Documentation/CRDs/Shared-Filesystem/ceph-filesystem-crd.md#filesystem-settings for available configuration
|
||||
spec:
|
||||
metadataPool:
|
||||
replicated:
|
||||
size: 3
|
||||
dataPools:
|
||||
- failureDomain: host
|
||||
replicated:
|
||||
size: 3
|
||||
# Optional and highly recommended, 'data0' by default, see https://github.com/rook/rook/blob/master/Documentation/CRDs/Shared-Filesystem/ceph-filesystem-crd.md#pools
|
||||
name: data0
|
||||
metadataServer:
|
||||
activeCount: 1
|
||||
activeStandby: true
|
||||
resources:
|
||||
limits:
|
||||
memory: "4Gi"
|
||||
requests:
|
||||
cpu: "1000m"
|
||||
memory: "4Gi"
|
||||
priorityClassName: system-cluster-critical
|
||||
storageClass:
|
||||
enabled: true
|
||||
isDefault: false
|
||||
name: ceph-filesystem
|
||||
# (Optional) specify a data pool to use, must be the name of one of the data pools above, 'data0' by default
|
||||
pool: data0
|
||||
reclaimPolicy: Delete
|
||||
allowVolumeExpansion: true
|
||||
volumeBindingMode: "Immediate"
|
||||
annotations: {}
|
||||
labels: {}
|
||||
mountOptions: []
|
||||
# see https://github.com/rook/rook/blob/master/Documentation/Storage-Configuration/Shared-Filesystem-CephFS/filesystem-storage.md#provision-storage for available configuration
|
||||
parameters:
|
||||
# The secrets contain Ceph admin credentials.
|
||||
csi.storage.k8s.io/provisioner-secret-name: rook-csi-cephfs-provisioner
|
||||
csi.storage.k8s.io/provisioner-secret-namespace: "{{ .Release.Namespace }}"
|
||||
csi.storage.k8s.io/controller-expand-secret-name: rook-csi-cephfs-provisioner
|
||||
csi.storage.k8s.io/controller-expand-secret-namespace: "{{ .Release.Namespace }}"
|
||||
csi.storage.k8s.io/node-stage-secret-name: rook-csi-cephfs-node
|
||||
csi.storage.k8s.io/node-stage-secret-namespace: "{{ .Release.Namespace }}"
|
||||
# Specify the filesystem type of the volume. If not specified, csi-provisioner
|
||||
# will set default as `ext4`. Note that `xfs` is not recommended due to potential deadlock
|
||||
# in hyperconverged settings where the volume is mounted on the same node as the osds.
|
||||
csi.storage.k8s.io/fstype: ext4
|
||||
|
||||
# -- Settings for the filesystem snapshot class
|
||||
# @default -- See [CephFS Snapshots](../Storage-Configuration/Ceph-CSI/ceph-csi-snapshot.md#cephfs-snapshots)
|
||||
cephFileSystemVolumeSnapshotClass:
|
||||
enabled: false
|
||||
name: ceph-filesystem
|
||||
isDefault: true
|
||||
deletionPolicy: Delete
|
||||
annotations: {}
|
||||
labels: {}
|
||||
# see https://rook.io/docs/rook/v1.10/Storage-Configuration/Ceph-CSI/ceph-csi-snapshot/#cephfs-snapshots for available configuration
|
||||
parameters: {}
|
||||
|
||||
# -- Settings for the block pool snapshot class
|
||||
# @default -- See [RBD Snapshots](../Storage-Configuration/Ceph-CSI/ceph-csi-snapshot.md#rbd-snapshots)
|
||||
cephBlockPoolsVolumeSnapshotClass:
|
||||
enabled: false
|
||||
name: ceph-block
|
||||
isDefault: false
|
||||
deletionPolicy: Delete
|
||||
annotations: {}
|
||||
labels: {}
|
||||
# see https://rook.io/docs/rook/v1.10/Storage-Configuration/Ceph-CSI/ceph-csi-snapshot/#rbd-snapshots for available configuration
|
||||
parameters: {}
|
||||
|
||||
# -- A list of CephObjectStore configurations to deploy
|
||||
# @default -- See [below](#ceph-object-stores)
|
||||
cephObjectStores:
|
||||
- name: ceph-objectstore
|
||||
# see https://github.com/rook/rook/blob/master/Documentation/CRDs/Object-Storage/ceph-object-store-crd.md#object-store-settings for available configuration
|
||||
spec:
|
||||
metadataPool:
|
||||
failureDomain: host
|
||||
replicated:
|
||||
size: 3
|
||||
dataPool:
|
||||
failureDomain: host
|
||||
erasureCoded:
|
||||
dataChunks: 2
|
||||
codingChunks: 1
|
||||
parameters:
|
||||
bulk: "true"
|
||||
preservePoolsOnDelete: true
|
||||
gateway:
|
||||
port: 80
|
||||
resources:
|
||||
limits:
|
||||
memory: "2Gi"
|
||||
requests:
|
||||
cpu: "1000m"
|
||||
memory: "1Gi"
|
||||
# securePort: 443
|
||||
# sslCertificateRef:
|
||||
instances: 1
|
||||
priorityClassName: system-cluster-critical
|
||||
# opsLogSidecar:
|
||||
# resources:
|
||||
# limits:
|
||||
# memory: "100Mi"
|
||||
# requests:
|
||||
# cpu: "100m"
|
||||
# memory: "40Mi"
|
||||
storageClass:
|
||||
enabled: true
|
||||
name: ceph-bucket
|
||||
reclaimPolicy: Delete
|
||||
volumeBindingMode: "Immediate"
|
||||
annotations: {}
|
||||
labels: {}
|
||||
# see https://github.com/rook/rook/blob/master/Documentation/Storage-Configuration/Object-Storage-RGW/ceph-object-bucket-claim.md#storageclass for available configuration
|
||||
parameters:
|
||||
# note: objectStoreNamespace and objectStoreName are configured by the chart
|
||||
region: us-east-1
|
||||
ingress:
|
||||
# Enable an ingress for the ceph-objectstore
|
||||
enabled: true
|
||||
# The ingress port by default will be the object store's "securePort" (if set), or the gateway "port".
|
||||
# To override those defaults, set this ingress port to the desired port.
|
||||
# port: 80
|
||||
# annotations: {}
|
||||
host:
|
||||
name: objectstore.apps.ncd0.harmony.mcd
|
||||
path: /
|
||||
pathType: Prefix
|
||||
# tls:
|
||||
# - hosts:
|
||||
# - objectstore.example.com
|
||||
# secretName: ceph-objectstore-tls
|
||||
# ingressClassName: nginx
|
||||
## cephECBlockPools are disabled by default, please remove the comments and set desired values to enable it
|
||||
## For erasure coded a replicated metadata pool is required.
|
||||
## https://rook.io/docs/rook/latest/CRDs/Shared-Filesystem/ceph-filesystem-crd/#erasure-coded
|
||||
#cephECBlockPools:
|
||||
# - name: ec-pool
|
||||
# spec:
|
||||
# metadataPool:
|
||||
# replicated:
|
||||
# size: 2
|
||||
# dataPool:
|
||||
# failureDomain: osd
|
||||
# erasureCoded:
|
||||
# dataChunks: 2
|
||||
# codingChunks: 1
|
||||
# deviceClass: hdd
|
||||
#
|
||||
# parameters:
|
||||
# # clusterID is the namespace where the rook cluster is running
|
||||
# # If you change this namespace, also change the namespace below where the secret namespaces are defined
|
||||
# clusterID: rook-ceph # namespace:cluster
|
||||
# # (optional) mapOptions is a comma-separated list of map options.
|
||||
# # For krbd options refer
|
||||
# # https://docs.ceph.com/docs/latest/man/8/rbd/#kernel-rbd-krbd-options
|
||||
# # For nbd options refer
|
||||
# # https://docs.ceph.com/docs/latest/man/8/rbd-nbd/#options
|
||||
# # mapOptions: lock_on_read,queue_depth=1024
|
||||
#
|
||||
# # (optional) unmapOptions is a comma-separated list of unmap options.
|
||||
# # For krbd options refer
|
||||
# # https://docs.ceph.com/docs/latest/man/8/rbd/#kernel-rbd-krbd-options
|
||||
# # For nbd options refer
|
||||
# # https://docs.ceph.com/docs/latest/man/8/rbd-nbd/#options
|
||||
# # unmapOptions: force
|
||||
#
|
||||
# # RBD image format. Defaults to "2".
|
||||
# imageFormat: "2"
|
||||
#
|
||||
# # RBD image features, equivalent to OR'd bitfield value: 63
|
||||
# # Available for imageFormat: "2". Older releases of CSI RBD
|
||||
# # support only the `layering` feature. The Linux kernel (KRBD) supports the
|
||||
# # full feature complement as of 5.4
|
||||
# # imageFeatures: layering,fast-diff,object-map,deep-flatten,exclusive-lock
|
||||
# imageFeatures: layering
|
||||
#
|
||||
# storageClass:
|
||||
# provisioner: rook-ceph.rbd.csi.ceph.com # csi-provisioner-name
|
||||
# enabled: true
|
||||
# name: rook-ceph-block
|
||||
# isDefault: false
|
||||
# annotations: { }
|
||||
# labels: { }
|
||||
# allowVolumeExpansion: true
|
||||
# reclaimPolicy: Delete
|
||||
|
||||
# -- CSI driver name prefix for cephfs, rbd and nfs.
|
||||
# @default -- `namespace name where rook-ceph operator is deployed`
|
||||
csiDriverNamePrefix:
|
||||
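Since the dashboard ingress above is exposed through an OpenShift passthrough route, the quickest way to confirm it came up is to list the ingress and hit the host over HTTPS. A sketch; the hostname is the one set in the values above, and the self-signed certificate means curl needs -k:

#!/bin/bash
# Confirm the dashboard ingress was created with the passthrough annotation.
kubectl -n rook-ceph get ingress
# The dashboard serves HTTPS end-to-end (ssl: true), so expect a self-signed certificate.
curl -k https://ceph.apps.ncd0.harmony.mcd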
@@ -1,3 +0,0 @@
#!/bin/bash
helm repo add rook-release https://charts.rook.io/release
helm install --create-namespace --namespace rook-ceph rook-ceph rook-release/rook-ceph -f values.yaml
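Once the operator chart is installed, the operator pod should come up before any CephCluster is applied. A minimal sketch to confirm it; the label and deployment name are the ones the rook-ceph chart conventionally uses and are assumed here:

#!/bin/bash
# The operator runs as a single deployment in the rook-ceph namespace.
kubectl -n rook-ceph get pods -l app=rook-ceph-operator
# Follow its logs while the CephCluster from the other values.yaml is being reconciled.
kubectl -n rook-ceph logs deploy/rook-ceph-operator -f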
@@ -1,674 +0,0 @@
|
||||
# Default values for rook-ceph-operator
|
||||
# This is a YAML-formatted file.
|
||||
# Declare variables to be passed into your templates.
|
||||
|
||||
image:
|
||||
# -- Image
|
||||
repository: docker.io/rook/ceph
|
||||
# -- Image tag
|
||||
# @default -- `master`
|
||||
tag: v1.17.1
|
||||
# -- Image pull policy
|
||||
pullPolicy: IfNotPresent
|
||||
|
||||
crds:
|
||||
# -- Whether the helm chart should create and update the CRDs. If false, the CRDs must be
|
||||
# managed independently with deploy/examples/crds.yaml.
|
||||
# **WARNING** Only set during first deployment. If later disabled the cluster may be DESTROYED.
|
||||
# If the CRDs are deleted in this case, see
|
||||
# [the disaster recovery guide](https://rook.io/docs/rook/latest/Troubleshooting/disaster-recovery/#restoring-crds-after-deletion)
|
||||
# to restore them.
|
||||
enabled: true
|
||||
|
||||
# -- Pod resource requests & limits
|
||||
resources:
|
||||
limits:
|
||||
memory: 512Mi
|
||||
requests:
|
||||
cpu: 200m
|
||||
memory: 128Mi
|
||||
|
||||
# -- Kubernetes [`nodeSelector`](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector) to add to the Deployment.
|
||||
nodeSelector: {}
|
||||
# Constraint rook-ceph-operator Deployment to nodes with label `disktype: ssd`.
|
||||
# For more info, see https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
|
||||
# disktype: ssd
|
||||
|
||||
# -- List of Kubernetes [`tolerations`](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/) to add to the Deployment.
|
||||
tolerations: []
|
||||
|
||||
# -- Delay to use for the `node.kubernetes.io/unreachable` pod failure toleration to override
|
||||
# the Kubernetes default of 5 minutes
|
||||
unreachableNodeTolerationSeconds: 5
|
||||
|
||||
# -- Whether the operator should watch cluster CRD in its own namespace or not
|
||||
currentNamespaceOnly: false
|
||||
|
||||
# -- Custom pod labels for the operator
|
||||
operatorPodLabels: {}
|
||||
|
||||
# -- Pod annotations
|
||||
annotations: {}
|
||||
|
||||
# -- Global log level for the operator.
|
||||
# Options: `ERROR`, `WARNING`, `INFO`, `DEBUG`
|
||||
logLevel: INFO
|
||||
|
||||
# -- If true, create & use RBAC resources
|
||||
rbacEnable: true
|
||||
|
||||
rbacAggregate:
|
||||
# -- If true, create a ClusterRole aggregated to [user facing roles](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#user-facing-roles) for objectbucketclaims
|
||||
enableOBCs: false
|
||||
|
||||
# -- If true, create & use PSP resources
|
||||
pspEnable: false
|
||||
|
||||
# -- Set the priority class for the rook operator deployment if desired
|
||||
priorityClassName:
|
||||
|
||||
# -- Set the container security context for the operator
|
||||
containerSecurityContext:
|
||||
runAsNonRoot: true
|
||||
runAsUser: 2016
|
||||
runAsGroup: 2016
|
||||
capabilities:
|
||||
drop: ["ALL"]
|
||||
# -- If true, loop devices are allowed to be used for osds in test clusters
|
||||
allowLoopDevices: false
|
||||
|
||||
# Settings for whether to disable the drivers or other daemons if they are not
|
||||
# needed
|
||||
csi:
|
||||
# -- Enable Ceph CSI RBD driver
|
||||
enableRbdDriver: true
|
||||
# -- Enable Ceph CSI CephFS driver
|
||||
enableCephfsDriver: true
|
||||
# -- Disable the CSI driver.
|
||||
disableCsiDriver: "false"
|
||||
|
||||
# -- Enable host networking for CSI CephFS and RBD nodeplugins. This may be necessary
|
||||
# in some network configurations where the SDN does not provide access to an external cluster or
|
||||
# there is a significant drop in read/write performance
|
||||
enableCSIHostNetwork: true
|
||||
# -- Enable Snapshotter in CephFS provisioner pod
|
||||
enableCephfsSnapshotter: true
|
||||
# -- Enable Snapshotter in NFS provisioner pod
|
||||
enableNFSSnapshotter: true
|
||||
# -- Enable Snapshotter in RBD provisioner pod
|
||||
enableRBDSnapshotter: true
|
||||
# -- Enable Host mount for `/etc/selinux` directory for Ceph CSI nodeplugins
|
||||
enablePluginSelinuxHostMount: false
|
||||
# -- Enable Ceph CSI PVC encryption support
|
||||
enableCSIEncryption: false
|
||||
|
||||
# -- Enable volume group snapshot feature. This feature is
|
||||
# enabled by default as long as the necessary CRDs are available in the cluster.
|
||||
enableVolumeGroupSnapshot: true
|
||||
# -- PriorityClassName to be set on csi driver plugin pods
|
||||
pluginPriorityClassName: system-node-critical
|
||||
|
||||
# -- PriorityClassName to be set on csi driver provisioner pods
|
||||
provisionerPriorityClassName: system-cluster-critical
|
||||
|
||||
# -- Policy for modifying a volume's ownership or permissions when the RBD PVC is being mounted.
|
||||
# supported values are documented at https://kubernetes-csi.github.io/docs/support-fsgroup.html
|
||||
rbdFSGroupPolicy: "File"
|
||||
|
||||
# -- Policy for modifying a volume's ownership or permissions when the CephFS PVC is being mounted.
|
||||
# supported values are documented at https://kubernetes-csi.github.io/docs/support-fsgroup.html
|
||||
cephFSFSGroupPolicy: "File"
|
||||
|
||||
# -- Policy for modifying a volume's ownership or permissions when the NFS PVC is being mounted.
|
||||
# supported values are documented at https://kubernetes-csi.github.io/docs/support-fsgroup.html
|
||||
nfsFSGroupPolicy: "File"
|
||||
|
||||
# -- OMAP generator generates the omap mapping between the PV name and the RBD image
|
||||
# which helps CSI to identify the rbd images for CSI operations.
|
||||
# `CSI_ENABLE_OMAP_GENERATOR` needs to be enabled when we are using rbd mirroring feature.
|
||||
# By default OMAP generator is disabled and when enabled, it will be deployed as a
|
||||
# sidecar with CSI provisioner pod, to enable set it to true.
|
||||
enableOMAPGenerator: false
|
||||
|
||||
# -- Set CephFS Kernel mount options to use https://docs.ceph.com/en/latest/man/8/mount.ceph/#options.
|
||||
# Set to "ms_mode=secure" when connections.encrypted is enabled in CephCluster CR
|
||||
cephFSKernelMountOptions:
|
||||
|
||||
# -- Enable adding volume metadata on the CephFS subvolumes and RBD images.
|
||||
# Not all users might be interested in getting volume/snapshot details as metadata on CephFS subvolume and RBD images.
|
||||
# Hence enable metadata is false by default
|
||||
enableMetadata: false
|
||||
|
||||
# -- Set replicas for csi provisioner deployment
|
||||
provisionerReplicas: 2
|
||||
|
||||
# -- Cluster name identifier to set as metadata on the CephFS subvolume and RBD images. This will be useful
|
||||
# in cases like for example, when two container orchestrator clusters (Kubernetes/OCP) are using a single ceph cluster
|
||||
clusterName:
|
||||
|
||||
# -- Set logging level for cephCSI containers maintained by the cephCSI.
|
||||
# Supported values from 0 to 5. 0 for general useful logs, 5 for trace level verbosity.
|
||||
logLevel: 0
|
||||
|
||||
# -- Set logging level for Kubernetes-csi sidecar containers.
|
||||
# Supported values from 0 to 5. 0 for general useful logs (the default), 5 for trace level verbosity.
|
||||
# @default -- `0`
|
||||
sidecarLogLevel:
|
||||
|
||||
# -- CSI driver name prefix for cephfs, rbd and nfs.
|
||||
# @default -- `namespace name where rook-ceph operator is deployed`
|
||||
csiDriverNamePrefix:
|
||||
|
||||
# -- CSI RBD plugin daemonset update strategy, supported values are OnDelete and RollingUpdate
|
||||
# @default -- `RollingUpdate`
|
||||
rbdPluginUpdateStrategy:
|
||||
|
||||
# -- A maxUnavailable parameter of CSI RBD plugin daemonset update strategy.
|
||||
# @default -- `1`
|
||||
rbdPluginUpdateStrategyMaxUnavailable:
|
||||
|
||||
# -- CSI CephFS plugin daemonset update strategy, supported values are OnDelete and RollingUpdate
|
||||
# @default -- `RollingUpdate`
|
||||
cephFSPluginUpdateStrategy:
|
||||
|
||||
# -- A maxUnavailable parameter of CSI cephFS plugin daemonset update strategy.
|
||||
# @default -- `1`
|
||||
cephFSPluginUpdateStrategyMaxUnavailable:
|
||||
|
||||
# -- CSI NFS plugin daemonset update strategy, supported values are OnDelete and RollingUpdate
|
||||
# @default -- `RollingUpdate`
|
||||
nfsPluginUpdateStrategy:
|
||||
|
||||
# -- Set GRPC timeout for csi containers (in seconds). It should be >= 120. If this value is not set or is invalid, it defaults to 150
|
||||
grpcTimeoutInSeconds: 150
|
||||
|
||||
# -- Burst to use while communicating with the kubernetes apiserver.
|
||||
kubeApiBurst:
|
||||
|
||||
# -- QPS to use while communicating with the kubernetes apiserver.
|
||||
kubeApiQPS:
|
||||
|
||||
# -- The volume of the CephCSI RBD plugin DaemonSet
|
||||
csiRBDPluginVolume:
|
||||
# - name: lib-modules
|
||||
# hostPath:
|
||||
# path: /run/booted-system/kernel-modules/lib/modules/
|
||||
# - name: host-nix
|
||||
# hostPath:
|
||||
# path: /nix
|
||||
|
||||
# -- The volume mounts of the CephCSI RBD plugin DaemonSet
|
||||
csiRBDPluginVolumeMount:
|
||||
# - name: host-nix
|
||||
# mountPath: /nix
|
||||
# readOnly: true
|
||||
|
||||
# -- The volume of the CephCSI CephFS plugin DaemonSet
|
||||
csiCephFSPluginVolume:
|
||||
# - name: lib-modules
|
||||
# hostPath:
|
||||
# path: /run/booted-system/kernel-modules/lib/modules/
|
||||
# - name: host-nix
|
||||
# hostPath:
|
||||
# path: /nix
|
||||
|
||||
# -- The volume mounts of the CephCSI CephFS plugin DaemonSet
|
||||
csiCephFSPluginVolumeMount:
|
||||
# - name: host-nix
|
||||
# mountPath: /nix
|
||||
# readOnly: true
|
||||
|
||||
# -- CEPH CSI RBD provisioner resource requirement list
|
||||
# csi-omap-generator resources will be applied only if `enableOMAPGenerator` is set to `true`
|
||||
# @default -- see values.yaml
|
||||
csiRBDProvisionerResource: |
|
||||
- name : csi-provisioner
|
||||
resource:
|
||||
requests:
|
||||
memory: 128Mi
|
||||
cpu: 100m
|
||||
limits:
|
||||
memory: 256Mi
|
||||
- name : csi-resizer
|
||||
resource:
|
||||
requests:
|
||||
memory: 128Mi
|
||||
cpu: 100m
|
||||
limits:
|
||||
memory: 256Mi
|
||||
- name : csi-attacher
|
||||
resource:
|
||||
requests:
|
||||
memory: 128Mi
|
||||
cpu: 100m
|
||||
limits:
|
||||
memory: 256Mi
|
||||
- name : csi-snapshotter
|
||||
resource:
|
||||
requests:
|
||||
memory: 128Mi
|
||||
cpu: 100m
|
||||
limits:
|
||||
memory: 256Mi
|
||||
- name : csi-rbdplugin
|
||||
resource:
|
||||
requests:
|
||||
memory: 512Mi
|
||||
limits:
|
||||
memory: 1Gi
|
||||
- name : csi-omap-generator
|
||||
resource:
|
||||
requests:
|
||||
memory: 512Mi
|
||||
cpu: 250m
|
||||
limits:
|
||||
memory: 1Gi
|
||||
- name : liveness-prometheus
|
||||
resource:
|
||||
requests:
|
||||
memory: 128Mi
|
||||
cpu: 50m
|
||||
limits:
|
||||
memory: 256Mi
|
||||
|
||||
# -- CEPH CSI RBD plugin resource requirement list
|
||||
# @default -- see values.yaml
|
||||
csiRBDPluginResource: |
|
||||
- name : driver-registrar
|
||||
resource:
|
||||
requests:
|
||||
memory: 128Mi
|
||||
cpu: 50m
|
||||
limits:
|
||||
memory: 256Mi
|
||||
- name : csi-rbdplugin
|
||||
resource:
|
||||
requests:
|
||||
memory: 512Mi
|
||||
cpu: 250m
|
||||
limits:
|
||||
memory: 1Gi
|
||||
- name : liveness-prometheus
|
||||
resource:
|
||||
requests:
|
||||
memory: 128Mi
|
||||
cpu: 50m
|
||||
limits:
|
||||
memory: 256Mi
|
||||
|
||||
# -- CEPH CSI CephFS provisioner resource requirement list
|
||||
# @default -- see values.yaml
|
||||
csiCephFSProvisionerResource: |
|
||||
- name : csi-provisioner
|
||||
resource:
|
||||
requests:
|
||||
memory: 128Mi
|
||||
cpu: 100m
|
||||
limits:
|
||||
memory: 256Mi
|
||||
- name : csi-resizer
|
||||
resource:
|
||||
requests:
|
||||
memory: 128Mi
|
||||
cpu: 100m
|
||||
limits:
|
||||
memory: 256Mi
|
||||
- name : csi-attacher
|
||||
resource:
|
||||
requests:
|
||||
memory: 128Mi
|
||||
cpu: 100m
|
||||
limits:
|
||||
memory: 256Mi
|
||||
- name : csi-snapshotter
|
||||
resource:
|
||||
requests:
|
||||
memory: 128Mi
|
||||
cpu: 100m
|
||||
limits:
|
||||
memory: 256Mi
|
||||
- name : csi-cephfsplugin
|
||||
resource:
|
||||
requests:
|
||||
memory: 512Mi
|
||||
cpu: 250m
|
||||
limits:
|
||||
memory: 1Gi
|
||||
- name : liveness-prometheus
|
||||
resource:
|
||||
requests:
|
||||
memory: 128Mi
|
||||
cpu: 50m
|
||||
limits:
|
||||
memory: 256Mi
|
||||
|
||||
# -- CEPH CSI CephFS plugin resource requirement list
|
||||
# @default -- see values.yaml
|
||||
csiCephFSPluginResource: |
|
||||
- name : driver-registrar
|
||||
resource:
|
||||
requests:
|
||||
memory: 128Mi
|
||||
cpu: 50m
|
||||
limits:
|
||||
memory: 256Mi
|
||||
- name : csi-cephfsplugin
|
||||
resource:
|
||||
requests:
|
||||
memory: 512Mi
|
||||
cpu: 250m
|
||||
limits:
|
||||
memory: 1Gi
|
||||
- name : liveness-prometheus
|
||||
resource:
|
||||
requests:
|
||||
memory: 128Mi
|
||||
cpu: 50m
|
||||
limits:
|
||||
memory: 256Mi
|
||||
|
||||
# -- CEPH CSI NFS provisioner resource requirement list
|
||||
# @default -- see values.yaml
|
||||
csiNFSProvisionerResource: |
|
||||
- name : csi-provisioner
|
||||
resource:
|
||||
requests:
|
||||
memory: 128Mi
|
||||
cpu: 100m
|
||||
limits:
|
||||
memory: 256Mi
|
||||
- name : csi-nfsplugin
|
||||
resource:
|
||||
requests:
|
||||
memory: 512Mi
|
||||
cpu: 250m
|
||||
limits:
|
||||
memory: 1Gi
|
||||
- name : csi-attacher
|
||||
resource:
|
||||
requests:
|
||||
memory: 512Mi
|
||||
cpu: 250m
|
||||
limits:
|
||||
memory: 1Gi
|
||||
|
||||
# -- CEPH CSI NFS plugin resource requirement list
|
||||
# @default -- see values.yaml
|
||||
csiNFSPluginResource: |
|
||||
- name : driver-registrar
|
||||
resource:
|
||||
requests:
|
||||
memory: 128Mi
|
||||
cpu: 50m
|
||||
limits:
|
||||
memory: 256Mi
|
||||
- name : csi-nfsplugin
|
||||
resource:
|
||||
requests:
|
||||
memory: 512Mi
|
||||
cpu: 250m
|
||||
limits:
|
||||
memory: 1Gi
|
||||
|
||||
# Set provisionerTolerations and provisionerNodeAffinity for provisioner pod.
|
||||
# The CSI provisioner would be best to start on the same nodes as other ceph daemons.
|
||||
|
||||
# -- Array of tolerations in YAML format which will be added to CSI provisioner deployment
|
||||
provisionerTolerations:
|
||||
# - key: key
|
||||
# operator: Exists
|
||||
# effect: NoSchedule
|
||||
|
||||
# -- The node labels for affinity of the CSI provisioner deployment [^1]
|
||||
provisionerNodeAffinity: #key1=value1,value2; key2=value3
|
||||
# Set pluginTolerations and pluginNodeAffinity for plugin daemonset pods.
|
||||
# The CSI plugins need to be started on all the nodes where the clients need to mount the storage.
|
||||
|
||||
# -- Array of tolerations in YAML format which will be added to CephCSI plugin DaemonSet
|
||||
pluginTolerations:
|
||||
# - key: key
|
||||
# operator: Exists
|
||||
# effect: NoSchedule
|
||||
|
||||
# -- The node labels for affinity of the CephCSI RBD plugin DaemonSet [^1]
|
||||
pluginNodeAffinity: # key1=value1,value2; key2=value3
|
||||
|
||||
# -- Enable Ceph CSI Liveness sidecar deployment
|
||||
enableLiveness: false
|
||||
|
||||
# -- CSI CephFS driver metrics port
|
||||
# @default -- `9081`
|
||||
cephfsLivenessMetricsPort:
|
||||
|
||||
# -- CSI Addons server port
|
||||
# @default -- `9070`
|
||||
csiAddonsPort:
|
||||
# -- CSI Addons server port for the RBD provisioner
|
||||
# @default -- `9070`
|
||||
csiAddonsRBDProvisionerPort:
|
||||
# -- CSI Addons server port for the Ceph FS provisioner
|
||||
# @default -- `9070`
|
||||
csiAddonsCephFSProvisionerPort:
|
||||
|
||||
# -- Enable Ceph Kernel clients on kernel < 4.17. If your kernel does not support quotas for CephFS
|
||||
# you may want to disable this setting. However, this will cause an issue during upgrades
|
||||
# with the FUSE client. See the [upgrade guide](https://rook.io/docs/rook/v1.2/ceph-upgrade.html)
|
||||
forceCephFSKernelClient: true
|
||||
|
||||
# -- Ceph CSI RBD driver metrics port
|
||||
# @default -- `8080`
|
||||
rbdLivenessMetricsPort:
|
||||
|
||||
serviceMonitor:
|
||||
# -- Enable ServiceMonitor for Ceph CSI drivers
|
||||
enabled: false
|
||||
# -- Service monitor scrape interval
|
||||
interval: 10s
|
||||
# -- ServiceMonitor additional labels
|
||||
labels: {}
|
||||
# -- Use a different namespace for the ServiceMonitor
|
||||
namespace:
|
||||
|
||||
# -- Kubelet root directory path (if the Kubelet uses a different path for the `--root-dir` flag)
|
||||
# @default -- `/var/lib/kubelet`
|
||||
kubeletDirPath:
|
||||
|
||||
# -- Duration in seconds that non-leader candidates will wait to force acquire leadership.
|
||||
# @default -- `137s`
|
||||
csiLeaderElectionLeaseDuration:
|
||||
|
||||
# -- Deadline in seconds that the acting leader will retry refreshing leadership before giving up.
|
||||
# @default -- `107s`
|
||||
csiLeaderElectionRenewDeadline:
|
||||
|
||||
# -- Retry period in seconds the LeaderElector clients should wait between tries of actions.
|
||||
# @default -- `26s`
|
||||
csiLeaderElectionRetryPeriod:
|
||||
|
||||
cephcsi:
|
||||
# -- Ceph CSI image repository
|
||||
repository: quay.io/cephcsi/cephcsi
|
||||
# -- Ceph CSI image tag
|
||||
tag: v3.14.0
|
||||
|
||||
registrar:
|
||||
# -- Kubernetes CSI registrar image repository
|
||||
repository: registry.k8s.io/sig-storage/csi-node-driver-registrar
|
||||
# -- Registrar image tag
|
||||
tag: v2.13.0
|
||||
|
||||
provisioner:
|
||||
# -- Kubernetes CSI provisioner image repository
|
||||
repository: registry.k8s.io/sig-storage/csi-provisioner
|
||||
# -- Provisioner image tag
|
||||
tag: v5.1.0
|
||||
|
||||
snapshotter:
|
||||
# -- Kubernetes CSI snapshotter image repository
|
||||
repository: registry.k8s.io/sig-storage/csi-snapshotter
|
||||
# -- Snapshotter image tag
|
||||
tag: v8.2.0
|
||||
|
||||
attacher:
|
||||
# -- Kubernetes CSI Attacher image repository
|
||||
repository: registry.k8s.io/sig-storage/csi-attacher
|
||||
# -- Attacher image tag
|
||||
tag: v4.8.0
|
||||
|
||||
resizer:
|
||||
# -- Kubernetes CSI resizer image repository
|
||||
repository: registry.k8s.io/sig-storage/csi-resizer
|
||||
# -- Resizer image tag
|
||||
tag: v1.13.1
|
||||
|
||||
# -- Image pull policy
|
||||
imagePullPolicy: IfNotPresent
|
||||
|
||||
# -- Labels to add to the CSI CephFS Deployments and DaemonSets Pods
|
||||
cephfsPodLabels: #"key1=value1,key2=value2"
|
||||
|
||||
# -- Labels to add to the CSI NFS Deployments and DaemonSets Pods
|
||||
nfsPodLabels: #"key1=value1,key2=value2"
|
||||
|
||||
# -- Labels to add to the CSI RBD Deployments and DaemonSets Pods
|
||||
rbdPodLabels: #"key1=value1,key2=value2"
|
||||
|
||||
csiAddons:
|
||||
# -- Enable CSIAddons
|
||||
enabled: false
|
||||
# -- CSIAddons sidecar image repository
|
||||
repository: quay.io/csiaddons/k8s-sidecar
|
||||
# -- CSIAddons sidecar image tag
|
||||
tag: v0.12.0
|
||||
|
||||
nfs:
|
||||
# -- Enable the nfs csi driver
|
||||
enabled: false
|
||||
|
||||
topology:
|
||||
# -- Enable topology based provisioning
|
||||
enabled: false
|
||||
# NOTE: the value here serves as an example and needs to be
|
||||
# updated with node labels that define domains of interest
|
||||
# -- domainLabels define which node labels to use as domains
|
||||
# for CSI nodeplugins to advertise their domains
|
||||
domainLabels:
|
||||
# - kubernetes.io/hostname
|
||||
# - topology.kubernetes.io/zone
|
||||
# - topology.rook.io/rack
|
||||
|
||||
# -- Whether to skip any attach operation altogether for CephFS PVCs. See more details
|
||||
# [here](https://kubernetes-csi.github.io/docs/skip-attach.html#skip-attach-with-csi-driver-object).
|
||||
# If cephFSAttachRequired is set to false it skips the volume attachments and makes the creation
|
||||
# of pods using the CephFS PVC fast. **WARNING** It's highly discouraged to use this for
|
||||
# CephFS RWO volumes. Refer to this [issue](https://github.com/kubernetes/kubernetes/issues/103305) for more details.
|
||||
cephFSAttachRequired: true
|
||||
# -- Whether to skip any attach operation altogether for RBD PVCs. See more details
|
||||
# [here](https://kubernetes-csi.github.io/docs/skip-attach.html#skip-attach-with-csi-driver-object).
|
||||
# If set to false it skips the volume attachments and makes the creation of pods using the RBD PVC fast.
|
||||
# **WARNING** It's highly discouraged to use this for RWO volumes as it can cause data corruption.
|
||||
# csi-addons operations like Reclaimspace and PVC Keyrotation will also not be supported if set
|
||||
# to false since we'll have no VolumeAttachments to determine which node the PVC is mounted on.
|
||||
# Refer to this [issue](https://github.com/kubernetes/kubernetes/issues/103305) for more details.
|
||||
rbdAttachRequired: true
|
||||
# -- Whether to skip any attach operation altogether for NFS PVCs. See more details
|
||||
# [here](https://kubernetes-csi.github.io/docs/skip-attach.html#skip-attach-with-csi-driver-object).
|
||||
# If cephFSAttachRequired is set to false it skips the volume attachments and makes the creation
|
||||
# of pods using the NFS PVC fast. **WARNING** It's highly discouraged to use this for
|
||||
# NFS RWO volumes. Refer to this [issue](https://github.com/kubernetes/kubernetes/issues/103305) for more details.
|
||||
nfsAttachRequired: true
|
||||
|
||||
# -- Enable discovery daemon
|
||||
enableDiscoveryDaemon: false
|
||||
# -- Set the discovery daemon device discovery interval (defaults to 60m)
|
||||
discoveryDaemonInterval: 60m
|
||||
|
||||
# -- The timeout for ceph commands in seconds
|
||||
cephCommandsTimeoutSeconds: "15"
|
||||
|
||||
# -- If true, run rook operator on the host network
|
||||
useOperatorHostNetwork:
|
||||
|
||||
# -- If true, scale down the rook operator.
|
||||
# This is useful for administrative actions where the rook operator must be scaled down, while using gitops style tooling
|
||||
# to deploy your helm charts.
|
||||
scaleDownOperator: false
|
||||
|
||||
## Rook Discover configuration
|
||||
## toleration: NoSchedule, PreferNoSchedule or NoExecute
|
||||
## tolerationKey: Set this to the specific key of the taint to tolerate
|
||||
## tolerations: Array of tolerations in YAML format which will be added to agent deployment
|
||||
## nodeAffinity: Set to labels of the node to match
|
||||
|
||||
discover:
|
||||
# -- Toleration for the discover pods.
|
||||
# Options: `NoSchedule`, `PreferNoSchedule` or `NoExecute`
|
||||
toleration:
|
||||
# -- The specific key of the taint to tolerate
|
||||
tolerationKey:
|
||||
# -- Array of tolerations in YAML format which will be added to discover deployment
|
||||
tolerations:
|
||||
# - key: key
|
||||
# operator: Exists
|
||||
# effect: NoSchedule
|
||||
# -- The node labels for affinity of `discover-agent` [^1]
|
||||
nodeAffinity:
|
||||
# key1=value1,value2; key2=value3
|
||||
#
|
||||
# or
|
||||
#
|
||||
# requiredDuringSchedulingIgnoredDuringExecution:
|
||||
# nodeSelectorTerms:
|
||||
# - matchExpressions:
|
||||
# - key: storage-node
|
||||
# operator: Exists
|
||||
# -- Labels to add to the discover pods
|
||||
podLabels: # "key1=value1,key2=value2"
|
||||
# -- Add resources to discover daemon pods
|
||||
resources:
|
||||
# - limits:
|
||||
# memory: 512Mi
|
||||
# - requests:
|
||||
# cpu: 100m
|
||||
# memory: 128Mi
|
||||
|
||||
# -- Custom label to identify node hostname. If not set `kubernetes.io/hostname` will be used
|
||||
customHostnameLabel:
|
||||
|
||||
# -- Runs Ceph Pods as privileged to be able to write to `hostPaths` in OpenShift with SELinux restrictions.
|
||||
hostpathRequiresPrivileged: false
|
||||
|
||||
# -- Whether to create all Rook pods to run on the host network, for example in environments where a CNI is not enabled
|
||||
enforceHostNetwork: false
|
||||
|
||||
# -- Disable automatic orchestration when new devices are discovered.
|
||||
disableDeviceHotplug: false
|
||||
|
||||
# -- The revision history limit for all pods created by Rook. If blank, the K8s default is 10.
|
||||
revisionHistoryLimit:
|
||||
|
||||
# -- Blacklist certain disks according to the regex provided.
|
||||
discoverDaemonUdev:
|
||||
|
||||
# -- imagePullSecrets option allow to pull docker images from private docker registry. Option will be passed to all service accounts.
|
||||
imagePullSecrets:
|
||||
# - name: my-registry-secret
|
||||
|
||||
# -- Whether the OBC provisioner should watch on the operator namespace or not, if not the namespace of the cluster will be used
|
||||
enableOBCWatchOperatorNamespace: true
|
||||
|
||||
# -- Specify the prefix for the OBC provisioner in place of the cluster namespace
|
||||
# @default -- `ceph cluster namespace`
|
||||
obcProvisionerNamePrefix:
|
||||
|
||||
# -- Many OBC additional config fields may be risky for administrators to allow users control over.
|
||||
# The safe and default-allowed fields are 'maxObjects' and 'maxSize'.
|
||||
# Other fields should be considered risky. To allow all additional configs, use this value:
|
||||
# "maxObjects,maxSize,bucketMaxObjects,bucketMaxSize,bucketPolicy,bucketLifecycle,bucketOwner"
|
||||
# @default -- "maxObjects,maxSize"
|
||||
obcAllowAdditionalConfigFields: "maxObjects,maxSize"
|
||||
|
||||
monitoring:
|
||||
# -- Enable monitoring. Requires Prometheus to be pre-installed.
|
||||
# Enabling will also create RBAC rules to allow Operator to create ServiceMonitors
|
||||
enabled: false
|
||||
@@ -1,145 +1,26 @@
|
||||
use std::{
|
||||
net::{IpAddr, Ipv4Addr},
|
||||
sync::Arc,
|
||||
};
|
||||
|
||||
use cidr::Ipv4Cidr;
|
||||
use harmony::{
|
||||
hardware::{FirewallGroup, HostCategory, Location, PhysicalHost, SwitchGroup},
|
||||
infra::opnsense::OPNSenseManagementInterface,
|
||||
inventory::Inventory,
|
||||
maestro::Maestro,
|
||||
modules::{
|
||||
http::HttpScore,
|
||||
ipxe::IpxeScore,
|
||||
okd::{
|
||||
bootstrap_dhcp::OKDBootstrapDhcpScore,
|
||||
bootstrap_load_balancer::OKDBootstrapLoadBalancerScore, dhcp::OKDDhcpScore,
|
||||
dns::OKDDnsScore,
|
||||
},
|
||||
tftp::TftpScore,
|
||||
},
|
||||
topology::{LogicalHost, UnmanagedRouter, Url},
|
||||
modules::dummy::{ErrorScore, PanicScore, SuccessScore},
|
||||
topology::HAClusterTopology,
|
||||
};
|
||||
use harmony_macros::{ip, mac_address};
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() {
|
||||
let firewall = harmony::topology::LogicalHost {
|
||||
ip: ip!("192.168.33.1"),
|
||||
name: String::from("fw0"),
|
||||
};
|
||||
|
||||
let opnsense = Arc::new(
|
||||
harmony::infra::opnsense::OPNSenseFirewall::new(firewall, None, "root", "opnsense").await,
|
||||
);
|
||||
let lan_subnet = Ipv4Addr::new(192, 168, 33, 0);
|
||||
let gateway_ipv4 = Ipv4Addr::new(192, 168, 33, 1);
|
||||
let gateway_ip = IpAddr::V4(gateway_ipv4);
|
||||
let topology = harmony::topology::HAClusterTopology {
|
||||
domain_name: "ncd0.harmony.mcd".to_string(), // TODO this must be set manually correctly
|
||||
// when setting up the opnsense firewall
|
||||
router: Arc::new(UnmanagedRouter::new(
|
||||
gateway_ip,
|
||||
Ipv4Cidr::new(lan_subnet, 24).unwrap(),
|
||||
)),
|
||||
load_balancer: opnsense.clone(),
|
||||
firewall: opnsense.clone(),
|
||||
tftp_server: opnsense.clone(),
|
||||
http_server: opnsense.clone(),
|
||||
dhcp_server: opnsense.clone(),
|
||||
dns_server: opnsense.clone(),
|
||||
control_plane: vec![
|
||||
LogicalHost {
|
||||
ip: ip!("192.168.33.20"),
|
||||
name: "cp0".to_string(),
|
||||
},
|
||||
LogicalHost {
|
||||
ip: ip!("192.168.33.21"),
|
||||
name: "cp1".to_string(),
|
||||
},
|
||||
LogicalHost {
|
||||
ip: ip!("192.168.33.22"),
|
||||
name: "cp2".to_string(),
|
||||
},
|
||||
],
|
||||
bootstrap_host: LogicalHost {
|
||||
ip: ip!("192.168.33.66"),
|
||||
name: "bootstrap".to_string(),
|
||||
},
|
||||
workers: vec![
|
||||
LogicalHost {
|
||||
ip: ip!("192.168.33.30"),
|
||||
name: "wk0".to_string(),
|
||||
},
|
||||
LogicalHost {
|
||||
ip: ip!("192.168.33.31"),
|
||||
name: "wk1".to_string(),
|
||||
},
|
||||
LogicalHost {
|
||||
ip: ip!("192.168.33.32"),
|
||||
name: "wk2".to_string(),
|
||||
},
|
||||
],
|
||||
switch: vec![],
|
||||
};
|
||||
|
||||
let inventory = Inventory {
|
||||
location: Location::new("I am mobile".to_string(), "earth".to_string()),
|
||||
switch: SwitchGroup::from([]),
|
||||
firewall: FirewallGroup::from([PhysicalHost::empty(HostCategory::Firewall)
|
||||
.management(Arc::new(OPNSenseManagementInterface::new()))]),
|
||||
storage_host: vec![],
|
||||
worker_host: vec![
|
||||
PhysicalHost::empty(HostCategory::Server)
|
||||
.mac_address(mac_address!("C4:62:37:02:61:0F")),
|
||||
PhysicalHost::empty(HostCategory::Server)
|
||||
.mac_address(mac_address!("C4:62:37:02:61:26")),
|
||||
// thisone
|
||||
// Then create the ipxe file
|
||||
// set the dns static leases
|
||||
// bootstrap nodes
|
||||
// start ceph cluster
|
||||
// try installation of lampscore
|
||||
// bingo?
|
||||
PhysicalHost::empty(HostCategory::Server)
|
||||
.mac_address(mac_address!("C4:62:37:02:61:70")),
|
||||
],
|
||||
control_plane_host: vec![
|
||||
PhysicalHost::empty(HostCategory::Server)
|
||||
.mac_address(mac_address!("C4:62:37:02:60:FA")),
|
||||
PhysicalHost::empty(HostCategory::Server)
|
||||
.mac_address(mac_address!("C4:62:37:02:61:1A")),
|
||||
PhysicalHost::empty(HostCategory::Server)
|
||||
.mac_address(mac_address!("C4:62:37:01:BC:68")),
|
||||
],
|
||||
};
|
||||
|
||||
// TODO regroup smaller scores in a larger one such as this
|
||||
// let okd_boostrap_preparation();
|
||||
|
||||
let bootstrap_dhcp_score = OKDBootstrapDhcpScore::new(&topology, &inventory);
|
||||
let bootstrap_load_balancer_score = OKDBootstrapLoadBalancerScore::new(&topology);
|
||||
let dhcp_score = OKDDhcpScore::new(&topology, &inventory);
|
||||
let dns_score = OKDDnsScore::new(&topology);
|
||||
let load_balancer_score =
|
||||
harmony::modules::okd::load_balancer::OKDLoadBalancerScore::new(&topology);
|
||||
|
||||
let tftp_score = TftpScore::new(Url::LocalFolder("./data/watchguard/tftpboot".to_string()));
|
||||
let http_score = HttpScore::new(Url::LocalFolder(
|
||||
"./data/watchguard/pxe-http-files".to_string(),
|
||||
));
|
||||
let ipxe_score = IpxeScore::new();
|
||||
let inventory = Inventory::autoload();
|
||||
let topology = HAClusterTopology::autoload();
|
||||
let mut maestro = Maestro::initialize(inventory, topology).await.unwrap();
|
||||
|
||||
maestro.register_all(vec![
|
||||
Box::new(dns_score),
|
||||
Box::new(bootstrap_dhcp_score),
|
||||
Box::new(bootstrap_load_balancer_score),
|
||||
Box::new(load_balancer_score),
|
||||
Box::new(tftp_score),
|
||||
Box::new(http_score),
|
||||
Box::new(ipxe_score),
|
||||
Box::new(dhcp_score),
|
||||
// ADD scores :
|
||||
// 1. OPNSense setup scores
|
||||
// 2. Bootstrap node setup
|
||||
// 3. Control plane setup
|
||||
// 4. Workers setup
|
||||
// 5. Various tools and apps setup
|
||||
Box::new(SuccessScore {}),
|
||||
Box::new(ErrorScore {}),
|
||||
Box::new(PanicScore {}),
|
||||
]);
|
||||
harmony_tui::init(maestro).await.unwrap();
|
||||
}
|
||||
|
||||
@@ -1,18 +0,0 @@
|
||||
[package]
|
||||
name = "example-tenant"
|
||||
edition = "2024"
|
||||
version.workspace = true
|
||||
readme.workspace = true
|
||||
license.workspace = true
|
||||
publish = false
|
||||
|
||||
[dependencies]
|
||||
harmony = { path = "../../harmony" }
|
||||
harmony_cli = { path = "../../harmony_cli" }
|
||||
harmony_types = { path = "../../harmony_types" }
|
||||
cidr = { workspace = true }
|
||||
tokio = { workspace = true }
|
||||
harmony_macros = { path = "../../harmony_macros" }
|
||||
log = { workspace = true }
|
||||
env_logger = { workspace = true }
|
||||
url = { workspace = true }
|
||||
@@ -1,41 +0,0 @@
|
||||
use harmony::{
|
||||
data::Id,
|
||||
inventory::Inventory,
|
||||
maestro::Maestro,
|
||||
modules::tenant::TenantScore,
|
||||
topology::{K8sAnywhereTopology, tenant::TenantConfig},
|
||||
};
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() {
|
||||
let tenant = TenantScore {
|
||||
config: TenantConfig {
|
||||
id: Id::from_str("test-tenant-id"),
|
||||
name: "testtenant".to_string(),
|
||||
..Default::default()
|
||||
},
|
||||
};
|
||||
|
||||
let mut maestro = Maestro::<K8sAnywhereTopology>::initialize(
|
||||
Inventory::autoload(),
|
||||
K8sAnywhereTopology::from_env(),
|
||||
)
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
maestro.register_all(vec![Box::new(tenant)]);
|
||||
harmony_cli::init(maestro, None).await.unwrap();
|
||||
}
|
||||
|
||||
// TODO write tests
|
||||
// - Create Tenant with default config mostly, make sure namespace is created
|
||||
// - deploy sample client/server app with nginx unprivileged and a service
|
||||
// - exec in the client pod and validate the following
|
||||
// - can reach internet
|
||||
// - can reach server pod
|
||||
// - can resolve dns queries to internet
|
||||
// - can resolve dns queries to services
|
||||
// - cannot reach services and pods in other namespaces
|
||||
// - Create Tenant with specific cpu/ram/storage requests / limits and make sure they are enforced by trying to
|
||||
// deploy a pod with lower requests/limits (accepted) and higher requests/limits (rejected)
|
||||
// - Create TenantCredentials and make sure they give only access to the correct tenant
|
||||
@@ -6,8 +6,6 @@ readme.workspace = true
|
||||
license.workspace = true
|
||||
|
||||
[dependencies]
|
||||
rand = "0.9"
|
||||
hex = "0.4"
|
||||
libredfish = "0.1.1"
|
||||
reqwest = { version = "0.11", features = ["blocking", "json"] }
|
||||
russh = "0.45.0"
|
||||
@@ -15,23 +13,23 @@ rust-ipmi = "0.1.1"
|
||||
semver = "1.0.23"
|
||||
serde = { version = "1.0.209", features = ["derive"] }
|
||||
serde_json = "1.0.127"
|
||||
tokio.workspace = true
|
||||
derive-new.workspace = true
|
||||
log.workspace = true
|
||||
env_logger.workspace = true
|
||||
async-trait.workspace = true
|
||||
cidr.workspace = true
|
||||
tokio = { workspace = true }
|
||||
derive-new = { workspace = true }
|
||||
log = { workspace = true }
|
||||
env_logger = { workspace = true }
|
||||
async-trait = { workspace = true }
|
||||
cidr = { workspace = true }
|
||||
opnsense-config = { path = "../opnsense-config" }
|
||||
opnsense-config-xml = { path = "../opnsense-config-xml" }
|
||||
harmony_macros = { path = "../harmony_macros" }
|
||||
harmony_types = { path = "../harmony_types" }
|
||||
uuid.workspace = true
|
||||
url.workspace = true
|
||||
kube.workspace = true
|
||||
k8s-openapi.workspace = true
|
||||
serde_yaml.workspace = true
|
||||
http.workspace = true
|
||||
serde-value.workspace = true
|
||||
uuid = { workspace = true }
|
||||
url = { workspace = true }
|
||||
kube = { workspace = true }
|
||||
k8s-openapi = { workspace = true }
|
||||
serde_yaml = { workspace = true }
|
||||
http = { workspace = true }
|
||||
serde-value = { workspace = true }
|
||||
inquire.workspace = true
|
||||
helm-wrapper-rs = "0.4.0"
|
||||
non-blank-string-rs = "1.0.4"
|
||||
@@ -40,16 +38,3 @@ directories = "6.0.0"
|
||||
lazy_static = "1.5.0"
|
||||
dockerfile_builder = "0.1.5"
|
||||
temp-file = "0.1.9"
|
||||
convert_case.workspace = true
|
||||
email_address = "0.2.9"
|
||||
chrono.workspace = true
|
||||
fqdn = { version = "0.4.6", features = [
|
||||
"domain-label-cannot-start-or-end-with-hyphen",
|
||||
"domain-label-length-limited-to-63",
|
||||
"domain-name-without-special-chars",
|
||||
"domain-name-length-limited-to-255",
|
||||
"punycode",
|
||||
"serde",
|
||||
] }
|
||||
temp-dir = "0.1.14"
|
||||
dyn-clone = "1.0.19"
|
||||
|
||||
@@ -6,8 +6,4 @@ lazy_static! {
|
||||
.unwrap()
|
||||
.data_dir()
|
||||
.join("harmony");
|
||||
pub static ref REGISTRY_URL: String =
|
||||
std::env::var("HARMONY_REGISTRY_URL").unwrap_or_else(|_| "hub.nationtech.io".to_string());
|
||||
pub static ref REGISTRY_PROJECT: String =
|
||||
std::env::var("HARMONY_REGISTRY_PROJECT").unwrap_or_else(|_| "harmony".to_string());
|
||||
}
|
||||
|
||||
@@ -1,24 +1,6 @@
|
||||
use rand::distr::Alphanumeric;
|
||||
use rand::distr::SampleString;
|
||||
use std::time::SystemTime;
|
||||
use std::time::UNIX_EPOCH;
|
||||
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
/// A unique identifier designed for ease of use.
///
/// You can pass it any String to use as an Id, or you can use the default format with `Id::default()`
///
/// The default format looks like this:
///
/// `462d4c_g2COgai`
///
/// The first part is the unix timestamp in hexadecimal, which makes Ids easily sortable by creation time.
/// The second part is a series of 7 random characters.
///
/// **It is not meant to be very secure or unique**; it is suitable for generating up to 10 000 items per
/// second with a reasonable collision rate of 0.000014 %, as calculated by this calculator: https://kevingal.com/apps/collision.html
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Id {
    value: String,
}
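// Editor's sketch, not part of this diff: how the `Id` API documented above is
// typically used. `Id::default()`, `Id::from_str` and the `Display` impl all
// appear in the hunks of this file shown below.
fn id_usage_sketch() {
    let generated = Id::default(); // e.g. "462d4c_g2COgai": hex timestamp + 7 random chars
    let explicit = Id::from_str("test-tenant-id"); // wraps any caller-provided string
    println!("generated={generated} explicit={explicit}");
}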
@@ -27,31 +9,4 @@ impl Id {
|
||||
pub fn from_string(value: String) -> Self {
|
||||
Self { value }
|
||||
}
|
||||
|
||||
pub fn from_str(value: &str) -> Self {
|
||||
Self::from_string(value.to_string())
|
||||
}
|
||||
}
|
||||
|
||||
impl std::fmt::Display for Id {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
f.write_str(&self.value)
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for Id {
|
||||
fn default() -> Self {
|
||||
let start = SystemTime::now();
|
||||
let since_the_epoch = start
|
||||
.duration_since(UNIX_EPOCH)
|
||||
.expect("Time went backwards");
|
||||
let timestamp = since_the_epoch.as_secs();
|
||||
|
||||
let hex_timestamp = format!("{:x}", timestamp & 0xffffff);
|
||||
|
||||
let random_part: String = Alphanumeric.sample_string(&mut rand::rng(), 7);
|
||||
|
||||
let value = format!("{}_{}", hex_timestamp, random_part);
|
||||
Self { value }
|
||||
}
|
||||
}
|
||||
|
||||
@@ -138,8 +138,7 @@ impl ManagementInterface for ManualManagementInterface {
|
||||
}
|
||||
|
||||
fn get_supported_protocol_names(&self) -> String {
|
||||
// todo!()
|
||||
"none".to_string()
|
||||
todo!()
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -15,12 +15,10 @@ pub enum InterpretName {
|
||||
LoadBalancer,
|
||||
Tftp,
|
||||
Http,
|
||||
Ipxe,
|
||||
Dummy,
|
||||
Panic,
|
||||
OPNSense,
|
||||
K3dInstallation,
|
||||
TenantInterpret,
|
||||
}
|
||||
|
||||
impl std::fmt::Display for InterpretName {
|
||||
@@ -31,20 +29,22 @@ impl std::fmt::Display for InterpretName {
|
||||
InterpretName::LoadBalancer => f.write_str("LoadBalancer"),
|
||||
InterpretName::Tftp => f.write_str("Tftp"),
|
||||
InterpretName::Http => f.write_str("Http"),
|
||||
InterpretName::Ipxe => f.write_str("iPXE"),
|
||||
InterpretName::Dummy => f.write_str("Dummy"),
|
||||
InterpretName::Panic => f.write_str("Panic"),
|
||||
InterpretName::OPNSense => f.write_str("OPNSense"),
|
||||
InterpretName::K3dInstallation => f.write_str("K3dInstallation"),
|
||||
InterpretName::TenantInterpret => f.write_str("Tenant"),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
pub trait Interpret<T>: std::fmt::Debug + Send {
|
||||
async fn execute(&self, inventory: &Inventory, topology: &T)
|
||||
-> Result<Outcome, InterpretError>;
|
||||
async fn execute(
|
||||
&self,
|
||||
inventory: &Inventory,
|
||||
topology: &T,
|
||||
profile: &String,
|
||||
) -> Result<Outcome, InterpretError>;
|
||||
fn get_name(&self) -> InterpretName;
|
||||
fn get_version(&self) -> Version;
|
||||
fn get_status(&self) -> InterpretStatus;
|
||||
|
||||
@@ -16,20 +16,23 @@ pub struct Maestro<T: Topology> {
|
||||
topology: T,
|
||||
scores: Arc<RwLock<ScoreVec<T>>>,
|
||||
topology_preparation_result: Mutex<Option<Outcome>>,
|
||||
profile: String,
|
||||
}
|
||||
|
||||
impl<T: Topology> Maestro<T> {
|
||||
pub fn new(inventory: Inventory, topology: T) -> Self {
|
||||
pub fn new(inventory: Inventory, topology: T, profile: String) -> Self {
|
||||
Self {
|
||||
inventory,
|
||||
topology,
|
||||
scores: Arc::new(RwLock::new(Vec::new())),
|
||||
topology_preparation_result: None.into(),
|
||||
profile,
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn initialize(inventory: Inventory, topology: T) -> Result<Self, InterpretError> {
|
||||
let instance = Self::new(inventory, topology);
|
||||
let profile = "dev".to_string(); // TODO: retrieve from env?
|
||||
let instance = Self::new(inventory, topology, profile);
|
||||
instance.prepare_topology().await?;
|
||||
Ok(instance)
|
||||
}
|
||||
@@ -78,9 +81,11 @@ impl<T: Topology> Maestro<T> {
|
||||
);
|
||||
}
|
||||
info!("Running score {score:?}");
|
||||
let interpret = score.create_interpret();
|
||||
let interpret = score.apply_profile(&self.profile).create_interpret();
|
||||
info!("Launching interpret {interpret:?}");
|
||||
let result = interpret.execute(&self.inventory, &self.topology).await;
|
||||
let result = interpret
|
||||
.execute(&self.inventory, &self.topology, &self.profile)
|
||||
.await;
|
||||
info!("Got result {result:?}");
|
||||
result
|
||||
}
|
||||
|
||||
@@ -8,6 +8,9 @@ use super::{interpret::Interpret, topology::Topology};
pub trait Score<T: Topology>:
    std::fmt::Debug + ScoreToString<T> + Send + Sync + CloneBoxScore<T> + SerializeScore<T>
{
    fn apply_profile(&self, profile: &String) -> Box<dyn Score<T>> {
        Box::new(self.clone())
    }
    fn create_interpret(&self) -> Box<dyn Interpret<T>>;
    fn name(&self) -> String;
}
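// Editor's sketch, hypothetical and not part of this diff: a score that tunes
// itself per profile by overriding the default `apply_profile` above. `MyAppScore`
// and its `replicas` field are illustrative; only the `Score` trait shape comes
// from this file, and `MyAppScore` is assumed to already satisfy the Score
// supertraits (Debug, Clone, Serialize, ...).
impl<T: Topology> Score<T> for MyAppScore {
    fn apply_profile(&self, profile: &String) -> Box<dyn Score<T>> {
        let mut adjusted = self.clone();
        if profile.as_str() == "dev" {
            adjusted.replicas = 1; // shrink the footprint for the "dev" profile Maestro defaults to
        }
        Box::new(adjusted)
    }

    fn create_interpret(&self) -> Box<dyn Interpret<T>> {
        todo!("build the interpret for MyAppScore")
    }

    fn name(&self) -> String {
        "MyAppScore".to_string()
    }
}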
|
||||
@@ -168,16 +168,6 @@ impl DhcpServer for HAClusterTopology {
|
||||
async fn commit_config(&self) -> Result<(), ExecutorError> {
|
||||
self.dhcp_server.commit_config().await
|
||||
}
|
||||
|
||||
async fn set_filename(&self, filename: &str) -> Result<(), ExecutorError> {
|
||||
self.dhcp_server.set_filename(filename).await
|
||||
}
|
||||
async fn set_filename64(&self, filename64: &str) -> Result<(), ExecutorError> {
|
||||
self.dhcp_server.set_filename64(filename64).await
|
||||
}
|
||||
async fn set_filenameipxe(&self, filenameipxe: &str) -> Result<(), ExecutorError> {
|
||||
self.dhcp_server.set_filenameipxe(filenameipxe).await
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
@@ -303,15 +293,6 @@ impl DhcpServer for DummyInfra {
|
||||
async fn set_boot_filename(&self, _boot_filename: &str) -> Result<(), ExecutorError> {
|
||||
unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA)
|
||||
}
|
||||
async fn set_filename(&self, _filename: &str) -> Result<(), ExecutorError> {
|
||||
unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA)
|
||||
}
|
||||
async fn set_filename64(&self, _filename: &str) -> Result<(), ExecutorError> {
|
||||
unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA)
|
||||
}
|
||||
async fn set_filenameipxe(&self, _filenameipxe: &str) -> Result<(), ExecutorError> {
|
||||
unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA)
|
||||
}
|
||||
fn get_ip(&self) -> IpAddress {
|
||||
unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA)
|
||||
}
|
||||
|
||||
@@ -1,12 +0,0 @@
|
||||
use async_trait::async_trait;
|
||||
|
||||
use crate::{interpret::InterpretError, inventory::Inventory};
|
||||
|
||||
#[async_trait]
|
||||
pub trait Installable<T>: Send + Sync {
|
||||
async fn ensure_installed(
|
||||
&self,
|
||||
inventory: &Inventory,
|
||||
topology: &T,
|
||||
) -> Result<(), InterpretError>;
|
||||
}
|
||||
@@ -1,11 +1,6 @@
|
||||
use derive_new::new;
|
||||
use k8s_openapi::{ClusterResourceScope, NamespaceResourceScope};
|
||||
use kube::{
|
||||
Api, Client, Config, Error, Resource,
|
||||
api::{Patch, PatchParams},
|
||||
config::{KubeConfigOptions, Kubeconfig},
|
||||
};
|
||||
use log::{debug, error, trace};
|
||||
use k8s_openapi::NamespaceResourceScope;
|
||||
use kube::{Api, Client, Error, Resource, api::PostParams};
|
||||
use serde::de::DeserializeOwned;
|
||||
|
||||
#[derive(new)]
|
||||
@@ -20,99 +15,43 @@ impl K8sClient {
|
||||
})
|
||||
}
|
||||
|
||||
/// Apply a resource in namespace
|
||||
///
|
||||
/// See `kubectl apply` for more information on the expected behavior of this function
|
||||
pub async fn apply<K>(&self, resource: &K, namespace: Option<&str>) -> Result<K, Error>
|
||||
pub async fn apply_all<
|
||||
K: Resource<Scope = NamespaceResourceScope>
|
||||
+ std::fmt::Debug
|
||||
+ Sync
|
||||
+ DeserializeOwned
|
||||
+ Default
|
||||
+ serde::Serialize
|
||||
+ Clone,
|
||||
>(
|
||||
&self,
|
||||
resource: &Vec<K>,
|
||||
) -> Result<Vec<K>, kube::Error>
|
||||
where
|
||||
K: Resource + Clone + std::fmt::Debug + DeserializeOwned + serde::Serialize,
|
||||
<K as Resource>::Scope: ApplyStrategy<K>,
|
||||
<K as kube::Resource>::DynamicType: Default,
|
||||
{
|
||||
debug!(
|
||||
"Applying resource {:?} with ns {:?}",
|
||||
resource.meta().name,
|
||||
namespace
|
||||
);
|
||||
trace!(
|
||||
"{:#}",
|
||||
serde_json::to_value(resource).unwrap_or(serde_json::Value::Null)
|
||||
);
|
||||
|
||||
let api: Api<K> =
|
||||
<<K as Resource>::Scope as ApplyStrategy<K>>::get_api(&self.client, namespace);
|
||||
// api.create(&PostParams::default(), &resource).await
|
||||
let patch_params = PatchParams::apply("harmony");
|
||||
let name = resource
|
||||
.meta()
|
||||
.name
|
||||
.as_ref()
|
||||
.expect("K8s Resource should have a name");
|
||||
api.patch(name, &patch_params, &Patch::Apply(resource))
|
||||
.await
|
||||
}
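// Editor's sketch, not part of this diff: applying a namespaced resource through
// the server-side-apply based `apply` above. Assumes an existing `K8sClient`;
// ConfigMap comes from k8s-openapi, which this crate already depends on.
async fn apply_example(client: &K8sClient) -> Result<(), kube::Error> {
    use k8s_openapi::api::core::v1::ConfigMap;

    let mut cm = ConfigMap::default();
    cm.metadata.name = Some("harmony-example".to_string());

    // Like `kubectl apply`: creates the object if missing, patches it otherwise,
    // using "harmony" as the server-side-apply field manager.
    client.apply(&cm, Some("default")).await?;
    Ok(())
}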
|
||||
pub async fn apply_many<K>(&self, resource: &Vec<K>, ns: Option<&str>) -> Result<Vec<K>, Error>
|
||||
where
|
||||
K: Resource + Clone + std::fmt::Debug + DeserializeOwned + serde::Serialize,
|
||||
<K as Resource>::Scope: ApplyStrategy<K>,
|
||||
<K as kube::Resource>::DynamicType: Default,
|
||||
{
|
||||
let mut result = Vec::new();
|
||||
let mut result = vec![];
|
||||
for r in resource.iter() {
|
||||
result.push(self.apply(r, ns).await?);
|
||||
let api: Api<K> = Api::all(self.client.clone());
|
||||
result.push(api.create(&PostParams::default(), &r).await?);
|
||||
}
|
||||
|
||||
Ok(result)
|
||||
}
|
||||
|
||||
pub(crate) async fn from_kubeconfig(path: &str) -> Option<K8sClient> {
|
||||
let k = match Kubeconfig::read_from(path) {
|
||||
Ok(k) => k,
|
||||
Err(e) => {
|
||||
error!("Failed to load kubeconfig from {path} : {e}");
|
||||
return None;
|
||||
}
|
||||
};
|
||||
Some(K8sClient::new(
|
||||
Client::try_from(
|
||||
Config::from_custom_kubeconfig(k, &KubeConfigOptions::default())
|
||||
.await
|
||||
.unwrap(),
|
||||
)
|
||||
.unwrap(),
|
||||
))
|
||||
}
|
||||
}
|
||||
|
||||
pub trait ApplyStrategy<K: Resource> {
|
||||
fn get_api(client: &Client, ns: Option<&str>) -> Api<K>;
|
||||
}
|
||||
|
||||
/// Implementation for all resources that are cluster-scoped.
|
||||
/// It will always use `Api::all` and ignore the namespace parameter.
|
||||
impl<K> ApplyStrategy<K> for ClusterResourceScope
|
||||
where
|
||||
K: Resource<Scope = ClusterResourceScope>,
|
||||
<K as kube::Resource>::DynamicType: Default,
|
||||
{
|
||||
fn get_api(client: &Client, _ns: Option<&str>) -> Api<K> {
|
||||
Api::all(client.clone())
|
||||
}
|
||||
}
|
||||
|
||||
/// Implementation for all resources that are namespace-scoped.
|
||||
/// It will use `Api::namespaced` if a namespace is provided, otherwise
|
||||
/// it falls back to the default namespace configured in your kubeconfig.
|
||||
impl<K> ApplyStrategy<K> for NamespaceResourceScope
|
||||
where
|
||||
K: Resource<Scope = NamespaceResourceScope>,
|
||||
<K as kube::Resource>::DynamicType: Default,
|
||||
{
|
||||
fn get_api(client: &Client, ns: Option<&str>) -> Api<K> {
|
||||
match ns {
|
||||
Some(ns) => Api::namespaced(client.clone(), ns),
|
||||
None => Api::default_namespaced(client.clone()),
|
||||
pub async fn apply_namespaced<K>(&self, resource: &Vec<K>) -> Result<K, Error>
|
||||
where
|
||||
K: Resource<Scope = NamespaceResourceScope>
|
||||
+ Clone
|
||||
+ std::fmt::Debug
|
||||
+ DeserializeOwned
|
||||
+ serde::Serialize
|
||||
+ Default,
|
||||
<K as kube::Resource>::DynamicType: Default,
|
||||
{
|
||||
for r in resource.iter() {
|
||||
let api: Api<K> = Api::default_namespaced(self.client.clone());
|
||||
api.create(&PostParams::default(), &r).await?;
|
||||
}
|
||||
todo!("")
|
||||
}
|
||||
}
|
||||
|
||||
@@ -2,11 +2,10 @@ use std::{process::Command, sync::Arc};
|
||||
|
||||
use async_trait::async_trait;
|
||||
use inquire::Confirm;
|
||||
use log::{debug, info, warn};
|
||||
use log::{info, warn};
|
||||
use tokio::sync::OnceCell;
|
||||
|
||||
use crate::{
|
||||
executors::ExecutorError,
|
||||
interpret::{InterpretError, Outcome},
|
||||
inventory::Inventory,
|
||||
maestro::Maestro,
|
||||
@@ -14,11 +13,7 @@ use crate::{
|
||||
topology::LocalhostTopology,
|
||||
};
|
||||
|
||||
use super::{
|
||||
HelmCommand, K8sclient, Topology,
|
||||
k8s::K8sClient,
|
||||
tenant::{TenantConfig, TenantManager, k8s::K8sTenantManager},
|
||||
};
|
||||
use super::{HelmCommand, K8sclient, Topology, k8s::K8sClient};
|
||||
|
||||
struct K8sState {
|
||||
client: Arc<K8sClient>,
|
||||
@@ -26,16 +21,12 @@ struct K8sState {
|
||||
message: String,
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
enum K8sSource {
|
||||
LocalK3d,
|
||||
Kubeconfig,
|
||||
}
|
||||
|
||||
pub struct K8sAnywhereTopology {
|
||||
k8s_state: OnceCell<Option<K8sState>>,
|
||||
tenant_manager: OnceCell<K8sTenantManager>,
|
||||
config: K8sAnywhereConfig,
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
@@ -56,19 +47,9 @@ impl K8sclient for K8sAnywhereTopology {
|
||||
}
|
||||
|
||||
impl K8sAnywhereTopology {
|
||||
pub fn from_env() -> Self {
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
k8s_state: OnceCell::new(),
|
||||
tenant_manager: OnceCell::new(),
|
||||
config: K8sAnywhereConfig::from_env(),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn with_config(config: K8sAnywhereConfig) -> Self {
|
||||
Self {
|
||||
k8s_state: OnceCell::new(),
|
||||
tenant_manager: OnceCell::new(),
|
||||
config,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -94,7 +75,7 @@ impl K8sAnywhereTopology {
|
||||
}
|
||||
|
||||
async fn try_load_kubeconfig(&self, path: &str) -> Option<K8sClient> {
|
||||
K8sClient::from_kubeconfig(path).await
|
||||
todo!("Use kube-rs to load kubeconfig at path {path}");
|
||||
}
|
||||
|
||||
fn get_k3d_installation_score(&self) -> K3DInstallationScore {
|
||||
@@ -109,29 +90,25 @@ impl K8sAnywhereTopology {
|
||||
}
|
||||
|
||||
async fn try_get_or_install_k8s_client(&self) -> Result<Option<K8sState>, InterpretError> {
|
||||
let k8s_anywhere_config = &self.config;
|
||||
let k8s_anywhere_config = K8sAnywhereConfig {
|
||||
kubeconfig: std::env::var("HARMONY_KUBECONFIG")
|
||||
.ok()
|
||||
.map(|v| v.to_string()),
|
||||
use_system_kubeconfig: std::env::var("HARMONY_USE_SYSTEM_KUBECONFIG")
|
||||
.map_or_else(|_| false, |v| v.parse().ok().unwrap_or(false)),
|
||||
autoinstall: std::env::var("HARMONY_AUTOINSTALL")
|
||||
.map_or_else(|_| false, |v| v.parse().ok().unwrap_or(false)),
|
||||
};
|
||||
|
||||
if let Some(kubeconfig) = &k8s_anywhere_config.kubeconfig {
|
||||
debug!("Loading kubeconfig {kubeconfig}");
|
||||
match self.try_load_kubeconfig(&kubeconfig).await {
|
||||
Some(client) => {
|
||||
return Ok(Some(K8sState {
|
||||
client: Arc::new(client),
|
||||
_source: K8sSource::Kubeconfig,
|
||||
message: format!("Loaded k8s client from kubeconfig {kubeconfig}"),
|
||||
}));
|
||||
}
|
||||
None => {
|
||||
return Err(InterpretError::new(format!(
|
||||
"Failed to load kubeconfig from {kubeconfig}"
|
||||
)));
|
||||
}
|
||||
if k8s_anywhere_config.use_system_kubeconfig {
|
||||
match self.try_load_system_kubeconfig().await {
|
||||
Some(_client) => todo!(),
|
||||
None => todo!(),
|
||||
}
|
||||
}
|
||||
|
||||
if k8s_anywhere_config.use_system_kubeconfig {
|
||||
debug!("Loading system kubeconfig");
|
||||
match self.try_load_system_kubeconfig().await {
|
||||
if let Some(kubeconfig) = k8s_anywhere_config.kubeconfig {
|
||||
match self.try_load_kubeconfig(&kubeconfig).await {
|
||||
Some(_client) => todo!(),
|
||||
None => todo!(),
|
||||
}
|
||||
@@ -173,64 +150,27 @@ impl K8sAnywhereTopology {
|
||||
|
||||
Ok(Some(state))
|
||||
}
|
||||
|
||||
async fn ensure_k8s_tenant_manager(&self) -> Result<(), String> {
|
||||
if let Some(_) = self.tenant_manager.get() {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
self.tenant_manager
|
||||
.get_or_try_init(async || -> Result<K8sTenantManager, String> {
|
||||
let k8s_client = self.k8s_client().await?;
|
||||
Ok(K8sTenantManager::new(k8s_client))
|
||||
})
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn get_k8s_tenant_manager(&self) -> Result<&K8sTenantManager, ExecutorError> {
|
||||
match self.tenant_manager.get() {
|
||||
Some(t) => Ok(t),
|
||||
None => Err(ExecutorError::UnexpectedError(
|
||||
"K8sTenantManager not available".to_string(),
|
||||
)),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub struct K8sAnywhereConfig {
struct K8sAnywhereConfig {
    /// The path of the KUBECONFIG file that Harmony should use to interact with the Kubernetes
    /// cluster
    ///
    /// Default : None
    pub kubeconfig: Option<String>,
    kubeconfig: Option<String>,

    /// Whether to use the system KUBECONFIG, either the environment variable or the file in the
    /// default or configured location
    ///
    /// Default : false
    pub use_system_kubeconfig: bool,
    use_system_kubeconfig: bool,

    /// Whether to automatically install a Kubernetes cluster
    ///
    /// When enabled, autoinstall will set up a K3D cluster on the localhost. https://k3d.io/stable/
    ///
    /// Default: true
    pub autoinstall: bool,
}
|
||||
|
||||
impl K8sAnywhereConfig {
|
||||
fn from_env() -> Self {
|
||||
Self {
|
||||
kubeconfig: std::env::var("KUBECONFIG").ok().map(|v| v.to_string()),
|
||||
use_system_kubeconfig: std::env::var("HARMONY_USE_SYSTEM_KUBECONFIG")
|
||||
.map_or_else(|_| false, |v| v.parse().ok().unwrap_or(false)),
|
||||
autoinstall: std::env::var("HARMONY_AUTOINSTALL")
|
||||
.map_or_else(|_| false, |v| v.parse().ok().unwrap_or(false)),
|
||||
}
|
||||
}
|
||||
autoinstall: bool,
|
||||
}
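// Editor's sketch, not part of this diff: dumping the environment variables read
// by the configuration code above (`KUBECONFIG`, `HARMONY_USE_SYSTEM_KUBECONFIG`,
// `HARMONY_AUTOINSTALL`) before building the config, useful when debugging which
// kubeconfig source will be picked.
fn debug_k8s_anywhere_env() {
    for var in ["KUBECONFIG", "HARMONY_USE_SYSTEM_KUBECONFIG", "HARMONY_AUTOINSTALL"] {
        println!("{var}={:?}", std::env::var(var).ok());
    }
    let config = K8sAnywhereConfig::from_env();
    println!("autoinstall={}", config.autoinstall);
}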
|
||||
#[async_trait]
|
||||
@@ -249,10 +189,6 @@ impl Topology for K8sAnywhereTopology {
|
||||
"No K8s client could be found or installed".to_string(),
|
||||
))?;
|
||||
|
||||
self.ensure_k8s_tenant_manager()
|
||||
.await
|
||||
.map_err(|e| InterpretError::new(e))?;
|
||||
|
||||
match self.is_helm_available() {
|
||||
Ok(()) => Ok(Outcome::success(format!(
|
||||
"{} + helm available",
|
||||
@@ -264,12 +200,3 @@ impl Topology for K8sAnywhereTopology {
|
||||
}
|
||||
|
||||
impl HelmCommand for K8sAnywhereTopology {}
|
||||
|
||||
#[async_trait]
|
||||
impl TenantManager for K8sAnywhereTopology {
|
||||
async fn provision_tenant(&self, config: &TenantConfig) -> Result<(), ExecutorError> {
|
||||
self.get_k8s_tenant_manager()?
|
||||
.provision_tenant(config)
|
||||
.await
|
||||
}
|
||||
}
|
||||
|
||||
@@ -7,12 +7,6 @@ use serde::Serialize;
|
||||
use super::{IpAddress, LogicalHost};
|
||||
use crate::executors::ExecutorError;
|
||||
|
||||
impl std::fmt::Debug for dyn LoadBalancer {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
f.write_fmt(format_args!("LoadBalancer {}", self.get_ip()))
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
pub trait LoadBalancer: Send + Sync {
|
||||
fn get_ip(&self) -> IpAddress;
|
||||
@@ -38,6 +32,11 @@ pub trait LoadBalancer: Send + Sync {
|
||||
}
|
||||
}
|
||||
|
||||
impl std::fmt::Debug for dyn LoadBalancer {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
f.write_fmt(format_args!("LoadBalancer {}", self.get_ip()))
|
||||
}
|
||||
}
|
||||
#[derive(Debug, PartialEq, Clone, Serialize)]
|
||||
pub struct LoadBalancerService {
|
||||
pub backend_servers: Vec<BackendServer>,
|
||||
|
||||
@@ -1,11 +1,8 @@
|
||||
mod ha_cluster;
|
||||
mod host_binding;
|
||||
mod http;
|
||||
pub mod installable;
|
||||
mod k8s_anywhere;
|
||||
mod localhost;
|
||||
pub mod oberservability;
|
||||
pub mod tenant;
|
||||
pub use k8s_anywhere::*;
|
||||
pub use localhost::*;
|
||||
pub mod k8s;
|
||||
|
||||
@@ -53,9 +53,6 @@ pub trait DhcpServer: Send + Sync + std::fmt::Debug {
|
||||
async fn list_static_mappings(&self) -> Vec<(MacAddress, IpAddress)>;
|
||||
async fn set_next_server(&self, ip: IpAddress) -> Result<(), ExecutorError>;
|
||||
async fn set_boot_filename(&self, boot_filename: &str) -> Result<(), ExecutorError>;
|
||||
async fn set_filename(&self, filename: &str) -> Result<(), ExecutorError>;
|
||||
async fn set_filename64(&self, filename64: &str) -> Result<(), ExecutorError>;
|
||||
async fn set_filenameipxe(&self, filenameipxe: &str) -> Result<(), ExecutorError>;
|
||||
fn get_ip(&self) -> IpAddress;
|
||||
fn get_host(&self) -> LogicalHost;
|
||||
async fn commit_config(&self) -> Result<(), ExecutorError>;
|
||||
|
||||
@@ -1 +0,0 @@
|
||||
pub mod monitoring;
|
||||
@@ -1,76 +0,0 @@
|
||||
use async_trait::async_trait;
|
||||
use log::debug;
|
||||
|
||||
use crate::{
|
||||
data::{Id, Version},
|
||||
interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
|
||||
inventory::Inventory,
|
||||
topology::{Topology, installable::Installable},
|
||||
};
|
||||
|
||||
#[async_trait]
|
||||
pub trait AlertSender: Send + Sync + std::fmt::Debug {
|
||||
fn name(&self) -> String;
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct AlertingInterpret<S: AlertSender> {
|
||||
pub sender: S,
|
||||
pub receivers: Vec<Box<dyn AlertReceiver<S>>>,
|
||||
pub rules: Vec<Box<dyn AlertRule<S>>>,
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl<S: AlertSender + Installable<T>, T: Topology> Interpret<T> for AlertingInterpret<S> {
|
||||
async fn execute(
|
||||
&self,
|
||||
inventory: &Inventory,
|
||||
topology: &T,
|
||||
) -> Result<Outcome, InterpretError> {
|
||||
for receiver in self.receivers.iter() {
|
||||
receiver.install(&self.sender).await?;
|
||||
}
|
||||
for rule in self.rules.iter() {
|
||||
debug!("installing rule: {:#?}", rule);
|
||||
rule.install(&self.sender).await?;
|
||||
}
|
||||
self.sender.ensure_installed(inventory, topology).await?;
|
||||
Ok(Outcome::success(format!(
|
||||
"successfully installed alert sender {}",
|
||||
self.sender.name()
|
||||
)))
|
||||
}
|
||||
|
||||
fn get_name(&self) -> InterpretName {
|
||||
todo!()
|
||||
}
|
||||
|
||||
fn get_version(&self) -> Version {
|
||||
todo!()
|
||||
}
|
||||
|
||||
fn get_status(&self) -> InterpretStatus {
|
||||
todo!()
|
||||
}
|
||||
|
||||
fn get_children(&self) -> Vec<Id> {
|
||||
todo!()
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
pub trait AlertReceiver<S: AlertSender>: std::fmt::Debug + Send + Sync {
|
||||
async fn install(&self, sender: &S) -> Result<Outcome, InterpretError>;
|
||||
fn clone_box(&self) -> Box<dyn AlertReceiver<S>>;
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
pub trait AlertRule<S: AlertSender>: std::fmt::Debug + Send + Sync {
|
||||
async fn install(&self, sender: &S) -> Result<Outcome, InterpretError>;
|
||||
fn clone_box(&self) -> Box<dyn AlertRule<S>>;
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
pub trait ScrapeTarger<S: AlertSender> {
|
||||
async fn install(&self, sender: &S) -> Result<(), InterpretError>;
|
||||
}
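// Editor's sketch, hypothetical and not part of this diff: a minimal receiver
// wired into the AlertingInterpret flow above. `WebhookReceiver` and its `url`
// field are illustrative; only the trait shapes come from this file.
#[derive(Debug, Clone)]
struct WebhookReceiver {
    url: String,
}

#[async_trait]
impl<S: AlertSender> AlertReceiver<S> for WebhookReceiver {
    async fn install(&self, sender: &S) -> Result<Outcome, InterpretError> {
        Ok(Outcome::success(format!(
            "registered webhook {} on sender {}",
            self.url,
            sender.name()
        )))
    }

    fn clone_box(&self) -> Box<dyn AlertReceiver<S>> {
        Box::new(self.clone())
    }
}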
@@ -1,327 +0,0 @@
|
||||
use std::sync::Arc;
|
||||
|
||||
use crate::{
|
||||
executors::ExecutorError,
|
||||
topology::k8s::{ApplyStrategy, K8sClient},
|
||||
};
|
||||
use async_trait::async_trait;
|
||||
use derive_new::new;
|
||||
use k8s_openapi::{
|
||||
api::{
|
||||
core::v1::{Namespace, ResourceQuota},
|
||||
networking::v1::{
|
||||
NetworkPolicy, NetworkPolicyEgressRule, NetworkPolicyIngressRule, NetworkPolicyPort,
|
||||
},
|
||||
},
|
||||
apimachinery::pkg::util::intstr::IntOrString,
|
||||
};
|
||||
use kube::Resource;
|
||||
use log::{debug, info, warn};
|
||||
use serde::de::DeserializeOwned;
|
||||
use serde_json::json;
|
||||
|
||||
use super::{TenantConfig, TenantManager};
|
||||
|
||||
#[derive(new)]
|
||||
pub struct K8sTenantManager {
|
||||
k8s_client: Arc<K8sClient>,
|
||||
}
|
||||
|
||||
impl K8sTenantManager {
|
||||
fn get_namespace_name(&self, config: &TenantConfig) -> String {
|
||||
config.name.clone()
|
||||
}
|
||||
|
||||
fn ensure_constraints(&self, _namespace: &Namespace) -> Result<(), ExecutorError> {
|
||||
warn!("Validate that when tenant already exists (by id) that name has not changed");
|
||||
warn!("Make sure other Tenant constraints are respected by this k8s implementation");
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn apply_resource<
|
||||
K: Resource + std::fmt::Debug + Sync + DeserializeOwned + Default + serde::Serialize + Clone,
|
||||
>(
|
||||
&self,
|
||||
mut resource: K,
|
||||
config: &TenantConfig,
|
||||
) -> Result<K, ExecutorError>
|
||||
where
|
||||
<K as kube::Resource>::DynamicType: Default,
|
||||
<K as kube::Resource>::Scope: ApplyStrategy<K>,
|
||||
{
|
||||
self.apply_labels(&mut resource, config);
|
||||
self.k8s_client
|
||||
.apply(&resource, Some(&self.get_namespace_name(config)))
|
||||
.await
|
||||
.map_err(|e| {
|
||||
ExecutorError::UnexpectedError(format!("Could not create Tenant resource : {e}"))
|
||||
})
|
||||
}
|
||||
|
||||
fn apply_labels<K: Resource>(&self, resource: &mut K, config: &TenantConfig) {
|
||||
let labels = resource.meta_mut().labels.get_or_insert_default();
|
||||
labels.insert(
|
||||
"app.kubernetes.io/managed-by".to_string(),
|
||||
"harmony".to_string(),
|
||||
);
|
||||
labels.insert("harmony/tenant-id".to_string(), config.id.to_string());
|
||||
labels.insert("harmony/tenant-name".to_string(), config.name.clone());
|
||||
}
|
||||
|
||||
fn build_namespace(&self, config: &TenantConfig) -> Result<Namespace, ExecutorError> {
|
||||
let namespace = json!(
|
||||
{
|
||||
"apiVersion": "v1",
|
||||
"kind": "Namespace",
|
||||
"metadata": {
|
||||
"labels": {
|
||||
"harmony.nationtech.io/tenant.id": config.id.to_string(),
|
||||
"harmony.nationtech.io/tenant.name": config.name,
|
||||
},
|
||||
"name": self.get_namespace_name(config),
|
||||
},
|
||||
}
|
||||
);
|
||||
serde_json::from_value(namespace).map_err(|e| {
|
||||
ExecutorError::ConfigurationError(format!(
|
||||
"Could not build TenantManager Namespace. {}",
|
||||
e
|
||||
))
|
||||
})
|
||||
}
|
||||
|
||||
fn build_resource_quota(&self, config: &TenantConfig) -> Result<ResourceQuota, ExecutorError> {
|
||||
let resource_quota = json!(
|
||||
{
|
||||
"apiVersion": "v1",
|
||||
"kind": "ResourceQuota",
|
||||
"metadata": {
|
||||
"name": format!("{}-quota", config.name),
|
||||
"labels": {
|
||||
"harmony.nationtech.io/tenant.id": config.id.to_string(),
|
||||
"harmony.nationtech.io/tenant.name": config.name,
|
||||
},
|
||||
"namespace": self.get_namespace_name(config),
|
||||
},
|
||||
"spec": {
|
||||
"hard": {
|
||||
"limits.cpu": format!("{:.0}",config.resource_limits.cpu_limit_cores),
|
||||
"limits.memory": format!("{:.3}Gi", config.resource_limits.memory_limit_gb),
|
||||
"requests.cpu": format!("{:.0}",config.resource_limits.cpu_request_cores),
|
||||
"requests.memory": format!("{:.3}Gi", config.resource_limits.memory_request_gb),
|
||||
"requests.storage": format!("{:.3}Gi", config.resource_limits.storage_total_gb),
|
||||
"pods": "20",
|
||||
"services": "10",
|
||||
"configmaps": "30",
|
||||
"secrets": "30",
|
||||
"persistentvolumeclaims": "15",
|
||||
"services.loadbalancers": "2",
|
||||
"services.nodeports": "5",
|
||||
"limits.ephemeral-storage": "10Gi",
|
||||
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
);
|
||||
serde_json::from_value(resource_quota).map_err(|e| {
|
||||
ExecutorError::ConfigurationError(format!(
|
||||
"Could not build TenantManager ResourceQuota. {}",
|
||||
e
|
||||
))
|
||||
})
|
||||
}
|
||||
|
||||
fn build_network_policy(&self, config: &TenantConfig) -> Result<NetworkPolicy, ExecutorError> {
|
||||
let network_policy = json!({
|
||||
"apiVersion": "networking.k8s.io/v1",
|
||||
"kind": "NetworkPolicy",
|
||||
"metadata": {
|
||||
"name": format!("{}-network-policy", config.name),
|
||||
},
|
||||
"spec": {
|
||||
"podSelector": {},
|
||||
"egress": [
|
||||
{ "to": [ {"podSelector": {}}]},
|
||||
{ "to":
|
||||
[
|
||||
{
|
||||
"podSelector": {},
|
||||
"namespaceSelector": {
|
||||
"matchLabels": {
|
||||
"kubernetes.io/metadata.name":"openshift-dns"
|
||||
}
|
||||
}
|
||||
},
|
||||
]
|
||||
},
|
||||
{ "to": [
|
||||
{
|
||||
"ipBlock": {
|
||||
|
||||
"cidr": "0.0.0.0/0",
|
||||
// See https://en.wikipedia.org/wiki/Reserved_IP_addresses
|
||||
"except": [
|
||||
"10.0.0.0/8",
|
||||
"172.16.0.0/12",
|
||||
"192.168.0.0/16",
|
||||
"192.0.0.0/24",
|
||||
"192.0.2.0/24",
|
||||
"192.88.99.0/24",
|
||||
"192.18.0.0/15",
|
||||
"198.51.100.0/24",
|
||||
"169.254.0.0/16",
|
||||
"203.0.113.0/24",
|
||||
"127.0.0.0/8",
|
||||
|
||||
// Not sure we should block this one as it is
|
||||
// used for multicast. But better block more than less.
|
||||
"224.0.0.0/4",
|
||||
"240.0.0.0/4",
|
||||
"100.64.0.0/10",
|
||||
"233.252.0.0/24",
|
||||
"0.0.0.0/8",
|
||||
],
|
||||
}
|
||||
}
|
||||
]
|
||||
},
|
||||
],
|
||||
"ingress": [
|
||||
{ "from": [ {"podSelector": {}}]}
|
||||
],
|
||||
"policyTypes": [
|
||||
"Ingress", "Egress",
|
||||
]
|
||||
}
|
||||
});
|
||||
|
||||
let mut network_policy: NetworkPolicy =
|
||||
serde_json::from_value(network_policy).map_err(|e| {
|
||||
ExecutorError::ConfigurationError(format!(
|
||||
"Could not build TenantManager NetworkPolicy. {}",
|
||||
e
|
||||
))
|
||||
})?;
|
||||
|
||||
config
|
||||
.network_policy
|
||||
.additional_allowed_cidr_ingress
|
||||
.iter()
|
||||
.try_for_each(|c| -> Result<(), ExecutorError> {
|
||||
let cidr_list: Vec<serde_json::Value> =
|
||||
c.0.iter()
|
||||
.map(|ci| {
|
||||
json!({
|
||||
"ipBlock": {
|
||||
"cidr": ci.to_string(),
|
||||
}
|
||||
})
|
||||
})
|
||||
.collect();
|
||||
let rule = serde_json::from_value::<NetworkPolicyIngressRule>(json!({
|
||||
"from": cidr_list
|
||||
}))
|
||||
.map_err(|e| {
|
||||
ExecutorError::ConfigurationError(format!(
|
||||
"Could not build TenantManager NetworkPolicyIngressRule. {}",
|
||||
e
|
||||
))
|
||||
})?;
|
||||
|
||||
network_policy
|
||||
.spec
|
||||
.as_mut()
|
||||
.unwrap()
|
||||
.ingress
|
||||
.as_mut()
|
||||
.unwrap()
|
||||
.push(rule);
|
||||
Ok(())
|
||||
})?;
|
||||
|
||||
config
|
||||
.network_policy
|
||||
.additional_allowed_cidr_egress
|
||||
.iter()
|
||||
.try_for_each(|c| -> Result<(), ExecutorError> {
|
||||
let cidr_list: Vec<serde_json::Value> =
|
||||
c.0.iter()
|
||||
.map(|ci| {
|
||||
json!({
|
||||
"ipBlock": {
|
||||
"cidr": ci.to_string(),
|
||||
}
|
||||
})
|
||||
})
|
||||
.collect();
|
||||
let ports: Option<Vec<NetworkPolicyPort>> =
|
||||
c.1.as_ref().map(|spec| match &spec.data {
|
||||
super::PortSpecData::SinglePort(port) => vec![NetworkPolicyPort {
|
||||
port: Some(IntOrString::Int(port.clone().into())),
|
||||
..Default::default()
|
||||
}],
|
||||
super::PortSpecData::PortRange(start, end) => vec![NetworkPolicyPort {
|
||||
port: Some(IntOrString::Int(start.clone().into())),
|
||||
end_port: Some(end.clone().into()),
|
||||
protocol: None, // Not currently supported by Harmony
|
||||
}],
|
||||
|
||||
super::PortSpecData::ListOfPorts(items) => items
|
||||
.iter()
|
||||
.map(|i| NetworkPolicyPort {
|
||||
port: Some(IntOrString::Int(i.clone().into())),
|
||||
..Default::default()
|
||||
})
|
||||
.collect(),
|
||||
});
|
||||
let rule = serde_json::from_value::<NetworkPolicyEgressRule>(json!({
|
||||
"to": cidr_list,
|
||||
"ports": ports,
|
||||
}))
|
||||
.map_err(|e| {
|
||||
ExecutorError::ConfigurationError(format!(
|
||||
"Could not build TenantManager NetworkPolicyEgressRule. {}",
|
||||
e
|
||||
))
|
||||
})?;
|
||||
network_policy
|
||||
.spec
|
||||
.as_mut()
|
||||
.unwrap()
|
||||
.egress
|
||||
.as_mut()
|
||||
.unwrap()
|
||||
.push(rule);
|
||||
Ok(())
|
||||
})?;
|
||||
|
||||
Ok(network_policy)
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl TenantManager for K8sTenantManager {
|
||||
async fn provision_tenant(&self, config: &TenantConfig) -> Result<(), ExecutorError> {
|
||||
let namespace = self.build_namespace(config)?;
|
||||
let resource_quota = self.build_resource_quota(config)?;
|
||||
let network_policy = self.build_network_policy(config)?;
|
||||
|
||||
self.ensure_constraints(&namespace)?;
|
||||
|
||||
debug!("Creating namespace for tenant {}", config.name);
|
||||
self.apply_resource(namespace, config).await?;
|
||||
|
||||
debug!("Creating resource_quota for tenant {}", config.name);
|
||||
self.apply_resource(resource_quota, config).await?;
|
||||
|
||||
debug!("Creating network_policy for tenant {}", config.name);
|
||||
self.apply_resource(network_policy, config).await?;
|
||||
|
||||
info!(
|
||||
"Success provisionning K8s tenant id {} name {}",
|
||||
config.id, config.name
|
||||
);
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
@@ -1,18 +0,0 @@
|
||||
use super::*;
|
||||
use async_trait::async_trait;
|
||||
|
||||
use crate::executors::ExecutorError;
|
||||
|
||||
#[async_trait]
|
||||
pub trait TenantManager {
    /// Creates or updates a tenant based on the provided configuration.
    /// This operation should be idempotent; if a tenant with the same `config.id`
    /// already exists and matches the config, it will succeed without changes.
    ///
    /// If it exists but differs, it will be updated, or an error will be returned if the
    /// update action is not supported.
    ///
    /// # Arguments
    /// * `config`: The desired configuration for the new tenant.
    async fn provision_tenant(&self, config: &TenantConfig) -> Result<(), ExecutorError>;
}
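// Editor's sketch, not part of this diff: calling provision_tenant idempotently,
// as documented above. Assumes some `manager: &dyn TenantManager`; TenantConfig
// and ExecutorError are already in scope in this module.
async fn provision_twice(manager: &dyn TenantManager) -> Result<(), ExecutorError> {
    let config = TenantConfig {
        name: "client-alpha".to_string(),
        ..Default::default()
    };
    manager.provision_tenant(&config).await?; // creates the tenant
    manager.provision_tenant(&config).await?; // no-op: same id, same config
    Ok(())
}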
@@ -1,224 +0,0 @@
|
||||
pub mod k8s;
|
||||
mod manager;
|
||||
use std::str::FromStr;
|
||||
|
||||
pub use manager::*;
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use crate::data::Id;
|
||||
|
||||
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] // Assuming serde for Scores
|
||||
pub struct TenantConfig {
|
||||
/// This will be used as the primary unique identifier for management operations and will never
|
||||
/// change for the entire lifetime of the tenant
|
||||
pub id: Id,
|
||||
|
||||
/// A human-readable name for the tenant (e.g., "client-alpha", "project-phoenix").
|
||||
pub name: String,
|
||||
|
||||
/// Desired resource allocations and limits for the tenant.
|
||||
pub resource_limits: ResourceLimits,
|
||||
|
||||
/// High-level network isolation policies for the tenant.
|
||||
pub network_policy: TenantNetworkPolicy,
|
||||
}
|
||||
|
||||
impl Default for TenantConfig {
|
||||
fn default() -> Self {
|
||||
let id = Id::default();
|
||||
Self {
|
||||
name: format!("tenant_{id}"),
|
||||
id,
|
||||
resource_limits: ResourceLimits::default(),
|
||||
network_policy: TenantNetworkPolicy {
|
||||
default_inter_tenant_ingress: InterTenantIngressPolicy::DenyAll,
|
||||
default_internet_egress: InternetEgressPolicy::AllowAll,
|
||||
additional_allowed_cidr_ingress: vec![],
|
||||
additional_allowed_cidr_egress: vec![],
|
||||
},
|
||||
}
|
||||
}
|
||||
}
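// Editor's sketch, not part of this diff: a tenant that keeps the default network
// policy but overrides the resource envelope. Field names come from the structs
// defined in this module.
fn custom_tenant_config() -> TenantConfig {
    TenantConfig {
        name: "project-phoenix".to_string(),
        resource_limits: ResourceLimits {
            cpu_request_cores: 2.0,
            cpu_limit_cores: 4.0,
            memory_request_gb: 8.0,
            memory_limit_gb: 16.0,
            storage_total_gb: 100.0,
        },
        ..Default::default()
    }
}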
|
||||
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
|
||||
pub struct ResourceLimits {
|
||||
/// Requested/guaranteed CPU cores (e.g., 2.0).
|
||||
pub cpu_request_cores: f32,
|
||||
/// Maximum CPU cores the tenant can burst to (e.g., 4.0).
|
||||
pub cpu_limit_cores: f32,
|
||||
|
||||
/// Requested/guaranteed memory in Gigabytes (e.g., 8.0).
|
||||
pub memory_request_gb: f32,
|
||||
/// Maximum memory in Gigabytes tenant can burst to (e.g., 16.0).
|
||||
pub memory_limit_gb: f32,
|
||||
|
||||
/// Total persistent storage allocation in Gigabytes across all volumes.
|
||||
pub storage_total_gb: f32,
|
||||
}
|
||||
|
||||
impl Default for ResourceLimits {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
cpu_request_cores: 4.0,
|
||||
cpu_limit_cores: 4.0,
|
||||
memory_request_gb: 4.0,
|
||||
memory_limit_gb: 4.0,
|
||||
storage_total_gb: 20.0,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
|
||||
pub struct TenantNetworkPolicy {
|
||||
/// Policy for ingress traffic originating from other tenants within the same Harmony-managed environment.
|
||||
pub default_inter_tenant_ingress: InterTenantIngressPolicy,
|
||||
|
||||
/// Policy for egress traffic destined for the public internet.
|
||||
pub default_internet_egress: InternetEgressPolicy,
|
||||
|
||||
pub additional_allowed_cidr_ingress: Vec<(Vec<cidr::Ipv4Cidr>, Option<PortSpec>)>,
|
||||
pub additional_allowed_cidr_egress: Vec<(Vec<cidr::Ipv4Cidr>, Option<PortSpec>)>,
|
||||
}
|
||||
|
||||
impl Default for TenantNetworkPolicy {
|
||||
fn default() -> Self {
|
||||
TenantNetworkPolicy {
|
||||
default_inter_tenant_ingress: InterTenantIngressPolicy::DenyAll,
|
||||
default_internet_egress: InternetEgressPolicy::DenyAll,
|
||||
additional_allowed_cidr_ingress: vec![],
|
||||
additional_allowed_cidr_egress: vec![],
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
|
||||
pub enum InterTenantIngressPolicy {
|
||||
/// Deny all traffic from other tenants by default.
|
||||
DenyAll,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
|
||||
pub enum InternetEgressPolicy {
|
||||
/// Allow all outbound traffic to the internet.
|
||||
AllowAll,
|
||||
/// Deny all outbound traffic to the internet by default.
|
||||
DenyAll,
|
||||
}
|
||||
|
||||
/// Represents a port specification that can be either a single port, a comma-separated list of ports,
|
||||
/// or a range separated by a dash.
|
||||
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
|
||||
pub struct PortSpec {
|
||||
/// The actual representation of the ports as strings for serialization/deserialization purposes.
|
||||
pub data: PortSpecData,
|
||||
}
|
||||
|
||||
impl PortSpec {
|
||||
/// TODO write short rust doc that shows what types of input are supported
|
||||
fn parse_from_str(spec: &str) -> Result<PortSpec, String> {
|
||||
// Check for single port
|
||||
if let Ok(port) = spec.parse::<u16>() {
|
||||
let spec = PortSpecData::SinglePort(port);
|
||||
return Ok(Self { data: spec });
|
||||
}
|
||||
|
||||
if let Some(range) = spec.find('-') {
|
||||
let start_str = &spec[..range];
|
||||
let end_str = &spec[(range + 1)..];
|
||||
|
||||
if let (Ok(start), Ok(end)) = (start_str.parse::<u16>(), end_str.parse::<u16>()) {
|
||||
let spec = PortSpecData::PortRange(start, end);
|
||||
return Ok(Self { data: spec });
|
||||
}
|
||||
}
|
||||
|
||||
let ports: Vec<&str> = spec.split(',').collect();
|
||||
if !ports.is_empty() && ports.iter().all(|p| p.parse::<u16>().is_ok()) {
|
||||
let maybe_ports = ports.iter().try_fold(vec![], |mut list, &p| {
|
||||
if let Ok(p) = p.parse::<u16>() {
|
||||
list.push(p);
|
||||
return Ok(list);
|
||||
}
|
||||
Err(())
|
||||
});
|
||||
|
||||
if let Ok(ports) = maybe_ports {
|
||||
let spec = PortSpecData::ListOfPorts(ports);
|
||||
return Ok(Self { data: spec });
|
||||
}
|
||||
}
|
||||
|
||||
Err(format!("Invalid port spec format {spec}"))
|
||||
}
|
||||
}
|
||||
|
||||
impl FromStr for PortSpec {
|
||||
type Err = String;
|
||||
|
||||
fn from_str(s: &str) -> Result<Self, Self::Err> {
|
||||
Self::parse_from_str(s)
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
|
||||
pub enum PortSpecData {
|
||||
SinglePort(u16),
|
||||
PortRange(u16, u16),
|
||||
ListOfPorts(Vec<u16>),
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_single_port() {
|
||||
let port_spec = "2144".parse::<PortSpec>().unwrap();
|
||||
match port_spec.data {
|
||||
PortSpecData::SinglePort(port) => assert_eq!(port, 2144),
|
||||
_ => panic!("Expected SinglePort"),
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_port_range() {
|
||||
let port_spec = "80-90".parse::<PortSpec>().unwrap();
|
||||
match port_spec.data {
|
||||
PortSpecData::PortRange(start, end) => {
|
||||
assert_eq!(start, 80);
|
||||
assert_eq!(end, 90);
|
||||
}
|
||||
_ => panic!("Expected PortRange"),
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_list_of_ports() {
|
||||
let port_spec = "2144,3424".parse::<PortSpec>().unwrap();
|
||||
match port_spec.data {
|
||||
PortSpecData::ListOfPorts(ports) => {
|
||||
assert_eq!(ports[0], 2144);
|
||||
assert_eq!(ports[1], 3424);
|
||||
}
|
||||
_ => panic!("Expected ListOfPorts"),
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_invalid_port_spec() {
|
||||
let result = "invalid".parse::<PortSpec>();
|
||||
assert!(result.is_err());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_empty_input() {
|
||||
let result = "".parse::<PortSpec>();
|
||||
assert!(result.is_err());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_only_coma() {
        let result = ",".parse::<PortSpec>();
        assert!(result.is_err());
    }
}

@@ -69,34 +69,4 @@ impl DhcpServer for OPNSenseFirewall {

        Ok(())
    }

    async fn set_filename(&self, filename: &str) -> Result<(), ExecutorError> {
        {
            let mut writable_opnsense = self.opnsense_config.write().await;
            writable_opnsense.dhcp().set_filename(filename);
            debug!("OPNsense dhcp server set filename {filename}");
        }

        Ok(())
    }

    async fn set_filename64(&self, filename: &str) -> Result<(), ExecutorError> {
        {
            let mut writable_opnsense = self.opnsense_config.write().await;
            writable_opnsense.dhcp().set_filename64(filename);
            debug!("OPNsense dhcp server set filename {filename}");
        }

        Ok(())
    }

    async fn set_filenameipxe(&self, filenameipxe: &str) -> Result<(), ExecutorError> {
        {
            let mut writable_opnsense = self.opnsense_config.write().await;
            writable_opnsense.dhcp().set_filenameipxe(filenameipxe);
            debug!("OPNsense dhcp server set filenameipxe {filenameipxe}");
        }

        Ok(())
    }
}
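// Annotation (not part of the diff): in each setter above, the inner block scopes the write
// guard from self.opnsense_config.write().await, so the lock is released before the function
// returns. A hypothetical caller, mirroring how DhcpInterpret drives these setters further
// down in this diff (the boot file names are illustrative only):
//
//     dhcp_server.set_filename("undionly.kpxe").await?;
//     dhcp_server.set_filename64("ipxe.efi").await?;
//     dhcp_server.set_filenameipxe("http://pxe.internal/boot.ipxe").await?;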

@@ -61,7 +61,7 @@ impl HttpServer for OPNSenseFirewall {
        info!("Adding custom caddy config files");
        config
            .upload_files(
                "./data/watchguard/caddy_config",
                "../../../watchguard/caddy_config",
                "/usr/local/etc/caddy/caddy.d/",
            )
            .await

@@ -370,13 +370,10 @@ mod tests {
        let result = get_servers_for_backend(&backend, &haproxy);

        // Check the result
        assert_eq!(
            result,
            vec![BackendServer {
                address: "192.168.1.1".to_string(),
                port: 80,
            },]
        );
        assert_eq!(result, vec![BackendServer {
            address: "192.168.1.1".to_string(),
            port: 80,
        },]);
    }
    #[test]
    fn test_get_servers_for_backend_no_linked_servers() {
@@ -433,18 +430,15 @@ mod tests {
        // Call the function
        let result = get_servers_for_backend(&backend, &haproxy);
        // Check the result
        assert_eq!(
            result,
            vec![
                BackendServer {
                    address: "some-hostname.test.mcd".to_string(),
                    port: 80,
                },
                BackendServer {
                    address: "192.168.1.2".to_string(),
                    port: 8080,
                },
            ]
        );
        assert_eq!(result, vec![
            BackendServer {
                address: "some-hostname.test.mcd".to_string(),
                port: 80,
            },
            BackendServer {
                address: "192.168.1.2".to_string(),
                port: 8080,
            },
        ]);
    }
}

@@ -1,46 +0,0 @@
use std::{collections::HashMap, str::FromStr};

use non_blank_string_rs::NonBlankString;
use serde::Serialize;
use url::Url;

use crate::{
    modules::helm::chart::{HelmChartScore, HelmRepository},
    score::Score,
    topology::{HelmCommand, Topology},
};

#[derive(Debug, Serialize, Clone)]
pub struct CertManagerHelmScore {}

impl<T: Topology + HelmCommand> Score<T> for CertManagerHelmScore {
    fn create_interpret(&self) -> Box<dyn crate::interpret::Interpret<T>> {
        let mut values_overrides = HashMap::new();
        values_overrides.insert(
            NonBlankString::from_str("crds.enabled").unwrap(),
            "true".to_string(),
        );
        let values_overrides = Some(values_overrides);

        HelmChartScore {
            namespace: Some(NonBlankString::from_str("cert-manager").unwrap()),
            release_name: NonBlankString::from_str("cert-manager").unwrap(),
            chart_name: NonBlankString::from_str("jetstack/cert-manager").unwrap(),
            chart_version: None,
            values_overrides,
            values_yaml: None,
            create_namespace: true,
            install_only: true,
            repository: Some(HelmRepository::new(
                "jetstack".to_string(),
                Url::parse("https://charts.jetstack.io").unwrap(),
                true,
            )),
        }
        .create_interpret()
    }

    fn name(&self) -> String {
        format!("CertManagerHelmScore")
    }
}
@@ -1,2 +0,0 @@
mod helm;
pub use helm::*;
@@ -17,9 +17,6 @@ pub struct DhcpScore {
    pub host_binding: Vec<HostBinding>,
    pub next_server: Option<IpAddress>,
    pub boot_filename: Option<String>,
    pub filename: Option<String>,
    pub filename64: Option<String>,
    pub filenameipxe: Option<String>,
}

impl<T: Topology + DhcpServer> Score<T> for DhcpScore {
@@ -120,44 +117,8 @@ impl DhcpInterpret {
            None => Outcome::noop(),
        };

        let filename_outcome = match &self.score.filename {
            Some(filename) => {
                dhcp_server.set_filename(&filename).await?;
                Outcome::new(
                    InterpretStatus::SUCCESS,
                    format!("Dhcp Interpret Set filename to {filename}"),
                )
            }
            None => Outcome::noop(),
        };

        let filename64_outcome = match &self.score.filename64 {
            Some(filename64) => {
                dhcp_server.set_filename64(&filename64).await?;
                Outcome::new(
                    InterpretStatus::SUCCESS,
                    format!("Dhcp Interpret Set filename64 to {filename64}"),
                )
            }
            None => Outcome::noop(),
        };

        let filenameipxe_outcome = match &self.score.filenameipxe {
            Some(filenameipxe) => {
                dhcp_server.set_filenameipxe(&filenameipxe).await?;
                Outcome::new(
                    InterpretStatus::SUCCESS,
                    format!("Dhcp Interpret Set filenameipxe to {filenameipxe}"),
                )
            }
            None => Outcome::noop(),
        };

        if next_server_outcome.status == InterpretStatus::NOOP
            && boot_filename_outcome.status == InterpretStatus::NOOP
            && filename_outcome.status == InterpretStatus::NOOP
            && filename64_outcome.status == InterpretStatus::NOOP
            && filenameipxe_outcome.status == InterpretStatus::NOOP
        {
            return Ok(Outcome::noop());
        }
@@ -165,12 +126,8 @@ impl DhcpInterpret {
        Ok(Outcome::new(
            InterpretStatus::SUCCESS,
            format!(
                "Dhcp Interpret Set next boot to [{:?}], boot_filename to [{:?}], filename to [{:?}], filename64 to [{:?}], filenameipxe to [:{:?}]",
                self.score.boot_filename,
                self.score.boot_filename,
                self.score.filename,
                self.score.filename64,
                self.score.filenameipxe
                "Dhcp Interpret Set next boot to {:?} and boot_filename to {:?}",
                self.score.boot_filename, self.score.boot_filename
            ),
        ))
    }

@@ -75,6 +75,7 @@ impl<T: Topology> Interpret<T> for DummyInterpret {
        &self,
        _inventory: &Inventory,
        _topology: &T,
        _profile: &String,
    ) -> Result<Outcome, InterpretError> {
        self.result.clone()
    }
@@ -121,6 +122,7 @@ impl<T: Topology> Interpret<T> for PanicInterpret {
        &self,
        _inventory: &Inventory,
        _topology: &T,
        _profile: &String,
    ) -> Result<Outcome, InterpretError> {
        panic!("Panic interpret always panics when executed")
    }

@@ -6,31 +6,11 @@ use crate::topology::{HelmCommand, Topology};
use async_trait::async_trait;
use helm_wrapper_rs;
use helm_wrapper_rs::blocking::{DefaultHelmExecutor, HelmExecutor};
use log::{debug, info, warn};
pub use non_blank_string_rs::NonBlankString;
use serde::Serialize;
use std::collections::HashMap;
use std::path::Path;
use std::process::{Command, Output, Stdio};
use std::str::FromStr;
use temp_file::TempFile;
use url::Url;

#[derive(Debug, Clone, Serialize)]
pub struct HelmRepository {
    name: String,
    url: Url,
    force_update: bool,
}
impl HelmRepository {
    pub fn new(name: String, url: Url, force_update: bool) -> Self {
        Self {
            name,
            url,
            force_update,
        }
    }
}
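// Annotation (not part of the diff): HelmRepository is built exactly the way the
// CertManagerHelmScore above does it; the jetstack name and URL are taken from that score and
// any other repository works the same way.
//
//     let repo = HelmRepository::new(
//         "jetstack".to_string(),
//         Url::parse("https://charts.jetstack.io").unwrap(),
//         true,
//     );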

#[derive(Debug, Clone, Serialize)]
pub struct HelmChartScore {
@@ -40,11 +20,6 @@ pub struct HelmChartScore {
    pub chart_version: Option<NonBlankString>,
    pub values_overrides: Option<HashMap<NonBlankString, String>>,
    pub values_yaml: Option<String>,
    pub create_namespace: bool,

    /// Whether to run `helm upgrade --install` under the hood or only install when not present
    pub install_only: bool,
    pub repository: Option<HelmRepository>,
}

impl<T: Topology + HelmCommand> Score<T> for HelmChartScore {
@@ -63,81 +38,6 @@ impl<T: Topology + HelmCommand> Score<T> for HelmChartScore {
pub struct HelmChartInterpret {
    pub score: HelmChartScore,
}
impl HelmChartInterpret {
    fn add_repo(&self) -> Result<(), InterpretError> {
        let repo = match &self.score.repository {
            Some(repo) => repo,
            None => {
                info!("No Helm repository specified in the score. Skipping repository setup.");
                return Ok(());
            }
        };
        info!(
            "Ensuring Helm repository exists: Name='{}', URL='{}', ForceUpdate={}",
            repo.name, repo.url, repo.force_update
        );

        let mut add_args = vec!["repo", "add", &repo.name, repo.url.as_str()];
        if repo.force_update {
            add_args.push("--force-update");
        }

        let add_output = run_helm_command(&add_args)?;
        let full_output = format!(
            "{}\n{}",
            String::from_utf8_lossy(&add_output.stdout),
            String::from_utf8_lossy(&add_output.stderr)
        );

        match add_output.status.success() {
            true => {
                return Ok(());
            }
            false => {
                return Err(InterpretError::new(format!(
                    "Failed to add helm repository!\n{full_output}"
                )));
            }
        }
    }
}

fn run_helm_command(args: &[&str]) -> Result<Output, InterpretError> {
    let command_str = format!("helm {}", args.join(" "));
    debug!(
        "Got KUBECONFIG: `{}`",
        std::env::var("KUBECONFIG").unwrap_or("".to_string())
    );
    debug!("Running Helm command: `{}`", command_str);

    let output = Command::new("helm")
        .args(args)
        .stdout(Stdio::piped())
        .stderr(Stdio::piped())
        .output()
        .map_err(|e| {
            InterpretError::new(format!(
                "Failed to execute helm command '{}': {}. Is helm installed and in PATH?",
                command_str, e
            ))
        })?;

    if !output.status.success() {
        let stdout = String::from_utf8_lossy(&output.stdout);
        let stderr = String::from_utf8_lossy(&output.stderr);
        warn!(
            "Helm command `{}` failed with status: {}\nStdout:\n{}\nStderr:\n{}",
            command_str, output.status, stdout, stderr
        );
    } else {
        debug!(
            "Helm command `{}` finished successfully. Status: {}",
            command_str, output.status
        );
    }

    Ok(output)
}
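// Annotation (not part of the diff): run_helm_command shells out to the helm binary on PATH and
// returns the raw Output; callers decide what a non-zero status means. For instance, add_repo
// above ends up issuing something equivalent to:
//
//     let output = run_helm_command(&["repo", "add", "jetstack", "https://charts.jetstack.io"])?;
//     // output.status, output.stdout and output.stderr are then inspected by the caller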

#[async_trait]
impl<T: Topology + HelmCommand> Interpret<T> for HelmChartInterpret {
@@ -161,56 +61,7 @@ impl<T: Topology + HelmCommand> Interpret<T> for HelmChartInterpret {
            None => None,
        };

        self.add_repo()?;

        let helm_executor = DefaultHelmExecutor::new_with_opts(
            &NonBlankString::from_str("helm").unwrap(),
            None,
            900,
            false,
            false,
        );

        let mut helm_options = Vec::new();
        if self.score.create_namespace {
            helm_options.push(NonBlankString::from_str("--create-namespace").unwrap());
        }

        if self.score.install_only {
            let chart_list = match helm_executor.list(Some(ns)) {
                Ok(charts) => charts,
                Err(e) => {
                    return Err(InterpretError::new(format!(
                        "Failed to list scores in namespace {:?} because of error : {}",
                        self.score.namespace, e
                    )));
                }
            };

            if chart_list
                .iter()
                .any(|item| item.name == self.score.release_name.to_string())
            {
                info!(
                    "Release '{}' already exists in namespace '{}'. Skipping installation as install_only is true.",
                    self.score.release_name, ns
                );

                return Ok(Outcome::new(
                    InterpretStatus::SUCCESS,
                    format!(
                        "Helm Chart '{}' already installed to namespace {ns} and install_only=true",
                        self.score.release_name
                    ),
                ));
            } else {
                info!(
                    "Release '{}' not found in namespace '{}'. Proceeding with installation.",
                    self.score.release_name, ns
                );
            }
        }

        let helm_executor = DefaultHelmExecutor::new();
        let res = helm_executor.install_or_upgrade(
            &ns,
            &self.score.release_name,
@@ -218,7 +69,7 @@ impl<T: Topology + HelmCommand> Interpret<T> for HelmChartInterpret {
            self.score.chart_version.as_ref(),
            self.score.values_overrides.as_ref(),
            yaml_path,
            Some(&helm_options),
            None,
        );

        let status = match res {

@@ -1,376 +0,0 @@
use async_trait::async_trait;
use log::debug;
use serde::Serialize;
use std::collections::HashMap;
use std::io::ErrorKind;
use std::path::PathBuf;
use std::process::{Command, Output};
use temp_dir::{self, TempDir};
use temp_file::TempFile;

use crate::data::{Id, Version};
use crate::interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome};
use crate::inventory::Inventory;
use crate::score::Score;
use crate::topology::{HelmCommand, K8sclient, Topology};

#[derive(Clone)]
pub struct HelmCommandExecutor {
    pub env: HashMap<String, String>,
    pub path: Option<PathBuf>,
    pub args: Vec<String>,
    pub api_versions: Option<Vec<String>>,
    pub kube_version: String,
    pub debug: Option<bool>,
    pub globals: HelmGlobals,
    pub chart: HelmChart,
}

#[derive(Clone)]
pub struct HelmGlobals {
    pub chart_home: Option<PathBuf>,
    pub config_home: Option<PathBuf>,
}

#[derive(Debug, Clone, Serialize)]
pub struct HelmChart {
    pub name: String,
    pub version: Option<String>,
    pub repo: Option<String>,
    pub release_name: Option<String>,
    pub namespace: Option<String>,
    pub additional_values_files: Vec<PathBuf>,
    pub values_file: Option<PathBuf>,
    pub values_inline: Option<String>,
    pub include_crds: Option<bool>,
    pub skip_hooks: Option<bool>,
    pub api_versions: Option<Vec<String>>,
    pub kube_version: Option<String>,
    pub name_template: String,
    pub skip_tests: Option<bool>,
    pub debug: Option<bool>,
}

impl HelmCommandExecutor {
    pub fn generate(mut self) -> Result<String, std::io::Error> {
        if self.globals.chart_home.is_none() {
            self.globals.chart_home = Some(PathBuf::from("charts"));
        }

        if self
            .clone()
            .chart
            .clone()
            .chart_exists_locally(self.clone().globals.chart_home.unwrap())
            .is_none()
        {
            if self.chart.repo.is_none() {
                return Err(std::io::Error::new(
                    ErrorKind::Other,
                    "Chart doesn't exist locally and no repo specified",
                ));
            }
            self.clone().run_command(
                self.chart
                    .clone()
                    .pull_command(self.globals.chart_home.clone().unwrap()),
            )?;
        }

        let out = match self.clone().run_command(
            self.chart
                .clone()
                .helm_args(self.globals.chart_home.clone().unwrap()),
        ) {
            Ok(out) => out,
            Err(e) => return Err(e),
        };

        // TODO: don't use unwrap here
        let s = String::from_utf8(out.stdout).unwrap();
        debug!("helm stderr: {}", String::from_utf8(out.stderr).unwrap());
        debug!("helm status: {}", out.status);
        debug!("helm output: {s}");

        let clean = s.split_once("---").unwrap().1;

        Ok(clean.to_string())
    }

    pub fn version(self) -> Result<String, std::io::Error> {
        let out = match self.run_command(vec![
            "version".to_string(),
            "-c".to_string(),
            "--short".to_string(),
        ]) {
            Ok(out) => out,
            Err(e) => return Err(e),
        };

        // TODO: don't use unwrap
        Ok(String::from_utf8(out.stdout).unwrap())
    }

    pub fn run_command(mut self, mut args: Vec<String>) -> Result<Output, std::io::Error> {
        if let Some(d) = self.debug {
            if d {
                args.push("--debug".to_string());
            }
        }

        let path = if let Some(p) = self.path {
            p
        } else {
            PathBuf::from("helm")
        };

        let config_home = match self.globals.config_home {
            Some(p) => p,
            None => PathBuf::from(TempDir::new()?.path()),
        };

        match self.chart.values_inline {
            Some(yaml_str) => {
                let tf: TempFile;
                tf = temp_file::with_contents(yaml_str.as_bytes());
                self.chart
                    .additional_values_files
                    .push(PathBuf::from(tf.path()));
            }
            None => (),
        };

        self.env.insert(
            "HELM_CONFIG_HOME".to_string(),
            config_home.to_str().unwrap().to_string(),
        );
        self.env.insert(
            "HELM_CACHE_HOME".to_string(),
            config_home.to_str().unwrap().to_string(),
        );
        self.env.insert(
            "HELM_DATA_HOME".to_string(),
            config_home.to_str().unwrap().to_string(),
        );

        Command::new(path).envs(self.env).args(args).output()
    }
}

impl HelmChart {
    pub fn chart_exists_locally(self, chart_home: PathBuf) -> Option<PathBuf> {
        let chart_path =
            PathBuf::from(chart_home.to_str().unwrap().to_string() + "/" + &self.name.to_string());

        if chart_path.exists() {
            Some(chart_path)
        } else {
            None
        }
    }

    pub fn pull_command(self, chart_home: PathBuf) -> Vec<String> {
        let mut args = vec![
            "pull".to_string(),
            "--untar".to_string(),
            "--untardir".to_string(),
            chart_home.to_str().unwrap().to_string(),
        ];

        match self.repo {
            Some(r) => {
                if r.starts_with("oci://") {
                    args.push(String::from(
                        r.trim_end_matches("/").to_string() + "/" + self.name.clone().as_str(),
                    ));
                } else {
                    args.push("--repo".to_string());
                    args.push(r.to_string());

                    args.push(self.name);
                }
            }
            None => args.push(self.name),
        };

        match self.version {
            Some(v) => {
                args.push("--version".to_string());
                args.push(v.to_string());
            }
            None => (),
        }

        args
    }

    pub fn helm_args(self, chart_home: PathBuf) -> Vec<String> {
        let mut args: Vec<String> = vec!["template".to_string()];

        match self.release_name {
            Some(rn) => args.push(rn.to_string()),
            None => args.push("--generate-name".to_string()),
        }

        args.push(
            PathBuf::from(chart_home.to_str().unwrap().to_string() + "/" + self.name.as_str())
                .to_str()
                .unwrap()
                .to_string(),
        );

        if let Some(n) = self.namespace {
            args.push("--namespace".to_string());
            args.push(n.to_string());
        }

        if let Some(f) = self.values_file {
            args.push("-f".to_string());
            args.push(f.to_str().unwrap().to_string());
        }

        for f in self.additional_values_files {
            args.push("-f".to_string());
            args.push(f.to_str().unwrap().to_string());
        }

        if let Some(vv) = self.api_versions {
            for v in vv {
                args.push("--api-versions".to_string());
                args.push(v);
            }
        }

        if let Some(kv) = self.kube_version {
            args.push("--kube-version".to_string());
            args.push(kv);
        }

        if let Some(crd) = self.include_crds {
            if crd {
                args.push("--include-crds".to_string());
            }
        }

        if let Some(st) = self.skip_tests {
            if st {
                args.push("--skip-tests".to_string());
            }
        }

        if let Some(sh) = self.skip_hooks {
            if sh {
                args.push("--no-hooks".to_string());
            }
        }

        if let Some(d) = self.debug {
            if d {
                args.push("--debug".to_string());
            }
        }

        args
    }
}
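// Annotation (not part of the diff): a sketch of how HelmChart::helm_args assembles a
// `helm template` invocation. The chart name, version, repo and namespace below are invented
// for illustration; every other field is left at its empty value.
//
//     let chart = HelmChart {
//         name: "cert-manager".to_string(),
//         version: Some("v1.14.0".to_string()),
//         repo: Some("https://charts.jetstack.io".to_string()),
//         release_name: Some("cert-manager".to_string()),
//         namespace: Some("cert-manager".to_string()),
//         additional_values_files: vec![],
//         values_file: None,
//         values_inline: None,
//         include_crds: Some(true),
//         skip_hooks: None,
//         api_versions: None,
//         kube_version: None,
//         name_template: String::new(),
//         skip_tests: None,
//         debug: None,
//     };
//     // yields roughly: ["template", "cert-manager", "charts/cert-manager",
//     //                  "--namespace", "cert-manager", "--include-crds"]
//     let args = chart.helm_args(PathBuf::from("charts"));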

#[derive(Debug, Clone, Serialize)]
pub struct HelmChartScoreV2 {
    pub chart: HelmChart,
}

impl<T: Topology + K8sclient + HelmCommand> Score<T> for HelmChartScoreV2 {
    fn create_interpret(&self) -> Box<dyn Interpret<T>> {
        Box::new(HelmChartInterpretV2 {
            score: self.clone(),
        })
    }

    fn name(&self) -> String {
        format!(
            "{} {} HelmChartScoreV2",
            self.chart
                .release_name
                .clone()
                .unwrap_or("Unknown".to_string()),
            self.chart.name
        )
    }
}

#[derive(Debug, Serialize)]
pub struct HelmChartInterpretV2 {
    pub score: HelmChartScoreV2,
}
impl HelmChartInterpretV2 {}

#[async_trait]
impl<T: Topology + K8sclient + HelmCommand> Interpret<T> for HelmChartInterpretV2 {
    async fn execute(
        &self,
        _inventory: &Inventory,
        _topology: &T,
    ) -> Result<Outcome, InterpretError> {
        let _ns = self
            .score
            .chart
            .namespace
            .as_ref()
            .unwrap_or_else(|| todo!("Get namespace from active kubernetes cluster"));

        let helm_executor = HelmCommandExecutor {
            env: HashMap::new(),
            path: None,
            args: vec![],
            api_versions: None,
            kube_version: "v1.33.0".to_string(),
            debug: Some(false),
            globals: HelmGlobals {
                chart_home: None,
                config_home: None,
            },
            chart: self.score.chart.clone(),
        };

        // let mut helm_options = Vec::new();
        // if self.score.create_namespace {
        //     helm_options.push(NonBlankString::from_str("--create-namespace").unwrap());
        // }

        let res = helm_executor.generate();

        let _output = match res {
            Ok(output) => output,
            Err(err) => return Err(InterpretError::new(err.to_string())),
        };

        // TODO: implement actually applying the YAML from the templating in the generate function to a k8s cluster, having trouble passing in straight YAML into the k8s client

        // let k8s_resource = k8s_openapi::serde_json::from_str(output.as_str()).unwrap();

        // let client = topology
        //     .k8s_client()
        //     .await
        //     .expect("Environment should provide enough information to instantiate a client")
        //     .apply_namespaced(&vec![output], Some(ns.to_string().as_str()));
        // match client.apply_yaml(output) {
        //     Ok(_) => return Ok(Outcome::success("Helm chart deployed".to_string())),
        //     Err(e) => return Err(InterpretError::new(e)),
        // }

        Ok(Outcome::success("Helm chart deployed".to_string()))
    }

    fn get_name(&self) -> InterpretName {
        todo!()
    }
    fn get_version(&self) -> Version {
        todo!()
    }
    fn get_status(&self) -> InterpretStatus {
        todo!()
    }
    fn get_children(&self) -> Vec<Id> {
        todo!()
    }
}
@@ -1,2 +1 @@
pub mod chart;
pub mod command;

@@ -1,66 +0,0 @@
use async_trait::async_trait;
use derive_new::new;
use serde::Serialize;

use crate::{
    data::{Id, Version},
    interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
    inventory::Inventory,
    score::Score,
    topology::Topology,
};

#[derive(Debug, new, Clone, Serialize)]
pub struct IpxeScore {
    //files_to_serve: Url,
}

impl<T: Topology> Score<T> for IpxeScore {
    fn create_interpret(&self) -> Box<dyn Interpret<T>> {
        Box::new(IpxeInterpret::new(self.clone()))
    }

    fn name(&self) -> String {
        "IpxeScore".to_string()
    }
}

#[derive(Debug, new, Clone)]
pub struct IpxeInterpret {
    _score: IpxeScore,
}

#[async_trait]
impl<T: Topology> Interpret<T> for IpxeInterpret {
    async fn execute(
        &self,
        _inventory: &Inventory,
        _topology: &T,
    ) -> Result<Outcome, InterpretError> {
        /*
        let http_server = &topology.http_server;
        http_server.ensure_initialized().await?;
        Ok(Outcome::success(format!(
            "Http Server running and serving files from {}",
            self.score.files_to_serve
        )))
        */
        todo!();
    }

    fn get_name(&self) -> InterpretName {
        InterpretName::Ipxe
    }

    fn get_version(&self) -> Version {
        todo!()
    }

    fn get_status(&self) -> InterpretStatus {
        todo!()
    }

    fn get_children(&self) -> Vec<Id> {
        todo!()
    }
}
@@ -14,13 +14,11 @@ use super::resource::{K8sResourceInterpret, K8sResourceScore};
pub struct K8sDeploymentScore {
    pub name: String,
    pub image: String,
    pub namespace: Option<String>,
    pub env_vars: serde_json::Value,
}

impl<T: Topology + K8sclient> Score<T> for K8sDeploymentScore {
    fn create_interpret(&self) -> Box<dyn Interpret<T>> {
        let deployment = json!(
        let deployment: Deployment = serde_json::from_value(json!(
            {
                "metadata": {
                    "name": self.name
@@ -40,21 +38,18 @@ impl<T: Topology + K8sclient> Score<T> for K8sDeploymentScore {
                "spec": {
                    "containers": [
                        {
                            "image": self.image,
                            "name": self.name,
                            "imagePullPolicy": "Always",
                            "env": self.env_vars,
                            "image": self.image,
                            "name": self.image
                        }
                    ]
                }
            }
        }
    }
        );

        let deployment: Deployment = serde_json::from_value(deployment).unwrap();
        ))
        .unwrap();
        Box::new(K8sResourceInterpret {
            score: K8sResourceScore::single(deployment.clone(), self.namespace.clone()),
            score: K8sResourceScore::single(deployment.clone()),
        })
    }

@@ -1,104 +0,0 @@
use harmony_macros::ingress_path;
use k8s_openapi::api::networking::v1::Ingress;
use log::{debug, trace};
use serde::Serialize;
use serde_json::json;

use crate::{
    interpret::Interpret,
    score::Score,
    topology::{K8sclient, Topology},
};

use super::resource::{K8sResourceInterpret, K8sResourceScore};

#[derive(Debug, Clone, Serialize)]
pub enum PathType {
    ImplementationSpecific,
    Exact,
    Prefix,
}

impl PathType {
    fn as_str(&self) -> &'static str {
        match self {
            PathType::ImplementationSpecific => "ImplementationSpecific",
            PathType::Exact => "Exact",
            PathType::Prefix => "Prefix",
        }
    }
}

type IngressPath = String;

#[derive(Debug, Clone, Serialize)]
pub struct K8sIngressScore {
    pub name: fqdn::FQDN,
    pub host: fqdn::FQDN,
    pub backend_service: fqdn::FQDN,
    pub port: u16,
    pub path: Option<IngressPath>,
    pub path_type: Option<PathType>,
    pub namespace: Option<fqdn::FQDN>,
}

impl<T: Topology + K8sclient> Score<T> for K8sIngressScore {
    fn create_interpret(&self) -> Box<dyn Interpret<T>> {
        let path = match self.path.clone() {
            Some(p) => p,
            None => ingress_path!("/"),
        };

        let path_type = match self.path_type.clone() {
            Some(p) => p,
            None => PathType::Prefix,
        };

        let ingress = json!(
            {
                "metadata": {
                    "name": self.name.to_string(),
                },
                "spec": {
                    "rules": [
                        { "host": self.host.to_string(),
                            "http": {
                                "paths": [
                                    {
                                        "path": path,
                                        "pathType": path_type.as_str(),
                                        "backend": {
                                            "service": {
                                                "name": self.backend_service.to_string(),
                                                "port": {
                                                    "number": self.port,
                                                }
                                            }
                                        }
                                    }
                                ]
                            }
                        }
                    ]
                }
            }
        );

trace!("Building ingresss object from Value {ingress:#}");
        let ingress: Ingress = serde_json::from_value(ingress).unwrap();
        debug!(
            "Successfully built Ingress for host {:?}",
            ingress.metadata.name
        );
        Box::new(K8sResourceInterpret {
            score: K8sResourceScore::single(
                ingress.clone(),
                self.namespace.clone().map(|f| f.to_string()),
            ),
        })
    }

    fn name(&self) -> String {
        format!("{} K8sIngressScore", self.name)
    }
}
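// Annotation (not part of the diff): a sketch of filling in K8sIngressScore. The host, service
// and port are invented for illustration, and this assumes fqdn::FQDN implements FromStr and
// accepts these example names (hence the .parse() calls).
//
//     let score = K8sIngressScore {
//         name: "grafana.example.internal".parse().unwrap(),
//         host: "grafana.example.internal".parse().unwrap(),
//         backend_service: "grafana.monitoring.svc".parse().unwrap(),
//         port: 3000,
//         path: None,      // defaults to ingress_path!("/")
//         path_type: None, // defaults to PathType::Prefix
//         namespace: Some("monitoring.svc".parse().unwrap()),
//     };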
@@ -1,4 +1,2 @@
pub mod deployment;
pub mod ingress;
pub mod namespace;
pub mod resource;

@@ -1,46 +0,0 @@
use k8s_openapi::api::core::v1::Namespace;
use non_blank_string_rs::NonBlankString;
use serde::Serialize;
use serde_json::json;

use crate::{
    interpret::Interpret,
    score::Score,
    topology::{K8sclient, Topology},
};

#[derive(Debug, Clone, Serialize)]
pub struct K8sNamespaceScore {
    pub name: Option<NonBlankString>,
}

impl<T: Topology + K8sclient> Score<T> for K8sNamespaceScore {
    fn create_interpret(&self) -> Box<dyn Interpret<T>> {
        let name = match &self.name {
            Some(name) => name,
            None => todo!(
                "Return NoOp interpret when no namespace specified or something that makes sense"
            ),
        };
        let _namespace: Namespace = serde_json::from_value(json!(
            {
                "apiVersion": "v1",
                "kind": "Namespace",
                "metadata": {
                    "name": name,
                },
            }
        ))
        .unwrap();
        todo!(
"We currently only support namespaced ressources (see Scope = NamespaceResourceScope)"
        );
        // Box::new(K8sResourceInterpret {
        //     score: K8sResourceScore::single(namespace.clone()),
        // })
    }

    fn name(&self) -> String {
        "K8sNamespaceScore".to_string()
    }
}