Compare commits

139 Commits

feat/ceph-...doc-and-br

| SHA1 |
|---|
| b885c35706 |
| bb6b4b7f88 |
| 83c1cc82b6 |
| 66d346a10c |
| 06a004a65d |
| 9d4e6acac0 |
| 4ff57062ae |
| 50ce54ea66 |
| 827a49e56b |
| 95cfc03518 |
| c80ede706b |
| b2825ec1ef |
| 609d7acb5d |
| de761cf538 |
| c069207f12 |
| 7368184917 |
| 05205f4ac1 |
| 3174645c97 |
| 7536f4ec4b |
| 464347d3e5 |
| 7f415f5b98 |
| 2a520a1d7c |
| 987f195e2f |
| 14d1823d15 |
| 2a48d51479 |
| 20a227bb41 |
| ce91ee0168 |
| ed7f81aa1f |
| cb66b7592e |
| a815f6ac9c |
| 2d891e4463 |
| f66e58b9ca |
| ea39d93aa7 |
| 6989d208cf |
| c0d54a4466 |
| fc384599a1 |
| c0bd8007c7 |
| 7dff70edcf |
| 06a0c44c3c |
| 85bec66e58 |
| 1f3796f503 |
| cf576192a8 |
| 5f78300d78 |
| f7e9669009 |
| 2d3c32469c |
| f65e16df7b |
| 1cec398d4d |
| 58b6268989 |
| cbbaae2ac8 |
| 4a500e4eb7 |
| f073b7e5fb |
| c84b2413ed |
| f83fd09f11 |
| c15bd53331 |
| 6e6f57e38c |
| 6f55f79281 |
| 19f87fdaf7 |
| 49370af176 |
| cf0b8326dc |
| 1e2563f7d1 |
| 7f50c36f11 |
| 4df451bc41 |
| 49dad343ad |
| 9961e8b79d |
| 9b889f71da |
| 7514ebfb5c |
| b3ae4e6611 |
| 8424778871 |
| 7bc083701e |
| 4fa2b8deb6 |
| f3639c604c |
| 258cfa279e |
| ceafabf430 |
| 11481b16cd |
| 21dcb75408 |
| a5f9ecfcf7 |
| 849bd79710 |
| c5101e096a |
| cd0720f43e |
| b9e04d21da |
| a0884950d7 |
| 29d22a611f |
| 3bf5cb0526 |
| 54803c40a2 |
| 288129b0c1 |
| 665ed24f65 |
| 3d088b709f |
| da5a869771 |
| fedb346548 |
| 6ea5630d30 |
| b42815f79c |
| ed70bfd236 |
| 0a324184ad |
| ad2ae2e4f8 |
| 0a5da43c76 |
| b6be44202e |
| c372e781d8 |
| 56c181fc3d |
| 55bfe306ad |
| 62fa3c2b10 |
| ea1380f98a |
| 701d8cfab9 |
| f9906cb419 |
| cb4382fbb5 |
| 1eca2cc1a9 |
| 269f13ae9b |
| ec277bc13d |
| a9f8cd16ea |
| c542a935e3 |
| 0395d11e98 |
| 05e7b8075c |
| b857412151 |
| 7bb3602ab8 |
| 78b80c2169 |
| 0876f4e4f0 |
| 6ac0e095a3 |
| ff2efc0a66 |
| f180cc4c80 |
| 3ca31179d0 |
| a9fe4ab267 |
| 65cc9befeb |
| d456a1f9ee |
| 8cc7adf196 |
| a1ab5d40fb |
| 6c92dd24f7 |
| c805d7e018 |
| b33615b969 |
| 0f59f29ac4 |
| 361f240762 |
| 57c3b01e66 |
| 94ddf027dd |
| 06a2be4496 |
| e2a09efdee |
| 2618441de3 |
| da6610c625 |
| e956772593 |
| 27c51e0ec5 |
| 597dcbc848 |
| a53e8552e9 |
2  .gitattributes  (vendored)
@@ -2,3 +2,5 @@ bootx64.efi filter=lfs diff=lfs merge=lfs -text
grubx64.efi filter=lfs diff=lfs merge=lfs -text
initrd filter=lfs diff=lfs merge=lfs -text
linux filter=lfs diff=lfs merge=lfs -text
data/okd/bin/* filter=lfs diff=lfs merge=lfs -text
data/okd/installer_image/* filter=lfs diff=lfs merge=lfs -text
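The new patterns route the OKD binaries and installer images through Git LFS, so a plain `git clone` only fetches pointer files. Pulling the real blobs is a standard LFS step; this is a generic sketch, not a project-specific script:

```bash
# One-time per machine: enable the Git LFS hooks
git lfs install
# Fetch the actual contents of LFS-tracked paths such as data/okd/bin/*
git lfs pull
```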
1  .gitignore  (vendored)
@@ -3,6 +3,7 @@ private_repos/

### Harmony ###
harmony.log
data/okd/installation_files*

### Helm ###
# Chart dependencies
3  .gitmodules  (vendored, new file)
@@ -0,0 +1,3 @@
[submodule "examples/try_rust_webapp/tryrust.org"]
	path = examples/try_rust_webapp/tryrust.org
	url = https://github.com/rust-dd/tryrust.org.git
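Because the tryrust.org example app now comes in as a Git submodule, a fresh checkout needs it initialized before `examples/try_rust_webapp` builds. The usual commands (repository URL taken from the README below):

```bash
git clone https://git.nationtech.io/nationtech/harmony
cd harmony
git submodule update --init --recursive
```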
20  .sqlx/query-2ea29df2326f7c84bd4100ad510a3fd4878dc2e217dc83f9bf45a402dfd62a91.json  (generated, new file)
@@ -0,0 +1,20 @@
{
  "db_name": "SQLite",
  "query": "SELECT host_id FROM host_role_mapping WHERE role = ?",
  "describe": {
    "columns": [
      {
        "name": "host_id",
        "ordinal": 0,
        "type_info": "Text"
      }
    ],
    "parameters": {
      "Right": 1
    },
    "nullable": [
      false
    ]
  },
  "hash": "2ea29df2326f7c84bd4100ad510a3fd4878dc2e217dc83f9bf45a402dfd62a91"
}
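The `.sqlx/query-*.json` files are offline metadata produced by `cargo sqlx prepare`; they let the compile-time-checked queries build without a live database. As a rough sketch of the kind of call site this first entry corresponds to (the actual Harmony code is not part of this diff, so the function and variable names here are assumptions):

```rust
use sqlx::SqlitePool;

// Hypothetical call site: the query string must match the one recorded in
// the .sqlx metadata exactly for SQLX_OFFLINE builds to verify it.
async fn hosts_with_role(pool: &SqlitePool, role: &str) -> Result<Vec<String>, sqlx::Error> {
    sqlx::query_scalar!("SELECT host_id FROM host_role_mapping WHERE role = ?", role)
        .fetch_all(pool)
        .await
}
```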
32  .sqlx/query-8d247918eca10a88b784ee353db090c94a222115c543231f2140cba27bd0f067.json  (generated, new file)
@@ -0,0 +1,32 @@
{
  "db_name": "SQLite",
  "query": "\n SELECT\n p1.id,\n p1.version_id,\n p1.data as \"data: Json<PhysicalHost>\"\n FROM\n physical_hosts p1\n INNER JOIN (\n SELECT\n id,\n MAX(version_id) AS max_version\n FROM\n physical_hosts\n GROUP BY\n id\n ) p2 ON p1.id = p2.id AND p1.version_id = p2.max_version\n ",
  "describe": {
    "columns": [
      {
        "name": "id",
        "ordinal": 0,
        "type_info": "Text"
      },
      {
        "name": "version_id",
        "ordinal": 1,
        "type_info": "Text"
      },
      {
        "name": "data: Json<PhysicalHost>",
        "ordinal": 2,
        "type_info": "Blob"
      }
    ],
    "parameters": {
      "Right": 0
    },
    "nullable": [
      false,
      false,
      false
    ]
  },
  "hash": "8d247918eca10a88b784ee353db090c94a222115c543231f2140cba27bd0f067"
}
32  .sqlx/query-934035c7ca6e064815393e4e049a7934b0a7fac04a4fe4b2a354f0443d630990.json  (generated, new file)
@@ -0,0 +1,32 @@
{
  "db_name": "SQLite",
  "query": "SELECT id, version_id, data as \"data: Json<PhysicalHost>\" FROM physical_hosts WHERE id = ? ORDER BY version_id DESC LIMIT 1",
  "describe": {
    "columns": [
      {
        "name": "id",
        "ordinal": 0,
        "type_info": "Text"
      },
      {
        "name": "version_id",
        "ordinal": 1,
        "type_info": "Text"
      },
      {
        "name": "data: Json<PhysicalHost>",
        "ordinal": 2,
        "type_info": "Null"
      }
    ],
    "parameters": {
      "Right": 1
    },
    "nullable": [
      false,
      false,
      false
    ]
  },
  "hash": "934035c7ca6e064815393e4e049a7934b0a7fac04a4fe4b2a354f0443d630990"
}
12  .sqlx/query-df7a7c9cfdd0972e2e0ce7ea444ba8bc9d708a4fb89d5593a0be2bbebde62aff.json  (generated, new file)
@@ -0,0 +1,12 @@
{
  "db_name": "SQLite",
  "query": "\n INSERT INTO host_role_mapping (host_id, role)\n VALUES (?, ?)\n ",
  "describe": {
    "columns": [],
    "parameters": {
      "Right": 2
    },
    "nullable": []
  },
  "hash": "df7a7c9cfdd0972e2e0ce7ea444ba8bc9d708a4fb89d5593a0be2bbebde62aff"
}
12  .sqlx/query-f10f615ee42129ffa293e46f2f893d65a237d31d24b74a29c6a8d8420d255ab8.json  (generated, new file)
@@ -0,0 +1,12 @@
{
  "db_name": "SQLite",
  "query": "INSERT INTO physical_hosts (id, version_id, data) VALUES (?, ?, ?)",
  "describe": {
    "columns": [],
    "parameters": {
      "Right": 3
    },
    "nullable": []
  },
  "hash": "f10f615ee42129ffa293e46f2f893d65a237d31d24b74a29c6a8d8420d255ab8"
}
1735  Cargo.lock  (generated)
14  Cargo.toml
@@ -15,6 +15,8 @@ members = [
    "harmony_inventory_agent",
    "harmony_secret_derive",
    "harmony_secret",
    "adr/agent_discovery/mdns",
    "brocade",
]

[workspace.package]
@@ -36,7 +38,7 @@ tokio = { version = "1.40", features = [
cidr = { features = ["serde"], version = "0.2" }
russh = "0.45"
russh-keys = "0.45"
rand = "0.8"
rand = "0.9"
url = "2.5"
kube = { version = "1.1.0", features = [
    "config",
@@ -65,3 +67,13 @@ directories = "6.0.0"
thiserror = "2.0.14"
serde = { version = "1.0.209", features = ["derive", "rc"] }
serde_json = "1.0.127"
askama = "0.14"
sqlx = { version = "0.8", features = ["runtime-tokio", "sqlite"] }
reqwest = { version = "0.12", features = [
    "blocking",
    "stream",
    "rustls-tls",
    "http2",
    "json",
], default-features = false }
assertor = "0.0.4"
156  README.md
@@ -1,4 +1,6 @@
# Harmony : Open-source infrastructure orchestration that treats your platform like first-class code
# Harmony

Open-source infrastructure orchestration that treats your platform like first-class code.

_By [NationTech](https://nationtech.io)_

@@ -18,9 +20,7 @@ All in **one strongly-typed Rust codebase**.

From a **developer laptop** to a **global production cluster**, a single **source of truth** drives the **full software lifecycle.**

---

## 1 · The Harmony Philosophy
## The Harmony Philosophy

Infrastructure is essential, but it shouldn’t be your core business. Harmony is built on three guiding principles that make modern platforms reliable, repeatable, and easy to reason about.

@@ -32,112 +32,102 @@ Infrastructure is essential, but it shouldn’t be your core business. Harmony i

These principles surface as simple, ergonomic Rust APIs that let teams focus on their product while trusting the platform underneath.

---

## Where to Start

## 2 · Quick Start
We have a comprehensive set of documentation right here in the repository.

The snippet below spins up a complete **production-grade LAMP stack** with monitoring. Swap it for your own scores to deploy anything from microservices to machine-learning pipelines.

| I want to... | Start Here |
| ----------------- | ------------------------------------------------------------------ |
| Get Started | [Getting Started Guide](./docs/guides/getting-started.md) |
| See an Example | [Use Case: Deploy a Rust Web App](./docs/use-cases/rust-webapp.md) |
| Explore | [Documentation Hub](./docs/README.md) |
| See Core Concepts | [Core Concepts Explained](./docs/concepts.md) |

## Quick Look: Deploy a Rust Webapp

The snippet below spins up a complete **production-grade Rust + Leptos Webapp** with monitoring. Swap it for your own scores to deploy anything from microservices to machine-learning pipelines.

```rust
use harmony::{
    data::Version,
    inventory::Inventory,
    maestro::Maestro,
    modules::{
        lamp::{LAMPConfig, LAMPScore},
        monitoring::monitoring_alerting::MonitoringAlertingStackScore,
        application::{
            ApplicationScore, RustWebFramework, RustWebapp,
            features::{PackagingDeployment, rhob_monitoring::Monitoring},
        },
        topology::{K8sAnywhereTopology, Url},
        monitoring::alert_channel::discord_alert_channel::DiscordWebhook,
    },
    topology::K8sAnywhereTopology,
};
use harmony_macros::hurl;
use std::{path::PathBuf, sync::Arc};

#[tokio::main]
async fn main() {
    // 1. Describe what you want
    let lamp_stack = LAMPScore {
        name: "harmony-lamp-demo".into(),
        domain: Url::Url(url::Url::parse("https://lampdemo.example.com").unwrap()),
        php_version: Version::from("8.3.0").unwrap(),
        config: LAMPConfig {
            project_root: "./php".into(),
            database_size: "4Gi".into(),
            ..Default::default()
        },
    let application = Arc::new(RustWebapp {
        name: "harmony-example-leptos".to_string(),
        project_root: PathBuf::from(".."), // <== Your project root, usually .. if you use the standard `/harmony` folder
        framework: Some(RustWebFramework::Leptos),
        service_port: 8080,
    });

    // Define your Application deployment and the features you want
    let app = ApplicationScore {
        features: vec![
            Box::new(PackagingDeployment {
                application: application.clone(),
            }),
            Box::new(Monitoring {
                application: application.clone(),
                alert_receiver: vec![
                    Box::new(DiscordWebhook {
                        name: "test-discord".to_string(),
                        url: hurl!("https://discord.doesnt.exist.com"), // <== Get your discord webhook url
                    }),
                ],
            }),
        ],
        application,
    };

    // 2. Enhance with extra scores (monitoring, CI/CD, …)
    let mut monitoring = MonitoringAlertingStackScore::new();
    monitoring.namespace = Some(lamp_stack.config.namespace.clone());

    // 3. Run your scores on the desired topology & inventory
    harmony_cli::run(
        Inventory::autoload(), // auto-detect hardware / kube-config
        K8sAnywhereTopology::from_env(), // local k3d, CI, staging, prod…
        vec![
            Box::new(lamp_stack),
            Box::new(monitoring)
        ],
        None
    ).await.unwrap();
        Inventory::autoload(),
        K8sAnywhereTopology::from_env(), // <== Deploy to local automatically provisioned local k3d by default or connect to any kubernetes cluster
        vec![Box::new(app)],
        None,
    )
    .await
    .unwrap();
}
```

Run it:
To run this:

```bash
cargo run
```
- Clone the repository: `git clone https://git.nationtech.io/nationtech/harmony`
- Install dependencies: `cargo build --release`
- Run the example: `cargo run --example try_rust_webapp`

Harmony analyses the code, shows an execution plan in a TUI, and applies it once you confirm. Same code, same binary—every environment.
## Documentation

---
All documentation is in the `/docs` directory.

## 3 · Core Concepts
- [Documentation Hub](./docs/README.md): The main entry point for all documentation.
- [Core Concepts](./docs/concepts.md): A detailed look at Score, Topology, Capability, Inventory, and Interpret.
- [Component Catalogs](./docs/catalogs/README.md): Discover all available Scores, Topologies, and Capabilities.
- [Developer Guide](./docs/guides/developer-guide.md): Learn how to write your own Scores and Topologies.

| Term | One-liner |
| ---------------- | ---------------------------------------------------------------------------------------------------- |
| **Score<T>** | Declarative description of the desired state (e.g., `LAMPScore`). |
| **Interpret<T>** | Imperative logic that realises a `Score` on a specific environment. |
| **Topology** | An environment (local k3d, AWS, bare-metal) exposing verified _Capabilities_ (Kubernetes, DNS, …). |
| **Maestro** | Orchestrator that compiles Scores + Topology, ensuring all capabilities line up **at compile-time**. |
| **Inventory** | Optional catalogue of physical assets for bare-metal and edge deployments. |
## Architectural Decision Records

A visual overview is in the diagram below.
- [ADR-001 · Why Rust](adr/001-rust.md)
- [ADR-003 · Infrastructure Abstractions](adr/003-infrastructure-abstractions.md)
- [ADR-006 · Secret Management](adr/006-secret-management.md)
- [ADR-011 · Multi-Tenant Cluster](adr/011-multi-tenant-cluster.md)

[Harmony Core Architecture](docs/diagrams/Harmony_Core_Architecture.drawio.svg)
## Contribute

---
Discussions and roadmap live in [Issues](https://git.nationtech.io/nationtech/harmony/-/issues). PRs, ideas, and feedback are welcome!

## 4 · Install

Prerequisites:

- Rust
- Docker (if you deploy locally)
- `kubectl` / `helm` for Kubernetes-based topologies

```bash
git clone https://git.nationtech.io/nationtech/harmony
cd harmony
cargo build --release # builds the CLI, TUI and libraries
```

---

## 5 · Learning More

- **Architectural Decision Records** – dive into the rationale
  - [ADR-001 · Why Rust](adr/001-rust.md)
  - [ADR-003 · Infrastructure Abstractions](adr/003-infrastructure-abstractions.md)
  - [ADR-006 · Secret Management](adr/006-secret-management.md)
  - [ADR-011 · Multi-Tenant Cluster](adr/011-multi-tenant-cluster.md)

- **Extending Harmony** – write new Scores / Interprets, add hardware like OPNsense firewalls, or embed Harmony in your own tooling (`/docs`).

- **Community** – discussions and roadmap live in [GitLab issues](https://git.nationtech.io/nationtech/harmony/-/issues). PRs, ideas, and feedback are welcome!

---

## 6 · License
## License

Harmony is released under the **GNU AGPL v3**.
17  adr/agent_discovery/mdns/Cargo.toml  (new file)
@@ -0,0 +1,17 @@
[package]
name = "mdns"
edition = "2024"
version.workspace = true
readme.workspace = true
license.workspace = true

[dependencies]
mdns-sd = "0.14"
tokio = { version = "1", features = ["full"] }
futures = "0.3"
dmidecode = "0.2" # For getting the motherboard ID on the agent
log.workspace = true
env_logger.workspace = true
clap = { version = "4.5.46", features = ["derive"] }
get_if_addrs = "0.5.3"
local-ip-address = "0.6.5"
60  adr/agent_discovery/mdns/src/advertise.rs  (new file)
@@ -0,0 +1,60 @@
// harmony-agent/src/main.rs

use log::info;
use mdns_sd::{ServiceDaemon, ServiceInfo};
use std::collections::HashMap;

use crate::SERVICE_TYPE;

// The service we are advertising.
const SERVICE_PORT: u16 = 43210; // A port for the service. It needs one, even if unused.

pub async fn advertise() {
    info!("Starting Harmony Agent...");

    // Get a unique ID for this machine.
    let motherboard_id = "some motherboard id";
    let instance_name = format!("harmony-agent-{}", motherboard_id);
    info!("This agent's instance name: {}", instance_name);
    info!("Advertising with ID: {}", motherboard_id);

    // Create a new mDNS daemon.
    let mdns = ServiceDaemon::new().expect("Failed to create mDNS daemon");

    // Create a TXT record HashMap to hold our metadata.
    let mut properties = HashMap::new();
    properties.insert("id".to_string(), motherboard_id.to_string());
    properties.insert("version".to_string(), "1.0".to_string());

    // Create the service information.
    // The instance name should be unique on the network.
    let local_ip = local_ip_address::local_ip().unwrap();
    let service_info = ServiceInfo::new(
        SERVICE_TYPE,
        &instance_name,
        "harmony-host.local.", // A hostname for the service
        local_ip,
        // "0.0.0.0",
        SERVICE_PORT,
        Some(properties),
    )
    .expect("Failed to create service info");

    // Register our service with the daemon.
    mdns.register(service_info)
        .expect("Failed to register service");

    info!(
        "Service '{}' registered and now being advertised.",
        instance_name
    );
    info!("Agent is running. Press Ctrl+C to exit.");

    for iface in get_if_addrs::get_if_addrs().unwrap() {
        println!("{:#?}", iface);
    }

    // Keep the agent running indefinitely.
    tokio::signal::ctrl_c().await.unwrap();
    info!("Shutting down agent.");
}
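The agent currently hard-codes `motherboard_id`, and the crate's Cargo.toml pulls in `dmidecode` to eventually read the real value. Until that lands, a minimal placeholder on Linux could read the DMI product UUID from sysfs; the path, fallback string, and helper below are assumptions, not part of this change:

```rust
use std::fs;

// Hypothetical helper: read a stable machine identifier from Linux sysfs.
// /sys/class/dmi/id/product_uuid is usually only readable as root; fall back
// to a fixed marker when it is unavailable (containers, non-Linux hosts, ...).
fn motherboard_id() -> String {
    fs::read_to_string("/sys/class/dmi/id/product_uuid")
        .map(|s| s.trim().to_string())
        .unwrap_or_else(|_| "unknown-motherboard".to_string())
}
```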
109  adr/agent_discovery/mdns/src/discover.rs  (new file)
@@ -0,0 +1,109 @@
use mdns_sd::{ServiceDaemon, ServiceEvent};

use crate::SERVICE_TYPE;

pub async fn discover() {
    println!("Starting Harmony Master and browsing for agents...");

    // Create a new mDNS daemon.
    let mdns = ServiceDaemon::new().expect("Failed to create mDNS daemon");

    // Start browsing for the service type.
    // The receiver will be a stream of events.
    let receiver = mdns.browse(SERVICE_TYPE).expect("Failed to browse");

    println!(
        "Listening for mDNS events for '{}'. Press Ctrl+C to exit.",
        SERVICE_TYPE
    );

    std::thread::spawn(move || {
        while let Ok(event) = receiver.recv() {
            match event {
                ServiceEvent::ServiceData(resolved) => {
                    println!("Resolved a new service: {}", resolved.fullname);
                }
                other_event => {
                    println!("Received other event: {:?}", &other_event);
                }
            }
        }
    });

    // Gracefully shutdown the daemon.
    std::thread::sleep(std::time::Duration::from_secs(1000000));
    mdns.shutdown().unwrap();

    // Process events as they come in.
    // while let Ok(event) = receiver.recv_async().await {
    //     debug!("Received event {event:?}");
    //     // match event {
    //     //     ServiceEvent::ServiceFound(svc_type, fullname) => {
    //     //         println!("\n--- Agent Discovered ---");
    //     //         println!("  Service Name: {}", fullname());
    //     //         // You can now resolve this service to get its IP, port, and TXT records
    //     //         // The resolve operation is a separate network call.
    //     //         let receiver = mdns.browse(info.get_fullname()).unwrap();
    //     //         if let Ok(resolve_event) = receiver.recv_timeout(Duration::from_secs(2)) {
    //     //             if let ServiceEvent::ServiceResolved(info) = resolve_event {
    //     //                 let ip = info.get_addresses().iter().next().unwrap();
    //     //                 let port = info.get_port();
    //     //                 let motherboard_id = info.get_property("id").map_or("N/A", |v| v.val_str());
    //     //
    //     //                 println!("  IP: {}:{}", ip, port);
    //     //                 println!("  Motherboard ID: {}", motherboard_id);
    //     //                 println!("------------------------");
    //     //
    //     //                 // TODO: Add this agent to your central list of discovered hosts.
    //     //             }
    //     //         } else {
    //     //             println!("Could not resolve service '{}' in time.", info.get_fullname());
    //     //         }
    //     //     }
    //     //     ServiceEvent::ServiceRemoved(info) => {
    //     //         println!("\n--- Agent Removed ---");
    //     //         println!("  Service Name: {}", info.get_fullname());
    //     //         println!("---------------------");
    //     //         // TODO: Remove this agent from your list.
    //     //     }
    //     //     _ => {
    //     //         // We don't care about other event types for this example
    //     //     }
    //     // }
    // }
}

async fn _discover_example() {
    use mdns_sd::{ServiceDaemon, ServiceEvent};

    // Create a daemon
    let mdns = ServiceDaemon::new().expect("Failed to create daemon");

    // Use recently added `ServiceEvent::ServiceData`.
    mdns.use_service_data(true)
        .expect("Failed to use ServiceData");

    // Browse for a service type.
    let service_type = "_mdns-sd-my-test._udp.local.";
    let receiver = mdns.browse(service_type).expect("Failed to browse");

    // Receive the browse events in sync or async. Here is
    // an example of using a thread. Users can call `receiver.recv_async().await`
    // if running in async environment.
    std::thread::spawn(move || {
        while let Ok(event) = receiver.recv() {
            match event {
                ServiceEvent::ServiceData(resolved) => {
                    println!("Resolved a new service: {}", resolved.fullname);
                }
                other_event => {
                    println!("Received other event: {:?}", &other_event);
                }
            }
        }
    });

    // Gracefully shutdown the daemon.
    std::thread::sleep(std::time::Duration::from_secs(1));
    mdns.shutdown().unwrap();
}
31  adr/agent_discovery/mdns/src/main.rs  (new file)
@@ -0,0 +1,31 @@
use clap::{Parser, ValueEnum};

mod advertise;
mod discover;

#[derive(Parser, Debug)]
#[command(version, about, long_about = None)]
struct Args {
    #[arg(value_enum)]
    profile: Profiles,
}

#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, ValueEnum)]
enum Profiles {
    Advertise,
    Discover,
}

// The service type we are looking for.
const SERVICE_TYPE: &str = "_harmony._tcp.local.";

#[tokio::main]
async fn main() {
    env_logger::init();
    let args = Args::parse();

    match args.profile {
        Profiles::Advertise => advertise::advertise().await,
        Profiles::Discover => discover::discover().await,
    }
}
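With clap's default `ValueEnum` naming, the two profiles become the lowercase values `advertise` and `discover`, so the discovery spike can be exercised from two terminals (or two machines) on the same network:

```bash
# Terminal 1: advertise this machine as a Harmony agent
RUST_LOG=info cargo run -p mdns -- advertise

# Terminal 2: browse for advertised agents
RUST_LOG=info cargo run -p mdns -- discover
```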
18  brocade/Cargo.toml  (new file)
@@ -0,0 +1,18 @@
[package]
name = "brocade"
edition = "2024"
version.workspace = true
readme.workspace = true
license.workspace = true

[dependencies]
async-trait.workspace = true
harmony_types = { path = "../harmony_types" }
russh.workspace = true
russh-keys.workspace = true
tokio.workspace = true
log.workspace = true
env_logger.workspace = true
regex = "1.11.3"
harmony_secret = { path = "../harmony_secret" }
serde.workspace = true
70  brocade/examples/main.rs  (new file)
@@ -0,0 +1,70 @@
use std::net::{IpAddr, Ipv4Addr};

use brocade::BrocadeOptions;
use harmony_secret::{Secret, SecretManager};
use harmony_types::switch::PortLocation;
use serde::{Deserialize, Serialize};

#[derive(Secret, Clone, Debug, Serialize, Deserialize)]
struct BrocadeSwitchAuth {
    username: String,
    password: String,
}

#[tokio::main]
async fn main() {
    env_logger::Builder::from_env(env_logger::Env::default().default_filter_or("info")).init();

    // let ip = IpAddr::V4(Ipv4Addr::new(10, 0, 0, 250)); // old brocade @ ianlet
    let ip = IpAddr::V4(Ipv4Addr::new(192, 168, 55, 101)); // brocade @ sto1
    // let ip = IpAddr::V4(Ipv4Addr::new(192, 168, 4, 11)); // brocade @ st
    let switch_addresses = vec![ip];

    let config = SecretManager::get_or_prompt::<BrocadeSwitchAuth>()
        .await
        .unwrap();

    let brocade = brocade::init(
        &switch_addresses,
        22,
        &config.username,
        &config.password,
        Some(BrocadeOptions {
            dry_run: true,
            ..Default::default()
        }),
    )
    .await
    .expect("Brocade client failed to connect");

    let entries = brocade.get_stack_topology().await.unwrap();
    println!("Stack topology: {entries:#?}");

    let entries = brocade.get_interfaces().await.unwrap();
    println!("Interfaces: {entries:#?}");

    let version = brocade.version().await.unwrap();
    println!("Version: {version:?}");

    println!("--------------");
    let mac_addresses = brocade.get_mac_address_table().await.unwrap();
    println!("VLAN\tMAC\t\t\tPORT");
    for mac in mac_addresses {
        println!("{}\t{}\t{}", mac.vlan, mac.mac_address, mac.port);
    }

    println!("--------------");
    let channel_name = "1";
    brocade.clear_port_channel(channel_name).await.unwrap();

    println!("--------------");
    let channel_id = brocade.find_available_channel_id().await.unwrap();

    println!("--------------");
    let channel_name = "HARMONY_LAG";
    let ports = [PortLocation(2, 0, 35)];
    brocade
        .create_port_channel(channel_id, channel_name, &ports)
        .await
        .unwrap();
}
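Assuming the standard Cargo example layout, this walkthrough runs from the workspace root; the switch IP and the `BrocadeSwitchAuth` secret are lab-specific, so expect to adjust them, and `dry_run: true` suggests the LAG configuration commands are not actually applied:

```bash
# Runs brocade/examples/main.rs; credentials are obtained via SecretManager
RUST_LOG=info cargo run -p brocade --example main
```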
212
brocade/src/fast_iron.rs
Normal file
@@ -0,0 +1,212 @@
|
||||
use super::BrocadeClient;
|
||||
use crate::{
|
||||
BrocadeInfo, Error, ExecutionMode, InterSwitchLink, InterfaceInfo, MacAddressEntry,
|
||||
PortChannelId, PortOperatingMode, parse_brocade_mac_address, shell::BrocadeShell,
|
||||
};
|
||||
|
||||
use async_trait::async_trait;
|
||||
use harmony_types::switch::{PortDeclaration, PortLocation};
|
||||
use log::{debug, info};
|
||||
use regex::Regex;
|
||||
use std::{collections::HashSet, str::FromStr};
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct FastIronClient {
|
||||
shell: BrocadeShell,
|
||||
version: BrocadeInfo,
|
||||
}
|
||||
|
||||
impl FastIronClient {
|
||||
pub fn init(mut shell: BrocadeShell, version_info: BrocadeInfo) -> Self {
|
||||
shell.before_all(vec!["skip-page-display".into()]);
|
||||
shell.after_all(vec!["page".into()]);
|
||||
|
||||
Self {
|
||||
shell,
|
||||
version: version_info,
|
||||
}
|
||||
}
|
||||
|
||||
fn parse_mac_entry(&self, line: &str) -> Option<Result<MacAddressEntry, Error>> {
|
||||
debug!("[Brocade] Parsing mac address entry: {line}");
|
||||
let parts: Vec<&str> = line.split_whitespace().collect();
|
||||
if parts.len() < 3 {
|
||||
return None;
|
||||
}
|
||||
|
||||
let (vlan, mac_address, port) = match parts.len() {
|
||||
3 => (
|
||||
u16::from_str(parts[0]).ok()?,
|
||||
parse_brocade_mac_address(parts[1]).ok()?,
|
||||
parts[2].to_string(),
|
||||
),
|
||||
_ => (
|
||||
1,
|
||||
parse_brocade_mac_address(parts[0]).ok()?,
|
||||
parts[1].to_string(),
|
||||
),
|
||||
};
|
||||
|
||||
let port =
|
||||
PortDeclaration::parse(&port).map_err(|e| Error::UnexpectedError(format!("{e}")));
|
||||
|
||||
match port {
|
||||
Ok(p) => Some(Ok(MacAddressEntry {
|
||||
vlan,
|
||||
mac_address,
|
||||
port: p,
|
||||
})),
|
||||
Err(e) => Some(Err(e)),
|
||||
}
|
||||
}
|
||||
|
||||
fn parse_stack_port_entry(&self, line: &str) -> Option<Result<InterSwitchLink, Error>> {
|
||||
debug!("[Brocade] Parsing stack port entry: {line}");
|
||||
let parts: Vec<&str> = line.split_whitespace().collect();
|
||||
if parts.len() < 10 {
|
||||
return None;
|
||||
}
|
||||
|
||||
let local_port = PortLocation::from_str(parts[0]).ok()?;
|
||||
|
||||
Some(Ok(InterSwitchLink {
|
||||
local_port,
|
||||
remote_port: None,
|
||||
}))
|
||||
}
|
||||
|
||||
fn build_port_channel_commands(
|
||||
&self,
|
||||
channel_id: PortChannelId,
|
||||
channel_name: &str,
|
||||
ports: &[PortLocation],
|
||||
) -> Vec<String> {
|
||||
let mut commands = vec![
|
||||
"configure terminal".to_string(),
|
||||
format!("lag {channel_name} static id {channel_id}"),
|
||||
];
|
||||
|
||||
for port in ports {
|
||||
commands.push(format!("ports ethernet {port}"));
|
||||
}
|
||||
|
||||
commands.push(format!("primary-port {}", ports[0]));
|
||||
commands.push("deploy".into());
|
||||
commands.push("exit".into());
|
||||
commands.push("write memory".into());
|
||||
commands.push("exit".into());
|
||||
|
||||
commands
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl BrocadeClient for FastIronClient {
|
||||
async fn version(&self) -> Result<BrocadeInfo, Error> {
|
||||
Ok(self.version.clone())
|
||||
}
|
||||
|
||||
async fn get_mac_address_table(&self) -> Result<Vec<MacAddressEntry>, Error> {
|
||||
info!("[Brocade] Showing MAC address table...");
|
||||
|
||||
let output = self
|
||||
.shell
|
||||
.run_command("show mac-address", ExecutionMode::Regular)
|
||||
.await?;
|
||||
|
||||
output
|
||||
.lines()
|
||||
.skip(2)
|
||||
.filter_map(|line| self.parse_mac_entry(line))
|
||||
.collect()
|
||||
}
|
||||
|
||||
async fn get_stack_topology(&self) -> Result<Vec<InterSwitchLink>, Error> {
|
||||
let output = self
|
||||
.shell
|
||||
.run_command("show interface stack-ports", crate::ExecutionMode::Regular)
|
||||
.await?;
|
||||
|
||||
output
|
||||
.lines()
|
||||
.skip(1)
|
||||
.filter_map(|line| self.parse_stack_port_entry(line))
|
||||
.collect()
|
||||
}
|
||||
|
||||
async fn get_interfaces(&self) -> Result<Vec<InterfaceInfo>, Error> {
|
||||
todo!()
|
||||
}
|
||||
|
||||
async fn configure_interfaces(
|
||||
&self,
|
||||
_interfaces: Vec<(String, PortOperatingMode)>,
|
||||
) -> Result<(), Error> {
|
||||
todo!()
|
||||
}
|
||||
|
||||
async fn find_available_channel_id(&self) -> Result<PortChannelId, Error> {
|
||||
info!("[Brocade] Finding next available channel id...");
|
||||
|
||||
let output = self
|
||||
.shell
|
||||
.run_command("show lag", ExecutionMode::Regular)
|
||||
.await?;
|
||||
let re = Regex::new(r"=== LAG .* ID\s+(\d+)").expect("Invalid regex");
|
||||
|
||||
let used_ids: HashSet<u8> = output
|
||||
.lines()
|
||||
.filter_map(|line| {
|
||||
re.captures(line)
|
||||
.and_then(|c| c.get(1))
|
||||
.and_then(|id_match| id_match.as_str().parse().ok())
|
||||
})
|
||||
.collect();
|
||||
|
||||
let mut next_id: u8 = 1;
|
||||
loop {
|
||||
if !used_ids.contains(&next_id) {
|
||||
break;
|
||||
}
|
||||
next_id += 1;
|
||||
}
|
||||
|
||||
info!("[Brocade] Found channel id: {next_id}");
|
||||
Ok(next_id)
|
||||
}
|
||||
|
||||
async fn create_port_channel(
|
||||
&self,
|
||||
channel_id: PortChannelId,
|
||||
channel_name: &str,
|
||||
ports: &[PortLocation],
|
||||
) -> Result<(), Error> {
|
||||
info!(
|
||||
"[Brocade] Configuring port-channel '{channel_name} {channel_id}' with ports: {ports:?}"
|
||||
);
|
||||
|
||||
let commands = self.build_port_channel_commands(channel_id, channel_name, ports);
|
||||
self.shell
|
||||
.run_commands(commands, ExecutionMode::Privileged)
|
||||
.await?;
|
||||
|
||||
info!("[Brocade] Port-channel '{channel_name}' configured.");
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn clear_port_channel(&self, channel_name: &str) -> Result<(), Error> {
|
||||
info!("[Brocade] Clearing port-channel: {channel_name}");
|
||||
|
||||
let commands = vec![
|
||||
"configure terminal".to_string(),
|
||||
format!("no lag {channel_name}"),
|
||||
"write memory".to_string(),
|
||||
];
|
||||
self.shell
|
||||
.run_commands(commands, ExecutionMode::Privileged)
|
||||
.await?;
|
||||
|
||||
info!("[Brocade] Port-channel '{channel_name}' cleared.");
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
338
brocade/src/lib.rs
Normal file
@@ -0,0 +1,338 @@
|
||||
use std::net::IpAddr;
|
||||
use std::{
|
||||
fmt::{self, Display},
|
||||
time::Duration,
|
||||
};
|
||||
|
||||
use crate::network_operating_system::NetworkOperatingSystemClient;
|
||||
use crate::{
|
||||
fast_iron::FastIronClient,
|
||||
shell::{BrocadeSession, BrocadeShell},
|
||||
};
|
||||
|
||||
use async_trait::async_trait;
|
||||
use harmony_types::net::MacAddress;
|
||||
use harmony_types::switch::{PortDeclaration, PortLocation};
|
||||
use regex::Regex;
|
||||
|
||||
mod fast_iron;
|
||||
mod network_operating_system;
|
||||
mod shell;
|
||||
mod ssh;
|
||||
|
||||
#[derive(Default, Clone, Debug)]
|
||||
pub struct BrocadeOptions {
|
||||
pub dry_run: bool,
|
||||
pub ssh: ssh::SshOptions,
|
||||
pub timeouts: TimeoutConfig,
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct TimeoutConfig {
|
||||
pub shell_ready: Duration,
|
||||
pub command_execution: Duration,
|
||||
pub command_output: Duration,
|
||||
pub cleanup: Duration,
|
||||
pub message_wait: Duration,
|
||||
}
|
||||
|
||||
impl Default for TimeoutConfig {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
shell_ready: Duration::from_secs(10),
|
||||
command_execution: Duration::from_secs(60), // Commands like `deploy` (for a LAG) can take a while
|
||||
command_output: Duration::from_secs(5), // Delay to start logging "waiting for command output"
|
||||
cleanup: Duration::from_secs(10),
|
||||
message_wait: Duration::from_millis(500),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
enum ExecutionMode {
|
||||
Regular,
|
||||
Privileged,
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct BrocadeInfo {
|
||||
os: BrocadeOs,
|
||||
version: String,
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
pub enum BrocadeOs {
|
||||
NetworkOperatingSystem,
|
||||
FastIron,
|
||||
Unknown,
|
||||
}
|
||||
|
||||
#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Clone)]
|
||||
pub struct MacAddressEntry {
|
||||
pub vlan: u16,
|
||||
pub mac_address: MacAddress,
|
||||
pub port: PortDeclaration,
|
||||
}
|
||||
|
||||
pub type PortChannelId = u8;
|
||||
|
||||
/// Represents a single physical or logical link connecting two switches within a stack or fabric.
|
||||
///
|
||||
/// This structure provides a standardized view of the topology regardless of the
|
||||
/// underlying Brocade OS configuration (stacking vs. fabric).
|
||||
#[derive(Debug, PartialEq, Eq, Clone)]
|
||||
pub struct InterSwitchLink {
|
||||
/// The local port on the switch where the topology command was run.
|
||||
pub local_port: PortLocation,
|
||||
/// The port on the directly connected neighboring switch.
|
||||
pub remote_port: Option<PortLocation>,
|
||||
}
|
||||
|
||||
/// Represents the key running configuration status of a single switch interface.
|
||||
#[derive(Debug, PartialEq, Eq, Clone)]
|
||||
pub struct InterfaceInfo {
|
||||
/// The full configuration name (e.g., "TenGigabitEthernet 1/0/1", "FortyGigabitEthernet 2/0/2").
|
||||
pub name: String,
|
||||
/// The physical location of the interface.
|
||||
pub port_location: PortLocation,
|
||||
/// The parsed type and name prefix of the interface.
|
||||
pub interface_type: InterfaceType,
|
||||
/// The primary configuration mode defining the interface's behavior (L2, L3, Fabric).
|
||||
pub operating_mode: Option<PortOperatingMode>,
|
||||
/// Indicates the current state of the interface.
|
||||
pub status: InterfaceStatus,
|
||||
}
|
||||
|
||||
/// Categorizes the functional type of a switch interface.
|
||||
#[derive(Debug, PartialEq, Eq, Clone)]
|
||||
pub enum InterfaceType {
|
||||
/// Physical or virtual Ethernet interface (e.g., TenGigabitEthernet, FortyGigabitEthernet).
|
||||
Ethernet(String),
|
||||
}
|
||||
|
||||
impl fmt::Display for InterfaceType {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
match self {
|
||||
InterfaceType::Ethernet(name) => write!(f, "{name}"),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Defines the primary configuration mode of a switch interface, representing mutually exclusive roles.
|
||||
#[derive(Debug, PartialEq, Eq, Clone)]
|
||||
pub enum PortOperatingMode {
|
||||
/// The interface is explicitly configured for Brocade fabric roles (ISL or Trunk enabled).
|
||||
Fabric,
|
||||
/// The interface is configured for standard Layer 2 switching as Trunk port (`switchport mode trunk`).
|
||||
Trunk,
|
||||
/// The interface is configured for standard Layer 2 switching as Access port (`switchport` without trunk mode).
|
||||
Access,
|
||||
}
|
||||
|
||||
/// Defines the possible status of an interface.
|
||||
#[derive(Debug, PartialEq, Eq, Clone)]
|
||||
pub enum InterfaceStatus {
|
||||
/// The interface is connected.
|
||||
Connected,
|
||||
/// The interface is not connected and is not expected to be.
|
||||
NotConnected,
|
||||
/// The interface is not connected but is expected to be (configured with `no shutdown`).
|
||||
SfpAbsent,
|
||||
}
|
||||
|
||||
pub async fn init(
|
||||
ip_addresses: &[IpAddr],
|
||||
port: u16,
|
||||
username: &str,
|
||||
password: &str,
|
||||
options: Option<BrocadeOptions>,
|
||||
) -> Result<Box<dyn BrocadeClient + Send + Sync>, Error> {
|
||||
let shell = BrocadeShell::init(ip_addresses, port, username, password, options).await?;
|
||||
|
||||
let version_info = shell
|
||||
.with_session(ExecutionMode::Regular, |session| {
|
||||
Box::pin(get_brocade_info(session))
|
||||
})
|
||||
.await?;
|
||||
|
||||
Ok(match version_info.os {
|
||||
BrocadeOs::FastIron => Box::new(FastIronClient::init(shell, version_info)),
|
||||
BrocadeOs::NetworkOperatingSystem => {
|
||||
Box::new(NetworkOperatingSystemClient::init(shell, version_info))
|
||||
}
|
||||
BrocadeOs::Unknown => todo!(),
|
||||
})
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
pub trait BrocadeClient: std::fmt::Debug {
|
||||
/// Retrieves the operating system and version details from the connected Brocade switch.
|
||||
///
|
||||
/// This is typically the first call made after establishing a connection to determine
|
||||
/// the switch OS family (e.g., FastIron, NOS) for feature compatibility.
|
||||
///
|
||||
/// # Returns
|
||||
///
|
||||
/// A `BrocadeInfo` structure containing parsed OS type and version string.
|
||||
async fn version(&self) -> Result<BrocadeInfo, Error>;
|
||||
|
||||
/// Retrieves the dynamically learned MAC address table from the switch.
|
||||
///
|
||||
/// This is crucial for discovering where specific network endpoints (MAC addresses)
|
||||
/// are currently located on the physical ports.
|
||||
///
|
||||
/// # Returns
|
||||
///
|
||||
/// A vector of `MacAddressEntry`, where each entry typically contains VLAN, MAC address,
|
||||
/// and the associated port name/index.
|
||||
async fn get_mac_address_table(&self) -> Result<Vec<MacAddressEntry>, Error>;
|
||||
|
||||
/// Derives the physical connections used to link multiple switches together
|
||||
/// to form a single logical entity (stack, fabric, etc.).
|
||||
///
|
||||
/// This abstracts the underlying configuration (e.g., stack ports, fabric ports)
|
||||
/// to return a standardized view of the topology.
|
||||
///
|
||||
/// # Returns
|
||||
///
|
||||
/// A vector of `InterSwitchLink` structs detailing which ports are used for stacking/fabric.
|
||||
/// If the switch is not stacked, returns an empty vector.
|
||||
async fn get_stack_topology(&self) -> Result<Vec<InterSwitchLink>, Error>;
|
||||
|
||||
/// Retrieves the status for all interfaces
|
||||
///
|
||||
/// # Returns
|
||||
///
|
||||
/// A vector of `InterfaceInfo` structures.
|
||||
async fn get_interfaces(&self) -> Result<Vec<InterfaceInfo>, Error>;
|
||||
|
||||
/// Configures a set of interfaces to be operated with a specified mode (access ports, ISL, etc.).
|
||||
async fn configure_interfaces(
|
||||
&self,
|
||||
interfaces: Vec<(String, PortOperatingMode)>,
|
||||
) -> Result<(), Error>;
|
||||
|
||||
/// Scans the existing configuration to find the next available (unused)
|
||||
/// Port-Channel ID (`lag` or `trunk`) for assignment.
|
||||
///
|
||||
/// # Returns
|
||||
///
|
||||
/// The smallest, unassigned `PortChannelId` within the supported range.
|
||||
async fn find_available_channel_id(&self) -> Result<PortChannelId, Error>;
|
||||
|
||||
/// Creates and configures a new Port-Channel (Link Aggregation Group or LAG)
|
||||
/// using the specified channel ID and ports.
|
||||
///
|
||||
/// The resulting configuration must be persistent (saved to startup-config).
|
||||
/// Assumes a static LAG configuration mode unless specified otherwise by the implementation.
|
||||
///
|
||||
/// # Parameters
|
||||
///
|
||||
/// * `channel_id`: The ID (e.g., 1-128) for the logical port channel.
|
||||
/// * `channel_name`: A descriptive name for the LAG (used in configuration context).
|
||||
/// * `ports`: A slice of `PortLocation` structs defining the physical member ports.
|
||||
async fn create_port_channel(
|
||||
&self,
|
||||
channel_id: PortChannelId,
|
||||
channel_name: &str,
|
||||
ports: &[PortLocation],
|
||||
) -> Result<(), Error>;
|
||||
|
||||
/// Removes all configuration associated with the specified Port-Channel name.
|
||||
///
|
||||
/// This operation should be idempotent; attempting to clear a non-existent
|
||||
/// channel should succeed (or return a benign error).
|
||||
///
|
||||
/// # Parameters
|
||||
///
|
||||
/// * `channel_name`: The name of the Port-Channel (LAG) to delete.
|
||||
///
|
||||
async fn clear_port_channel(&self, channel_name: &str) -> Result<(), Error>;
|
||||
}
|
||||
|
||||
async fn get_brocade_info(session: &mut BrocadeSession) -> Result<BrocadeInfo, Error> {
|
||||
let output = session.run_command("show version").await?;
|
||||
|
||||
if output.contains("Network Operating System") {
|
||||
let re = Regex::new(r"Network Operating System Version:\s*(?P<version>[a-zA-Z0-9.\-]+)")
|
||||
.expect("Invalid regex");
|
||||
let version = re
|
||||
.captures(&output)
|
||||
.and_then(|cap| cap.name("version"))
|
||||
.map(|m| m.as_str().to_string())
|
||||
.unwrap_or_default();
|
||||
|
||||
return Ok(BrocadeInfo {
|
||||
os: BrocadeOs::NetworkOperatingSystem,
|
||||
version,
|
||||
});
|
||||
} else if output.contains("ICX") {
|
||||
let re = Regex::new(r"(?m)^\s*SW: Version\s*(?P<version>[a-zA-Z0-9.\-]+)")
|
||||
.expect("Invalid regex");
|
||||
let version = re
|
||||
.captures(&output)
|
||||
.and_then(|cap| cap.name("version"))
|
||||
.map(|m| m.as_str().to_string())
|
||||
.unwrap_or_default();
|
||||
|
||||
return Ok(BrocadeInfo {
|
||||
os: BrocadeOs::FastIron,
|
||||
version,
|
||||
});
|
||||
}
|
||||
|
||||
Err(Error::UnexpectedError("Unknown Brocade OS version".into()))
|
||||
}
|
||||
|
||||
fn parse_brocade_mac_address(value: &str) -> Result<MacAddress, String> {
|
||||
let cleaned_mac = value.replace('.', "");
|
||||
|
||||
if cleaned_mac.len() != 12 {
|
||||
return Err(format!("Invalid MAC address: {value}"));
|
||||
}
|
||||
|
||||
let mut bytes = [0u8; 6];
|
||||
for (i, pair) in cleaned_mac.as_bytes().chunks(2).enumerate() {
|
||||
let byte_str = std::str::from_utf8(pair).map_err(|_| "Invalid UTF-8")?;
|
||||
bytes[i] =
|
||||
u8::from_str_radix(byte_str, 16).map_err(|_| format!("Invalid hex in MAC: {value}"))?;
|
||||
}
|
||||
|
||||
Ok(MacAddress(bytes))
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub enum Error {
|
||||
NetworkError(String),
|
||||
AuthenticationError(String),
|
||||
ConfigurationError(String),
|
||||
TimeoutError(String),
|
||||
UnexpectedError(String),
|
||||
CommandError(String),
|
||||
}
|
||||
|
||||
impl Display for Error {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
match self {
|
||||
Error::NetworkError(msg) => write!(f, "Network error: {msg}"),
|
||||
Error::AuthenticationError(msg) => write!(f, "Authentication error: {msg}"),
|
||||
Error::ConfigurationError(msg) => write!(f, "Configuration error: {msg}"),
|
||||
Error::TimeoutError(msg) => write!(f, "Timeout error: {msg}"),
|
||||
Error::UnexpectedError(msg) => write!(f, "Unexpected error: {msg}"),
|
||||
Error::CommandError(msg) => write!(f, "{msg}"),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl From<Error> for String {
|
||||
fn from(val: Error) -> Self {
|
||||
format!("{val}")
|
||||
}
|
||||
}
|
||||
|
||||
impl std::error::Error for Error {}
|
||||
|
||||
impl From<russh::Error> for Error {
|
||||
fn from(value: russh::Error) -> Self {
|
||||
Error::NetworkError(format!("Russh client error: {value}"))
|
||||
}
|
||||
}
|
||||
333
brocade/src/network_operating_system.rs
Normal file
@@ -0,0 +1,333 @@
|
||||
use std::str::FromStr;
|
||||
|
||||
use async_trait::async_trait;
|
||||
use harmony_types::switch::{PortDeclaration, PortLocation};
|
||||
use log::{debug, info};
|
||||
use regex::Regex;
|
||||
|
||||
use crate::{
|
||||
BrocadeClient, BrocadeInfo, Error, ExecutionMode, InterSwitchLink, InterfaceInfo,
|
||||
InterfaceStatus, InterfaceType, MacAddressEntry, PortChannelId, PortOperatingMode,
|
||||
parse_brocade_mac_address, shell::BrocadeShell,
|
||||
};
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct NetworkOperatingSystemClient {
|
||||
shell: BrocadeShell,
|
||||
version: BrocadeInfo,
|
||||
}
|
||||
|
||||
impl NetworkOperatingSystemClient {
|
||||
pub fn init(mut shell: BrocadeShell, version_info: BrocadeInfo) -> Self {
|
||||
shell.before_all(vec!["terminal length 0".into()]);
|
||||
|
||||
Self {
|
||||
shell,
|
||||
version: version_info,
|
||||
}
|
||||
}
|
||||
|
||||
fn parse_mac_entry(&self, line: &str) -> Option<Result<MacAddressEntry, Error>> {
|
||||
debug!("[Brocade] Parsing mac address entry: {line}");
|
||||
let parts: Vec<&str> = line.split_whitespace().collect();
|
||||
if parts.len() < 5 {
|
||||
return None;
|
||||
}
|
||||
|
||||
let (vlan, mac_address, port) = match parts.len() {
|
||||
5 => (
|
||||
u16::from_str(parts[0]).ok()?,
|
||||
parse_brocade_mac_address(parts[1]).ok()?,
|
||||
parts[4].to_string(),
|
||||
),
|
||||
_ => (
|
||||
u16::from_str(parts[0]).ok()?,
|
||||
parse_brocade_mac_address(parts[1]).ok()?,
|
||||
parts[5].to_string(),
|
||||
),
|
||||
};
|
||||
|
||||
let port =
|
||||
PortDeclaration::parse(&port).map_err(|e| Error::UnexpectedError(format!("{e}")));
|
||||
|
||||
match port {
|
||||
Ok(p) => Some(Ok(MacAddressEntry {
|
||||
vlan,
|
||||
mac_address,
|
||||
port: p,
|
||||
})),
|
||||
Err(e) => Some(Err(e)),
|
||||
}
|
||||
}
|
||||
|
||||
fn parse_inter_switch_link_entry(&self, line: &str) -> Option<Result<InterSwitchLink, Error>> {
|
||||
debug!("[Brocade] Parsing inter switch link entry: {line}");
|
||||
let parts: Vec<&str> = line.split_whitespace().collect();
|
||||
if parts.len() < 10 {
|
||||
return None;
|
||||
}
|
||||
|
||||
let local_port = PortLocation::from_str(parts[2]).ok()?;
|
||||
let remote_port = PortLocation::from_str(parts[5]).ok()?;
|
||||
|
||||
Some(Ok(InterSwitchLink {
|
||||
local_port,
|
||||
remote_port: Some(remote_port),
|
||||
}))
|
||||
}
|
||||
|
||||
fn parse_interface_status_entry(&self, line: &str) -> Option<Result<InterfaceInfo, Error>> {
|
||||
debug!("[Brocade] Parsing interface status entry: {line}");
|
||||
let parts: Vec<&str> = line.split_whitespace().collect();
|
||||
if parts.len() < 6 {
|
||||
return None;
|
||||
}
|
||||
|
||||
let interface_type = match parts[0] {
|
||||
"Fo" => InterfaceType::Ethernet("FortyGigabitEthernet".to_string()),
|
||||
"Te" => InterfaceType::Ethernet("TenGigabitEthernet".to_string()),
|
||||
_ => return None,
|
||||
};
|
||||
let port_location = PortLocation::from_str(parts[1]).ok()?;
|
||||
let status = match parts[2] {
|
||||
"connected" => InterfaceStatus::Connected,
|
||||
"notconnected" => InterfaceStatus::NotConnected,
|
||||
"sfpAbsent" => InterfaceStatus::SfpAbsent,
|
||||
_ => return None,
|
||||
};
|
||||
let operating_mode = match parts[3] {
|
||||
"ISL" => Some(PortOperatingMode::Fabric),
|
||||
"Trunk" => Some(PortOperatingMode::Trunk),
|
||||
"Access" => Some(PortOperatingMode::Access),
|
||||
"--" => None,
|
||||
_ => return None,
|
||||
};
|
||||
|
||||
Some(Ok(InterfaceInfo {
|
||||
name: format!("{interface_type} {port_location}"),
|
||||
port_location,
|
||||
interface_type,
|
||||
operating_mode,
|
||||
status,
|
||||
}))
|
||||
}
|
||||
|
||||
fn map_configure_interfaces_error(&self, err: Error) -> Error {
|
||||
debug!("[Brocade] {err}");
|
||||
|
||||
if let Error::CommandError(message) = &err {
|
||||
if message.contains("switchport")
|
||||
&& message.contains("Cannot configure aggregator member")
|
||||
{
|
||||
let re = Regex::new(r"\(conf-if-([a-zA-Z]+)-([\d/]+)\)#").unwrap();
|
||||
|
||||
if let Some(caps) = re.captures(message) {
|
||||
let interface_type = &caps[1];
|
||||
let port_location = &caps[2];
|
||||
let interface = format!("{interface_type} {port_location}");
|
||||
|
||||
return Error::CommandError(format!(
|
||||
"Cannot configure interface '{interface}', it is a member of a port-channel (LAG)"
|
||||
));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
err
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl BrocadeClient for NetworkOperatingSystemClient {
|
||||
async fn version(&self) -> Result<BrocadeInfo, Error> {
|
||||
Ok(self.version.clone())
|
||||
}
|
||||
|
||||
async fn get_mac_address_table(&self) -> Result<Vec<MacAddressEntry>, Error> {
|
||||
let output = self
|
||||
.shell
|
||||
.run_command("show mac-address-table", ExecutionMode::Regular)
|
||||
.await?;
|
||||
|
||||
output
|
||||
.lines()
|
||||
.skip(1)
|
||||
.filter_map(|line| self.parse_mac_entry(line))
|
||||
.collect()
|
||||
}
|
||||
|
||||
async fn get_stack_topology(&self) -> Result<Vec<InterSwitchLink>, Error> {
|
||||
let output = self
|
||||
.shell
|
||||
.run_command("show fabric isl", ExecutionMode::Regular)
|
||||
.await?;
|
||||
|
||||
output
|
||||
.lines()
|
||||
.skip(6)
|
||||
.filter_map(|line| self.parse_inter_switch_link_entry(line))
|
||||
.collect()
|
||||
}
|
||||
|
||||
async fn get_interfaces(&self) -> Result<Vec<InterfaceInfo>, Error> {
|
||||
let output = self
|
||||
.shell
|
||||
.run_command(
|
||||
"show interface status rbridge-id all",
|
||||
ExecutionMode::Regular,
|
||||
)
|
||||
.await?;
|
||||
|
||||
output
|
||||
.lines()
|
||||
.skip(2)
|
||||
.filter_map(|line| self.parse_interface_status_entry(line))
|
||||
.collect()
|
||||
}
|
||||
|
||||
async fn configure_interfaces(
|
||||
&self,
|
||||
interfaces: Vec<(String, PortOperatingMode)>,
|
||||
) -> Result<(), Error> {
|
||||
info!("[Brocade] Configuring {} interface(s)...", interfaces.len());
|
||||
|
||||
let mut commands = vec!["configure terminal".to_string()];
|
||||
|
||||
for interface in interfaces {
|
||||
commands.push(format!("interface {}", interface.0));
|
||||
|
||||
match interface.1 {
|
||||
PortOperatingMode::Fabric => {
|
||||
commands.push("fabric isl enable".into());
|
||||
commands.push("fabric trunk enable".into());
|
||||
}
|
||||
PortOperatingMode::Trunk => {
|
||||
commands.push("switchport".into());
|
||||
commands.push("switchport mode trunk".into());
|
||||
commands.push("no spanning-tree shutdown".into());
|
||||
commands.push("no fabric isl enable".into());
|
||||
commands.push("no fabric trunk enable".into());
|
||||
}
|
||||
PortOperatingMode::Access => {
|
||||
commands.push("switchport".into());
|
||||
commands.push("switchport mode access".into());
|
||||
commands.push("switchport access vlan 1".into());
|
||||
commands.push("no spanning-tree shutdown".into());
|
||||
commands.push("no fabric isl enable".into());
|
||||
commands.push("no fabric trunk enable".into());
|
||||
}
|
||||
}
|
||||
|
||||
commands.push("no shutdown".into());
|
||||
commands.push("exit".into());
|
||||
}
|
||||
|
||||
self.shell
|
||||
.run_commands(commands, ExecutionMode::Regular)
|
||||
.await
|
||||
.map_err(|err| self.map_configure_interfaces_error(err))?;
|
||||
|
||||
info!("[Brocade] Interfaces configured.");
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn find_available_channel_id(&self) -> Result<PortChannelId, Error> {
|
||||
info!("[Brocade] Finding next available channel id...");
|
||||
|
||||
let output = self
|
||||
.shell
|
||||
.run_command("show port-channel summary", ExecutionMode::Regular)
|
||||
.await?;
|
||||
|
||||
let used_ids: Vec<u8> = output
|
||||
.lines()
|
||||
.skip(6)
|
||||
.filter_map(|line| {
|
||||
let parts: Vec<&str> = line.split_whitespace().collect();
|
||||
if parts.len() < 8 {
|
||||
return None;
|
||||
}
|
||||
|
||||
u8::from_str(parts[0]).ok()
|
||||
})
|
||||
.collect();
|
||||
|
||||
let mut next_id: u8 = 1;
|
||||
loop {
|
||||
if !used_ids.contains(&next_id) {
|
||||
break;
|
||||
}
|
||||
next_id += 1;
|
||||
}
|
||||
|
||||
info!("[Brocade] Found channel id: {next_id}");
|
||||
Ok(next_id)
|
||||
}
|
||||
|
||||
async fn create_port_channel(
|
||||
&self,
|
||||
channel_id: PortChannelId,
|
||||
channel_name: &str,
|
||||
ports: &[PortLocation],
|
||||
) -> Result<(), Error> {
|
||||
info!(
|
||||
"[Brocade] Configuring port-channel '{channel_id} {channel_name}' with ports: {}",
|
||||
ports
|
||||
.iter()
|
||||
.map(|p| format!("{p}"))
|
||||
.collect::<Vec<String>>()
|
||||
.join(", ")
|
||||
);
|
||||
|
||||
let interfaces = self.get_interfaces().await?;
|
||||
|
||||
let mut commands = vec![
|
||||
"configure terminal".into(),
|
||||
format!("interface port-channel {}", channel_id),
|
||||
"no shutdown".into(),
|
||||
"exit".into(),
|
||||
];
|
||||
|
||||
for port in ports {
|
||||
let interface = interfaces.iter().find(|i| i.port_location == *port);
|
||||
let Some(interface) = interface else {
|
||||
continue;
|
||||
};
|
||||
|
||||
commands.push(format!("interface {}", interface.name));
|
||||
commands.push("no switchport".into());
|
||||
commands.push("no ip address".into());
|
||||
commands.push("no fabric isl enable".into());
|
||||
commands.push("no fabric trunk enable".into());
|
||||
commands.push(format!("channel-group {channel_id} mode active"));
|
||||
commands.push("no shutdown".into());
|
||||
commands.push("exit".into());
|
||||
}
|
||||
|
||||
self.shell
|
||||
.run_commands(commands, ExecutionMode::Regular)
|
||||
.await?;
|
||||
|
||||
info!("[Brocade] Port-channel '{channel_name}' configured.");
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn clear_port_channel(&self, channel_name: &str) -> Result<(), Error> {
|
||||
info!("[Brocade] Clearing port-channel: {channel_name}");
|
||||
|
||||
let commands = vec![
|
||||
"configure terminal".into(),
|
||||
format!("no interface port-channel {}", channel_name),
|
||||
"exit".into(),
|
||||
];
|
||||
|
||||
self.shell
|
||||
.run_commands(commands, ExecutionMode::Regular)
|
||||
.await?;
|
||||
|
||||
info!("[Brocade] Port-channel '{channel_name}' cleared.");
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
370
brocade/src/shell.rs
Normal file
@@ -0,0 +1,370 @@
|
||||
use std::net::IpAddr;
|
||||
use std::time::Duration;
|
||||
use std::time::Instant;
|
||||
|
||||
use crate::BrocadeOptions;
|
||||
use crate::Error;
|
||||
use crate::ExecutionMode;
|
||||
use crate::TimeoutConfig;
|
||||
use crate::ssh;
|
||||
|
||||
use log::debug;
|
||||
use log::info;
|
||||
use russh::ChannelMsg;
|
||||
use tokio::time::timeout;
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct BrocadeShell {
|
||||
ip: IpAddr,
|
||||
port: u16,
|
||||
username: String,
|
||||
password: String,
|
||||
options: BrocadeOptions,
|
||||
before_all_commands: Vec<String>,
|
||||
after_all_commands: Vec<String>,
|
||||
}
|
||||
|
||||
impl BrocadeShell {
|
||||
pub async fn init(
|
||||
ip_addresses: &[IpAddr],
|
||||
port: u16,
|
||||
username: &str,
|
||||
password: &str,
|
||||
options: Option<BrocadeOptions>,
|
||||
) -> Result<Self, Error> {
|
||||
let ip = ip_addresses
|
||||
.first()
|
||||
.ok_or_else(|| Error::ConfigurationError("No IP addresses provided".to_string()))?;
|
||||
|
||||
let base_options = options.unwrap_or_default();
|
||||
let options = ssh::try_init_client(username, password, ip, base_options).await?;
|
||||
|
||||
Ok(Self {
|
||||
ip: *ip,
|
||||
port,
|
||||
username: username.to_string(),
|
||||
password: password.to_string(),
|
||||
before_all_commands: vec![],
|
||||
after_all_commands: vec![],
|
||||
options,
|
||||
})
|
||||
}
|
||||
|
||||
pub async fn open_session(&self, mode: ExecutionMode) -> Result<BrocadeSession, Error> {
|
||||
BrocadeSession::open(
|
||||
self.ip,
|
||||
self.port,
|
||||
&self.username,
|
||||
&self.password,
|
||||
self.options.clone(),
|
||||
mode,
|
||||
)
|
||||
.await
|
||||
}
|
||||
|
||||
pub async fn with_session<F, R>(&self, mode: ExecutionMode, callback: F) -> Result<R, Error>
|
||||
where
|
||||
F: FnOnce(
|
||||
&mut BrocadeSession,
|
||||
) -> std::pin::Pin<
|
||||
Box<dyn std::future::Future<Output = Result<R, Error>> + Send + '_>,
|
||||
>,
|
||||
{
|
||||
let mut session = self.open_session(mode).await?;
|
||||
|
||||
let _ = session.run_commands(self.before_all_commands.clone()).await;
|
||||
let result = callback(&mut session).await;
|
||||
let _ = session.run_commands(self.after_all_commands.clone()).await;
|
||||
|
||||
session.close().await?;
|
||||
result
|
||||
}
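// Usage sketch (illustrative, not part of this diff): callers hand `with_session` a
// closure that returns a boxed future borrowing the session, for example:
//
//     let version = shell
//         .with_session(ExecutionMode::Regular, |session| {
//             Box::pin(async move { session.run_command("show version").await })
//         })
//         .await?;
//
// The command hooks registered via `before_all` / `after_all` run around the
// callback automatically.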
|
||||
|
||||
pub async fn run_command(&self, command: &str, mode: ExecutionMode) -> Result<String, Error> {
|
||||
let mut session = self.open_session(mode).await?;
|
||||
|
||||
let _ = session.run_commands(self.before_all_commands.clone()).await;
|
||||
let result = session.run_command(command).await;
|
||||
let _ = session.run_commands(self.after_all_commands.clone()).await;
|
||||
|
||||
session.close().await?;
|
||||
result
|
||||
}
|
||||
|
||||
pub async fn run_commands(
|
||||
&self,
|
||||
commands: Vec<String>,
|
||||
mode: ExecutionMode,
|
||||
) -> Result<(), Error> {
|
||||
let mut session = self.open_session(mode).await?;
|
||||
|
||||
let _ = session.run_commands(self.before_all_commands.clone()).await;
|
||||
let result = session.run_commands(commands).await;
|
||||
let _ = session.run_commands(self.after_all_commands.clone()).await;
|
||||
|
||||
session.close().await?;
|
||||
result
|
||||
}
|
||||
|
||||
pub fn before_all(&mut self, commands: Vec<String>) {
|
||||
self.before_all_commands = commands;
|
||||
}
|
||||
|
||||
pub fn after_all(&mut self, commands: Vec<String>) {
|
||||
self.after_all_commands = commands;
|
||||
}
|
||||
}
|
||||
|
||||
pub struct BrocadeSession {
|
||||
pub channel: russh::Channel<russh::client::Msg>,
|
||||
pub mode: ExecutionMode,
|
||||
pub options: BrocadeOptions,
|
||||
}
|
||||
|
||||
impl BrocadeSession {
|
||||
pub async fn open(
|
||||
ip: IpAddr,
|
||||
port: u16,
|
||||
username: &str,
|
||||
password: &str,
|
||||
options: BrocadeOptions,
|
||||
mode: ExecutionMode,
|
||||
) -> Result<Self, Error> {
|
||||
let client = ssh::create_client(ip, port, username, password, &options).await?;
|
||||
let mut channel = client.channel_open_session().await?;
|
||||
|
||||
channel
|
||||
.request_pty(false, "vt100", 80, 24, 0, 0, &[])
|
||||
.await?;
|
||||
channel.request_shell(false).await?;
|
||||
|
||||
wait_for_shell_ready(&mut channel, &options.timeouts).await?;
|
||||
|
||||
if let ExecutionMode::Privileged = mode {
|
||||
try_elevate_session(&mut channel, username, password, &options.timeouts).await?;
|
||||
}
|
||||
|
||||
Ok(Self {
|
||||
channel,
|
||||
mode,
|
||||
options,
|
||||
})
|
||||
}
|
||||
|
||||
pub async fn close(&mut self) -> Result<(), Error> {
|
||||
debug!("[Brocade] Closing session...");
|
||||
|
||||
self.channel.data(&b"exit\n"[..]).await?;
|
||||
if let ExecutionMode::Privileged = self.mode {
|
||||
self.channel.data(&b"exit\n"[..]).await?;
|
||||
}
|
||||
|
||||
let start = Instant::now();
|
||||
while start.elapsed() < self.options.timeouts.cleanup {
|
||||
match timeout(self.options.timeouts.message_wait, self.channel.wait()).await {
|
||||
Ok(Some(ChannelMsg::Close)) => break,
|
||||
Ok(Some(_)) => continue,
|
||||
Ok(None) | Err(_) => break,
|
||||
}
|
||||
}
|
||||
|
||||
debug!("[Brocade] Session closed.");
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn run_command(&mut self, command: &str) -> Result<String, Error> {
|
||||
if self.should_skip_command(command) {
|
||||
return Ok(String::new());
|
||||
}
|
||||
|
||||
debug!("[Brocade] Running command: '{command}'...");
|
||||
|
||||
self.channel
|
||||
.data(format!("{}\n", command).as_bytes())
|
||||
.await?;
|
||||
tokio::time::sleep(Duration::from_millis(100)).await;
|
||||
|
||||
let output = self.collect_command_output().await?;
|
||||
let output = String::from_utf8(output)
|
||||
.map_err(|_| Error::UnexpectedError("Invalid UTF-8 in command output".to_string()))?;
|
||||
|
||||
self.check_for_command_errors(&output, command)?;
|
||||
Ok(output)
|
||||
}
|
||||
|
||||
pub async fn run_commands(&mut self, commands: Vec<String>) -> Result<(), Error> {
|
||||
for command in commands {
|
||||
self.run_command(&command).await?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn should_skip_command(&self, command: &str) -> bool {
|
||||
if (command.starts_with("write") || command.starts_with("deploy")) && self.options.dry_run {
|
||||
info!("[Brocade] Dry-run mode enabled, skipping command: {command}");
|
||||
return true;
|
||||
}
|
||||
false
|
||||
}
|
||||
|
||||
async fn collect_command_output(&mut self) -> Result<Vec<u8>, Error> {
|
||||
let mut output = Vec::new();
|
||||
let start = Instant::now();
|
||||
let read_timeout = Duration::from_millis(500);
|
||||
let log_interval = Duration::from_secs(5);
|
||||
let mut last_log = Instant::now();
|
||||
|
||||
loop {
|
||||
if start.elapsed() > self.options.timeouts.command_execution {
|
||||
return Err(Error::TimeoutError(
|
||||
"Timeout waiting for command completion.".into(),
|
||||
));
|
||||
}
|
||||
|
||||
if start.elapsed() > self.options.timeouts.command_output
|
||||
&& last_log.elapsed() > log_interval
|
||||
{
|
||||
info!("[Brocade] Waiting for command output...");
|
||||
last_log = Instant::now();
|
||||
}
|
||||
|
||||
match timeout(read_timeout, self.channel.wait()).await {
|
||||
Ok(Some(ChannelMsg::Data { data } | ChannelMsg::ExtendedData { data, .. })) => {
|
||||
output.extend_from_slice(&data);
|
||||
let current_output = String::from_utf8_lossy(&output);
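// A trailing CLI prompt ('>' or '#') in the buffer signals that the switch has finished sending output.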
|
||||
if current_output.contains('>') || current_output.contains('#') {
|
||||
return Ok(output);
|
||||
}
|
||||
}
|
||||
Ok(Some(ChannelMsg::Eof | ChannelMsg::Close)) => return Ok(output),
|
||||
Ok(Some(ChannelMsg::ExitStatus { exit_status })) => {
|
||||
debug!("[Brocade] Command exit status: {exit_status}");
|
||||
}
|
||||
Ok(Some(_)) => continue,
|
||||
Ok(None) | Err(_) => {
|
||||
if output.is_empty() {
|
||||
if let Ok(None) = timeout(read_timeout, self.channel.wait()).await {
|
||||
break;
|
||||
}
|
||||
continue;
|
||||
}
|
||||
|
||||
tokio::time::sleep(Duration::from_millis(100)).await;
|
||||
let current_output = String::from_utf8_lossy(&output);
|
||||
if current_output.contains('>') || current_output.contains('#') {
|
||||
return Ok(output);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Ok(output)
|
||||
}
|
||||
|
||||
fn check_for_command_errors(&self, output: &str, command: &str) -> Result<(), Error> {
|
||||
const ERROR_PATTERNS: &[&str] = &[
|
||||
"invalid input",
|
||||
"syntax error",
|
||||
"command not found",
|
||||
"unknown command",
|
||||
"permission denied",
|
||||
"access denied",
|
||||
"authentication failed",
|
||||
"configuration error",
|
||||
"failed to",
|
||||
"error:",
|
||||
];
|
||||
|
||||
let output_lower = output.to_lowercase();
|
||||
if ERROR_PATTERNS.iter().any(|&p| output_lower.contains(p)) {
|
||||
return Err(Error::CommandError(format!(
|
||||
"Command error: {}",
|
||||
output.trim()
|
||||
)));
|
||||
}
|
||||
|
||||
if !command.starts_with("show") && output.trim().is_empty() {
|
||||
return Err(Error::CommandError(format!(
|
||||
"Command '{command}' produced no output"
|
||||
)));
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
async fn wait_for_shell_ready(
|
||||
channel: &mut russh::Channel<russh::client::Msg>,
|
||||
timeouts: &TimeoutConfig,
|
||||
) -> Result<(), Error> {
|
||||
let mut buffer = Vec::new();
|
||||
let start = Instant::now();
|
||||
|
||||
while start.elapsed() < timeouts.shell_ready {
|
||||
match timeout(timeouts.message_wait, channel.wait()).await {
|
||||
Ok(Some(ChannelMsg::Data { data })) => {
|
||||
buffer.extend_from_slice(&data);
|
||||
let output = String::from_utf8_lossy(&buffer);
|
||||
let output = output.trim();
|
||||
if output.ends_with('>') || output.ends_with('#') {
|
||||
debug!("[Brocade] Shell ready");
|
||||
return Ok(());
|
||||
}
|
||||
}
|
||||
Ok(Some(_)) => continue,
|
||||
Ok(None) => break,
|
||||
Err(_) => continue,
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn try_elevate_session(
|
||||
channel: &mut russh::Channel<russh::client::Msg>,
|
||||
username: &str,
|
||||
password: &str,
|
||||
timeouts: &TimeoutConfig,
|
||||
) -> Result<(), Error> {
|
||||
channel.data(&b"enable\n"[..]).await?;
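// Answer the "User Name:" / "Password:" prompts as they appear; a '#' prompt confirms privileged mode, while a '>' prompt means enable failed.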
|
||||
let start = Instant::now();
|
||||
let mut buffer = Vec::new();
|
||||
|
||||
while start.elapsed() < timeouts.shell_ready {
|
||||
match timeout(timeouts.message_wait, channel.wait()).await {
|
||||
Ok(Some(ChannelMsg::Data { data })) => {
|
||||
buffer.extend_from_slice(&data);
|
||||
let output = String::from_utf8_lossy(&buffer);
|
||||
|
||||
if output.ends_with('#') {
|
||||
debug!("[Brocade] Privileged mode established");
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
if output.contains("User Name:") {
|
||||
channel.data(format!("{}\n", username).as_bytes()).await?;
|
||||
buffer.clear();
|
||||
} else if output.contains("Password:") {
|
||||
channel.data(format!("{}\n", password).as_bytes()).await?;
|
||||
buffer.clear();
|
||||
} else if output.contains('>') {
|
||||
return Err(Error::AuthenticationError(
|
||||
"Enable authentication failed".into(),
|
||||
));
|
||||
}
|
||||
}
|
||||
Ok(Some(_)) => continue,
|
||||
Ok(None) => break,
|
||||
Err(_) => continue,
|
||||
}
|
||||
}
|
||||
|
||||
let output = String::from_utf8_lossy(&buffer);
|
||||
if output.ends_with('#') {
|
||||
debug!("[Brocade] Privileged mode established");
|
||||
Ok(())
|
||||
} else {
|
||||
Err(Error::AuthenticationError(format!(
|
||||
"Enable failed. Output:\n{output}"
|
||||
)))
|
||||
}
|
||||
}
|
||||
113
brocade/src/ssh.rs
Normal file
@@ -0,0 +1,113 @@
|
||||
use std::borrow::Cow;
|
||||
use std::sync::Arc;
|
||||
|
||||
use async_trait::async_trait;
|
||||
use russh::client::Handler;
|
||||
use russh::kex::DH_G1_SHA1;
|
||||
use russh::kex::ECDH_SHA2_NISTP256;
|
||||
use russh_keys::key::SSH_RSA;
|
||||
|
||||
use super::BrocadeOptions;
|
||||
use super::Error;
|
||||
|
||||
#[derive(Default, Clone, Debug)]
|
||||
pub struct SshOptions {
|
||||
pub preferred_algorithms: russh::Preferred,
|
||||
}
|
||||
|
||||
impl SshOptions {
|
||||
fn ecdhsa_sha2_nistp256() -> Self {
|
||||
Self {
|
||||
preferred_algorithms: russh::Preferred {
|
||||
kex: Cow::Borrowed(&[ECDH_SHA2_NISTP256]),
|
||||
key: Cow::Borrowed(&[SSH_RSA]),
|
||||
..Default::default()
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
fn legacy() -> Self {
|
||||
Self {
|
||||
preferred_algorithms: russh::Preferred {
|
||||
kex: Cow::Borrowed(&[DH_G1_SHA1]),
|
||||
key: Cow::Borrowed(&[SSH_RSA]),
|
||||
..Default::default()
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub struct Client;
|
||||
|
||||
#[async_trait]
|
||||
impl Handler for Client {
|
||||
type Error = Error;
|
||||
|
||||
async fn check_server_key(
|
||||
&mut self,
|
||||
_server_public_key: &russh_keys::key::PublicKey,
|
||||
) -> Result<bool, Self::Error> {
|
||||
Ok(true)
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn try_init_client(
|
||||
username: &str,
|
||||
password: &str,
|
||||
ip: &std::net::IpAddr,
|
||||
base_options: BrocadeOptions,
|
||||
) -> Result<BrocadeOptions, Error> {
|
||||
let ssh_options = vec![
|
||||
SshOptions::default(),
|
||||
SshOptions::ecdhsa_sha2_nistp256(),
|
||||
SshOptions::legacy(),
|
||||
];
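// Try each algorithm profile in turn (default, then ECDH/nistp256, then legacy DH group1/SHA1), falling back whenever the switch rejects the key exchange.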
|
||||
|
||||
for ssh in ssh_options {
|
||||
let opts = BrocadeOptions {
|
||||
ssh,
|
||||
..base_options.clone()
|
||||
};
|
||||
let client = create_client(*ip, 22, username, password, &opts).await;
|
||||
|
||||
match client {
|
||||
Ok(_) => {
|
||||
return Ok(opts);
|
||||
}
|
||||
Err(e) => match e {
|
||||
Error::NetworkError(e) => {
|
||||
if e.contains("No common key exchange algorithm") {
|
||||
continue;
|
||||
} else {
|
||||
return Err(Error::NetworkError(e));
|
||||
}
|
||||
}
|
||||
_ => return Err(e),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
Err(Error::NetworkError(
|
||||
"Could not establish ssh connection: wrong key exchange algorithm)".to_string(),
|
||||
))
|
||||
}
|
||||
|
||||
pub async fn create_client(
|
||||
ip: std::net::IpAddr,
|
||||
port: u16,
|
||||
username: &str,
|
||||
password: &str,
|
||||
options: &BrocadeOptions,
|
||||
) -> Result<russh::client::Handle<Client>, Error> {
|
||||
let config = russh::client::Config {
|
||||
preferred: options.ssh.preferred_algorithms.clone(),
|
||||
..Default::default()
|
||||
};
|
||||
let mut client = russh::client::connect(Arc::new(config), (ip, port), Client {}).await?;
|
||||
if !client.authenticate_password(username, password).await? {
|
||||
return Err(Error::AuthenticationError(
|
||||
"ssh authentication failed".to_string(),
|
||||
));
|
||||
}
|
||||
Ok(client)
|
||||
}
|
||||
1
check.sh
@@ -1,6 +1,7 @@
|
||||
#!/bin/sh
|
||||
set -e
|
||||
|
||||
rustc --version
|
||||
cargo check --all-targets --all-features --keep-going
|
||||
cargo fmt --check
|
||||
cargo clippy
|
||||
|
||||
BIN
data/okd/bin/kubectl
(Stored with Git LFS)
Executable file
BIN
data/okd/bin/oc
(Stored with Git LFS)
Executable file
BIN
data/okd/bin/oc_README.md
(Stored with Git LFS)
Normal file
BIN
data/okd/bin/openshift-install
(Stored with Git LFS)
Executable file
BIN
data/okd/bin/openshift-install_README.md
(Stored with Git LFS)
Normal file
BIN
data/okd/installer_image/scos-9.0.20250510-0-live-initramfs.x86_64.img
(Stored with Git LFS)
Normal file
BIN
data/okd/installer_image/scos-9.0.20250510-0-live-kernel.x86_64
(Stored with Git LFS)
Normal file
BIN
data/okd/installer_image/scos-9.0.20250510-0-live-rootfs.x86_64.img
(Stored with Git LFS)
Normal file
1
data/okd/installer_image/scos-live-initramfs.x86_64.img
Symbolic link
@@ -0,0 +1 @@
|
||||
scos-9.0.20250510-0-live-initramfs.x86_64.img
|
||||
1
data/okd/installer_image/scos-live-kernel.x86_64
Symbolic link
@@ -0,0 +1 @@
|
||||
scos-9.0.20250510-0-live-kernel.x86_64
|
||||
1
data/okd/installer_image/scos-live-rootfs.x86_64.img
Symbolic link
@@ -0,0 +1 @@
|
||||
scos-9.0.20250510-0-live-rootfs.x86_64.img
|
||||
8
data/pxe/okd/README.md
Normal file
@@ -0,0 +1,8 @@
|
||||
Here lie all the data files required for an OKD cluster PXE boot setup.
|
||||
|
||||
This includes ISO files, binary boot files, iPXE, etc.
|
||||
|
||||
TODO as of August 2025:
|
||||
|
||||
- `harmony_inventory_agent` should be downloaded from official releases; the embedded copy is kept here only as a convenience for now
|
||||
- The cluster SSH key should be generated and managed by Harmony, with the private key saved in a secret store
|
||||
9
data/pxe/okd/http_files/.gitattributes
vendored
Normal file
@@ -0,0 +1,9 @@
|
||||
harmony_inventory_agent filter=lfs diff=lfs merge=lfs -text
|
||||
os filter=lfs diff=lfs merge=lfs -text
|
||||
os/centos-stream-9 filter=lfs diff=lfs merge=lfs -text
|
||||
os/centos-stream-9/images filter=lfs diff=lfs merge=lfs -text
|
||||
os/centos-stream-9/initrd.img filter=lfs diff=lfs merge=lfs -text
|
||||
os/centos-stream-9/vmlinuz filter=lfs diff=lfs merge=lfs -text
|
||||
os/centos-stream-9/images/efiboot.img filter=lfs diff=lfs merge=lfs -text
|
||||
os/centos-stream-9/images/install.img filter=lfs diff=lfs merge=lfs -text
|
||||
os/centos-stream-9/images/pxeboot filter=lfs diff=lfs merge=lfs -text
|
||||
1
data/pxe/okd/http_files/cluster_ssh_key.pub
Normal file
@@ -0,0 +1 @@
|
||||
ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIBx6bDylvC68cVpjKfEFtLQJ/dOFi6PVS2vsIOqPDJIc jeangab@liliane2
|
||||
BIN
data/pxe/okd/http_files/harmony_inventory_agent
(Stored with Git LFS)
Executable file
BIN
data/pxe/okd/http_files/os/centos-stream-9/images/efiboot.img
(Stored with Git LFS)
Normal file
BIN
data/pxe/okd/http_files/os/centos-stream-9/images/install.img
(Stored with Git LFS)
Normal file
BIN
data/pxe/okd/http_files/os/centos-stream-9/images/pxeboot/vmlinuz
Executable file
BIN
data/pxe/okd/http_files/os/centos-stream-9/initrd.img
(Stored with Git LFS)
Normal file
BIN
data/pxe/okd/http_files/os/centos-stream-9/vmlinuz
(Stored with Git LFS)
Executable file
BIN
data/pxe/okd/tftpboot/ipxe.efi
Normal file
BIN
data/pxe/okd/tftpboot/undionly.kpxe
Normal file
3
demos/cncf-k8s-quebec-meetup-september-2025/.gitignore
vendored
Normal file
@@ -0,0 +1,3 @@
|
||||
.terraform
|
||||
*.tfstate
|
||||
venv
|
||||
BIN
demos/cncf-k8s-quebec-meetup-september-2025/75_years_later.jpg
Normal file
|
After Width: | Height: | Size: 72 KiB |
BIN
demos/cncf-k8s-quebec-meetup-september-2025/Happy_swimmer.jpg
Normal file
|
After Width: | Height: | Size: 38 KiB |
|
After Width: | Height: | Size: 38 KiB |
|
After Width: | Height: | Size: 52 KiB |
|
After Width: | Height: | Size: 62 KiB |
|
After Width: | Height: | Size: 64 KiB |
|
After Width: | Height: | Size: 100 KiB |
5
demos/cncf-k8s-quebec-meetup-september-2025/README.md
Normal file
@@ -0,0 +1,5 @@
|
||||
To build:
|
||||
|
||||
```bash
|
||||
npx @marp-team/marp-cli@latest -w slides.md
|
||||
```
|
||||
BIN
demos/cncf-k8s-quebec-meetup-september-2025/ansible.jpg
Normal file
|
After Width: | Height: | Size: 11 KiB |
@@ -0,0 +1,9 @@
|
||||
To run this:
|
||||
|
||||
```bash
|
||||
virtualenv venv
|
||||
source venv/bin/activate
|
||||
pip install ansible ansible-dev-tools
|
||||
ansible-lint download.yml
|
||||
ansible-playbook -i localhost download.yml
|
||||
```
|
||||
@@ -0,0 +1,8 @@
|
||||
- name: Test Ansible URL Validation
|
||||
hosts: localhost
|
||||
tasks:
|
||||
- name: Download a file
|
||||
ansible.builtin.get_url:
|
||||
url: "http:/wikipedia.org/"
|
||||
dest: "/tmp/ansible-test/wikipedia.html"
|
||||
mode: '0900'
|
||||
|
After Width: | Height: | Size: 22 KiB |
BIN
demos/cncf-k8s-quebec-meetup-september-2025/ansible_fail.jpg
Normal file
|
After Width: | Height: | Size: 275 KiB |
|
After Width: | Height: | Size: 212 KiB |
|
After Width: | Height: | Size: 384 KiB |
|
After Width: | Height: | Size: 8.3 KiB |
195
demos/cncf-k8s-quebec-meetup-september-2025/slides.html
Normal file
241
demos/cncf-k8s-quebec-meetup-september-2025/slides.md
Normal file
@@ -0,0 +1,241 @@
|
||||
---
|
||||
theme: uncover
|
||||
---
|
||||
|
||||
# This is the story of Petit Poisson
|
||||
|
||||
---
|
||||
|
||||
<img src="./Happy_swimmer.jpg" width="600"/>
|
||||
|
||||
---
|
||||
|
||||
<img src="./happy_landscape_swimmer.jpg" width="1000"/>
|
||||
|
||||
---
|
||||
|
||||
<img src="./Happy_swimmer.jpg" width="200"/>
|
||||
|
||||
<img src="./tryrust.org.png" width="600"/>
|
||||
|
||||
[https://tryrust.org](https://tryrust.org)
|
||||
|
||||
---
|
||||
|
||||
<img src="./texto_deploy_prod_1.png" width="600"/>
|
||||
|
||||
---
|
||||
|
||||
<img src="./texto_deploy_prod_2.png" width="600"/>
|
||||
|
||||
---
|
||||
|
||||
<img src="./texto_deploy_prod_3.png" width="600"/>
|
||||
|
||||
---
|
||||
|
||||
<img src="./texto_deploy_prod_4.png" width="600"/>
|
||||
|
||||
---
|
||||
|
||||
## Demo time
|
||||
|
||||
---
|
||||
|
||||
<img src="./Happy_swimmer_sunglasses.jpg" width="1000"/>
|
||||
|
||||
---
|
||||
|
||||
<img src="./texto_download_wikipedia.png" width="600"/>
|
||||
|
||||
---
|
||||
|
||||
<img src="./ansible.jpg" width="200"/>
|
||||
|
||||
## Ansible❓
|
||||
|
||||
---
|
||||
|
||||
<img src="./Happy_swimmer.jpg" width="200"/>
|
||||
|
||||
```yaml
|
||||
- name: Download wikipedia
|
||||
hosts: localhost
|
||||
tasks:
|
||||
- name: Download a file
|
||||
ansible.builtin.get_url:
|
||||
url: "https:/wikipedia.org/"
|
||||
dest: "/tmp/ansible-test/wikipedia.html"
|
||||
mode: '0900'
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
<img src="./Happy_swimmer.jpg" width="200"/>
|
||||
|
||||
```
|
||||
ansible-lint download.yml
|
||||
|
||||
Passed: 0 failure(s), 0 warning(s) on 1 files. Last profile that met the validation criteria was 'production'.
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
```
|
||||
git push
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
<img src="./75_years_later.jpg" width="1100"/>
|
||||
|
||||
---
|
||||
|
||||
<img src="./texto_download_wikipedia_fail.png" width="600"/>
|
||||
|
||||
---
|
||||
|
||||
<img src="./Happy_swimmer_reversed.jpg" width="600"/>
|
||||
|
||||
---
|
||||
|
||||
<img src="./ansible_output_fail.jpg" width="1100"/>
|
||||
|
||||
---
|
||||
|
||||
<img src="./Happy_swimmer_reversed_1hit.jpg" width="600"/>
|
||||
|
||||
---
|
||||
|
||||
<img src="./ansible_crossed_out.jpg" width="400"/>
|
||||
|
||||
---
|
||||
|
||||
|
||||
<img src="./terraform.jpg" width="400"/>
|
||||
|
||||
## Terraform❓❗
|
||||
|
||||
---
|
||||
|
||||
<img src="./Happy_swimmer_reversed_1hit.jpg" width="200"/>
|
||||
<img src="./terraform.jpg" width="200"/>
|
||||
|
||||
```tf
|
||||
provider "docker" {}
|
||||
|
||||
resource "docker_network" "invalid_network" {
|
||||
name = "my-invalid-network"
|
||||
|
||||
ipam_config {
|
||||
subnet = "172.17.0.0/33"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
<img src="./Happy_swimmer_reversed_1hit.jpg" width="100"/>
|
||||
<img src="./terraform.jpg" width="200"/>
|
||||
|
||||
```
|
||||
terraform plan
|
||||
|
||||
Terraform used the selected providers to generate the following execution plan.
|
||||
Resource actions are indicated with the following symbols:
|
||||
+ create
|
||||
|
||||
Terraform will perform the following actions:
|
||||
|
||||
# docker_network.invalid_network will be created
|
||||
+ resource "docker_network" "invalid_network" {
|
||||
+ driver = (known after apply)
|
||||
+ id = (known after apply)
|
||||
+ internal = (known after apply)
|
||||
+ ipam_driver = "default"
|
||||
+ name = "my-invalid-network"
|
||||
+ options = (known after apply)
|
||||
+ scope = (known after apply)
|
||||
|
||||
+ ipam_config {
|
||||
+ subnet = "172.17.0.0/33"
|
||||
# (2 unchanged attributes hidden)
|
||||
}
|
||||
}
|
||||
|
||||
Plan: 1 to add, 0 to change, 0 to destroy.
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
✅
|
||||
|
||||
---
|
||||
|
||||
```
|
||||
terraform apply
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
```
|
||||
Plan: 1 to add, 0 to change, 0 to destroy.
|
||||
|
||||
Do you want to perform these actions?
|
||||
Terraform will perform the actions described above.
|
||||
Only 'yes' will be accepted to approve.
|
||||
|
||||
Enter a value: yes
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
```
|
||||
docker_network.invalid_network: Creating...
|
||||
╷
|
||||
│ Error: Unable to create network: Error response from daemon: invalid network config:
|
||||
│ invalid subnet 172.17.0.0/33: invalid CIDR block notation
|
||||
│
|
||||
│ with docker_network.invalid_network,
|
||||
│ on main.tf line 11, in resource "docker_network" "invalid_network":
|
||||
│ 11: resource "docker_network" "invalid_network" {
|
||||
│
|
||||
╵
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
|
||||
<img src="./Happy_swimmer_reversed_fullhit.jpg" width="1100"/>
|
||||
|
||||
---
|
||||
|
||||
<img src="./ansible_crossed_out.jpg" width="300"/>
|
||||
<img src="./terraform_crossed_out.jpg" width="400"/>
|
||||
<img src="./Happy_swimmer_reversed_fullhit.jpg" width="300"/>
|
||||
|
||||
---
|
||||
|
||||
## Harmony❓❗
|
||||
|
||||
---
|
||||
|
||||
Demo time
|
||||
|
||||
---
|
||||
|
||||
<img src="./Happy_swimmer.jpg" width="300"/>
|
||||
|
||||
---
|
||||
|
||||
# 🎼
|
||||
|
||||
Harmony : [https://git.nationtech.io/nationtech/harmony](https://git.nationtech.io/nationtech/harmony)
|
||||
|
||||
|
||||
<img src="./qrcode_gitea_nationtech.png" width="120"/>
|
||||
|
||||
|
||||
LinkedIn : [https://www.linkedin.com/in/jean-gabriel-gill-couture/](https://www.linkedin.com/in/jean-gabriel-gill-couture/)
|
||||
|
||||
Email: [jg@nationtech.io](mailto:jg@nationtech.io)
|
||||
132
demos/cncf-k8s-quebec-meetup-september-2025/storyline.md
Normal file
@@ -0,0 +1,132 @@
|
||||
# Harmony, an Open-Source Infrastructure Orchestrator
|
||||
|
||||
**Target Duration:** 25 minutes\
|
||||
**Tone:** Friendly, expert-to-expert, inspiring.
|
||||
|
||||
---
|
||||
|
||||
#### **Slide 1: Title Slide**
|
||||
|
||||
- **Visual:** Clean and simple. Your company logo (NationTech) and the Harmony logo.
|
||||
|
||||
---
|
||||
|
||||
#### **Slide 2: The YAML Labyrinth**
|
||||
|
||||
**Goal:** Get every head in the room nodding in agreement. Start with their world, not yours.
|
||||
|
||||
- **Visual:**
|
||||
- Option A: "The Pull Request from Hell". A screenshot of a GitHub pull request for a seemingly minor change that touches dozens of YAML files across multiple directories. A sea of red and green diffs that is visually overwhelming.
|
||||
- Option B: A complex flowchart connecting dozens of logos: Terraform, Ansible, K8s, Helm, etc.
|
||||
- **Narration:**\
|
||||
[...ADD SOMETHING FOR INTRODUCTION...]\
|
||||
"We love the power that tools like Kubernetes and the CNCF landscape have given us. But let's be honest... when did our infrastructure code start looking like _this_?"\
|
||||
"We have GitOps, which is great. But it often means we're managing this fragile cathedral of YAML, Helm charts, and brittle scripts. We spend more time debugging indentation and tracing variables than we do building truly resilient systems."
|
||||
|
||||
---
|
||||
|
||||
#### **Slide 3: The Real Cost of Infrastructure**
|
||||
|
||||
- **Visual:** "The Jenga Tower of Tools". A tall, precarious Jenga tower where each block is the logo of a different tool (Terraform, K8s, Helm, Ansible, Prometheus, ArgoCD, etc.). One block near the bottom is being nervously pulled out.
|
||||
- **Narration:**
|
||||
"The real cost isn't just complexity; it's the constant need to choose, learn, integrate, and operate a dozen different tools, each with its own syntax and failure modes. It's the nagging fear that a tiny typo in a config file could bring everything down. Click-ops isn't the answer, but the current state of IaC feels like we've traded one problem for another."
|
||||
|
||||
---
|
||||
|
||||
#### **Slide 4: The Broken Promise of "Code"**
|
||||
|
||||
**Goal:** Introduce the core idea before introducing the product. This makes the solution feel inevitable.
|
||||
|
||||
- **(Initial Visual):** A two-panel slide.
|
||||
- **Left Panel Title: "The Plan"** - A terminal showing a green, successful `terraform plan` output.
|
||||
- **Right Panel Title: "The Reality"** - The _next_ screen in the terminal, showing the `terraform apply` failing with a cascade of red error text.
|
||||
- **Narration:**
|
||||
"We call our discipline **Infrastructure as Code**. And we've all been here. Our 'compiler' is a `terraform plan` that says everything looks perfect. We get the green light."
|
||||
(Pause for a beat)
|
||||
"And then we `apply`, and reality hits. It fails halfway through, at runtime, when it's most expensive and painful to fix."
|
||||
|
||||
**(Click to transition the slide)**
|
||||
|
||||
- **(New Visual):** The entire slide is replaced by a clean screenshot of a code editor (like nvim 😉) showing Harmony's Rust DSL. A red squiggly line is under a config line. The error message is clear in the "Problems" panel: `error: Incompatible deployment. Production target 'gcp-prod-cluster' requires a StorageClass with 'snapshots' capability, but 'standard-sc' does not provide it.`
|
||||
- **Narration (continued):**
|
||||
"In software development, we solved these problems years ago. We don't accept 'it compiled, but crashed on startup'. We have real tools, type systems, compilers, test frameworks, and IDEs that catch our mistakes before they ever reach production. **So, what if we could treat our entire infrastructure... like a modern, compiled application?**"
|
||||
"What if your infrastructure code could get compile-time checks, straight into the editor... instead of runtime panics and failures at 3 AM in production?"
|
||||
|
||||
---
|
||||
|
||||
#### **Slide 5: Introducing Harmony**
|
||||
|
||||
**Goal:** Introduce Harmony as the answer to the "What If?" question.
|
||||
|
||||
- **Visual:** The Harmony logo, large and centered.
|
||||
- **Tagline:** `Infrastructure in type-safe Rust. No YAML required.`
|
||||
- **Narration:**
|
||||
"This is Harmony. It's an open-source orchestrator that lets you define your entire stack — from a dev laptop to a multi-site bare-metal cluster—in a single, type-safe Rust codebase."
|
||||
|
||||
---
|
||||
|
||||
#### **Slide 6: Before & After**
|
||||
|
||||
- **Visual:** A side-by-side comparison. Left side: A screen full of complex, nested YAML. Right side: 10-15 lines of clean, readable Harmony Rust DSL that accomplishes the same thing.
|
||||
- **Narration:**
|
||||
"This is the difference. On the left, the fragile world of strings and templates. On the right, a portable, verifiable program that describes your apps, your infra, and your operations. We unify scaffolding, provisioning, and Day-2 ops, all verified by the Rust compiler. But enough slides... let's see it in action."
|
||||
|
||||
---
|
||||
|
||||
#### **Slide 7: Live Demo: Zero to Monitored App**
|
||||
|
||||
**Goal:** Show, don't just tell. Make it look effortless. This is where you build the "dream."
|
||||
|
||||
- **Visual:** Your terminal/IDE, ready to go.
|
||||
- **Narration Guide:**
|
||||
"Okay, for this demo, we're going to take a standard web app from GitHub. Nothing special about it."
|
||||
_(Show the repo)_
|
||||
"Now, let's bring it into Harmony. This is the entire definition we need to describe the application and its needs."
|
||||
_(Show the Rust DSL)_
|
||||
"First, let's run it locally on k3d. The exact same definition for dev as for prod."
|
||||
_(Deploy locally, show it works)_
|
||||
"Cool. But a real app needs monitoring. In Harmony, that's just adding a feature to our code."
|
||||
_(Uncomment one line: `.with_feature(Monitoring)` and redeploy)_
|
||||
"And just like that, we have a fully configured Prometheus and Grafana stack, scraping our app. No YAML, no extra config."
|
||||
"Finally, let's push this to our production staging cluster. We just change the target and specify our multi-site Ceph storage."
|
||||
_(Deploy to the remote cluster)_
|
||||
"And there it is. We've gone from a simple web app to a monitored, enterprise-grade service in minutes."
|
||||
|
||||
---
|
||||
|
||||
#### **Slide 8: Live Demo: Embracing Chaos**
|
||||
|
||||
**Goal:** Prove the "predictable" and "resilient" claims in the most dramatic way possible.
|
||||
|
||||
- **Visual:** A slide showing a map or diagram of your distributed infrastructure (the different data centers). Then switch back to your terminal.
|
||||
- **Narration Guide:**
|
||||
"This is great when things are sunny. But production is chaos. So... let's break things. On purpose."
|
||||
"First, a network failure." _(Kill a switch/link, show app is still up)_
|
||||
"Now, let's power off a storage server." _(Force off a server, show Ceph healing and the app is unaffected)_
|
||||
"How about a control plane node?" _(Force off a k8s control plane, show the cluster is still running)_
|
||||
"Okay, for the grand finale. What if we have a cascading failure? I'm going to kill _another_ storage server. This should cause a total failure in this data center."
|
||||
_(Force off the second server, narrate what's happening)_
|
||||
"And there it is... Ceph has lost quorum in this site... and Harmony has automatically failed everything over to our other datacenter. The app is still running."
|
||||
|
||||
---
|
||||
|
||||
#### **Slide 9: The New Reality**
|
||||
|
||||
**Goal:** Summarize the dream and tell the audience what you want them to do.
|
||||
|
||||
- **Visual:** The clean, simple Harmony Rust DSL code from Slide 6. A summary of what was just accomplished is listed next to it: `✓ GitHub to Prod in minutes`, `✓ Type-Safe Validation`, `✓ Built-in Monitoring`, `✓ Automated Multi-Site Failover`.
|
||||
- **Narration:**
|
||||
"So, in just a few minutes, we went from a simple web app to a multi-site, monitored, and chaos-proof production deployment. We did it with a small amount of code that is easy to read, easy to verify, and completely portable. This is our vision: to offload the complexity, and make infrastructure simple, predictable, and even fun again."
|
||||
|
||||
---
|
||||
|
||||
#### **Slide 10: Join Us**
|
||||
|
||||
- **Visual:** A clean, final slide with QR codes and links.
|
||||
- GitHub Repo (`github.com/nation-tech/harmony`)
|
||||
- Website (`harmony.sh` or similar)
|
||||
- Your contact info (`jg@nation.tech` / LinkedIn / Twitter)
|
||||
- **Narration:**
|
||||
"Harmony is open-source, AGPLv3. We believe this is the future, but we're just getting started. We know this crowd has great infrastructure minds out there, and we need your feedback. Please, check out the project on GitHub. Star it if you like what you see. Tell us what's missing. Let's build this future together. Thank you."
|
||||
|
||||
**(Open for Q&A)**
|
||||
BIN
demos/cncf-k8s-quebec-meetup-september-2025/terraform.jpg
Normal file
|
After Width: | Height: | Size: 11 KiB |
40
demos/cncf-k8s-quebec-meetup-september-2025/terraform/.terraform.lock.hcl
generated
Normal file
@@ -0,0 +1,40 @@
|
||||
# This file is maintained automatically by "terraform init".
|
||||
# Manual edits may be lost in future updates.
|
||||
|
||||
provider "registry.terraform.io/hashicorp/http" {
|
||||
version = "3.5.0"
|
||||
hashes = [
|
||||
"h1:8bUoPwS4hahOvzCBj6b04ObLVFXCEmEN8T/5eOHmWOM=",
|
||||
"zh:047c5b4920751b13425efe0d011b3a23a3be97d02d9c0e3c60985521c9c456b7",
|
||||
"zh:157866f700470207561f6d032d344916b82268ecd0cf8174fb11c0674c8d0736",
|
||||
"zh:1973eb9383b0d83dd4fd5e662f0f16de837d072b64a6b7cd703410d730499476",
|
||||
"zh:212f833a4e6d020840672f6f88273d62a564f44acb0c857b5961cdb3bbc14c90",
|
||||
"zh:2c8034bc039fffaa1d4965ca02a8c6d57301e5fa9fff4773e684b46e3f78e76a",
|
||||
"zh:5df353fc5b2dd31577def9cc1a4ebf0c9a9c2699d223c6b02087a3089c74a1c6",
|
||||
"zh:672083810d4185076c81b16ad13d1224b9e6ea7f4850951d2ab8d30fa6e41f08",
|
||||
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
|
||||
"zh:7b4200f18abdbe39904b03537e1a78f21ebafe60f1c861a44387d314fda69da6",
|
||||
"zh:843feacacd86baed820f81a6c9f7bd32cf302db3d7a0f39e87976ebc7a7cc2ee",
|
||||
"zh:a9ea5096ab91aab260b22e4251c05f08dad2ed77e43e5e4fadcdfd87f2c78926",
|
||||
"zh:d02b288922811739059e90184c7f76d45d07d3a77cc48d0b15fd3db14e928623",
|
||||
]
|
||||
}
|
||||
|
||||
provider "registry.terraform.io/hashicorp/local" {
|
||||
version = "2.5.3"
|
||||
hashes = [
|
||||
"h1:1Nkh16jQJMp0EuDmvP/96f5Unnir0z12WyDuoR6HjMo=",
|
||||
"zh:284d4b5b572eacd456e605e94372f740f6de27b71b4e1fd49b63745d8ecd4927",
|
||||
"zh:40d9dfc9c549e406b5aab73c023aa485633c1b6b730c933d7bcc2fa67fd1ae6e",
|
||||
"zh:6243509bb208656eb9dc17d3c525c89acdd27f08def427a0dce22d5db90a4c8b",
|
||||
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
|
||||
"zh:885d85869f927853b6fe330e235cd03c337ac3b933b0d9ae827ec32fa1fdcdbf",
|
||||
"zh:bab66af51039bdfcccf85b25fe562cbba2f54f6b3812202f4873ade834ec201d",
|
||||
"zh:c505ff1bf9442a889ac7dca3ac05a8ee6f852e0118dd9a61796a2f6ff4837f09",
|
||||
"zh:d36c0b5770841ddb6eaf0499ba3de48e5d4fc99f4829b6ab66b0fab59b1aaf4f",
|
||||
"zh:ddb6a407c7f3ec63efb4dad5f948b54f7f4434ee1a2607a49680d494b1776fe1",
|
||||
"zh:e0dafdd4500bec23d3ff221e3a9b60621c5273e5df867bc59ef6b7e41f5c91f6",
|
||||
"zh:ece8742fd2882a8fc9d6efd20e2590010d43db386b920b2a9c220cfecc18de47",
|
||||
"zh:f4c6b3eb8f39105004cf720e202f04f57e3578441cfb76ca27611139bc116a82",
|
||||
]
|
||||
}
|
||||
@@ -0,0 +1,10 @@
|
||||
provider "http" {}
|
||||
|
||||
data "http" "remote_file" {
|
||||
url = "http:/example.com/file.txt"
|
||||
}
|
||||
|
||||
resource "local_file" "downloaded_file" {
|
||||
content = data.http.remote_file.body
|
||||
filename = "${path.module}/downloaded_file.txt"
|
||||
}
|
||||
24
demos/cncf-k8s-quebec-meetup-september-2025/terraform_2/.terraform.lock.hcl
generated
Normal file
@@ -0,0 +1,24 @@
|
||||
# This file is maintained automatically by "terraform init".
|
||||
# Manual edits may be lost in future updates.
|
||||
|
||||
provider "registry.terraform.io/kreuzwerker/docker" {
|
||||
version = "3.0.2"
|
||||
constraints = "~> 3.0.1"
|
||||
hashes = [
|
||||
"h1:cT2ccWOtlfKYBUE60/v2/4Q6Stk1KYTNnhxSck+VPlU=",
|
||||
"zh:15b0a2b2b563d8d40f62f83057d91acb02cd0096f207488d8b4298a59203d64f",
|
||||
"zh:23d919de139f7cd5ebfd2ff1b94e6d9913f0977fcfc2ca02e1573be53e269f95",
|
||||
"zh:38081b3fe317c7e9555b2aaad325ad3fa516a886d2dfa8605ae6a809c1072138",
|
||||
"zh:4a9c5065b178082f79ad8160243369c185214d874ff5048556d48d3edd03c4da",
|
||||
"zh:5438ef6afe057945f28bce43d76c4401254073de01a774760169ac1058830ac2",
|
||||
"zh:60b7fadc287166e5c9873dfe53a7976d98244979e0ab66428ea0dea1ebf33e06",
|
||||
"zh:61c5ec1cb94e4c4a4fb1e4a24576d5f39a955f09afb17dab982de62b70a9bdd1",
|
||||
"zh:a38fe9016ace5f911ab00c88e64b156ebbbbfb72a51a44da3c13d442cd214710",
|
||||
"zh:c2c4d2b1fd9ebb291c57f524b3bf9d0994ff3e815c0cd9c9bcb87166dc687005",
|
||||
"zh:d567bb8ce483ab2cf0602e07eae57027a1a53994aba470fa76095912a505533d",
|
||||
"zh:e83bf05ab6a19dd8c43547ce9a8a511f8c331a124d11ac64687c764ab9d5a792",
|
||||
"zh:e90c934b5cd65516fbcc454c89a150bfa726e7cf1fe749790c7480bbeb19d387",
|
||||
"zh:f05f167d2eaf913045d8e7b88c13757e3cf595dd5cd333057fdafc7c4b7fed62",
|
||||
"zh:fcc9c1cea5ce85e8bcb593862e699a881bd36dffd29e2e367f82d15368659c3d",
|
||||
]
|
||||
}
|
||||
@@ -0,0 +1,17 @@
|
||||
terraform {
|
||||
required_providers {
|
||||
docker = {
|
||||
source = "kreuzwerker/docker"
|
||||
version = "~> 3.0.1" # Adjust version as needed
|
||||
}
|
||||
}
|
||||
}
|
||||
provider "docker" {}
|
||||
|
||||
resource "docker_network" "invalid_network" {
|
||||
name = "my-invalid-network"
|
||||
|
||||
ipam_config {
|
||||
subnet = "172.17.0.0/33"
|
||||
}
|
||||
}
|
||||
|
After Width: | Height: | Size: 14 KiB |
BIN
demos/cncf-k8s-quebec-meetup-september-2025/terraform_fail.jpg
Normal file
|
After Width: | Height: | Size: 144 KiB |
|
After Width: | Height: | Size: 58 KiB |
|
After Width: | Height: | Size: 56 KiB |
|
After Width: | Height: | Size: 71 KiB |
|
After Width: | Height: | Size: 81 KiB |
|
After Width: | Height: | Size: 87 KiB |
|
After Width: | Height: | Size: 88 KiB |
|
After Width: | Height: | Size: 48 KiB |
BIN
demos/cncf-k8s-quebec-meetup-september-2025/tryrust.org.png
Normal file
|
After Width: | Height: | Size: 325 KiB |
8
docs/OKD_Host_preparation.md
Normal file
@@ -0,0 +1,8 @@
|
||||
## Bios settings
|
||||
|
||||
1. CSM: Disabled (the Compatibility Support Module must be off so that GPT-formatted drives boot via UEFI)
2. Secure Boot: Disabled
3. Boot order:
   1. Local hard drive
   2. PXE IPv4
4. System clock: make sure it is set correctly, otherwise you will get invalid certificate errors
|
||||
@@ -1 +1,33 @@
|
||||
Not much here yet, see the `adr` folder for now. More to come in time!
|
||||
# Harmony Documentation Hub
|
||||
|
||||
Welcome to the Harmony documentation. This is the main entry point for learning everything from core concepts to building your own Score, Topologies, and Capabilities.
|
||||
|
||||
## 1. Getting Started
|
||||
|
||||
If you're new to Harmony, start here:
|
||||
|
||||
- [**Getting Started Guide**](./guides/getting-started.md): A step-by-step tutorial that takes you from an empty project to deploying your first application.
|
||||
- [**Core Concepts**](./concepts.md): A high-level overview of the key concepts in Harmony: `Score`, `Topology`, `Capability`, `Inventory`, `Interpret`, ...
|
||||
|
||||
## 2. Use Cases & Examples
|
||||
|
||||
See how to use Harmony to solve real-world problems.
|
||||
|
||||
- [**OKD on Bare Metal**](./use-cases/okd-on-bare-metal.md): A detailed walkthrough of bootstrapping a high-availability OKD cluster from physical hardware.
|
||||
- [**Deploy a Rust Web App**](./use-cases/deploy-rust-webapp.md): A quick guide to deploying a monitored, containerized web application to a Kubernetes cluster.
|
||||
|
||||
## 3. Component Catalogs
|
||||
|
||||
Discover existing, reusable components you can use in your Harmony projects.
|
||||
|
||||
- [**Scores Catalog**](./catalogs/scores.md): A categorized list of all available `Scores` (the "what").
|
||||
- [**Topologies Catalog**](./catalogs/topologies.md): A list of all available `Topologies` (the "where").
|
||||
- [**Capabilities Catalog**](./catalogs/capabilities.md): A list of all available `Capabilities` (the "how").
|
||||
|
||||
## 4. Developer Guides
|
||||
|
||||
Ready to build your own components? These guides show you how.
|
||||
|
||||
- [**Writing a Score**](./guides/writing-a-score.md): Learn how to create your own `Score` and `Interpret` logic to define a new desired state.
|
||||
- [**Writing a Topology**](./guides/writing-a-topology.md): Learn how to model a new environment (like AWS, GCP, or custom hardware) as a `Topology`.
|
||||
- [**Adding Capabilities**](./guides/adding-capabilities.md): See how to add a `Capability` to your custom `Topology`.
|
||||
|
||||
7
docs/catalogs/README.md
Normal file
@@ -0,0 +1,7 @@
|
||||
# Component Catalogs
|
||||
|
||||
This section is the "dictionary" for Harmony. It lists all the reusable components available out-of-the-box.
|
||||
|
||||
- [**Scores Catalog**](./scores.md): Discover all available `Scores` (the "what").
|
||||
- [**Topologies Catalog**](./topologies.md): A list of all available `Topologies` (the "where").
|
||||
- [**Capabilities Catalog**](./capabilities.md): A list of all available `Capabilities` (the "how").
|
||||
40
docs/catalogs/capabilities.md
Normal file
@@ -0,0 +1,40 @@
|
||||
# Capabilities Catalog
|
||||
|
||||
A `Capability` is a specific feature or API that a `Topology` offers. `Interpret` logic uses these capabilities to execute a `Score`.
|
||||
|
||||
This list is primarily for developers **writing new Topologies or Scores**. As a user, you just need to know that the `Topology` you pick (like `K8sAnywhereTopology`) provides the capabilities your `Scores` (like `ApplicationScore`) need.
|
||||
|
||||
<!--toc:start-->
|
||||
|
||||
- [Capabilities Catalog](#capabilities-catalog)
|
||||
- [Kubernetes & Application](#kubernetes-application)
|
||||
- [Monitoring & Observability](#monitoring-observability)
|
||||
- [Networking (Core Services)](#networking-core-services)
|
||||
- [Networking (Hardware & Host)](#networking-hardware-host)
|
||||
|
||||
<!--toc:end-->
|
||||
|
||||
## Kubernetes & Application
|
||||
|
||||
- **K8sClient**: Provides an authenticated client to interact with a Kubernetes API (create/read/update/delete resources).
|
||||
- **HelmCommand**: Provides the ability to execute Helm commands (install, upgrade, template).
|
||||
- **TenantManager**: Provides methods for managing tenants in a multi-tenant cluster.
|
||||
- **Ingress**: Provides an interface for managing ingress controllers and resources.
|
||||
|
||||
## Monitoring & Observability
|
||||
|
||||
- **Grafana**: Provides an API for configuring Grafana (datasources, dashboards).
|
||||
- **Monitoring**: A general capability for configuring monitoring (e.g., creating Prometheus rules).
|
||||
|
||||
## Networking (Core Services)
|
||||
|
||||
- **DnsServer**: Provides an interface for creating and managing DNS records.
|
||||
- **LoadBalancer**: Provides an interface for configuring a load balancer (e.g., OPNsense, MetalLB).
|
||||
- **DhcpServer**: Provides an interface for managing DHCP leases and host bindings.
|
||||
- **TftpServer**: Provides an interface for managing files on a TFTP server (e.g., iPXE boot files).
|
||||
|
||||
## Networking (Hardware & Host)
|
||||
|
||||
- **Router**: Provides an interface for configuring routing rules, typically on a firewall like OPNsense.
|
||||
- **Switch**: Provides an interface for configuring a physical network switch (e.g., managing VLANs and port channels).
|
||||
- **NetworkManager**: Provides an interface for configuring host-level networking (e.g., creating bonds and bridges on a node).
|
||||
102
docs/catalogs/scores.md
Normal file
@@ -0,0 +1,102 @@
|
||||
# Scores Catalog
|
||||
|
||||
A `Score` is a declarative description of a desired state. Find the Score you need and add it to your `harmony!` block's `scores` array.
|
||||
|
||||
<!--toc:start-->
|
||||
|
||||
- [Scores Catalog](#scores-catalog)
|
||||
- [Application Deployment](#application-deployment)
|
||||
- [OKD / Kubernetes Cluster Setup](#okd-kubernetes-cluster-setup)
|
||||
- [Cluster Services & Management](#cluster-services-management)
|
||||
- [Monitoring & Alerting](#monitoring-alerting)
|
||||
- [Infrastructure & Networking (Bare Metal)](#infrastructure-networking-bare-metal)
|
||||
- [Infrastructure & Networking (Cluster)](#infrastructure-networking-cluster)
|
||||
- [Tenant Management](#tenant-management)
|
||||
- [Utility](#utility)
|
||||
|
||||
<!--toc:end-->
|
||||
|
||||
## Application Deployment
|
||||
|
||||
Scores for deploying and managing end-user applications.
|
||||
|
||||
- **ApplicationScore**: The primary score for deploying a web application. Describes the application, its framework, and the features it requires (e.g., monitoring, CI/CD).
|
||||
- **HelmChartScore**: Deploys a generic Helm chart to a Kubernetes cluster.
|
||||
- **ArgoHelmScore**: Deploys an application using an ArgoCD Helm chart.
|
||||
- **LAMPScore**: A specialized score for deploying a classic LAMP (Linux, Apache, MySQL, PHP) stack.
|
||||
|
||||
## OKD / Kubernetes Cluster Setup
|
||||
|
||||
This collection of Scores is used to provision an entire OKD cluster from bare metal. They are typically used in order.
|
||||
|
||||
- **OKDSetup01InventoryScore**: Discovers and catalogs the physical hardware.
|
||||
- **OKDSetup02BootstrapScore**: Configures the bootstrap node, renders iPXE files, and kicks off the SCOS installation.
|
||||
- **OKDSetup03ControlPlaneScore**: Renders iPXE configurations for the control plane nodes.
|
||||
- **OKDSetupPersistNetworkBondScore**: Configures network bonds on the nodes and port channels on the switches.
|
||||
- **OKDSetup04WorkersScore**: Renders iPXE configurations for the worker nodes.
|
||||
- **OKDSetup06InstallationReportScore**: Runs post-installation checks and generates a report.
|
||||
- **OKDUpgradeScore**: Manages the upgrade process for an existing OKD cluster.
|
||||
|
||||
## Cluster Services & Management
|
||||
|
||||
Scores for installing and managing services _inside_ a Kubernetes cluster.
|
||||
|
||||
- **K3DInstallationScore**: Installs and configures a local K3D (k3s-in-docker) cluster. Used by `K8sAnywhereTopology`.
|
||||
- **CertManagerHelmScore**: Deploys the `cert-manager` Helm chart.
|
||||
- **ClusterIssuerScore**: Configures a `ClusterIssuer` for `cert-manager` (e.g., for Let's Encrypt).
|
||||
- **K8sNamespaceScore**: Ensures a Kubernetes namespace exists.
|
||||
- **K8sDeploymentScore**: Deploys a generic `Deployment` resource to Kubernetes.
|
||||
- **K8sIngressScore**: Configures an `Ingress` resource for a service.
|
||||
|
||||
## Monitoring & Alerting
|
||||
|
||||
Scores for configuring observability, dashboards, and alerts.
|
||||
|
||||
- **ApplicationMonitoringScore**: A generic score to set up monitoring for an application.
|
||||
- **ApplicationRHOBMonitoringScore**: A specialized score for setting up monitoring via the Red Hat Observability stack.
|
||||
- **HelmPrometheusAlertingScore**: Configures Prometheus alerts via a Helm chart.
|
||||
- **K8sPrometheusCRDAlertingScore**: Configures Prometheus alerts using the `PrometheusRule` CRD.
|
||||
- **PrometheusAlertScore**: A generic score for creating a Prometheus alert.
|
||||
- **RHOBAlertingScore**: Configures alerts specifically for the Red Hat Observability stack.
|
||||
- **NtfyScore**: Configures alerts to be sent to a `ntfy.sh` server.
|
||||
|
||||
## Infrastructure & Networking (Bare Metal)
|
||||
|
||||
Low-level scores for managing physical hardware and network services.
|
||||
|
||||
- **DhcpScore**: Configures a DHCP server.
|
||||
- **OKDDhcpScore**: A specialized DHCP configuration for the OKD bootstrap process.
|
||||
- **OKDBootstrapDhcpScore**: Configures DHCP specifically for the bootstrap node.
|
||||
- **DhcpHostBindingScore**: Creates a specific MAC-to-IP binding in the DHCP server.
|
||||
- **DnsScore**: Configures a DNS server.
|
||||
- **OKDDnsScore**: A specialized DNS configuration for the OKD cluster (e.g., `api.*`, `*.apps.*`).
|
||||
- **StaticFilesHttpScore**: Serves a directory of static files (e.g., a documentation site) over HTTP.
|
||||
- **TftpScore**: Configures a TFTP server, typically for serving iPXE boot files.
|
||||
- **IPxeMacBootFileScore**: Assigns a specific iPXE boot file to a MAC address in the TFTP server.
|
||||
- **OKDIpxeScore**: A specialized score for generating the iPXE boot scripts for OKD.
|
||||
- **OPNsenseShellCommandScore**: Executes a shell command on an OPNsense firewall.
|
||||
|
||||
## Infrastructure & Networking (Cluster)
|
||||
|
||||
Network services that run inside the cluster or as part of the topology.
|
||||
|
||||
- **LoadBalancerScore**: Configures a general-purpose load balancer.
|
||||
- **OKDLoadBalancerScore**: Configures the high-availability load balancers for the OKD API and ingress.
|
||||
- **OKDBootstrapLoadBalancerScore**: Configures the load balancer specifically for the bootstrap-time API endpoint.
|
||||
- **K8sIngressScore**: Configures an Ingress controller or resource.
|
||||
- **HighAvailabilityHostNetworkScore**: Configures network bonds on a host and the corresponding port-channels on the switch stack for high availability ([source](../../harmony/src/modules/okd/host_network.rs)).
|
||||
|
||||
## Tenant Management
|
||||
|
||||
Scores for managing multi-tenancy within a cluster.
|
||||
|
||||
- **TenantScore**: Creates a new tenant (e.g., a namespace, quotas, network policies).
|
||||
- **TenantCredentialScore**: Generates and provisions credentials for a new tenant.
|
||||
|
||||
## Utility
|
||||
|
||||
Helper scores for discovery and inspection.
|
||||
|
||||
- **LaunchDiscoverInventoryAgentScore**: Launches the agent responsible for the `OKDSetup01InventoryScore`.
|
||||
- **DiscoverHostForRoleScore**: A utility score to find a host matching a specific role in the inventory.
|
||||
- **InspectInventoryScore**: Dumps the discovered inventory for inspection.
|
||||
59
docs/catalogs/topologies.md
Normal file
@@ -0,0 +1,59 @@
|
||||
# Topologies Catalog
|
||||
|
||||
A `Topology` is the logical representation of your infrastructure and its `Capabilities`. You select a `Topology` in your Harmony project to define _where_ your `Scores` will be applied.
|
||||
|
||||
<!--toc:start-->
|
||||
|
||||
- [Topologies Catalog](#topologies-catalog)
|
||||
- [HAClusterTopology](#haclustertopology)
|
||||
- [K8sAnywhereTopology](#k8sanywheretopology)
|
||||
|
||||
<!--toc:end-->
|
||||
|
||||
### HAClusterTopology
|
||||
|
||||
- **`HAClusterTopology::autoload()`**
|
||||
|
||||
This `Topology` represents a high-availability, bare-metal cluster. It is designed for production-grade deployments like OKD.
|
||||
|
||||
It models an environment consisting of:
|
||||
|
||||
- At least 3 cluster nodes (for control plane/workers)
|
||||
- 2 redundant firewalls (e.g., OPNsense)
|
||||
- 2 redundant network switches
|
||||
|
||||
**Provided Capabilities:**
|
||||
This topology provides a rich set of capabilities required for bare-metal provisioning and cluster management, including:
|
||||
|
||||
- `K8sClient` (once the cluster is bootstrapped)
|
||||
- `DnsServer`
|
||||
- `LoadBalancer`
|
||||
- `DhcpServer`
|
||||
- `TftpServer`
|
||||
- `Router` (via the firewalls)
|
||||
- `Switch`
|
||||
- `NetworkManager` (for host-level network config)
|
||||
|
||||
---
|
||||
|
||||
### K8sAnywhereTopology
|
||||
|
||||
- **`K8sAnywhereTopology::from_env()`**
|
||||
|
||||
This `Topology` is designed for development and application deployment. It provides a simple, abstract way to deploy to _any_ Kubernetes cluster.
|
||||
|
||||
**How it works:**
|
||||
|
||||
1. By default (`from_env()` with no env vars), it automatically provisions a **local K3D (k3s-in-docker) cluster** on your machine. This is perfect for local development and testing.
|
||||
2. If you provide a `KUBECONFIG` environment variable, it will instead connect to that **existing Kubernetes cluster** (e.g., your staging or production OKD cluster).
|
||||
|
||||
This allows you to use the _exact same code_ to deploy your application locally as you do to deploy it to production.
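For example, assuming your Harmony project is a regular Cargo binary (the exact command shown is illustrative), switching targets only requires changing the environment:

```bash
# No KUBECONFIG set: Harmony provisions a local K3D cluster automatically
cargo run

# Point the exact same code at an existing cluster instead (path is just an example)
KUBECONFIG=~/.kube/prod-okd.yaml cargo run
```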
|
||||
|
||||
**Provided Capabilities:**
|
||||
|
||||
- `K8sClient`
|
||||
- `HelmCommand`
|
||||
- `TenantManager`
|
||||
- `Ingress`
|
||||
- `Monitoring`
|
||||
- ...and more.
|
||||
40
docs/concepts.md
Normal file
@@ -0,0 +1,40 @@
|
||||
# Core Concepts
|
||||
|
||||
Harmony's design is based on a few key concepts. Understanding them is the key to unlocking the framework's power.
|
||||
|
||||
### 1. Score
|
||||
|
||||
- **What it is:** A **Score** is a declarative description of a desired state. It's a "resource" that defines _what_ you want to achieve, not _how_ to do it.
|
||||
- **Example:** `ApplicationScore` declares "I want this web application to be running and monitored."
|
||||
|
||||
### 2. Topology
|
||||
|
||||
- **What it is:** A **Topology** is the logical representation of your infrastructure and its abilities. It's the "where" your Scores will be applied.
|
||||
- **Key Job:** A Topology's most important job is to expose which `Capabilities` it supports.
|
||||
- **Example:** `HAClusterTopology` represents a bare-metal cluster and exposes `Capabilities` like `NetworkManager` and `Switch`. `K8sAnywhereTopology` represents a Kubernetes cluster and exposes the `K8sClient` `Capability`.
|
||||
|
||||
### 3. Capability
|
||||
|
||||
- **What it is:** A **Capability** is a specific feature or API that a `Topology` offers. It's the "how" a `Topology` can fulfill a `Score`'s request.
|
||||
- **Example:** The `K8sClient` capability offers a way to interact with a Kubernetes API. The `Switch` capability offers a way to configure a physical network switch.
|
||||
|
||||
### 4. Interpret
|
||||
|
||||
- **What it is:** An **Interpret** is the execution logic that makes a `Score` a reality. It's the "glue" that connects the _desired state_ (`Score`) to the _environment's abilities_ (`Topology`'s `Capabilities`).
|
||||
- **How it works:** When you apply a `Score`, Harmony finds the matching `Interpret` for your `Topology`. This `Interpret` then uses the `Capabilities` provided by the `Topology` to execute the necessary steps.
|
||||
|
||||
### 5. Inventory
|
||||
|
||||
- **What it is:** An **Inventory** is the physical material (the "what") used in a cluster. This is most relevant for bare-metal or on-premise topologies.
|
||||
- **Example:** A list of nodes with their roles (control plane, worker), CPU, RAM, and network interfaces. For the `K8sAnywhereTopology`, the inventory might be empty or autoloaded, as the infrastructure is more abstract.
|
||||
|
||||
---
|
||||
|
||||
### How They Work Together (The Compile-Time Check)
|
||||
|
||||
1. You **write a `Score`** (e.g., `ApplicationScore`).
|
||||
2. Your `Score`'s `Interpret` logic requires certain **`Capabilities`** (e.g., `K8sClient` and `Ingress`).
|
||||
3. You choose a **`Topology`** to run it on (e.g., `HAClusterTopology`).
|
||||
4. **At compile-time**, Harmony checks: "Does `HAClusterTopology` provide the `K8sClient` and `Ingress` capabilities that `ApplicationScore` needs?"
|
||||
- **If Yes:** Your code compiles. You can be confident it will run.
|
||||
- **If No:** The compiler gives you an error. You've just prevented a "config-is-valid-but-platform-is-wrong" runtime error before you even deployed.
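The snippet below is a minimal sketch of how such a check can be expressed with Rust trait bounds. The trait and type names are illustrative placeholders, not Harmony's actual API:

```rust
// Illustrative sketch only; names do not match Harmony's real types.
trait K8sClient { /* talk to a Kubernetes API */ }
trait Ingress { /* manage ingress resources */ }

struct ApplicationScore;

impl ApplicationScore {
    // The Score's interpret logic only accepts topologies that provide
    // the capabilities it needs, expressed here as trait bounds.
    fn interpret<T: K8sClient + Ingress>(&self, _topology: &T) {
        // ...use the topology's capabilities here...
    }
}

struct HAClusterTopology;
impl K8sClient for HAClusterTopology {}
impl Ingress for HAClusterTopology {}

fn main() {
    // Compiles because HAClusterTopology implements both required capabilities.
    // Remove one of the impls above and this call becomes a compile-time error.
    ApplicationScore.interpret(&HAClusterTopology);
}
```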
|
||||
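To make the mechanism concrete, here is an illustrative sketch of the same idea using plain Rust trait bounds. It is not Harmony's actual API; the trait and type names below are simplified stand-ins:

```rust
// Illustrative sketch only: simplified stand-ins, not Harmony's real types.

// Capabilities are traits a Topology may implement.
trait K8sClient {
    fn apply(&self, manifest: &str);
}
trait Ingress {
    fn expose(&self, host: &str);
}

// A Score describes the desired state; its interpret logic is only
// callable when the chosen topology provides every required capability.
struct ApplicationScore {
    manifest: String,
    host: String,
}

impl ApplicationScore {
    fn interpret<T: K8sClient + Ingress>(&self, topology: &T) {
        topology.apply(&self.manifest);
        topology.expose(&self.host);
    }
}

// A topology that provides both capabilities...
struct K8sAnywhereTopology;
impl K8sClient for K8sAnywhereTopology {
    fn apply(&self, _manifest: &str) { /* talk to the Kubernetes API */ }
}
impl Ingress for K8sAnywhereTopology {
    fn expose(&self, _host: &str) { /* create an ingress route */ }
}

fn main() {
    let score = ApplicationScore {
        manifest: "deployment.yaml".into(),
        host: "app.example.com".into(),
    };
    // Compiles because K8sAnywhereTopology implements K8sClient + Ingress.
    // A topology missing either trait would be rejected by the compiler,
    // which is the "config-is-valid-but-platform-is-wrong" error caught early.
    score.interpret(&K8sAnywhereTopology);
}
```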
42
docs/guides/getting-started.md
Normal file
@@ -0,0 +1,42 @@
# Getting Started Guide

Welcome to Harmony! This guide will walk you through installing the Harmony framework, setting up a new project, and deploying your first application.

We will build and deploy the "Rust Web App" example, which automatically:

1. Provisions a local K3D (Kubernetes in Docker) cluster.
2. Deploys a sample Rust web application.
3. Sets up monitoring for the application.

## Prerequisites

Before you begin, you'll need a few tools installed on your system:

- **Rust & Cargo:** [Install Rust](https://www.rust-lang.org/tools/install)
- **Docker:** [Install Docker](https://docs.docker.com/get-docker/) (Required for the K3D local cluster)
- **kubectl:** [Install kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/) (For inspecting the cluster)

## 1. Install Harmony

First, clone the Harmony repository and build the project. This gives you the `harmony` CLI and all the core libraries.

```bash
# Clone the main repository
git clone https://git.nationtech.io/nationtech/harmony
cd harmony

# Build the project (this may take a few minutes)
cargo build --release
```
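Before running anything, it can help to see roughly what the example's entrypoint does. The sketch below is adapted from the repository's monitoring example; module paths and the `ApplicationScore`/`harmony_cli` wiring are simplified and may differ slightly from the code in `examples/`:

```rust
// Sketch adapted from the monitoring example in this repository.
// The ApplicationScore wrapping and the harmony_cli entrypoint call are
// left as comments because their exact signatures may differ.
use std::{path::PathBuf, sync::Arc};

use harmony::modules::application::{RustWebFramework, RustWebapp};
use harmony_types::net::Url;

#[tokio::main]
async fn main() {
    // Describe the web application we want deployed and monitored.
    let application = Arc::new(RustWebapp {
        name: "my-rust-webapp".to_string(),
        domain: Url::Url(url::Url::parse("https://rustapp.harmony.example.com").unwrap()),
        project_root: PathBuf::from("./examples/rust/webapp"),
        framework: Some(RustWebFramework::Leptos),
        service_port: 3000,
    });

    // The real example wraps `application` in an ApplicationScore, pairs it
    // with a K8sAnywhereTopology, and hands both to the harmony_cli runner:
    // harmony_cli::run(inventory, topology, vec![Box::new(score)], None).await;
    let _ = application;
}
```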
...

## Next Steps

Congratulations, you've just deployed an application using true infrastructure-as-code!

From here, you can:

- [Explore the Catalogs](../catalogs/README.md): See what other [Scores](../catalogs/scores.md) and [Topologies](../catalogs/topologies.md) are available.
- [Read the Use Cases](../use-cases/README.md): Check out the [OKD on Bare Metal](../use-cases/okd-on-bare-metal.md) guide for a more advanced scenario.
- [Write your own Score](../guides/writing-a-score.md): Dive into the [Developer Guide](../guides/developer-guide.md) to start building your own components.
108
docs/pxe_test/README.md
Normal file
@@ -0,0 +1,108 @@
# OPNsense PXE Lab Environment

This project contains a script to automatically set up a virtual lab environment for testing PXE boot services managed by an OPNsense firewall.

## Overview

The `pxe_vm_lab_setup.sh` script will create the following resources using libvirt/KVM:

1. **A Virtual Network**: An isolated network named `harmonylan` (`virbr1`) for the lab.
2. **Two Virtual Machines**:
    * `opnsense-pxe`: A firewall VM that will act as the gateway and PXE server.
    * `pxe-node-1`: A client VM configured to boot from the network.

## Prerequisites

Ensure you have the following software installed on your Arch Linux host:

* `libvirt`
* `qemu`
* `virt-install` (from the `virt-install` package)
* `curl`
* `bzip2`

## Usage

### 1. Create the Environment

Run the `up` command to download the necessary images and create the network and VMs.

```bash
sudo ./pxe_vm_lab_setup.sh up
```

### 2. Install and Configure OPNsense

The OPNsense VM is created but the OS needs to be installed manually via the console.

1. **Connect to the VM console**:
    ```bash
    sudo virsh console opnsense-pxe
    ```

2. **Log in as the installer**:
    * Username: `installer`
    * Password: `opnsense`

3. **Follow the on-screen installation wizard**. When prompted to assign network interfaces (`WAN` and `LAN`):
    * Find the MAC address for the `harmonylan` interface by running this command in another terminal:
      ```bash
      virsh domiflist opnsense-pxe
      # Example output:
      # Interface   Type      Source       Model    MAC
      # ---------------------------------------------------------
      # vnet18      network   default      virtio   52:54:00:b5:c4:6d
      # vnet19      network   harmonylan   virtio   52:54:00:21:f9:ba
      ```
    * Assign the interface connected to `harmonylan` (e.g., `vtnet1` with MAC `52:54:00:21:f9:ba`) as your **LAN**.
    * Assign the other interface as your **WAN**.

4. After the installation is complete, **shut down** the VM from the console menu.

5. **Detach the installation media** by editing the VM's configuration:
    ```bash
    sudo virsh edit opnsense-pxe
    ```
    Find and **delete** the entire `<disk>` block corresponding to the `.img` file (the one with `<target ... bus='usb'/>`).

6. **Start the VM** to boot into the newly installed system:
    ```bash
    sudo virsh start opnsense-pxe
    ```

### 3. Connect to OPNsense from Your Host

To configure OPNsense, you need to connect your host to the `harmonylan` network.

1. By default, OPNsense configures its LAN interface with the IP `192.168.1.1`.
2. Assign a compatible IP address to your host's `virbr1` bridge interface:
    ```bash
    sudo ip addr add 192.168.1.5/24 dev virbr1
    ```
3. You can now access the OPNsense VM from your host:
    * **SSH**: `ssh root@192.168.1.1` (password: `opnsense`)
    * **Web UI**: `https://192.168.1.1`

### 4. Configure PXE Services with Harmony

With connectivity established, you can now use Harmony to configure the OPNsense firewall for PXE booting. Point your Harmony OPNsense scores to the firewall using these details:

* **Hostname/IP**: `192.168.1.1`
* **Credentials**: `root` / `opnsense`
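If you are scripting this step, the firewall target reduces to the connection details above. The `OpnsenseTarget` struct below is purely illustrative (not a real Harmony type); see the OPNsense-related scores in the Harmony codebase for the actual configuration types:

```rust
// Purely illustrative: the real Harmony OPNsense score types may differ.
struct OpnsenseTarget {
    host: &'static str,
    username: &'static str,
    password: &'static str,
}

fn main() {
    // Matches the lab defaults configured in the steps above.
    let firewall = OpnsenseTarget {
        host: "192.168.1.1",
        username: "root",
        password: "opnsense",
    };
    println!("Point your PXE scores at https://{}", firewall.host);
}
```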

### 5. Boot the PXE Client

Once your Harmony configuration has been applied and OPNsense is serving DHCP/TFTP, start the client VM. It will automatically attempt to boot from the network.

```bash
sudo virsh start pxe-node-1
sudo virsh console pxe-node-1
```

## Cleanup

To destroy all VMs and networks created by the script, run the `clean` command:

```bash
sudo ./pxe_vm_lab_setup.sh clean
```
191
docs/pxe_test/pxe_vm_lab_setup.sh
Executable file
@@ -0,0 +1,191 @@
#!/usr/bin/env bash
set -euo pipefail

# --- Configuration ---
LAB_DIR="/var/lib/harmony_pxe_test"
IMG_DIR="${LAB_DIR}/images"
STATE_DIR="${LAB_DIR}/state"
VM_OPN="opnsense-pxe"
VM_PXE="pxe-node-1"
NET_HARMONYLAN="harmonylan"

# Network settings for the isolated LAN
VLAN_CIDR="192.168.150.0/24"
VLAN_GW="192.168.150.1"
VLAN_MASK="255.255.255.0"

# VM Specifications
RAM_OPN="2048"
VCPUS_OPN="2"
DISK_OPN_GB="10"
OS_VARIANT_OPN="freebsd14.0" # Updated to a more recent FreeBSD variant

RAM_PXE="4096"
VCPUS_PXE="2"
DISK_PXE_GB="40"
OS_VARIANT_LINUX="centos-stream9"

OPN_IMG_URL="https://mirror.ams1.nl.leaseweb.net/opnsense/releases/25.7/OPNsense-25.7-serial-amd64.img.bz2"
OPN_IMG_PATH="${IMG_DIR}/OPNsense-25.7-serial-amd64.img"
CENTOS_ISO_URL="https://mirror.stream.centos.org/9-stream/BaseOS/x86_64/os/images/boot.iso"
CENTOS_ISO_PATH="${IMG_DIR}/CentOS-Stream-9-latest-boot.iso"

CONNECT_URI="qemu:///system"

download_if_missing() {
    local url="$1"
    local dest="$2"
    if [[ ! -f "$dest" ]]; then
        echo "Downloading $url to $dest"
        mkdir -p "$(dirname "$dest")"
        local tmp
        tmp="$(mktemp)"
        curl -L --progress-bar "$url" -o "$tmp"
        case "$url" in
            *.bz2) bunzip2 -c "$tmp" > "$dest" && rm -f "$tmp" ;;
            *) mv "$tmp" "$dest" ;;
        esac
    else
        echo "Already present: $dest"
    fi
}

# Ensures a libvirt network is defined and active
ensure_network() {
    local net_name="$1"
    local net_xml_path="$2"
    if virsh --connect "${CONNECT_URI}" net-info "${net_name}" >/dev/null 2>&1; then
        echo "Network ${net_name} already exists."
    else
        echo "Defining network ${net_name} from ${net_xml_path}"
        virsh --connect "${CONNECT_URI}" net-define "${net_xml_path}"
    fi

    if ! virsh --connect "${CONNECT_URI}" net-info "${net_name}" | grep "Active: *yes"; then
        echo "Starting network ${net_name}..."
        virsh --connect "${CONNECT_URI}" net-start "${net_name}"
        virsh --connect "${CONNECT_URI}" net-autostart "${net_name}"
    fi
}

# Destroys a VM completely
destroy_vm() {
    local vm_name="$1"
    if virsh --connect "${CONNECT_URI}" dominfo "$vm_name" >/dev/null 2>&1; then
        echo "Destroying and undefining VM: ${vm_name}"
        virsh --connect "${CONNECT_URI}" destroy "$vm_name" || true
        virsh --connect "${CONNECT_URI}" undefine "$vm_name" --nvram
    fi
}

# Destroys a libvirt network
destroy_network() {
    local net_name="$1"
    if virsh --connect "${CONNECT_URI}" net-info "$net_name" >/dev/null 2>&1; then
        echo "Destroying and undefining network: ${net_name}"
        virsh --connect "${CONNECT_URI}" net-destroy "$net_name" || true
        virsh --connect "${CONNECT_URI}" net-undefine "$net_name"
    fi
}

# --- Main Logic ---
create_lab_environment() {
    # Create network definition files
    cat > "${STATE_DIR}/default.xml" <<EOF
<network>
  <name>default</name>
  <forward mode='nat'/>
  <bridge name='virbr0' stp='on' delay='0'/>
  <ip address='192.168.122.1' netmask='255.255.255.0'>
    <dhcp>
      <range start='192.168.122.100' end='192.168.122.200'/>
    </dhcp>
  </ip>
</network>
EOF

    cat > "${STATE_DIR}/${NET_HARMONYLAN}.xml" <<EOF
<network>
  <name>${NET_HARMONYLAN}</name>
  <bridge name='virbr1' stp='on' delay='0'/>
</network>
EOF

    # Ensure both networks exist and are active
    ensure_network "default" "${STATE_DIR}/default.xml"
    ensure_network "${NET_HARMONYLAN}" "${STATE_DIR}/${NET_HARMONYLAN}.xml"

    # --- Create OPNsense VM (MODIFIED SECTION) ---
    local disk_opn="${IMG_DIR}/${VM_OPN}.qcow2"
    if [[ ! -f "$disk_opn" ]]; then
        qemu-img create -f qcow2 "$disk_opn" "${DISK_OPN_GB}G"
    fi

    echo "Creating OPNsense VM using serial image..."
    virt-install \
        --connect "${CONNECT_URI}" \
        --name "${VM_OPN}" \
        --ram "${RAM_OPN}" \
        --vcpus "${VCPUS_OPN}" \
        --cpu host-passthrough \
        --os-variant "${OS_VARIANT_OPN}" \
        --graphics none \
        --noautoconsole \
        --disk path="${disk_opn}",device=disk,bus=virtio,boot.order=1 \
        --disk path="${OPN_IMG_PATH}",device=disk,bus=usb,readonly=on,boot.order=2 \
        --network network=default,model=virtio \
        --network network="${NET_HARMONYLAN}",model=virtio \
        --boot uefi,menu=on

    echo "OPNsense VM created. Connect with: sudo virsh console ${VM_OPN}"
    echo "The VM will boot from the serial installation image."
    echo "Login with user 'installer' and password 'opnsense' to start the installation."
    echo "Install onto the VirtIO disk (vtbd0)."
    echo "After installation, shutdown the VM, then run 'sudo virsh edit ${VM_OPN}' and remove the USB disk block to boot from the installed system."

    # --- Create PXE Client VM ---
    local disk_pxe="${IMG_DIR}/${VM_PXE}.qcow2"
    if [[ ! -f "$disk_pxe" ]]; then
        qemu-img create -f qcow2 "$disk_pxe" "${DISK_PXE_GB}G"
    fi

    echo "Creating PXE client VM..."
    virt-install \
        --connect "${CONNECT_URI}" \
        --name "${VM_PXE}" \
        --ram "${RAM_PXE}" \
        --vcpus "${VCPUS_PXE}" \
        --cpu host-passthrough \
        --os-variant "${OS_VARIANT_LINUX}" \
        --graphics none \
        --noautoconsole \
        --disk path="${disk_pxe}",format=qcow2,bus=virtio \
        --network network="${NET_HARMONYLAN}",model=virtio \
        --pxe \
        --boot uefi,menu=on

    echo "PXE VM created. It will attempt to netboot on ${NET_HARMONYLAN}."
}

# --- Script Entrypoint ---
case "${1:-}" in
    up)
        mkdir -p "${IMG_DIR}" "${STATE_DIR}"
        download_if_missing "$OPN_IMG_URL" "$OPN_IMG_PATH"
        download_if_missing "$CENTOS_ISO_URL" "$CENTOS_ISO_PATH"
        create_lab_environment
        echo "Lab setup complete. Use 'sudo virsh list --all' to see VMs."
        ;;
    clean)
        destroy_vm "${VM_PXE}"
        destroy_vm "${VM_OPN}"
        destroy_network "${NET_HARMONYLAN}"
        # Optionally destroy the default network if you want a full reset
        # destroy_network "default"
        echo "Cleanup complete."
        ;;
    *)
        echo "Usage: sudo $0 {up|clean}"
        exit 1
        ;;
esac
@@ -7,8 +7,9 @@ license.workspace = true
 
 [dependencies]
 env_logger.workspace = true
-harmony = { version = "0.1.0", path = "../../harmony" }
-harmony_cli = { version = "0.1.0", path = "../../harmony_cli" }
+harmony = { path = "../../harmony" }
+harmony_cli = { path = "../../harmony_cli" }
+harmony_types = { path = "../../harmony_types" }
 logging = "0.1.0"
 tokio.workspace = true
 url.workspace = true
@@ -1,15 +1,16 @@
 use std::{path::PathBuf, str::FromStr, sync::Arc};
 
 use harmony::{
-    data::Id,
     inventory::Inventory,
     modules::{
         application::{ApplicationScore, RustWebFramework, RustWebapp, features::Monitoring},
         monitoring::alert_channel::webhook_receiver::WebhookReceiver,
         tenant::TenantScore,
     },
-    topology::{K8sAnywhereTopology, Url, tenant::TenantConfig},
+    topology::{K8sAnywhereTopology, tenant::TenantConfig},
 };
+use harmony_types::id::Id;
+use harmony_types::net::Url;
 
 #[tokio::main]
 async fn main() {
@@ -26,9 +27,9 @@ async fn main() {
    };
    let application = Arc::new(RustWebapp {
        name: "example-monitoring".to_string(),
        domain: Url::Url(url::Url::parse("https://rustapp.harmony.example.com").unwrap()),
        project_root: PathBuf::from("./examples/rust/webapp"),
        framework: Some(RustWebFramework::Leptos),
        service_port: 3000,
    });

    let webhook_receiver = WebhookReceiver {
@@ -1,6 +1,9 @@
 use harmony::{
     inventory::Inventory,
-    modules::dummy::{ErrorScore, PanicScore, SuccessScore},
+    modules::{
+        dummy::{ErrorScore, PanicScore, SuccessScore},
+        inventory::LaunchDiscoverInventoryAgentScore,
+    },
     topology::LocalhostTopology,
 };
 
@@ -13,6 +16,9 @@ async fn main() {
             Box::new(SuccessScore {}),
             Box::new(ErrorScore {}),
             Box::new(PanicScore {}),
+            Box::new(LaunchDiscoverInventoryAgentScore {
+                discovery_timeout: Some(10),
+            }),
         ],
         None,
     )
@@ -2,8 +2,9 @@ use harmony::{
     data::Version,
     inventory::Inventory,
     modules::lamp::{LAMPConfig, LAMPScore},
-    topology::{K8sAnywhereTopology, Url},
+    topology::K8sAnywhereTopology,
 };
+use harmony_types::net::Url;
 
 #[tokio::main]
 async fn main() {
@@ -6,8 +6,9 @@ readme.workspace = true
 license.workspace = true
 
 [dependencies]
-harmony = { version = "0.1.0", path = "../../harmony" }
-harmony_cli = { version = "0.1.0", path = "../../harmony_cli" }
-harmony_macros = { version = "0.1.0", path = "../../harmony_macros" }
+harmony = { path = "../../harmony" }
+harmony_cli = { path = "../../harmony_cli" }
+harmony_macros = { path = "../../harmony_macros" }
+harmony_types = { path = "../../harmony_types" }
 tokio.workspace = true
 url.workspace = true
@@ -22,8 +22,9 @@ use harmony::{
             k8s::pvc::high_pvc_fill_rate_over_two_days,
         },
     },
-    topology::{K8sAnywhereTopology, Url},
+    topology::K8sAnywhereTopology,
 };
+use harmony_types::net::Url;
 
 #[tokio::main]
 async fn main() {
@@ -7,7 +7,8 @@ license.workspace = true
 
 [dependencies]
 cidr.workspace = true
-harmony = { version = "0.1.0", path = "../../harmony" }
-harmony_cli = { version = "0.1.0", path = "../../harmony_cli" }
+harmony = { path = "../../harmony" }
+harmony_cli = { path = "../../harmony_cli" }
+harmony_types = { path = "../../harmony_types" }
 tokio.workspace = true
 url.workspace = true
@@ -1,7 +1,6 @@
 use std::{collections::HashMap, str::FromStr};
 
 use harmony::{
-    data::Id,
     inventory::Inventory,
     modules::{
         monitoring::{
@@ -19,10 +18,12 @@ use harmony::{
         tenant::TenantScore,
     },
     topology::{
-        K8sAnywhereTopology, Url,
+        K8sAnywhereTopology,
         tenant::{ResourceLimits, TenantConfig, TenantNetworkPolicy},
     },
 };
+use harmony_types::id::Id;
+use harmony_types::net::Url;
 
 #[tokio::main]
 async fn main() {
@@ -13,6 +13,9 @@ harmony_types = { path = "../../harmony_types" }
 cidr = { workspace = true }
 tokio = { workspace = true }
 harmony_macros = { path = "../../harmony_macros" }
+harmony_secret = { path = "../../harmony_secret" }
 log = { workspace = true }
 env_logger = { workspace = true }
 url = { workspace = true }
+serde = { workspace = true }
+brocade = { path = "../../brocade" }