Compare commits

1 commit
f31d21f9da ... dev/postgr

| Author | SHA1       | Date |
|--------|------------|------|
|        | 0b8525fe05 |      |
.gitattributes (vendored), 2 lines changed

@@ -2,5 +2,3 @@ bootx64.efi filter=lfs diff=lfs merge=lfs -text
 grubx64.efi filter=lfs diff=lfs merge=lfs -text
 initrd filter=lfs diff=lfs merge=lfs -text
 linux filter=lfs diff=lfs merge=lfs -text
-data/okd/bin/* filter=lfs diff=lfs merge=lfs -text
-data/okd/installer_image/* filter=lfs diff=lfs merge=lfs -text
@@ -9,7 +9,7 @@ jobs:
   check:
     runs-on: docker
     container:
-      image: hub.nationtech.io/harmony/harmony_composer:latest
+      image: hub.nationtech.io/harmony/harmony_composer:latest@sha256:eb0406fcb95c63df9b7c4b19bc50ad7914dd8232ce98e9c9abef628e07c69386
     steps:
       - name: Checkout code
         uses: actions/checkout@v4

@@ -7,7 +7,7 @@ on:
 jobs:
   package_harmony_composer:
     container:
-      image: hub.nationtech.io/harmony/harmony_composer:latest
+      image: hub.nationtech.io/harmony/harmony_composer:latest@sha256:eb0406fcb95c63df9b7c4b19bc50ad7914dd8232ce98e9c9abef628e07c69386
     runs-on: dind
     steps:
       - name: Checkout code
@@ -45,14 +45,14 @@ jobs:
             -H "Authorization: token ${{ secrets.GITEATOKEN }}" \
             "https://git.nationtech.io/api/v1/repos/nationtech/harmony/releases/tags/snapshot-latest" \
             | jq -r '.id // empty')
 
           if [ -n "$RELEASE_ID" ]; then
             # Delete existing release
             curl -X DELETE \
               -H "Authorization: token ${{ secrets.GITEATOKEN }}" \
               "https://git.nationtech.io/api/v1/repos/nationtech/harmony/releases/$RELEASE_ID"
           fi
 
           # Create new release
           RESPONSE=$(curl -X POST \
             -H "Authorization: token ${{ secrets.GITEATOKEN }}" \
@@ -65,7 +65,7 @@ jobs:
             "prerelease": true
           }' \
           "https://git.nationtech.io/api/v1/repos/nationtech/harmony/releases")
 
           echo "RELEASE_ID=$(echo $RESPONSE | jq -r '.id')" >> $GITHUB_ENV
 
       - name: Upload Linux binary
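The workflow hunks above swap the floating `latest` tag for a tag pinned to a digest. A minimal sketch of the difference, assuming a generic Gitea Actions job (the job layout here is illustrative, the image reference is taken from the hunks):

```yaml
jobs:
  check:
    runs-on: docker
    container:
      # Floating tag: resolves to whatever "latest" points at when the job starts.
      # image: hub.nationtech.io/harmony/harmony_composer:latest
      # Tag plus digest: the runtime pulls exactly this image, even if the tag later moves.
      image: hub.nationtech.io/harmony/harmony_composer:latest@sha256:eb0406fcb95c63df9b7c4b19bc50ad7914dd8232ce98e9c9abef628e07c69386
```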
.gitignore (vendored), 29 lines changed

@@ -1,26 +1,3 @@
-### General ###
-private_repos/
-
-### Harmony ###
-harmony.log
-data/okd/installation_files*
-
-### Helm ###
-# Chart dependencies
-**/charts/*.tgz
-
-### Rust ###
-# Generated by Cargo
-# will have compiled files and executables
-debug/
-target/
-
-# Remove Cargo.lock from gitignore if creating an executable, leave it for libraries
-# More information here https://doc.rust-lang.org/cargo/guide/cargo-toml-vs-cargo-lock.html
-Cargo.lock
-
-# These are backup files generated by rustfmt
-**/*.rs.bk
-
-# MSVC Windows builds of rustc generate these, which store debugging information
-*.pdb
+target
+private_repos
+log/
@@ -1,20 +0,0 @@
-{
-  "db_name": "SQLite",
-  "query": "SELECT host_id FROM host_role_mapping WHERE role = ?",
-  "describe": { "columns": [ { "name": "host_id", "ordinal": 0, "type_info": "Text" } ], "parameters": { "Right": 1 }, "nullable": [ false ] },
-  "hash": "2ea29df2326f7c84bd4100ad510a3fd4878dc2e217dc83f9bf45a402dfd62a91"
-}
@@ -1,32 +0,0 @@
-{
-  "db_name": "SQLite",
-  "query": "\n SELECT\n p1.id,\n p1.version_id,\n p1.data as \"data: Json<PhysicalHost>\"\n FROM\n physical_hosts p1\n INNER JOIN (\n SELECT\n id,\n MAX(version_id) AS max_version\n FROM\n physical_hosts\n GROUP BY\n id\n ) p2 ON p1.id = p2.id AND p1.version_id = p2.max_version\n ",
-  "describe": { "columns": [ { "name": "id", "ordinal": 0, "type_info": "Text" }, { "name": "version_id", "ordinal": 1, "type_info": "Text" }, { "name": "data: Json<PhysicalHost>", "ordinal": 2, "type_info": "Blob" } ], "parameters": { "Right": 0 }, "nullable": [ false, false, false ] },
-  "hash": "8d247918eca10a88b784ee353db090c94a222115c543231f2140cba27bd0f067"
-}
@@ -1,32 +0,0 @@
-{
-  "db_name": "SQLite",
-  "query": "SELECT id, version_id, data as \"data: Json<PhysicalHost>\" FROM physical_hosts WHERE id = ? ORDER BY version_id DESC LIMIT 1",
-  "describe": { "columns": [ { "name": "id", "ordinal": 0, "type_info": "Text" }, { "name": "version_id", "ordinal": 1, "type_info": "Text" }, { "name": "data: Json<PhysicalHost>", "ordinal": 2, "type_info": "Null" } ], "parameters": { "Right": 1 }, "nullable": [ false, false, false ] },
-  "hash": "934035c7ca6e064815393e4e049a7934b0a7fac04a4fe4b2a354f0443d630990"
-}
@@ -1,12 +0,0 @@
-{
-  "db_name": "SQLite",
-  "query": "\n INSERT INTO host_role_mapping (host_id, role)\n VALUES (?, ?)\n ",
-  "describe": { "columns": [], "parameters": { "Right": 2 }, "nullable": [] },
-  "hash": "df7a7c9cfdd0972e2e0ce7ea444ba8bc9d708a4fb89d5593a0be2bbebde62aff"
-}
@@ -1,12 +0,0 @@
-{
-  "db_name": "SQLite",
-  "query": "INSERT INTO physical_hosts (id, version_id, data) VALUES (?, ?, ?)",
-  "describe": { "columns": [], "parameters": { "Right": 3 }, "nullable": [] },
-  "hash": "f10f615ee42129ffa293e46f2f893d65a237d31d24b74a29c6a8d8420d255ab8"
-}
Cargo.lock (generated), 2265 lines changed: diff suppressed because it is too large.
Cargo.toml, 20 lines changed

@@ -12,9 +12,6 @@ members = [
     "harmony_cli",
     "k3d",
     "harmony_composer",
-    "harmony_inventory_agent",
-    "harmony_secret_derive",
-    "harmony_secret", "adr/agent_discovery/mdns",
 ]
 
 [workspace.package]
@@ -23,7 +20,7 @@ readme = "README.md"
 license = "GNU AGPL v3"
 
 [workspace.dependencies]
-log = { version = "0.4", features = ["kv"] }
+log = "0.4"
 env_logger = "0.11"
 derive-new = "0.7"
 async-trait = "0.1"
@@ -36,7 +33,7 @@ tokio = { version = "1.40", features = [
 cidr = { features = ["serde"], version = "0.2" }
 russh = "0.45"
 russh-keys = "0.45"
-rand = "0.9"
+rand = "0.8"
 url = "2.5"
 kube = { version = "1.1.0", features = [
     "config",
@@ -55,16 +52,3 @@ convert_case = "0.8"
 chrono = "0.4"
 similar = "2"
 uuid = { version = "1.11", features = ["v4", "fast-rng", "macro-diagnostics"] }
-pretty_assertions = "1.4.1"
-tempfile = "3.20.0"
-bollard = "0.19.1"
-base64 = "0.22.1"
-tar = "0.4.44"
-lazy_static = "1.5.0"
-directories = "6.0.0"
-thiserror = "2.0.14"
-serde = { version = "1.0.209", features = ["derive", "rc"] }
-serde_json = "1.0.127"
-askama = "0.14"
-sqlx = { version = "0.8", features = ["runtime-tokio", "sqlite" ] }
-reqwest = { version = "0.12", features = ["blocking", "stream", "rustls-tls", "http2", "json"], default-features = false }
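The Cargo.toml hunks above edit the `[workspace.dependencies]` table. As a hedged sketch of how member crates consume those entries (the member crate shown here is illustrative; the `log.workspace = true` pattern appears elsewhere in this diff):

```toml
# Declared once at the workspace root, as in the hunks above:
[workspace.dependencies]
log = { version = "0.4", features = ["kv"] }

# Inherited by a member crate's own Cargo.toml:
[dependencies]
log.workspace = true  # picks up the version and features declared at the workspace root
```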
@@ -1,4 +1,4 @@
-FROM docker.io/rust:1.89.0 AS build
+FROM docker.io/rust:1.87.0 AS build
 
 WORKDIR /app
 
@@ -6,14 +6,13 @@ COPY . .
 
 RUN cargo build --release --bin harmony_composer
 
-FROM docker.io/rust:1.89.0
+FROM docker.io/rust:1.87.0
 
 WORKDIR /app
 
 RUN rustup target add x86_64-pc-windows-gnu
 RUN rustup target add x86_64-unknown-linux-gnu
 RUN rustup component add rustfmt
-RUN rustup component add clippy
 
 RUN apt update
 
@@ -23,4 +22,4 @@ RUN apt install -y nodejs docker.io mingw-w64
 
 COPY --from=build /app/target/release/harmony_composer .
 
 ENTRYPOINT ["/app/harmony_composer"]
README.md, 73 lines changed

@@ -1,6 +1,5 @@
-# Harmony : Open-source infrastructure orchestration that treats your platform like first-class code
-
-_By [NationTech](https://nationtech.io)_
+# Harmony : Open-source infrastructure orchestration that treats your platform like first-class code.
+*By [NationTech](https://nationtech.io)*
 
 [](https://git.nationtech.io/nationtech/harmony)
 [](LICENSE)
@@ -24,11 +23,11 @@ From a **developer laptop** to a **global production cluster**, a single **sourc
 
 Infrastructure is essential, but it shouldn’t be your core business. Harmony is built on three guiding principles that make modern platforms reliable, repeatable, and easy to reason about.
 
 | Principle | What it means for you |
-| -------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
+|-----------|-----------------------|
 | **Infrastructure as Resilient Code** | Replace sprawling YAML and bash scripts with type-safe Rust. Test, refactor, and version your platform just like application code. |
 | **Prove It Works — Before You Deploy** | Harmony uses the compiler to verify that your application’s needs match the target environment’s capabilities at **compile-time**, eliminating an entire class of runtime outages. |
 | **One Unified Model** | Software and infrastructure are a single system. Harmony models them together, enabling deep automation—from bare-metal servers to Kubernetes workloads—with zero context switching. |
 
 These principles surface as simple, ergonomic Rust APIs that let teams focus on their product while trusting the platform underneath.
 
@@ -64,20 +63,22 @@ async fn main() {
         },
     };
 
-    // 2. Enhance with extra scores (monitoring, CI/CD, …)
+    // 2. Pick where it should run
+    let mut maestro = Maestro::<K8sAnywhereTopology>::initialize(
+        Inventory::autoload(),           // auto-detect hardware / kube-config
+        K8sAnywhereTopology::from_env(), // local k3d, CI, staging, prod…
+    )
+    .await
+    .unwrap();
 
+    // 3. Enhance with extra scores (monitoring, CI/CD, …)
     let mut monitoring = MonitoringAlertingStackScore::new();
     monitoring.namespace = Some(lamp_stack.config.namespace.clone());
 
-    // 3. Run your scores on the desired topology & inventory
-    harmony_cli::run(
-        Inventory::autoload(),           // auto-detect hardware / kube-config
-        K8sAnywhereTopology::from_env(), // local k3d, CI, staging, prod…
-        vec![
-            Box::new(lamp_stack),
-            Box::new(monitoring)
-        ],
-        None
-    ).await.unwrap();
+    maestro.register_all(vec![Box::new(lamp_stack), Box::new(monitoring)]);
+    // 4. Launch an interactive CLI / TUI
+    harmony_cli::init(maestro, None).await.unwrap();
 }
 ```
@@ -93,13 +94,13 @@ Harmony analyses the code, shows an execution plan in a TUI, and applies it once
 
 ## 3 · Core Concepts
 
 | Term | One-liner |
-| ---------------- | ---------------------------------------------------------------------------------------------------- |
+|------|-----------|
 | **Score<T>** | Declarative description of the desired state (e.g., `LAMPScore`). |
 | **Interpret<T>** | Imperative logic that realises a `Score` on a specific environment. |
-| **Topology** | An environment (local k3d, AWS, bare-metal) exposing verified _Capabilities_ (Kubernetes, DNS, …). |
+| **Topology** | An environment (local k3d, AWS, bare-metal) exposing verified *Capabilities* (Kubernetes, DNS, …). |
 | **Maestro** | Orchestrator that compiles Scores + Topology, ensuring all capabilities line up **at compile-time**. |
 | **Inventory** | Optional catalogue of physical assets for bare-metal and edge deployments. |
 
 A visual overview is in the diagram below.
 
@@ -111,9 +112,9 @@ A visual overview is in the diagram below.
 
 Prerequisites:
 
-- Rust
-- Docker (if you deploy locally)
-- `kubectl` / `helm` for Kubernetes-based topologies
+* Rust
+* Docker (if you deploy locally)
+* `kubectl` / `helm` for Kubernetes-based topologies
 
 ```bash
 git clone https://git.nationtech.io/nationtech/harmony
@@ -125,15 +126,15 @@ cargo build --release # builds the CLI, TUI and libraries
 
 ## 5 · Learning More
 
-- **Architectural Decision Records** – dive into the rationale
+* **Architectural Decision Records** – dive into the rationale
   - [ADR-001 · Why Rust](adr/001-rust.md)
   - [ADR-003 · Infrastructure Abstractions](adr/003-infrastructure-abstractions.md)
   - [ADR-006 · Secret Management](adr/006-secret-management.md)
   - [ADR-011 · Multi-Tenant Cluster](adr/011-multi-tenant-cluster.md)
 
-- **Extending Harmony** – write new Scores / Interprets, add hardware like OPNsense firewalls, or embed Harmony in your own tooling (`/docs`).
+* **Extending Harmony** – write new Scores / Interprets, add hardware like OPNsense firewalls, or embed Harmony in your own tooling (`/docs`).
 
-- **Community** – discussions and roadmap live in [GitLab issues](https://git.nationtech.io/nationtech/harmony/-/issues). PRs, ideas, and feedback are welcome!
+* **Community** – discussions and roadmap live in [GitLab issues](https://git.nationtech.io/nationtech/harmony/-/issues). PRs, ideas, and feedback are welcome!
 
 ---
 
@@ -147,4 +148,4 @@ See [LICENSE](LICENSE) for the full text.
 
 ---
 
-_Made with ❤️ & 🦀 by the NationTech and the Harmony community_
+*Made with ❤️ & 🦀 by the NationTech and the Harmony community*
@@ -1,17 +0,0 @@
-[package]
-name = "mdns"
-edition = "2024"
-version.workspace = true
-readme.workspace = true
-license.workspace = true
-
-[dependencies]
-mdns-sd = "0.14"
-tokio = { version = "1", features = ["full"] }
-futures = "0.3"
-dmidecode = "0.2" # For getting the motherboard ID on the agent
-log.workspace=true
-env_logger.workspace=true
-clap = { version = "4.5.46", features = ["derive"] }
-get_if_addrs = "0.5.3"
-local-ip-address = "0.6.5"
@@ -1,60 +0,0 @@
Removed file (60 lines): the mDNS "advertise" module of the agent-discovery prototype (header comment `// harmony-agent/src/main.rs`). `advertise()` built an instance name `harmony-agent-{motherboard_id}` from a placeholder motherboard id, created an mdns_sd `ServiceDaemon`, registered a `ServiceInfo` for `SERVICE_TYPE` on host `harmony-host.local.` at the local IP (via `local_ip_address::local_ip()`) and `SERVICE_PORT` 43210 with `id` and `version` ("1.0") TXT properties, printed every interface from `get_if_addrs::get_if_addrs()`, then waited for Ctrl+C before logging shutdown.
@@ -1,109 +0,0 @@
Removed file (109 lines): the mDNS "discover" module. `discover()` created a `ServiceDaemon`, browsed `SERVICE_TYPE`, printed `ServiceEvent::ServiceData` resolutions ("Resolved a new service: {fullname}") and all other events from a background thread, slept for 1000000 seconds, then shut the daemon down. Roughly half of the file was commented-out resolution and removal handling (resolving IP, port, and the `id` TXT property, with TODOs to track discovered hosts), followed by a private `_discover_example()` that enabled `use_service_data(true)` and browsed `_mdns-sd-my-test._udp.local.` before shutting down after one second.
@@ -1,31 +0,0 @@
-use clap::{Parser, ValueEnum};
-
-mod advertise;
-mod discover;
-
-#[derive(Parser, Debug)]
-#[command(version, about, long_about = None)]
-struct Args {
-    #[arg(value_enum)]
-    profile: Profiles,
-}
-
-#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, ValueEnum)]
-enum Profiles {
-    Advertise,
-    Discover,
-}
-
-// The service type we are looking for.
-const SERVICE_TYPE: &str = "_harmony._tcp.local.";
-
-#[tokio::main]
-async fn main() {
-    env_logger::init();
-    let args = Args::parse();
-
-    match args.profile {
-        Profiles::Advertise => advertise::advertise().await,
-        Profiles::Discover => discover::discover().await,
-    }
-}
check.sh, 3 lines changed

@@ -1,8 +1,5 @@
 #!/bin/sh
 set -e
-
-rustc --version
 cargo check --all-targets --all-features --keep-going
 cargo fmt --check
-cargo clippy
 cargo test
Binary files changed (stored with Git LFS; content not shown):
  data/okd/bin/kubectl
  data/okd/bin/oc
  data/okd/bin/oc_README.md
  data/okd/bin/openshift-install
  data/okd/bin/openshift-install_README.md
  data/okd/installer_image/scos-9.0.20250510-0-live-initramfs.x86_64.img
  data/okd/installer_image/scos-9.0.20250510-0-live-kernel.x86_64
  data/okd/installer_image/scos-9.0.20250510-0-live-rootfs.x86_64.img
@@ -1 +0,0 @@
-scos-9.0.20250510-0-live-initramfs.x86_64.img
@@ -1 +0,0 @@
-scos-9.0.20250510-0-live-kernel.x86_64
@@ -1 +0,0 @@
-scos-9.0.20250510-0-live-rootfs.x86_64.img
@@ -1,8 +0,0 @@
-Here lies all the data files required for an OKD cluster PXE boot setup.
-
-This inclues ISO files, binary boot files, ipxe, etc.
-
-TODO as of august 2025 :
-
-- `harmony_inventory_agent` should be downloaded from official releases, this embedded version is practical for now though
-- The cluster ssh key should be generated and handled by harmony with the private key saved in a secret store
data/pxe/okd/http_files/.gitattributes (vendored), 9 lines changed

@@ -1,9 +0,0 @@
-harmony_inventory_agent filter=lfs diff=lfs merge=lfs -text
-os filter=lfs diff=lfs merge=lfs -text
-os/centos-stream-9 filter=lfs diff=lfs merge=lfs -text
-os/centos-stream-9/images filter=lfs diff=lfs merge=lfs -text
-os/centos-stream-9/initrd.img filter=lfs diff=lfs merge=lfs -text
-os/centos-stream-9/vmlinuz filter=lfs diff=lfs merge=lfs -text
-os/centos-stream-9/images/efiboot.img filter=lfs diff=lfs merge=lfs -text
-os/centos-stream-9/images/install.img filter=lfs diff=lfs merge=lfs -text
-os/centos-stream-9/images/pxeboot filter=lfs diff=lfs merge=lfs -text
@@ -1 +0,0 @@
-ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIBx6bDylvC68cVpjKfEFtLQJ/dOFi6PVS2vsIOqPDJIc jeangab@liliane2
Binary files changed (stored with Git LFS; content not shown):
  data/pxe/okd/http_files/harmony_inventory_agent
  data/pxe/okd/http_files/os/centos-stream-9/images/efiboot.img
  data/pxe/okd/http_files/os/centos-stream-9/images/install.img
  data/pxe/okd/http_files/os/centos-stream-9/initrd.img
  data/pxe/okd/http_files/os/centos-stream-9/vmlinuz
  (plus additional binary files whose names were not captured)
@@ -1,108 +0,0 @@
Removed file (108 lines): the "OPNsense PXE Lab Environment" README. It documented how `pxe_vm_lab_setup.sh` builds an isolated libvirt network `harmonylan` (`virbr1`) plus two VMs: `opnsense-pxe` as gateway/PXE server and `pxe-node-1` as netboot client. Prerequisites on an Arch Linux host: `libvirt`, `qemu`, `virt-install`, `curl`, `bzip2`. Usage: create the lab with `sudo ./pxe_vm_lab_setup.sh up`; install OPNsense manually over `sudo virsh console opnsense-pxe` (login `installer` / `opnsense`), assigning LAN to the `harmonylan` interface identified via `virsh domiflist opnsense-pxe` MAC addresses and the other interface to WAN; after installation, shut the VM down, run `sudo virsh edit opnsense-pxe` to delete the `<disk>` block for the `.img` install media (the one with `<target ... bus='usb'/>`), then `sudo virsh start opnsense-pxe`. Connectivity: OPNsense defaults its LAN to `192.168.1.1`; give the host `sudo ip addr add 192.168.1.5/24 dev virbr1`, then use SSH `root@192.168.1.1` (password `opnsense`) or the Web UI at `https://192.168.1.1`. Point Harmony OPNsense scores at `192.168.1.1` with those credentials, boot the client with `sudo virsh start pxe-node-1` / `sudo virsh console pxe-node-1`, and tear everything down with `sudo ./pxe_vm_lab_setup.sh clean`.
@@ -1,191 +0,0 @@
Removed file (191 lines): `pxe_vm_lab_setup.sh` (bash, `set -euo pipefail`). Configuration: lab dir `/var/lib/harmony_pxe_test` with `images` and `state` subdirectories; VMs `opnsense-pxe` (2048 MB RAM, 2 vCPU, 10 GB disk, os-variant `freebsd14.0`) and `pxe-node-1` (4096 MB RAM, 2 vCPU, 40 GB disk, os-variant `centos-stream9`); network `harmonylan` with LAN `192.168.150.0/24`, gateway `192.168.150.1`, netmask `255.255.255.0`; OPNsense 25.7 serial image from `https://mirror.ams1.nl.leaseweb.net/opnsense/releases/25.7/OPNsense-25.7-serial-amd64.img.bz2` and CentOS Stream 9 `boot.iso` from `https://mirror.stream.centos.org/9-stream/BaseOS/x86_64/os/images/boot.iso`; libvirt URI `qemu:///system`. Helper functions: `download_if_missing` (curl to a temp file, `bunzip2` for `.bz2`), `ensure_network` (net-define, net-start, net-autostart), `destroy_vm` (destroy plus `undefine --nvram`), and `destroy_network`. `create_lab_environment` writes libvirt XML for the NAT `default` network (`virbr0`, 192.168.122.1/24, DHCP .100-.200) and the bridge-only `harmonylan` (`virbr1`), creates qcow2 disks, and runs `virt-install` for both VMs with UEFI boot and virtio devices: the OPNsense VM boots the serial image attached as a read-only USB disk on both `default` and `harmonylan`, while the client uses `--pxe` on `harmonylan` only, followed by console-installation instructions echoed to the user. The entrypoint dispatches `up` (make directories, download images, create the lab) and `clean` (destroy both VMs and `harmonylan`; destroying `default` is left commented out), otherwise printing `Usage: sudo $0 {up|clean}` and exiting 1.
@@ -1,15 +0,0 @@
-[package]
-name = "example-application-monitoring-with-tenant"
-edition = "2024"
-version.workspace = true
-readme.workspace = true
-license.workspace = true
-
-[dependencies]
-env_logger.workspace = true
-harmony = { path = "../../harmony" }
-harmony_cli = { path = "../../harmony_cli" }
-harmony_types = { path = "../../harmony_types" }
-logging = "0.1.0"
-tokio.workspace = true
-url.workspace = true

Binary file not shown.
@@ -1,56 +0,0 @@
Removed file (56 lines): the example's `main.rs`. A TODO comment noted a bug where the application is deployed into the namespace matching the application name while the tenant is created in the namespace matching the tenant name, so `application.name` and `TenantConfig.name` must match. The example built a `TenantScore` (id `test-tenant-id`, name `example-monitoring`), an `Arc<RustWebapp>` (name `example-monitoring`, domain `https://rustapp.harmony.example.com`, project_root `./examples/rust/webapp`, framework `Some(RustWebFramework::Leptos)`), and a `WebhookReceiver` named `sample-webhook-receiver` pointing at `https://webhook-doesnt-exist.com`; it wrapped the app in an `ApplicationScore` with a `Monitoring` feature using that receiver, then ran `harmony_cli::run(Inventory::autoload(), K8sAnywhereTopology::from_env(), vec![Box::new(tenant), Box::new(app)], None)`.
@@ -1,27 +1,20 @@
 use harmony::{
     inventory::Inventory,
-    modules::{
-        dummy::{ErrorScore, PanicScore, SuccessScore},
-        inventory::LaunchDiscoverInventoryAgentScore,
-    },
+    maestro::Maestro,
+    modules::dummy::{ErrorScore, PanicScore, SuccessScore},
     topology::LocalhostTopology,
 };
 
 #[tokio::main]
 async fn main() {
-    harmony_cli::run(
-        Inventory::autoload(),
-        LocalhostTopology::new(),
-        vec![
-            Box::new(SuccessScore {}),
-            Box::new(ErrorScore {}),
-            Box::new(PanicScore {}),
-            Box::new(LaunchDiscoverInventoryAgentScore {
-                discovery_timeout: Some(10),
-            }),
-        ],
-        None,
-    )
-    .await
-    .unwrap();
+    let inventory = Inventory::autoload();
+    let topology = LocalhostTopology::new();
+    let mut maestro = Maestro::initialize(inventory, topology).await.unwrap();
+
+    maestro.register_all(vec![
+        Box::new(SuccessScore {}),
+        Box::new(ErrorScore {}),
+        Box::new(PanicScore {}),
+    ]);
+    harmony_cli::init(maestro, None).await.unwrap();
 }
@@ -125,47 +125,40 @@ spec:
         name: nginx"#,
     )
     .unwrap();
-    deployment
+    return deployment;
 }
 fn nginx_deployment_2() -> Deployment {
-    let pod_template = PodTemplateSpec {
-        metadata: Some(ObjectMeta {
+    let mut pod_template = PodTemplateSpec::default();
+    pod_template.metadata = Some(ObjectMeta {
         labels: Some(BTreeMap::from([(
             "app".to_string(),
             "nginx-test".to_string(),
         )])),
+        ..Default::default()
+    });
+    pod_template.spec = Some(PodSpec {
+        containers: vec![Container {
+            name: "nginx".to_string(),
+            image: Some("nginx".to_string()),
             ..Default::default()
-        }),
-        spec: Some(PodSpec {
-            containers: vec![Container {
-                name: "nginx".to_string(),
-                image: Some("nginx".to_string()),
-                ..Default::default()
-            }],
-            ..Default::default()
-        }),
+        }],
+        ..Default::default()
+    });
+    let mut spec = DeploymentSpec::default();
+    spec.template = pod_template;
+    spec.selector = LabelSelector {
+        match_expressions: None,
+        match_labels: Some(BTreeMap::from([(
+            "app".to_string(),
+            "nginx-test".to_string(),
+        )])),
     };
 
-    let spec = DeploymentSpec {
-        template: pod_template,
-        selector: LabelSelector {
-            match_expressions: None,
-            match_labels: Some(BTreeMap::from([(
-                "app".to_string(),
-                "nginx-test".to_string(),
-            )])),
-        },
-        ..Default::default()
-    };
+    let mut deployment = Deployment::default();
+    deployment.spec = Some(spec);
+    deployment.metadata.name = Some("nginx-test".to_string());
 
-    Deployment {
-        spec: Some(spec),
-        metadata: ObjectMeta {
-            name: Some("nginx-test".to_string()),
-            ..Default::default()
-        },
-        ..Default::default()
-    }
+    deployment
 }
 
 fn nginx_deployment() -> Deployment {
@@ -1,10 +1,10 @@
 use harmony::{
     data::Version,
     inventory::Inventory,
+    maestro::Maestro,
     modules::lamp::{LAMPConfig, LAMPScore},
-    topology::K8sAnywhereTopology,
+    topology::{K8sAnywhereTopology, Url},
 };
-use harmony_types::net::Url;
 
 #[tokio::main]
 async fn main() {
@@ -24,7 +24,7 @@ async fn main() {
         // This config can be extended as needed for more complicated configurations
         config: LAMPConfig {
             project_root: "./php".into(),
-            database_size: "4Gi".to_string().into(),
+            database_size: format!("4Gi").into(),
             ..Default::default()
         },
     };
@@ -43,13 +43,15 @@ async fn main() {
     // K8sAnywhereTopology as it is the most automatic one that enables you to easily deploy
    // locally, to development environment from a CI, to staging, and to production with settings
     // that automatically adapt to each environment grade.
-    harmony_cli::run(
+    let mut maestro = Maestro::<K8sAnywhereTopology>::initialize(
         Inventory::autoload(),
         K8sAnywhereTopology::from_env(),
-        vec![Box::new(lamp_stack)],
-        None,
     )
     .await
     .unwrap();
 
+    maestro.register_all(vec![Box::new(lamp_stack)]);
+    // Here we bootstrap the CLI, this gives some nice features if you need them
+    harmony_cli::init(maestro, None).await.unwrap();
 }
 // That's it, end of the infra as code.
@@ -6,9 +6,8 @@ readme.workspace = true
 license.workspace = true
 
 [dependencies]
-harmony = { path = "../../harmony" }
-harmony_cli = { path = "../../harmony_cli" }
-harmony_macros = { path = "../../harmony_macros" }
-harmony_types = { path = "../../harmony_types" }
+harmony = { version = "0.1.0", path = "../../harmony" }
+harmony_cli = { version = "0.1.0", path = "../../harmony_cli" }
+harmony_macros = { version = "0.1.0", path = "../../harmony_macros" }
 tokio.workspace = true
 url.workspace = true
@@ -2,6 +2,7 @@ use std::collections::HashMap;
 
 use harmony::{
     inventory::Inventory,
+    maestro::Maestro,
     modules::{
         monitoring::{
             alert_channel::discord_alert_channel::DiscordWebhook,
@@ -22,9 +23,8 @@ use harmony::{
             k8s::pvc::high_pvc_fill_rate_over_two_days,
         },
     },
-    topology::K8sAnywhereTopology,
+    topology::{K8sAnywhereTopology, Url},
 };
-use harmony_types::net::Url;
 
 #[tokio::main]
 async fn main() {
@@ -51,8 +51,8 @@ async fn main() {
 
     let service_monitor_endpoint = ServiceMonitorEndpoint {
         port: Some("80".to_string()),
-        path: Some("/metrics".to_string()),
-        scheme: Some(HTTPScheme::HTTP),
+        path: "/metrics".to_string(),
+        scheme: HTTPScheme::HTTP,
         ..Default::default()
     };
 
@@ -74,13 +74,13 @@ async fn main() {
         rules: vec![Box::new(additional_rules), Box::new(additional_rules2)],
         service_monitors: vec![service_monitor],
     };
-    harmony_cli::run(
+    let mut maestro = Maestro::<K8sAnywhereTopology>::initialize(
         Inventory::autoload(),
         K8sAnywhereTopology::from_env(),
-        vec![Box::new(alerting_score)],
-        None,
     )
     .await
     .unwrap();
 
+    maestro.register_all(vec![Box::new(alerting_score)]);
+    harmony_cli::init(maestro, None).await.unwrap();
 }
@@ -7,8 +7,7 @@ license.workspace = true
 
 [dependencies]
 cidr.workspace = true
-harmony = { path = "../../harmony" }
-harmony_cli = { path = "../../harmony_cli" }
-harmony_types = { path = "../../harmony_types" }
+harmony = { version = "0.1.0", path = "../../harmony" }
+harmony_cli = { version = "0.1.0", path = "../../harmony_cli" }
 tokio.workspace = true
 url.workspace = true
@@ -1,7 +1,9 @@
-use std::{collections::HashMap, str::FromStr};
+use std::collections::HashMap;
 
 use harmony::{
+    data::Id,
     inventory::Inventory,
+    maestro::Maestro,
     modules::{
         monitoring::{
             alert_channel::discord_alert_channel::DiscordWebhook,
@@ -18,18 +20,16 @@ use harmony::{
         tenant::TenantScore,
     },
     topology::{
-        K8sAnywhereTopology,
+        K8sAnywhereTopology, Url,
         tenant::{ResourceLimits, TenantConfig, TenantNetworkPolicy},
     },
 };
-use harmony_types::id::Id;
-use harmony_types::net::Url;
 
 #[tokio::main]
 async fn main() {
     let tenant = TenantScore {
         config: TenantConfig {
-            id: Id::from_str("1234").unwrap(),
+            id: Id::from_string("1234".to_string()),
             name: "test-tenant".to_string(),
             resource_limits: ResourceLimits {
                 cpu_request_cores: 6.0,
@@ -54,8 +54,8 @@ async fn main() {
 
     let service_monitor_endpoint = ServiceMonitorEndpoint {
         port: Some("80".to_string()),
-        path: Some("/metrics".to_string()),
-        scheme: Some(HTTPScheme::HTTP),
+        path: "/metrics".to_string(),
+        scheme: HTTPScheme::HTTP,
         ..Default::default()
     };
 
@@ -78,13 +78,13 @@ async fn main() {
         rules: vec![Box::new(additional_rules)],
         service_monitors: vec![service_monitor],
     };
-    harmony_cli::run(
+    let mut maestro = Maestro::<K8sAnywhereTopology>::initialize(
         Inventory::autoload(),
         K8sAnywhereTopology::from_env(),
-        vec![Box::new(tenant), Box::new(alerting_score)],
-        None,
     )
     .await
     .unwrap();
 
+    maestro.register_all(vec![Box::new(tenant), Box::new(alerting_score)]);
+    harmony_cli::init(maestro, None).await.unwrap();
 }
@@ -13,7 +13,6 @@ harmony_types = { path = "../../harmony_types" }
 cidr = { workspace = true }
 tokio = { workspace = true }
 harmony_macros = { path = "../../harmony_macros" }
-harmony_secret = { path = "../../harmony_secret" }
 log = { workspace = true }
 env_logger = { workspace = true }
 url = { workspace = true }

@@ -5,19 +5,23 @@ use std::{

 use cidr::Ipv4Cidr;
 use harmony::{
-    config::secret::SshKeyPair, data::{FileContent, FilePath}, hardware::{HostCategory, Location, PhysicalHost, SwitchGroup}, infra::opnsense::OPNSenseManagementInterface, inventory::Inventory, modules::{
-        http::StaticFilesHttpScore,
+    hardware::{FirewallGroup, HostCategory, Location, PhysicalHost, SwitchGroup},
+    infra::opnsense::OPNSenseManagementInterface,
+    inventory::Inventory,
+    maestro::Maestro,
+    modules::{
+        http::HttpScore,
+        ipxe::IpxeScore,
         okd::{
             bootstrap_dhcp::OKDBootstrapDhcpScore,
             bootstrap_load_balancer::OKDBootstrapLoadBalancerScore, dhcp::OKDDhcpScore,
-            dns::OKDDnsScore, ipxe::OKDIpxeScore,
+            dns::OKDDnsScore,
         },
         tftp::TftpScore,
-    }, topology::{LogicalHost, UnmanagedRouter}
+    },
+    topology::{LogicalHost, UnmanagedRouter, Url},
 };
 use harmony_macros::{ip, mac_address};
-use harmony_secret::SecretManager;
-use harmony_types::net::Url;

 #[tokio::main]
 async fn main() {
@@ -83,7 +87,8 @@ async fn main() {
     let inventory = Inventory {
         location: Location::new("I am mobile".to_string(), "earth".to_string()),
         switch: SwitchGroup::from([]),
-        firewall_mgmt: Box::new(OPNSenseManagementInterface::new()),
+        firewall: FirewallGroup::from([PhysicalHost::empty(HostCategory::Firewall)
+            .management(Arc::new(OPNSenseManagementInterface::new()))]),
         storage_host: vec![],
         worker_host: vec![
             PhysicalHost::empty(HostCategory::Server)
@@ -120,43 +125,21 @@ async fn main() {
     let load_balancer_score =
         harmony::modules::okd::load_balancer::OKDLoadBalancerScore::new(&topology);

-    let ssh_key = SecretManager::get_or_prompt::<SshKeyPair>().await.unwrap();
-
     let tftp_score = TftpScore::new(Url::LocalFolder("./data/watchguard/tftpboot".to_string()));
-    let http_score = StaticFilesHttpScore {
-        folder_to_serve: Some(Url::LocalFolder(
-            "./data/watchguard/pxe-http-files".to_string(),
-        )),
-        files: vec![],
-        remote_path: None,
-    };
-
-    let kickstart_filename = "inventory.kickstart".to_string();
-    let harmony_inventory_agent = "harmony_inventory_agent".to_string();
-
-    let ipxe_score = OKDIpxeScore {
-        kickstart_filename,
-        harmony_inventory_agent,
-        cluster_pubkey: FileContent {
-            path: FilePath::Relative("cluster_ssh_key.pub".to_string()),
-            content: ssh_key.public,
-        },
-    };
-
-    harmony_tui::run(
-        inventory,
-        topology,
-        vec![
-            Box::new(dns_score),
-            Box::new(bootstrap_dhcp_score),
-            Box::new(bootstrap_load_balancer_score),
-            Box::new(load_balancer_score),
-            Box::new(tftp_score),
-            Box::new(http_score),
-            Box::new(ipxe_score),
-            Box::new(dhcp_score),
-        ],
-    )
-    .await
-    .unwrap();
+    let http_score = HttpScore::new(Url::LocalFolder(
+        "./data/watchguard/pxe-http-files".to_string(),
+    ));
+    let ipxe_score = IpxeScore::new();
+    let mut maestro = Maestro::initialize(inventory, topology).await.unwrap();
+    maestro.register_all(vec![
+        Box::new(dns_score),
+        Box::new(bootstrap_dhcp_score),
+        Box::new(bootstrap_load_balancer_score),
+        Box::new(load_balancer_score),
+        Box::new(tftp_score),
+        Box::new(http_score),
+        Box::new(ipxe_score),
+        Box::new(dhcp_score),
+    ]);
+    harmony_tui::init(maestro).await.unwrap();
 }

@@ -1,18 +1,19 @@
 use harmony::{
-    inventory::Inventory, modules::monitoring::ntfy::ntfy::NtfyScore, topology::K8sAnywhereTopology,
+    inventory::Inventory, maestro::Maestro, modules::monitoring::ntfy::ntfy::NtfyScore,
+    topology::K8sAnywhereTopology,
 };

 #[tokio::main]
 async fn main() {
-    harmony_cli::run(
+    let mut maestro = Maestro::<K8sAnywhereTopology>::initialize(
         Inventory::autoload(),
         K8sAnywhereTopology::from_env(),
-        vec![Box::new(NtfyScore {
-            namespace: "monitoring".to_string(),
-            host: "localhost".to_string(),
-        })],
-        None,
     )
     .await
     .unwrap();
+
+    maestro.register_all(vec![Box::new(NtfyScore {
+        namespace: "monitoring".to_string(),
+    })]);
+    harmony_cli::init(maestro, None).await.unwrap();
 }

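The same mechanical migration appears in every example above: the one-shot harmony_cli::run(inventory, topology, scores, None) call is replaced by a Maestro that owns the inventory/topology pair and has the scores registered on it before the CLI takes over. For reference, here is a minimal sketch of that boot sequence in one place; the Score trait bound and the exact register_all signature are assumptions inferred from the surrounding hunks, not taken from a documented harmony API.

use harmony::{inventory::Inventory, maestro::Maestro, score::Score, topology::K8sAnywhereTopology};

// Illustrative only: boots a K8sAnywhereTopology the way the rewritten examples do.
async fn run_scores(scores: Vec<Box<dyn Score<K8sAnywhereTopology>>>) {
    // Maestro::initialize takes ownership of the inventory and topology...
    let mut maestro = Maestro::<K8sAnywhereTopology>::initialize(
        Inventory::autoload(),
        K8sAnywhereTopology::from_env(),
    )
    .await
    .unwrap();

    // ...the scores are registered up front...
    maestro.register_all(scores);

    // ...and the CLI (or harmony_tui) drives the registered scores from here.
    harmony_cli::init(maestro, None).await.unwrap();
}
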
@@ -1,21 +0,0 @@
|
|||||||
[package]
|
|
||||||
name = "example-okd-install"
|
|
||||||
edition = "2024"
|
|
||||||
version.workspace = true
|
|
||||||
readme.workspace = true
|
|
||||||
license.workspace = true
|
|
||||||
publish = false
|
|
||||||
|
|
||||||
[dependencies]
|
|
||||||
harmony = { path = "../../harmony" }
|
|
||||||
harmony_cli = { path = "../../harmony_cli" }
|
|
||||||
harmony_types = { path = "../../harmony_types" }
|
|
||||||
harmony_secret = { path = "../../harmony_secret" }
|
|
||||||
harmony_secret_derive = { path = "../../harmony_secret_derive" }
|
|
||||||
cidr = { workspace = true }
|
|
||||||
tokio = { workspace = true }
|
|
||||||
harmony_macros = { path = "../../harmony_macros" }
|
|
||||||
log = { workspace = true }
|
|
||||||
env_logger = { workspace = true }
|
|
||||||
url = { workspace = true }
|
|
||||||
serde.workspace = true
|
|
||||||
@@ -1,4 +0,0 @@
|
|||||||
export HARMONY_SECRET_NAMESPACE=example-vms
|
|
||||||
export HARMONY_SECRET_STORE=file
|
|
||||||
export HARMONY_DATABASE_URL=sqlite://harmony_vms.sqlite RUST_LOG=info
|
|
||||||
export RUST_LOG=info
|
|
||||||
@@ -1,34 +0,0 @@
|
|||||||
mod topology;
|
|
||||||
|
|
||||||
use crate::topology::{get_inventory, get_topology};
|
|
||||||
use harmony::{
|
|
||||||
config::secret::SshKeyPair,
|
|
||||||
data::{FileContent, FilePath},
|
|
||||||
modules::okd::{installation::OKDInstallationPipeline, ipxe::OKDIpxeScore},
|
|
||||||
score::Score,
|
|
||||||
topology::HAClusterTopology,
|
|
||||||
};
|
|
||||||
use harmony_secret::SecretManager;
|
|
||||||
|
|
||||||
#[tokio::main]
|
|
||||||
async fn main() {
|
|
||||||
let inventory = get_inventory();
|
|
||||||
let topology = get_topology().await;
|
|
||||||
|
|
||||||
let ssh_key = SecretManager::get_or_prompt::<SshKeyPair>().await.unwrap();
|
|
||||||
|
|
||||||
let mut scores: Vec<Box<dyn Score<HAClusterTopology>>> = vec![Box::new(OKDIpxeScore {
|
|
||||||
kickstart_filename: "inventory.kickstart".to_string(),
|
|
||||||
harmony_inventory_agent: "harmony_inventory_agent".to_string(),
|
|
||||||
cluster_pubkey: FileContent {
|
|
||||||
path: FilePath::Relative("cluster_ssh_key.pub".to_string()),
|
|
||||||
content: ssh_key.public,
|
|
||||||
},
|
|
||||||
})];
|
|
||||||
|
|
||||||
scores.append(&mut OKDInstallationPipeline::get_all_scores().await);
|
|
||||||
|
|
||||||
harmony_cli::run(inventory, topology, scores, None)
|
|
||||||
.await
|
|
||||||
.unwrap();
|
|
||||||
}
|
|
||||||
@@ -1,77 +0,0 @@
|
|||||||
use cidr::Ipv4Cidr;
|
|
||||||
use harmony::{
|
|
||||||
hardware::{FirewallGroup, HostCategory, Location, PhysicalHost, SwitchGroup},
|
|
||||||
infra::opnsense::OPNSenseManagementInterface,
|
|
||||||
inventory::Inventory,
|
|
||||||
topology::{HAClusterTopology, LogicalHost, UnmanagedRouter},
|
|
||||||
};
|
|
||||||
use harmony_macros::{ip, ipv4};
|
|
||||||
use harmony_secret::{Secret, SecretManager};
|
|
||||||
use serde::{Deserialize, Serialize};
|
|
||||||
use std::{net::IpAddr, sync::Arc};
|
|
||||||
|
|
||||||
#[derive(Secret, Serialize, Deserialize, Debug, PartialEq)]
|
|
||||||
struct OPNSenseFirewallConfig {
|
|
||||||
username: String,
|
|
||||||
password: String,
|
|
||||||
}
|
|
||||||
|
|
||||||
pub async fn get_topology() -> HAClusterTopology {
|
|
||||||
let firewall = harmony::topology::LogicalHost {
|
|
||||||
ip: ip!("192.168.1.1"),
|
|
||||||
name: String::from("opnsense-1"),
|
|
||||||
};
|
|
||||||
|
|
||||||
let config = SecretManager::get_or_prompt::<OPNSenseFirewallConfig>().await;
|
|
||||||
let config = config.unwrap();
|
|
||||||
|
|
||||||
let opnsense = Arc::new(
|
|
||||||
harmony::infra::opnsense::OPNSenseFirewall::new(
|
|
||||||
firewall,
|
|
||||||
None,
|
|
||||||
&config.username,
|
|
||||||
&config.password,
|
|
||||||
)
|
|
||||||
.await,
|
|
||||||
);
|
|
||||||
let lan_subnet = ipv4!("192.168.1.0");
|
|
||||||
let gateway_ipv4 = ipv4!("192.168.1.1");
|
|
||||||
let gateway_ip = IpAddr::V4(gateway_ipv4);
|
|
||||||
harmony::topology::HAClusterTopology {
|
|
||||||
domain_name: "demo.harmony.mcd".to_string(),
|
|
||||||
router: Arc::new(UnmanagedRouter::new(
|
|
||||||
gateway_ip,
|
|
||||||
Ipv4Cidr::new(lan_subnet, 24).unwrap(),
|
|
||||||
)),
|
|
||||||
load_balancer: opnsense.clone(),
|
|
||||||
firewall: opnsense.clone(),
|
|
||||||
tftp_server: opnsense.clone(),
|
|
||||||
http_server: opnsense.clone(),
|
|
||||||
dhcp_server: opnsense.clone(),
|
|
||||||
dns_server: opnsense.clone(),
|
|
||||||
control_plane: vec![LogicalHost {
|
|
||||||
ip: ip!("192.168.1.20"),
|
|
||||||
name: "master".to_string(),
|
|
||||||
}],
|
|
||||||
bootstrap_host: LogicalHost {
|
|
||||||
ip: ip!("192.168.1.10"),
|
|
||||||
name: "bootstrap".to_string(),
|
|
||||||
},
|
|
||||||
workers: vec![],
|
|
||||||
switch: vec![],
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn get_inventory() -> Inventory {
|
|
||||||
Inventory {
|
|
||||||
location: Location::new(
|
|
||||||
"Some virtual machine or maybe a physical machine if you're cool".to_string(),
|
|
||||||
"testopnsense".to_string(),
|
|
||||||
),
|
|
||||||
switch: SwitchGroup::from([]),
|
|
||||||
firewall_mgmt: Box::new(OPNSenseManagementInterface::new()),
|
|
||||||
storage_host: vec![],
|
|
||||||
worker_host: vec![],
|
|
||||||
control_plane_host: vec![],
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -1,7 +0,0 @@
|
|||||||
-----BEGIN OPENSSH PRIVATE KEY-----
|
|
||||||
b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAAAMwAAAAtzc2gtZW
|
|
||||||
QyNTUxOQAAACAcemw8pbwuvHFaYynxBbS0Cf3ThYuj1Utr7CDqjwySHAAAAJikacCNpGnA
|
|
||||||
jQAAAAtzc2gtZWQyNTUxOQAAACAcemw8pbwuvHFaYynxBbS0Cf3ThYuj1Utr7CDqjwySHA
|
|
||||||
AAAECiiKk4V6Q5cVs6axDM4sjAzZn/QCZLQekmYQXS9XbEYxx6bDylvC68cVpjKfEFtLQJ
|
|
||||||
/dOFi6PVS2vsIOqPDJIcAAAAEGplYW5nYWJAbGlsaWFuZTIBAgMEBQ==
|
|
||||||
-----END OPENSSH PRIVATE KEY-----
|
|
||||||
@@ -1 +0,0 @@
|
|||||||
ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIBx6bDylvC68cVpjKfEFtLQJ/dOFi6PVS2vsIOqPDJIc jeangab@liliane2
|
|
||||||
@@ -1,21 +0,0 @@
|
|||||||
[package]
|
|
||||||
name = "example-pxe"
|
|
||||||
edition = "2024"
|
|
||||||
version.workspace = true
|
|
||||||
readme.workspace = true
|
|
||||||
license.workspace = true
|
|
||||||
publish = false
|
|
||||||
|
|
||||||
[dependencies]
|
|
||||||
harmony = { path = "../../harmony" }
|
|
||||||
harmony_cli = { path = "../../harmony_cli" }
|
|
||||||
harmony_types = { path = "../../harmony_types" }
|
|
||||||
harmony_secret = { path = "../../harmony_secret" }
|
|
||||||
harmony_secret_derive = { path = "../../harmony_secret_derive" }
|
|
||||||
cidr = { workspace = true }
|
|
||||||
tokio = { workspace = true }
|
|
||||||
harmony_macros = { path = "../../harmony_macros" }
|
|
||||||
log = { workspace = true }
|
|
||||||
env_logger = { workspace = true }
|
|
||||||
url = { workspace = true }
|
|
||||||
serde.workspace = true
|
|
||||||
@@ -1,32 +0,0 @@
|
|||||||
mod topology;
|
|
||||||
|
|
||||||
use crate::topology::{get_inventory, get_topology};
|
|
||||||
use harmony::{
|
|
||||||
config::secret::SshKeyPair,
|
|
||||||
data::{FileContent, FilePath},
|
|
||||||
modules::okd::ipxe::OKDIpxeScore,
|
|
||||||
};
|
|
||||||
use harmony_secret::SecretManager;
|
|
||||||
|
|
||||||
#[tokio::main]
|
|
||||||
async fn main() {
|
|
||||||
let inventory = get_inventory();
|
|
||||||
let topology = get_topology().await;
|
|
||||||
|
|
||||||
let kickstart_filename = "inventory.kickstart".to_string();
|
|
||||||
let harmony_inventory_agent = "harmony_inventory_agent".to_string();
|
|
||||||
let ssh_key = SecretManager::get_or_prompt::<SshKeyPair>().await.unwrap();
|
|
||||||
|
|
||||||
let ipxe_score = OKDIpxeScore {
|
|
||||||
kickstart_filename,
|
|
||||||
harmony_inventory_agent,
|
|
||||||
cluster_pubkey: FileContent {
|
|
||||||
path: FilePath::Relative("cluster_ssh_key.pub".to_string()),
|
|
||||||
content: ssh_key.public,
|
|
||||||
},
|
|
||||||
};
|
|
||||||
|
|
||||||
harmony_cli::run(inventory, topology, vec![Box::new(ipxe_score)], None)
|
|
||||||
.await
|
|
||||||
.unwrap();
|
|
||||||
}
|
|
||||||
@@ -1,71 +0,0 @@
|
|||||||
use cidr::Ipv4Cidr;
|
|
||||||
use harmony::{
|
|
||||||
config::secret::OPNSenseFirewallCredentials,
|
|
||||||
hardware::{Location, SwitchGroup},
|
|
||||||
infra::opnsense::OPNSenseManagementInterface,
|
|
||||||
inventory::Inventory,
|
|
||||||
topology::{HAClusterTopology, LogicalHost, UnmanagedRouter},
|
|
||||||
};
|
|
||||||
use harmony_macros::{ip, ipv4};
|
|
||||||
use harmony_secret::SecretManager;
|
|
||||||
use std::{net::IpAddr, sync::Arc};
|
|
||||||
|
|
||||||
pub async fn get_topology() -> HAClusterTopology {
|
|
||||||
let firewall = harmony::topology::LogicalHost {
|
|
||||||
ip: ip!("192.168.1.1"),
|
|
||||||
name: String::from("opnsense-1"),
|
|
||||||
};
|
|
||||||
|
|
||||||
let config = SecretManager::get_or_prompt::<OPNSenseFirewallCredentials>().await;
|
|
||||||
let config = config.unwrap();
|
|
||||||
|
|
||||||
let opnsense = Arc::new(
|
|
||||||
harmony::infra::opnsense::OPNSenseFirewall::new(
|
|
||||||
firewall,
|
|
||||||
None,
|
|
||||||
&config.username,
|
|
||||||
&config.password,
|
|
||||||
)
|
|
||||||
.await,
|
|
||||||
);
|
|
||||||
let lan_subnet = ipv4!("192.168.1.0");
|
|
||||||
let gateway_ipv4 = ipv4!("192.168.1.1");
|
|
||||||
let gateway_ip = IpAddr::V4(gateway_ipv4);
|
|
||||||
harmony::topology::HAClusterTopology {
|
|
||||||
domain_name: "demo.harmony.mcd".to_string(),
|
|
||||||
router: Arc::new(UnmanagedRouter::new(
|
|
||||||
gateway_ip,
|
|
||||||
Ipv4Cidr::new(lan_subnet, 24).unwrap(),
|
|
||||||
)),
|
|
||||||
load_balancer: opnsense.clone(),
|
|
||||||
firewall: opnsense.clone(),
|
|
||||||
tftp_server: opnsense.clone(),
|
|
||||||
http_server: opnsense.clone(),
|
|
||||||
dhcp_server: opnsense.clone(),
|
|
||||||
dns_server: opnsense.clone(),
|
|
||||||
control_plane: vec![LogicalHost {
|
|
||||||
ip: ip!("10.100.8.20"),
|
|
||||||
name: "cp0".to_string(),
|
|
||||||
}],
|
|
||||||
bootstrap_host: LogicalHost {
|
|
||||||
ip: ip!("10.100.8.20"),
|
|
||||||
name: "cp0".to_string(),
|
|
||||||
},
|
|
||||||
workers: vec![],
|
|
||||||
switch: vec![],
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn get_inventory() -> Inventory {
|
|
||||||
Inventory {
|
|
||||||
location: Location::new(
|
|
||||||
"Some virtual machine or maybe a physical machine if you're cool".to_string(),
|
|
||||||
"testopnsense".to_string(),
|
|
||||||
),
|
|
||||||
switch: SwitchGroup::from([]),
|
|
||||||
firewall_mgmt: Box::new(OPNSenseManagementInterface::new()),
|
|
||||||
storage_host: vec![],
|
|
||||||
worker_host: vec![],
|
|
||||||
control_plane_host: vec![],
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -1,7 +0,0 @@
|
|||||||
-----BEGIN OPENSSH PRIVATE KEY-----
|
|
||||||
b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAAAMwAAAAtzc2gtZW
|
|
||||||
QyNTUxOQAAACAcemw8pbwuvHFaYynxBbS0Cf3ThYuj1Utr7CDqjwySHAAAAJikacCNpGnA
|
|
||||||
jQAAAAtzc2gtZWQyNTUxOQAAACAcemw8pbwuvHFaYynxBbS0Cf3ThYuj1Utr7CDqjwySHA
|
|
||||||
AAAECiiKk4V6Q5cVs6axDM4sjAzZn/QCZLQekmYQXS9XbEYxx6bDylvC68cVpjKfEFtLQJ
|
|
||||||
/dOFi6PVS2vsIOqPDJIcAAAAEGplYW5nYWJAbGlsaWFuZTIBAgMEBQ==
|
|
||||||
-----END OPENSSH PRIVATE KEY-----
|
|
||||||
@@ -1 +0,0 @@
|
|||||||
ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIBx6bDylvC68cVpjKfEFtLQJ/dOFi6PVS2vsIOqPDJIc jeangab@liliane2
|
|
||||||
@@ -5,20 +5,20 @@ use std::{

 use cidr::Ipv4Cidr;
 use harmony::{
-    hardware::{HostCategory, Location, PhysicalHost, SwitchGroup},
+    hardware::{FirewallGroup, HostCategory, Location, PhysicalHost, SwitchGroup},
     infra::opnsense::OPNSenseManagementInterface,
     inventory::Inventory,
+    maestro::Maestro,
     modules::{
         dummy::{ErrorScore, PanicScore, SuccessScore},
-        http::StaticFilesHttpScore,
+        http::HttpScore,
         okd::{dhcp::OKDDhcpScore, dns::OKDDnsScore, load_balancer::OKDLoadBalancerScore},
         opnsense::OPNsenseShellCommandScore,
         tftp::TftpScore,
     },
-    topology::{LogicalHost, UnmanagedRouter},
+    topology::{LogicalHost, UnmanagedRouter, Url},
 };
 use harmony_macros::{ip, mac_address};
-use harmony_types::net::Url;

 #[tokio::main]
 async fn main() {
@@ -63,7 +63,8 @@ async fn main() {
             "wk".to_string(),
         ),
         switch: SwitchGroup::from([]),
-        firewall_mgmt: Box::new(OPNSenseManagementInterface::new()),
+        firewall: FirewallGroup::from([PhysicalHost::empty(HostCategory::Firewall)
+            .management(Arc::new(OPNSenseManagementInterface::new()))]),
         storage_host: vec![],
         worker_host: vec![],
         control_plane_host: vec![
@@ -80,32 +81,23 @@ async fn main() {
     let load_balancer_score = OKDLoadBalancerScore::new(&topology);

     let tftp_score = TftpScore::new(Url::LocalFolder("./data/watchguard/tftpboot".to_string()));
-    let http_score = StaticFilesHttpScore {
-        folder_to_serve: Some(Url::LocalFolder(
-            "./data/watchguard/pxe-http-files".to_string(),
-        )),
-        files: vec![],
-        remote_path: None,
-    };
-
-    harmony_tui::run(
-        inventory,
-        topology,
-        vec![
-            Box::new(dns_score),
-            Box::new(dhcp_score),
-            Box::new(load_balancer_score),
-            Box::new(tftp_score),
-            Box::new(http_score),
-            Box::new(OPNsenseShellCommandScore {
-                opnsense: opnsense.get_opnsense_config(),
-                command: "touch /tmp/helloharmonytouching".to_string(),
-            }),
-            Box::new(SuccessScore {}),
-            Box::new(ErrorScore {}),
-            Box::new(PanicScore {}),
-        ],
-    )
-    .await
-    .unwrap();
+    let http_score = HttpScore::new(Url::LocalFolder(
+        "./data/watchguard/pxe-http-files".to_string(),
+    ));
+    let mut maestro = Maestro::initialize(inventory, topology).await.unwrap();
+    maestro.register_all(vec![
+        Box::new(dns_score),
+        Box::new(dhcp_score),
+        Box::new(load_balancer_score),
+        Box::new(tftp_score),
+        Box::new(http_score),
+        Box::new(OPNsenseShellCommandScore {
+            opnsense: opnsense.get_opnsense_config(),
+            command: "touch /tmp/helloharmonytouching".to_string(),
+        }),
+        Box::new(SuccessScore {}),
+        Box::new(ErrorScore {}),
+        Box::new(PanicScore {}),
+    ]);
+    harmony_tui::init(maestro).await.unwrap();
 }

examples/postgres/Cargo.toml (new file, +10 lines)
@@ -0,0 +1,10 @@
+[package]
+name = "example-postgres"
+version = "0.1.0"
+edition = "2021"
+
+[dependencies]
+harmony = { path = "../../harmony" }
+tokio = { version = "1", features = ["full"] }
+serde = { version = "1.0", features = ["derive"] }
+async-trait = "0.1.80"

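Assuming the new crate is also added to the workspace members (this diff does not show that), the example would be run with cargo run -p example-postgres, the package name being taken from the manifest above; that invocation is an inference, not something stated in this change.
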
examples/postgres/src/main.rs (new file, +84 lines)
@@ -0,0 +1,84 @@
+use async_trait::async_trait;
+use harmony::{
+    data::{PostgresDatabase, PostgresUser},
+    interpret::InterpretError,
+    inventory::Inventory,
+    maestro::Maestro,
+    modules::postgres::PostgresScore,
+    topology::{PostgresServer, Topology},
+};
+use std::error::Error;
+
+#[derive(Debug, Clone)]
+struct MockTopology;
+
+#[async_trait]
+impl Topology for MockTopology {
+    fn name(&self) -> &str {
+        "MockTopology"
+    }
+
+    async fn ensure_ready(&self) -> Result<harmony::interpret::Outcome, InterpretError> {
+        Ok(harmony::interpret::Outcome::new(
+            harmony::interpret::InterpretStatus::SUCCESS,
+            "Mock topology is always ready".to_string(),
+        ))
+    }
+}
+
+#[async_trait]
+impl PostgresServer for MockTopology {
+    async fn ensure_users_exist(&self, users: Vec<PostgresUser>) -> Result<(), InterpretError> {
+        println!("Ensuring users exist:");
+        for user in users {
+            println!(" - {}: {}", user.name, user.password);
+        }
+        Ok(())
+    }
+
+    async fn ensure_databases_exist(
+        &self,
+        databases: Vec<PostgresDatabase>,
+    ) -> Result<(), InterpretError> {
+        println!("Ensuring databases exist:");
+        for db in databases {
+            println!(" - {}: owner={}", db.name, db.owner);
+        }
+        Ok(())
+    }
+}
+
+#[tokio::main]
+async fn main() -> Result<(), Box<dyn Error>> {
+    let users = vec![
+        PostgresUser {
+            name: "admin".to_string(),
+            password: "password".to_string(),
+        },
+        PostgresUser {
+            name: "user".to_string(),
+            password: "password".to_string(),
+        },
+    ];
+
+    let databases = vec![
+        PostgresDatabase {
+            name: "app_db".to_string(),
+            owner: "admin".to_string(),
+        },
+        PostgresDatabase {
+            name: "user_db".to_string(),
+            owner: "user".to_string(),
+        },
+    ];
+
+    let postgres_score = PostgresScore::new(users, databases);
+
+    let inventory = Inventory::empty();
+    let topology = MockTopology;
+    let maestro = Maestro::new(inventory, topology);
+
+    maestro.interpret(Box::new(postgres_score)).await?;
+
+    Ok(())
+}

||||||
3
examples/rust/.gitignore
vendored
3
examples/rust/.gitignore
vendored
@@ -1,3 +0,0 @@
|
|||||||
Dockerfile.harmony
|
|
||||||
.harmony_generated
|
|
||||||
harmony
|
|
||||||
@@ -12,4 +12,3 @@ tokio = { workspace = true }
 log = { workspace = true }
 env_logger = { workspace = true }
 url = { workspace = true }
-base64.workspace = true

@@ -1,59 +1,20 @@
-use std::{path::PathBuf, sync::Arc};
-
 use harmony::{
     inventory::Inventory,
-    modules::{
-        application::{
-            ApplicationScore, RustWebFramework, RustWebapp,
-            features::{ContinuousDelivery, Monitoring},
-        },
-        monitoring::alert_channel::{
-            discord_alert_channel::DiscordWebhook, webhook_receiver::WebhookReceiver,
-        },
-    },
-    topology::K8sAnywhereTopology,
+    maestro::Maestro,
+    modules::application::{RustWebappScore, features::ContinuousDelivery},
+    topology::{K8sAnywhereTopology, Url},
 };
-use harmony_types::net::Url;

 #[tokio::main]
 async fn main() {
-    let application = Arc::new(RustWebapp {
-        name: "harmony-example-rust-webapp".to_string(),
+    let app = RustWebappScore {
+        name: "Example Rust Webapp".to_string(),
         domain: Url::Url(url::Url::parse("https://rustapp.harmony.example.com").unwrap()),
-        project_root: PathBuf::from("./webapp"), // Relative from 'harmony-path' param
-        framework: Some(RustWebFramework::Leptos),
-    });
-
-    let discord_receiver = DiscordWebhook {
-        name: "test-discord".to_string(),
-        url: Url::Url(url::Url::parse("https://discord.doesnt.exist.com").unwrap()),
+        features: vec![Box::new(ContinuousDelivery {})],
     };

-    let webhook_receiver = WebhookReceiver {
-        name: "sample-webhook-receiver".to_string(),
-        url: Url::Url(url::Url::parse("https://webhook-doesnt-exist.com").unwrap()),
-    };
-
-    let app = ApplicationScore {
-        features: vec![
-            Box::new(ContinuousDelivery {
-                application: application.clone(),
-            }),
-            Box::new(Monitoring {
-                application: application.clone(),
-                alert_receiver: vec![Box::new(discord_receiver), Box::new(webhook_receiver)],
-            }),
-            // TODO add backups, multisite ha, etc
-        ],
-        application,
-    };
-
-    harmony_cli::run(
-        Inventory::autoload(),
-        K8sAnywhereTopology::from_env(),
-        vec![Box::new(app)],
-        None,
-    )
-    .await
-    .unwrap();
+    let topology = K8sAnywhereTopology::from_env();
+    let mut maestro = Maestro::new(Inventory::autoload(), topology);
+    maestro.register_all(vec![Box::new(app)]);
+    harmony_cli::init(maestro, None).await.unwrap();
 }

14
examples/rust/webapp/.gitignore
vendored
14
examples/rust/webapp/.gitignore
vendored
@@ -1,14 +0,0 @@
|
|||||||
# Generated by Cargo
|
|
||||||
# will have compiled files and executables
|
|
||||||
debug/
|
|
||||||
target/
|
|
||||||
|
|
||||||
# Remove Cargo.lock from gitignore if creating an executable, leave it for libraries
|
|
||||||
# More information here https://doc.rust-lang.org/cargo/guide/cargo-toml-vs-cargo-lock.html
|
|
||||||
Cargo.lock
|
|
||||||
|
|
||||||
# These are backup files generated by rustfmt
|
|
||||||
**/*.rs.bk
|
|
||||||
|
|
||||||
# MSVC Windows builds of rustc generate these, which store debugging information
|
|
||||||
*.pdb
|
|
||||||
@@ -1,93 +0,0 @@
|
|||||||
[package]
|
|
||||||
name = "harmony-example-rust-webapp"
|
|
||||||
version = "0.1.0"
|
|
||||||
edition = "2021"
|
|
||||||
|
|
||||||
[lib]
|
|
||||||
crate-type = ["cdylib", "rlib"]
|
|
||||||
|
|
||||||
[workspace]
|
|
||||||
|
|
||||||
[dependencies]
|
|
||||||
actix-files = { version = "0.6", optional = true }
|
|
||||||
actix-web = { version = "4", optional = true, features = ["macros"] }
|
|
||||||
console_error_panic_hook = "0.1"
|
|
||||||
http = { version = "1.0.0", optional = true }
|
|
||||||
leptos = { version = "0.7.0" }
|
|
||||||
leptos_meta = { version = "0.7.0" }
|
|
||||||
leptos_actix = { version = "0.7.0", optional = true }
|
|
||||||
leptos_router = { version = "0.7.0" }
|
|
||||||
wasm-bindgen = "=0.2.100"
|
|
||||||
|
|
||||||
[features]
|
|
||||||
csr = ["leptos/csr"]
|
|
||||||
hydrate = ["leptos/hydrate"]
|
|
||||||
ssr = [
|
|
||||||
"dep:actix-files",
|
|
||||||
"dep:actix-web",
|
|
||||||
"dep:leptos_actix",
|
|
||||||
"leptos/ssr",
|
|
||||||
"leptos_meta/ssr",
|
|
||||||
"leptos_router/ssr",
|
|
||||||
]
|
|
||||||
|
|
||||||
# Defines a size-optimized profile for the WASM bundle in release mode
|
|
||||||
[profile.wasm-release]
|
|
||||||
inherits = "release"
|
|
||||||
opt-level = 'z'
|
|
||||||
lto = true
|
|
||||||
codegen-units = 1
|
|
||||||
panic = "abort"
|
|
||||||
|
|
||||||
[package.metadata.leptos]
|
|
||||||
# The name used by wasm-bindgen/cargo-leptos for the JS/WASM bundle. Defaults to the crate name
|
|
||||||
output-name = "harmony-example-rust-webapp"
|
|
||||||
# The site root folder is where cargo-leptos generate all output. WARNING: all content of this folder will be erased on a rebuild. Use it in your server setup.
|
|
||||||
site-root = "target/site"
|
|
||||||
# The site-root relative folder where all compiled output (JS, WASM and CSS) is written
|
|
||||||
# Defaults to pkg
|
|
||||||
site-pkg-dir = "pkg"
|
|
||||||
# [Optional] The source CSS file. If it ends with .sass or .scss then it will be compiled by dart-sass into CSS. The CSS is optimized by Lightning CSS before being written to <site-root>/<site-pkg>/app.css
|
|
||||||
style-file = "style/main.scss"
|
|
||||||
# Assets source dir. All files found here will be copied and synchronized to site-root.
|
|
||||||
# The assets-dir cannot have a sub directory with the same name/path as site-pkg-dir.
|
|
||||||
#
|
|
||||||
# Optional. Env: LEPTOS_ASSETS_DIR.
|
|
||||||
assets-dir = "assets"
|
|
||||||
# The IP and port (ex: 127.0.0.1:3000) where the server serves the content. Use it in your server setup.
|
|
||||||
site-addr = "0.0.0.0:3000"
|
|
||||||
# The port to use for automatic reload monitoring
|
|
||||||
reload-port = 3001
|
|
||||||
# [Optional] Command to use when running end2end tests. It will run in the end2end dir.
|
|
||||||
# [Windows] for non-WSL use "npx.cmd playwright test"
|
|
||||||
# This binary name can be checked in Powershell with Get-Command npx
|
|
||||||
end2end-cmd = "npx playwright test"
|
|
||||||
end2end-dir = "end2end"
|
|
||||||
# The browserlist query used for optimizing the CSS.
|
|
||||||
browserquery = "defaults"
|
|
||||||
# The environment Leptos will run in, usually either "DEV" or "PROD"
|
|
||||||
env = "DEV"
|
|
||||||
# The features to use when compiling the bin target
|
|
||||||
#
|
|
||||||
# Optional. Can be over-ridden with the command line parameter --bin-features
|
|
||||||
bin-features = ["ssr"]
|
|
||||||
|
|
||||||
# If the --no-default-features flag should be used when compiling the bin target
|
|
||||||
#
|
|
||||||
# Optional. Defaults to false.
|
|
||||||
bin-default-features = false
|
|
||||||
|
|
||||||
# The features to use when compiling the lib target
|
|
||||||
#
|
|
||||||
# Optional. Can be over-ridden with the command line parameter --lib-features
|
|
||||||
lib-features = ["hydrate"]
|
|
||||||
|
|
||||||
# If the --no-default-features flag should be used when compiling the lib target
|
|
||||||
#
|
|
||||||
# Optional. Defaults to false.
|
|
||||||
lib-default-features = false
|
|
||||||
|
|
||||||
# The profile to use for the lib target when compiling for release
|
|
||||||
#
|
|
||||||
# Optional. Defaults to "release".
|
|
||||||
lib-profile-release = "wasm-release"
|
|
||||||
@@ -1,24 +0,0 @@
|
|||||||
This is free and unencumbered software released into the public domain.
|
|
||||||
|
|
||||||
Anyone is free to copy, modify, publish, use, compile, sell, or
|
|
||||||
distribute this software, either in source code form or as a compiled
|
|
||||||
binary, for any purpose, commercial or non-commercial, and by any
|
|
||||||
means.
|
|
||||||
|
|
||||||
In jurisdictions that recognize copyright laws, the author or authors
|
|
||||||
of this software dedicate any and all copyright interest in the
|
|
||||||
software to the public domain. We make this dedication for the benefit
|
|
||||||
of the public at large and to the detriment of our heirs and
|
|
||||||
successors. We intend this dedication to be an overt act of
|
|
||||||
relinquishment in perpetuity of all present and future rights to this
|
|
||||||
software under copyright law.
|
|
||||||
|
|
||||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
|
||||||
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
|
||||||
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
|
|
||||||
IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
|
||||||
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
|
||||||
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
|
||||||
OTHER DEALINGS IN THE SOFTWARE.
|
|
||||||
|
|
||||||
For more information, please refer to <https://unlicense.org>
|
|
||||||
@@ -1,72 +0,0 @@
|
|||||||
<picture>
|
|
||||||
<source srcset="https://raw.githubusercontent.com/leptos-rs/leptos/main/docs/logos/Leptos_logo_Solid_White.svg" media="(prefers-color-scheme: dark)">
|
|
||||||
<img src="https://raw.githubusercontent.com/leptos-rs/leptos/main/docs/logos/Leptos_logo_RGB.svg" alt="Leptos Logo">
|
|
||||||
</picture>
|
|
||||||
|
|
||||||
# Leptos Starter Template
|
|
||||||
|
|
||||||
This is a template for use with the [Leptos](https://github.com/leptos-rs/leptos) web framework and the [cargo-leptos](https://github.com/akesson/cargo-leptos) tool.
|
|
||||||
|
|
||||||
## Creating your template repo
|
|
||||||
|
|
||||||
If you don't have `cargo-leptos` installed you can install it with
|
|
||||||
|
|
||||||
`cargo install cargo-leptos --locked`
|
|
||||||
|
|
||||||
Then run
|
|
||||||
|
|
||||||
`cargo leptos new --git leptos-rs/start-actix`
|
|
||||||
|
|
||||||
to generate a new project template (you will be prompted to enter a project name).
|
|
||||||
|
|
||||||
`cd {projectname}`
|
|
||||||
|
|
||||||
to go to your newly created project.
|
|
||||||
|
|
||||||
Of course, you should explore around the project structure, but the best place to start with your application code is in `src/app.rs`.
|
|
||||||
|
|
||||||
## Running your project
|
|
||||||
|
|
||||||
`cargo leptos watch`
|
|
||||||
By default, you can access your local project at `http://localhost:3000`
|
|
||||||
|
|
||||||
## Installing Additional Tools
|
|
||||||
|
|
||||||
By default, `cargo-leptos` uses `nightly` Rust, `cargo-generate`, and `sass`. If you run into any trouble, you may need to install one or more of these tools.
|
|
||||||
|
|
||||||
1. `rustup toolchain install nightly --allow-downgrade` - make sure you have Rust nightly
|
|
||||||
2. `rustup target add wasm32-unknown-unknown` - add the ability to compile Rust to WebAssembly
|
|
||||||
3. `cargo install cargo-generate` - install `cargo-generate` binary (should be installed automatically in future)
|
|
||||||
4. `npm install -g sass` - install `dart-sass` (should be optional in future)
|
|
||||||
|
|
||||||
## Executing a Server on a Remote Machine Without the Toolchain
|
|
||||||
After running a `cargo leptos build --release` the minimum files needed are:
|
|
||||||
|
|
||||||
1. The server binary located in `target/server/release`
|
|
||||||
2. The `site` directory and all files within located in `target/site`
|
|
||||||
|
|
||||||
Copy these files to your remote server. The directory structure should be:
|
|
||||||
```text
|
|
||||||
leptos_start
|
|
||||||
site/
|
|
||||||
```
|
|
||||||
Set the following environment variables (updating for your project as needed):
|
|
||||||
```sh
|
|
||||||
export LEPTOS_OUTPUT_NAME="leptos_start"
|
|
||||||
export LEPTOS_SITE_ROOT="site"
|
|
||||||
export LEPTOS_SITE_PKG_DIR="pkg"
|
|
||||||
export LEPTOS_SITE_ADDR="127.0.0.1:3000"
|
|
||||||
export LEPTOS_RELOAD_PORT="3001"
|
|
||||||
```
|
|
||||||
Finally, run the server binary.
|
|
||||||
|
|
||||||
## Notes about CSR and Trunk:
|
|
||||||
Although it is not recommended, you can also run your project without server integration using the feature `csr` and `trunk serve`:
|
|
||||||
|
|
||||||
`trunk serve --open --features csr`
|
|
||||||
|
|
||||||
This may be useful for integrating external tools which require a static site, e.g. `tauri`.
|
|
||||||
|
|
||||||
## Licensing
|
|
||||||
|
|
||||||
This template itself is released under the Unlicense. You should replace the LICENSE for your own application with an appropriate license if you plan to release it publicly.
|
|
||||||
Binary file not shown (image removed; previous size 15 KiB).
examples/rust/webapp/end2end/package-lock.json (generated, -112 lines)
@@ -1,112 +0,0 @@
|
|||||||
{
|
|
||||||
"name": "end2end",
|
|
||||||
"version": "1.0.0",
|
|
||||||
"lockfileVersion": 3,
|
|
||||||
"requires": true,
|
|
||||||
"packages": {
|
|
||||||
"": {
|
|
||||||
"name": "end2end",
|
|
||||||
"version": "1.0.0",
|
|
||||||
"license": "ISC",
|
|
||||||
"devDependencies": {
|
|
||||||
"@playwright/test": "^1.44.1",
|
|
||||||
"@types/node": "^20.12.12",
|
|
||||||
"typescript": "^5.4.5"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"node_modules/@playwright/test": {
|
|
||||||
"version": "1.44.1",
|
|
||||||
"resolved": "https://registry.npmjs.org/@playwright/test/-/test-1.44.1.tgz",
|
|
||||||
"integrity": "sha512-1hZ4TNvD5z9VuhNJ/walIjvMVvYkZKf71axoF/uiAqpntQJXpG64dlXhoDXE3OczPuTuvjf/M5KWFg5VAVUS3Q==",
|
|
||||||
"dev": true,
|
|
||||||
"license": "Apache-2.0",
|
|
||||||
"dependencies": {
|
|
||||||
"playwright": "1.44.1"
|
|
||||||
},
|
|
||||||
"bin": {
|
|
||||||
"playwright": "cli.js"
|
|
||||||
},
|
|
||||||
"engines": {
|
|
||||||
"node": ">=16"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"node_modules/@types/node": {
|
|
||||||
"version": "20.12.12",
|
|
||||||
"resolved": "https://registry.npmjs.org/@types/node/-/node-20.12.12.tgz",
|
|
||||||
"integrity": "sha512-eWLDGF/FOSPtAvEqeRAQ4C8LSA7M1I7i0ky1I8U7kD1J5ITyW3AsRhQrKVoWf5pFKZ2kILsEGJhsI9r93PYnOw==",
|
|
||||||
"dev": true,
|
|
||||||
"license": "MIT",
|
|
||||||
"dependencies": {
|
|
||||||
"undici-types": "~5.26.4"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"node_modules/fsevents": {
|
|
||||||
"version": "2.3.2",
|
|
||||||
"resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.2.tgz",
|
|
||||||
"integrity": "sha512-xiqMQR4xAeHTuB9uWm+fFRcIOgKBMiOBP+eXiyT7jsgVCq1bkVygt00oASowB7EdtpOHaaPgKt812P9ab+DDKA==",
|
|
||||||
"dev": true,
|
|
||||||
"hasInstallScript": true,
|
|
||||||
"license": "MIT",
|
|
||||||
"optional": true,
|
|
||||||
"os": [
|
|
||||||
"darwin"
|
|
||||||
],
|
|
||||||
"engines": {
|
|
||||||
"node": "^8.16.0 || ^10.6.0 || >=11.0.0"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"node_modules/playwright": {
|
|
||||||
"version": "1.44.1",
|
|
||||||
"resolved": "https://registry.npmjs.org/playwright/-/playwright-1.44.1.tgz",
|
|
||||||
"integrity": "sha512-qr/0UJ5CFAtloI3avF95Y0L1xQo6r3LQArLIg/z/PoGJ6xa+EwzrwO5lpNr/09STxdHuUoP2mvuELJS+hLdtgg==",
|
|
||||||
"dev": true,
|
|
||||||
"license": "Apache-2.0",
|
|
||||||
"dependencies": {
|
|
||||||
"playwright-core": "1.44.1"
|
|
||||||
},
|
|
||||||
"bin": {
|
|
||||||
"playwright": "cli.js"
|
|
||||||
},
|
|
||||||
"engines": {
|
|
||||||
"node": ">=16"
|
|
||||||
},
|
|
||||||
"optionalDependencies": {
|
|
||||||
"fsevents": "2.3.2"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"node_modules/playwright-core": {
|
|
||||||
"version": "1.44.1",
|
|
||||||
"resolved": "https://registry.npmjs.org/playwright-core/-/playwright-core-1.44.1.tgz",
|
|
||||||
"integrity": "sha512-wh0JWtYTrhv1+OSsLPgFzGzt67Y7BE/ZS3jEqgGBlp2ppp1ZDj8c+9IARNW4dwf1poq5MgHreEM2KV/GuR4cFA==",
|
|
||||||
"dev": true,
|
|
||||||
"license": "Apache-2.0",
|
|
||||||
"bin": {
|
|
||||||
"playwright-core": "cli.js"
|
|
||||||
},
|
|
||||||
"engines": {
|
|
||||||
"node": ">=16"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"node_modules/typescript": {
|
|
||||||
"version": "5.4.5",
|
|
||||||
"resolved": "https://registry.npmjs.org/typescript/-/typescript-5.4.5.tgz",
|
|
||||||
"integrity": "sha512-vcI4UpRgg81oIRUFwR0WSIHKt11nJ7SAVlYNIu+QpqeyXP+gpQJy/Z4+F0aGxSE4MqwjyXvW/TzgkLAx2AGHwQ==",
|
|
||||||
"dev": true,
|
|
||||||
"license": "Apache-2.0",
|
|
||||||
"bin": {
|
|
||||||
"tsc": "bin/tsc",
|
|
||||||
"tsserver": "bin/tsserver"
|
|
||||||
},
|
|
||||||
"engines": {
|
|
||||||
"node": ">=14.17"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"node_modules/undici-types": {
|
|
||||||
"version": "5.26.5",
|
|
||||||
"resolved": "https://registry.npmjs.org/undici-types/-/undici-types-5.26.5.tgz",
|
|
||||||
"integrity": "sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA==",
|
|
||||||
"dev": true,
|
|
||||||
"license": "MIT"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -1,15 +0,0 @@
|
|||||||
{
|
|
||||||
"name": "end2end",
|
|
||||||
"version": "1.0.0",
|
|
||||||
"description": "",
|
|
||||||
"main": "index.js",
|
|
||||||
"scripts": {},
|
|
||||||
"keywords": [],
|
|
||||||
"author": "",
|
|
||||||
"license": "ISC",
|
|
||||||
"devDependencies": {
|
|
||||||
"@playwright/test": "^1.44.1",
|
|
||||||
"@types/node": "^20.12.12",
|
|
||||||
"typescript": "^5.4.5"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -1,104 +0,0 @@
|
|||||||
import { devices, defineConfig } from "@playwright/test";
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Read environment variables from file.
|
|
||||||
* https://github.com/motdotla/dotenv
|
|
||||||
*/
|
|
||||||
// require('dotenv').config();
|
|
||||||
|
|
||||||
/**
|
|
||||||
* See https://playwright.dev/docs/test-configuration.
|
|
||||||
*/
|
|
||||||
export default defineConfig({
|
|
||||||
testDir: "./tests",
|
|
||||||
/* Maximum time one test can run for. */
|
|
||||||
timeout: 30 * 1000,
|
|
||||||
expect: {
|
|
||||||
/**
|
|
||||||
* Maximum time expect() should wait for the condition to be met.
|
|
||||||
* For example in `await expect(locator).toHaveText();`
|
|
||||||
*/
|
|
||||||
timeout: 5000,
|
|
||||||
},
|
|
||||||
/* Run tests in files in parallel */
|
|
||||||
fullyParallel: true,
|
|
||||||
/* Fail the build on CI if you accidentally left test.only in the source code. */
|
|
||||||
forbidOnly: !!process.env.CI,
|
|
||||||
/* Retry on CI only */
|
|
||||||
retries: process.env.CI ? 2 : 0,
|
|
||||||
/* Opt out of parallel tests on CI. */
|
|
||||||
workers: process.env.CI ? 1 : undefined,
|
|
||||||
/* Reporter to use. See https://playwright.dev/docs/test-reporters */
|
|
||||||
reporter: "html",
|
|
||||||
/* Shared settings for all the projects below. See https://playwright.dev/docs/api/class-testoptions. */
|
|
||||||
use: {
|
|
||||||
/* Maximum time each action such as `click()` can take. Defaults to 0 (no limit). */
|
|
||||||
actionTimeout: 0,
|
|
||||||
/* Base URL to use in actions like `await page.goto('/')`. */
|
|
||||||
// baseURL: 'http://localhost:3000',
|
|
||||||
|
|
||||||
/* Collect trace when retrying the failed test. See https://playwright.dev/docs/trace-viewer */
|
|
||||||
trace: "on-first-retry",
|
|
||||||
},
|
|
||||||
|
|
||||||
/* Configure projects for major browsers */
|
|
||||||
projects: [
|
|
||||||
{
|
|
||||||
name: "chromium",
|
|
||||||
use: {
|
|
||||||
...devices["Desktop Chrome"],
|
|
||||||
},
|
|
||||||
},
|
|
||||||
|
|
||||||
{
|
|
||||||
name: "firefox",
|
|
||||||
use: {
|
|
||||||
...devices["Desktop Firefox"],
|
|
||||||
},
|
|
||||||
},
|
|
||||||
|
|
||||||
{
|
|
||||||
name: "webkit",
|
|
||||||
use: {
|
|
||||||
...devices["Desktop Safari"],
|
|
||||||
},
|
|
||||||
},
|
|
||||||
|
|
||||||
/* Test against mobile viewports. */
|
|
||||||
// {
|
|
||||||
// name: 'Mobile Chrome',
|
|
||||||
// use: {
|
|
||||||
// ...devices['Pixel 5'],
|
|
||||||
// },
|
|
||||||
// },
|
|
||||||
// {
|
|
||||||
// name: 'Mobile Safari',
|
|
||||||
// use: {
|
|
||||||
// ...devices['iPhone 12'],
|
|
||||||
// },
|
|
||||||
// },
|
|
||||||
|
|
||||||
/* Test against branded browsers. */
|
|
||||||
// {
|
|
||||||
// name: 'Microsoft Edge',
|
|
||||||
// use: {
|
|
||||||
// channel: 'msedge',
|
|
||||||
// },
|
|
||||||
// },
|
|
||||||
// {
|
|
||||||
// name: 'Google Chrome',
|
|
||||||
// use: {
|
|
||||||
// channel: 'chrome',
|
|
||||||
// },
|
|
||||||
// },
|
|
||||||
],
|
|
||||||
|
|
||||||
/* Folder for test artifacts such as screenshots, videos, traces, etc. */
|
|
||||||
// outputDir: 'test-results/',
|
|
||||||
|
|
||||||
/* Run your local dev server before starting the tests */
|
|
||||||
// webServer: {
|
|
||||||
// command: 'npm run start',
|
|
||||||
// port: 3000,
|
|
||||||
// },
|
|
||||||
});
|
|
||||||
@@ -1,9 +0,0 @@
|
|||||||
import { test, expect } from "@playwright/test";
|
|
||||||
|
|
||||||
test("homepage has title and links to intro page", async ({ page }) => {
|
|
||||||
await page.goto("http://localhost:3000/");
|
|
||||||
|
|
||||||
await expect(page).toHaveTitle("Welcome to Leptos");
|
|
||||||
|
|
||||||
await expect(page.locator("h1")).toHaveText("Welcome to Leptos!");
|
|
||||||
});
|
|
||||||
@@ -1,109 +0,0 @@
|
|||||||
{
|
|
||||||
"compilerOptions": {
|
|
||||||
/* Visit https://aka.ms/tsconfig to read more about this file */
|
|
||||||
|
|
||||||
/* Projects */
|
|
||||||
// "incremental": true, /* Save .tsbuildinfo files to allow for incremental compilation of projects. */
|
|
||||||
// "composite": true, /* Enable constraints that allow a TypeScript project to be used with project references. */
|
|
||||||
// "tsBuildInfoFile": "./.tsbuildinfo", /* Specify the path to .tsbuildinfo incremental compilation file. */
|
|
||||||
// "disableSourceOfProjectReferenceRedirect": true, /* Disable preferring source files instead of declaration files when referencing composite projects. */
|
|
||||||
// "disableSolutionSearching": true, /* Opt a project out of multi-project reference checking when editing. */
|
|
||||||
// "disableReferencedProjectLoad": true, /* Reduce the number of projects loaded automatically by TypeScript. */
|
|
||||||
|
|
||||||
/* Language and Environment */
|
|
||||||
"target": "es2016", /* Set the JavaScript language version for emitted JavaScript and include compatible library declarations. */
|
|
||||||
// "lib": [], /* Specify a set of bundled library declaration files that describe the target runtime environment. */
|
|
||||||
// "jsx": "preserve", /* Specify what JSX code is generated. */
|
|
||||||
// "experimentalDecorators": true, /* Enable experimental support for legacy experimental decorators. */
|
|
||||||
// "emitDecoratorMetadata": true, /* Emit design-type metadata for decorated declarations in source files. */
|
|
||||||
// "jsxFactory": "", /* Specify the JSX factory function used when targeting React JSX emit, e.g. 'React.createElement' or 'h'. */
|
|
||||||
// "jsxFragmentFactory": "", /* Specify the JSX Fragment reference used for fragments when targeting React JSX emit e.g. 'React.Fragment' or 'Fragment'. */
|
|
||||||
// "jsxImportSource": "", /* Specify module specifier used to import the JSX factory functions when using 'jsx: react-jsx*'. */
|
|
||||||
// "reactNamespace": "", /* Specify the object invoked for 'createElement'. This only applies when targeting 'react' JSX emit. */
|
|
||||||
// "noLib": true, /* Disable including any library files, including the default lib.d.ts. */
|
|
||||||
// "useDefineForClassFields": true, /* Emit ECMAScript-standard-compliant class fields. */
|
|
||||||
// "moduleDetection": "auto", /* Control what method is used to detect module-format JS files. */
|
|
||||||
|
|
||||||
/* Modules */
|
|
||||||
"module": "commonjs", /* Specify what module code is generated. */
|
|
||||||
// "rootDir": "./", /* Specify the root folder within your source files. */
|
|
||||||
// "moduleResolution": "node10", /* Specify how TypeScript looks up a file from a given module specifier. */
|
|
||||||
// "baseUrl": "./", /* Specify the base directory to resolve non-relative module names. */
|
|
||||||
// "paths": {}, /* Specify a set of entries that re-map imports to additional lookup locations. */
|
|
||||||
// "rootDirs": [], /* Allow multiple folders to be treated as one when resolving modules. */
|
|
||||||
// "typeRoots": [], /* Specify multiple folders that act like './node_modules/@types'. */
|
|
||||||
// "types": [], /* Specify type package names to be included without being referenced in a source file. */
|
|
||||||
// "allowUmdGlobalAccess": true, /* Allow accessing UMD globals from modules. */
|
|
||||||
// "moduleSuffixes": [], /* List of file name suffixes to search when resolving a module. */
|
|
||||||
// "allowImportingTsExtensions": true, /* Allow imports to include TypeScript file extensions. Requires '--moduleResolution bundler' and either '--noEmit' or '--emitDeclarationOnly' to be set. */
|
|
||||||
// "resolvePackageJsonExports": true, /* Use the package.json 'exports' field when resolving package imports. */
|
|
||||||
// "resolvePackageJsonImports": true, /* Use the package.json 'imports' field when resolving imports. */
|
|
||||||
// "customConditions": [], /* Conditions to set in addition to the resolver-specific defaults when resolving imports. */
|
|
||||||
// "resolveJsonModule": true, /* Enable importing .json files. */
|
|
||||||
// "allowArbitraryExtensions": true, /* Enable importing files with any extension, provided a declaration file is present. */
|
|
||||||
// "noResolve": true, /* Disallow 'import's, 'require's or '<reference>'s from expanding the number of files TypeScript should add to a project. */
|
|
||||||
|
|
||||||
/* JavaScript Support */
|
|
||||||
// "allowJs": true, /* Allow JavaScript files to be a part of your program. Use the 'checkJS' option to get errors from these files. */
|
|
||||||
// "checkJs": true, /* Enable error reporting in type-checked JavaScript files. */
|
|
||||||
// "maxNodeModuleJsDepth": 1, /* Specify the maximum folder depth used for checking JavaScript files from 'node_modules'. Only applicable with 'allowJs'. */
|
|
||||||
|
|
||||||
/* Emit */
|
|
||||||
// "declaration": true, /* Generate .d.ts files from TypeScript and JavaScript files in your project. */
|
|
||||||
// "declarationMap": true, /* Create sourcemaps for d.ts files. */
|
|
||||||
// "emitDeclarationOnly": true, /* Only output d.ts files and not JavaScript files. */
|
|
||||||
// "sourceMap": true, /* Create source map files for emitted JavaScript files. */
|
|
||||||
// "inlineSourceMap": true, /* Include sourcemap files inside the emitted JavaScript. */
|
|
||||||
// "outFile": "./", /* Specify a file that bundles all outputs into one JavaScript file. If 'declaration' is true, also designates a file that bundles all .d.ts output. */
|
|
||||||
// "outDir": "./", /* Specify an output folder for all emitted files. */
|
|
||||||
// "removeComments": true, /* Disable emitting comments. */
|
|
||||||
// "noEmit": true, /* Disable emitting files from a compilation. */
|
|
||||||
// "importHelpers": true, /* Allow importing helper functions from tslib once per project, instead of including them per-file. */
|
|
||||||
// "importsNotUsedAsValues": "remove", /* Specify emit/checking behavior for imports that are only used for types. */
|
|
||||||
// "downlevelIteration": true, /* Emit more compliant, but verbose and less performant JavaScript for iteration. */
|
|
||||||
// "sourceRoot": "", /* Specify the root path for debuggers to find the reference source code. */
|
|
||||||
// "mapRoot": "", /* Specify the location where debugger should locate map files instead of generated locations. */
|
|
||||||
// "inlineSources": true, /* Include source code in the sourcemaps inside the emitted JavaScript. */
|
|
||||||
// "emitBOM": true, /* Emit a UTF-8 Byte Order Mark (BOM) in the beginning of output files. */
|
|
||||||
// "newLine": "crlf", /* Set the newline character for emitting files. */
|
|
||||||
// "stripInternal": true, /* Disable emitting declarations that have '@internal' in their JSDoc comments. */
|
|
||||||
// "noEmitHelpers": true, /* Disable generating custom helper functions like '__extends' in compiled output. */
|
|
||||||
// "noEmitOnError": true, /* Disable emitting files if any type checking errors are reported. */
|
|
||||||
// "preserveConstEnums": true, /* Disable erasing 'const enum' declarations in generated code. */
|
|
||||||
// "declarationDir": "./", /* Specify the output directory for generated declaration files. */
|
|
||||||
// "preserveValueImports": true, /* Preserve unused imported values in the JavaScript output that would otherwise be removed. */
|
|
||||||
|
|
||||||
/* Interop Constraints */
|
|
||||||
// "isolatedModules": true, /* Ensure that each file can be safely transpiled without relying on other imports. */
|
|
||||||
// "verbatimModuleSyntax": true, /* Do not transform or elide any imports or exports not marked as type-only, ensuring they are written in the output file's format based on the 'module' setting. */
|
|
||||||
// "allowSyntheticDefaultImports": true, /* Allow 'import x from y' when a module doesn't have a default export. */
|
|
||||||
"esModuleInterop": true, /* Emit additional JavaScript to ease support for importing CommonJS modules. This enables 'allowSyntheticDefaultImports' for type compatibility. */
|
|
||||||
// "preserveSymlinks": true, /* Disable resolving symlinks to their realpath. This correlates to the same flag in node. */
|
|
||||||
"forceConsistentCasingInFileNames": true, /* Ensure that casing is correct in imports. */
|
|
||||||
|
|
||||||
/* Type Checking */
|
|
||||||
"strict": true, /* Enable all strict type-checking options. */
|
|
||||||
// "noImplicitAny": true, /* Enable error reporting for expressions and declarations with an implied 'any' type. */
|
|
||||||
// "strictNullChecks": true, /* When type checking, take into account 'null' and 'undefined'. */
|
|
||||||
// "strictFunctionTypes": true, /* When assigning functions, check to ensure parameters and the return values are subtype-compatible. */
|
|
||||||
// "strictBindCallApply": true, /* Check that the arguments for 'bind', 'call', and 'apply' methods match the original function. */
|
|
||||||
// "strictPropertyInitialization": true, /* Check for class properties that are declared but not set in the constructor. */
|
|
||||||
// "noImplicitThis": true, /* Enable error reporting when 'this' is given the type 'any'. */
|
|
||||||
// "useUnknownInCatchVariables": true, /* Default catch clause variables as 'unknown' instead of 'any'. */
|
|
||||||
// "alwaysStrict": true, /* Ensure 'use strict' is always emitted. */
|
|
||||||
// "noUnusedLocals": true, /* Enable error reporting when local variables aren't read. */
|
|
||||||
// "noUnusedParameters": true, /* Raise an error when a function parameter isn't read. */
|
|
||||||
// "exactOptionalPropertyTypes": true, /* Interpret optional property types as written, rather than adding 'undefined'. */
|
|
||||||
// "noImplicitReturns": true, /* Enable error reporting for codepaths that do not explicitly return in a function. */
|
|
||||||
// "noFallthroughCasesInSwitch": true, /* Enable error reporting for fallthrough cases in switch statements. */
|
|
||||||
// "noUncheckedIndexedAccess": true, /* Add 'undefined' to a type when accessed using an index. */
|
|
||||||
// "noImplicitOverride": true, /* Ensure overriding members in derived classes are marked with an override modifier. */
|
|
||||||
// "noPropertyAccessFromIndexSignature": true, /* Enforces using indexed accessors for keys declared using an indexed type. */
|
|
||||||
// "allowUnusedLabels": true, /* Disable error reporting for unused labels. */
|
|
||||||
// "allowUnreachableCode": true, /* Disable error reporting for unreachable code. */
|
|
||||||
|
|
||||||
/* Completeness */
|
|
||||||
// "skipDefaultLibCheck": true, /* Skip type checking .d.ts files that are included with TypeScript. */
|
|
||||||
"skipLibCheck": true /* Skip type checking all .d.ts files. */
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -1,66 +0,0 @@
use leptos::prelude::*;
use leptos_meta::{provide_meta_context, Stylesheet, Title};
use leptos_router::{
    components::{Route, Router, Routes},
    StaticSegment, WildcardSegment,
};

#[component]
pub fn App() -> impl IntoView {
    // Provides context that manages stylesheets, titles, meta tags, etc.
    provide_meta_context();

    view! {
        // injects a stylesheet into the document <head>
        // id=leptos means cargo-leptos will hot-reload this stylesheet
        <Stylesheet id="leptos" href="/pkg/harmony-example-rust-webapp.css"/>

        // sets the document title
        <Title text="Welcome to Leptos"/>

        // content for this welcome page
        <Router>
            <main>
                <Routes fallback=move || "Not found.">
                    <Route path=StaticSegment("") view=HomePage/>
                    <Route path=WildcardSegment("any") view=NotFound/>
                </Routes>
            </main>
        </Router>
    }
}

/// Renders the home page of your application.
#[component]
fn HomePage() -> impl IntoView {
    // Creates a reactive value to update the button
    let count = RwSignal::new(0);
    let on_click = move |_| *count.write() += 1;

    view! {
        <h1>"Welcome to Leptos!"</h1>
        <button on:click=on_click>"Click Me: " {count}</button>
    }
}

/// 404 - Not Found
#[component]
fn NotFound() -> impl IntoView {
    // set an HTTP status code 404
    // this is feature gated because it can only be done during
    // initial server-side rendering
    // if you navigate to the 404 page subsequently, the status
    // code will not be set because there is not a new HTTP request
    // to the server
    #[cfg(feature = "ssr")]
    {
        // this can be done inline because it's synchronous
        // if it were async, we'd use a server function
        let resp = expect_context::<leptos_actix::ResponseOptions>();
        resp.set_status(actix_web::http::StatusCode::NOT_FOUND);
    }

    view! {
        <h1>"Not Found"</h1>
    }
}
@@ -1,9 +0,0 @@
pub mod app;

#[cfg(feature = "hydrate")]
#[wasm_bindgen::prelude::wasm_bindgen]
pub fn hydrate() {
    use app::*;
    console_error_panic_hook::set_once();
    leptos::mount::hydrate_body(App);
}
@@ -1,88 +0,0 @@
#[cfg(feature = "ssr")]
#[actix_web::main]
async fn main() -> std::io::Result<()> {
    use actix_files::Files;
    use actix_web::*;
    use leptos::prelude::*;
    use leptos::config::get_configuration;
    use leptos_meta::MetaTags;
    use leptos_actix::{generate_route_list, LeptosRoutes};
    use harmony_example_rust_webapp::app::*;

    let conf = get_configuration(None).unwrap();
    let addr = conf.leptos_options.site_addr;

    HttpServer::new(move || {
        // Generate the list of routes in your Leptos App
        let routes = generate_route_list(App);
        let leptos_options = &conf.leptos_options;
        let site_root = leptos_options.site_root.clone().to_string();

        println!("listening on http://{}", &addr);

        App::new()
            // serve JS/WASM/CSS from `pkg`
            .service(Files::new("/pkg", format!("{site_root}/pkg")))
            // serve other assets from the `assets` directory
            .service(Files::new("/assets", &site_root))
            // serve the favicon from /favicon.ico
            .service(favicon)
            .leptos_routes(routes, {
                let leptos_options = leptos_options.clone();
                move || {
                    view! {
                        <!DOCTYPE html>
                        <html lang="en">
                            <head>
                                <meta charset="utf-8"/>
                                <meta name="viewport" content="width=device-width, initial-scale=1"/>
                                <AutoReload options=leptos_options.clone() />
                                <HydrationScripts options=leptos_options.clone()/>
                                <MetaTags/>
                            </head>
                            <body>
                                <App/>
                            </body>
                        </html>
                    }
                }
            })
            .app_data(web::Data::new(leptos_options.to_owned()))
            //.wrap(middleware::Compress::default())
    })
    .bind(&addr)?
    .run()
    .await
}

#[cfg(feature = "ssr")]
#[actix_web::get("favicon.ico")]
async fn favicon(
    leptos_options: actix_web::web::Data<leptos::config::LeptosOptions>,
) -> actix_web::Result<actix_files::NamedFile> {
    let leptos_options = leptos_options.into_inner();
    let site_root = &leptos_options.site_root;
    Ok(actix_files::NamedFile::open(format!(
        "{site_root}/favicon.ico"
    ))?)
}

#[cfg(not(any(feature = "ssr", feature = "csr")))]
pub fn main() {
    // no client-side main function
    // unless we want this to work with e.g., Trunk for pure client-side testing
    // see lib.rs for hydration function instead
    // see optional feature `csr` instead
}

#[cfg(all(not(feature = "ssr"), feature = "csr"))]
pub fn main() {
    // a client-side main function is required for using `trunk serve`
    // prefer using `cargo leptos serve` instead
    // to run: `trunk serve --open --features csr`
    use harmony_example_rust_webapp::app::*;

    console_error_panic_hook::set_once();

    leptos::mount_to_body(App);
}
@@ -1,4 +0,0 @@
body {
    font-family: sans-serif;
    text-align: center;
}
@@ -1,30 +1,30 @@
-use std::str::FromStr;
-
 use harmony::{
+    data::Id,
     inventory::Inventory,
+    maestro::Maestro,
     modules::tenant::TenantScore,
     topology::{K8sAnywhereTopology, tenant::TenantConfig},
 };
-use harmony_types::id::Id;
 
 #[tokio::main]
 async fn main() {
     let tenant = TenantScore {
         config: TenantConfig {
-            id: Id::from_str("test-tenant-id").unwrap(),
+            id: Id::from_str("test-tenant-id"),
             name: "testtenant".to_string(),
             ..Default::default()
         },
     };
 
-    harmony_cli::run(
+    let mut maestro = Maestro::<K8sAnywhereTopology>::initialize(
         Inventory::autoload(),
         K8sAnywhereTopology::from_env(),
-        vec![Box::new(tenant)],
-        None,
     )
     .await
     .unwrap();
+
+    maestro.register_all(vec![Box::new(tenant)]);
+    harmony_cli::init(maestro, None).await.unwrap();
 }
 
 // TODO write tests
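The example migrations in this hunk and the next follow the same shape: the one-shot harmony_cli::run(...) / harmony_tui::run(...) call is replaced by an explicit Maestro lifecycle. A minimal sketch of that flow, using only names that appear in these hunks (error handling is left as unwrap(), as in the examples themselves):

    // 1. Build a Maestro for the chosen topology.
    let mut maestro = Maestro::<K8sAnywhereTopology>::initialize(
        Inventory::autoload(),
        K8sAnywhereTopology::from_env(),
    )
    .await
    .unwrap();

    // 2. Register the scores to run.
    maestro.register_all(vec![Box::new(tenant)]);

    // 3. Hand the Maestro to the front-end (harmony_cli::init here, harmony_tui::init in the TUI example below).
    harmony_cli::init(maestro, None).await.unwrap();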
@@ -2,6 +2,7 @@ use std::net::{SocketAddr, SocketAddrV4};
 
 use harmony::{
     inventory::Inventory,
+    maestro::Maestro,
     modules::{
         dns::DnsScore,
         dummy::{ErrorScore, PanicScore, SuccessScore},
@@ -15,19 +16,18 @@ use harmony_macros::ipv4;
 
 #[tokio::main]
 async fn main() {
-    harmony_tui::run(
-        Inventory::autoload(),
-        DummyInfra {},
-        vec![
-            Box::new(SuccessScore {}),
-            Box::new(ErrorScore {}),
-            Box::new(PanicScore {}),
-            Box::new(DnsScore::new(vec![], None)),
-            Box::new(build_large_score()),
-        ],
-    )
-    .await
-    .unwrap();
+    let inventory = Inventory::autoload();
+    let topology = DummyInfra {};
+    let mut maestro = Maestro::initialize(inventory, topology).await.unwrap();
+
+    maestro.register_all(vec![
+        Box::new(SuccessScore {}),
+        Box::new(ErrorScore {}),
+        Box::new(PanicScore {}),
+        Box::new(DnsScore::new(vec![], None)),
+        Box::new(build_large_score()),
+    ]);
+    harmony_tui::init(maestro).await.unwrap();
 }
 
 fn build_large_score() -> LoadBalancerScore {
@@ -1,11 +0,0 @@
[package]
name = "example_validate_ceph_cluster_health"
edition = "2024"
version.workspace = true
readme.workspace = true
license.workspace = true

[dependencies]
harmony = { version = "0.1.0", path = "../../harmony" }
harmony_cli = { version = "0.1.0", path = "../../harmony_cli" }
tokio.workspace = true
@@ -1,18 +0,0 @@
use harmony::{
    inventory::Inventory,
    modules::storage::ceph::ceph_validate_health_score::CephVerifyClusterHealth,
    topology::K8sAnywhereTopology,
};

#[tokio::main]
async fn main() {
    let ceph_health_score = CephVerifyClusterHealth {
        rook_ceph_namespace: "rook-ceph".to_string(),
    };

    let topology = K8sAnywhereTopology::from_env();
    let inventory = Inventory::autoload();
    harmony_cli::run(inventory, topology, vec![Box::new(ceph_health_score)], None)
        .await
        .unwrap();
}
@@ -5,17 +5,16 @@ version.workspace = true
 readme.workspace = true
 license.workspace = true
 
-[features]
-testing = []
-
 [dependencies]
+rand = "0.9"
 hex = "0.4"
-reqwest = { version = "0.11", features = ["blocking", "json", "rustls-tls"], default-features = false }
+libredfish = "0.1.1"
+reqwest = { version = "0.11", features = ["blocking", "json"] }
 russh = "0.45.0"
 rust-ipmi = "0.1.1"
 semver = "1.0.23"
-serde.workspace = true
-serde_json.workspace = true
+serde = { version = "1.0.209", features = ["derive"] }
+serde_json = "1.0.127"
 tokio.workspace = true
 derive-new.workspace = true
 log.workspace = true
@@ -28,28 +27,29 @@ harmony_macros = { path = "../harmony_macros" }
 harmony_types = { path = "../harmony_types" }
 uuid.workspace = true
 url.workspace = true
-kube = { workspace = true, features = ["derive"] }
+kube.workspace = true
 k8s-openapi.workspace = true
 serde_yaml.workspace = true
 http.workspace = true
 serde-value.workspace = true
+inquire.workspace = true
 helm-wrapper-rs = "0.4.0"
 non-blank-string-rs = "1.0.4"
 k3d-rs = { path = "../k3d" }
-directories.workspace = true
-lazy_static.workspace = true
+directories = "6.0.0"
+lazy_static = "1.5.0"
 dockerfile_builder = "0.1.5"
 temp-file = "0.1.9"
 convert_case.workspace = true
 email_address = "0.2.9"
 chrono.workspace = true
 fqdn = { version = "0.4.6", features = [
     "domain-label-cannot-start-or-end-with-hyphen",
     "domain-label-length-limited-to-63",
     "domain-name-without-special-chars",
     "domain-name-length-limited-to-255",
     "punycode",
     "serde",
 ] }
 temp-dir = "0.1.14"
 dyn-clone = "1.0.19"
@@ -57,21 +57,3 @@ similar.workspace = true
 futures-util = "0.3.31"
 tokio-util = "0.7.15"
 strum = { version = "0.27.1", features = ["derive"] }
-tempfile.workspace = true
-serde_with = "3.14.0"
-schemars = "0.8.22"
-kube-derive = "1.1.0"
-bollard.workspace = true
-tar.workspace = true
-base64.workspace = true
-thiserror.workspace = true
-once_cell = "1.21.3"
-harmony_inventory_agent = { path = "../harmony_inventory_agent" }
-harmony_secret_derive = { path = "../harmony_secret_derive" }
-harmony_secret = { path = "../harmony_secret" }
-askama.workspace = true
-sqlx.workspace = true
-inquire.workspace = true
-
-[dev-dependencies]
-pretty_assertions.workspace = true
harmony/src/domain/config.rs (new file, 15 lines)
@@ -0,0 +1,15 @@
use lazy_static::lazy_static;
use std::path::PathBuf;

lazy_static! {
    pub static ref HARMONY_CONFIG_DIR: PathBuf = directories::BaseDirs::new()
        .unwrap()
        .data_dir()
        .join("harmony");
    pub static ref REGISTRY_URL: String =
        std::env::var("HARMONY_REGISTRY_URL").unwrap_or_else(|_| "hub.nationtech.io".to_string());
    pub static ref REGISTRY_PROJECT: String =
        std::env::var("HARMONY_REGISTRY_PROJECT").unwrap_or_else(|_| "harmony".to_string());
    pub static ref DRY_RUN: bool =
        std::env::var("HARMONY_DRY_RUN").map_or(true, |value| value.parse().unwrap_or(true));
}
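These lazy_static values read their environment variables once, on first dereference, so overrides have to be exported before Harmony starts. A small illustrative sketch of how the defaults combine; the environment variable names come from the file above, while the config:: module path and the helper function are assumptions, not part of this diff:

    // Hypothetical helper, shown only to illustrate the defaults.
    fn registry_image_base() -> String {
        // With no overrides this yields "hub.nationtech.io/harmony".
        // Exporting HARMONY_REGISTRY_URL / HARMONY_REGISTRY_PROJECT before startup changes it.
        // Note the DRY_RUN default flips to true here unless HARMONY_DRY_RUN=false is set.
        format!("{}/{}", *config::REGISTRY_URL, *config::REGISTRY_PROJECT)
    }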
@@ -1,25 +0,0 @@
pub mod secret;

use lazy_static::lazy_static;
use std::path::PathBuf;

lazy_static! {
    pub static ref HARMONY_DATA_DIR: PathBuf = directories::BaseDirs::new()
        .unwrap()
        .data_dir()
        .join("harmony");
    pub static ref REGISTRY_URL: String =
        std::env::var("HARMONY_REGISTRY_URL").unwrap_or_else(|_| "hub.nationtech.io".to_string());
    pub static ref REGISTRY_PROJECT: String =
        std::env::var("HARMONY_REGISTRY_PROJECT").unwrap_or_else(|_| "harmony".to_string());
    pub static ref DRY_RUN: bool =
        std::env::var("HARMONY_DRY_RUN").is_ok_and(|value| value.parse().unwrap_or(false));
    pub static ref DEFAULT_DATABASE_URL: String = "sqlite://harmony.sqlite".to_string();
    pub static ref DATABASE_URL: String = std::env::var("HARMONY_DATABASE_URL")
        .map(|value| if value.is_empty() {
            (*DEFAULT_DATABASE_URL).clone()
        } else {
            value
        })
        .unwrap_or((*DEFAULT_DATABASE_URL).clone());
}
@@ -1,20 +0,0 @@
use harmony_secret_derive::Secret;
use serde::{Deserialize, Serialize};

#[derive(Secret, Serialize, Deserialize, Debug, PartialEq)]
pub struct OPNSenseFirewallCredentials {
    pub username: String,
    pub password: String,
}

// TODO we need a better way to handle multiple "instances" of the same secret structure.
#[derive(Secret, Serialize, Deserialize, Debug, PartialEq)]
pub struct SshKeyPair {
    pub private: String,
    pub public: String,
}

#[derive(Secret, Serialize, Deserialize, Debug, PartialEq)]
pub struct RedhatSecret {
    pub pull_secret: String,
}
@@ -1,22 +0,0 @@
use serde::{Deserialize, Serialize};

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct FileContent {
    pub path: FilePath,
    pub content: String,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum FilePath {
    Relative(String),
    Absolute(String),
}

impl std::fmt::Display for FilePath {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            FilePath::Relative(path) => f.write_fmt(format_args!("./{path}")),
            FilePath::Absolute(path) => f.write_fmt(format_args!("/{path}")),
        }
    }
}
@@ -1,6 +1,5 @@
 use rand::distr::Alphanumeric;
 use rand::distr::SampleString;
-use std::str::FromStr;
 use std::time::SystemTime;
 use std::time::UNIX_EPOCH;
 
@@ -25,27 +24,13 @@ pub struct Id {
 }
 
 impl Id {
-    pub fn empty() -> Self {
-        Id {
-            value: String::new(),
-        }
-    }
-}
-
-impl FromStr for Id {
-    type Err = ();
-
-    fn from_str(s: &str) -> Result<Self, Self::Err> {
-        Ok(Id {
-            value: s.to_string(),
-        })
-    }
-}
-
-impl From<String> for Id {
-    fn from(value: String) -> Self {
+    pub fn from_string(value: String) -> Self {
         Self { value }
     }
+
+    pub fn from_str(value: &str) -> Self {
+        Self::from_string(value.to_string())
+    }
 }
 
 impl std::fmt::Display for Id {
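With the std::str::FromStr and From<String> impls removed, Id construction now goes through the inherent methods above, which is why the tenant example earlier in this diff drops its .unwrap(). A minimal sketch, using only names visible in this hunk (the literal values are made up):

    // Both constructors are infallible; the Display impl is unchanged.
    let id = Id::from_str("test-tenant-id");
    let other = Id::from_string("postgres-db".to_string());
    println!("{id} {other}");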
@@ -1,4 +1,7 @@
-mod file;
+mod id;
 mod version;
-pub use file::*;
+pub use id::*;
 pub use version::*;
+
+mod postgres;
+pub use postgres::*;
harmony/src/domain/data/postgres.rs (new file, 13 lines)
@@ -0,0 +1,13 @@
use serde::{Deserialize, Serialize};

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PostgresUser {
    pub name: String,
    pub password: String, // In a real scenario, this should be a secret type
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PostgresDatabase {
    pub name: String,
    pub owner: String,
}
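A short illustrative sketch of the new data types; the field names come from the file above, while the concrete values are placeholders and nothing about how the dev/postgr branch wires them into a score is implied:

    // Placeholder values only.
    let owner = PostgresUser {
        name: "app".to_string(),
        password: "change-me".to_string(), // the source itself flags this field for a future secret type
    };

    let database = PostgresDatabase {
        name: "app_db".to_string(),
        owner: owner.name.clone(),
    };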
Some files were not shown because too many files have changed in this diff.