Compare commits

Comparing feat/webho… with opnsense_u… (54 commits)

| SHA1 |
|---|
| 670b701f6a |
| 1eaae2016a |
| c4f4a58dcf |
| 537da5800f |
| 3be2fa246c |
| 9452cf5616 |
| 9b7456e148 |
| 98f3f82ad5 |
| 3eca409f8d |
| c11a31c7a9 |
| 1a6d72dc17 |
| df9e21807e |
| b1bf4fd4d5 |
| f702ecd8c9 |
| a19b52e690 |
| b73f2e76d0 |
| b4534c6ee0 |
| 6149249a6c |
| d9935e20cb |
| 7b0f3b79b1 |
| e6612245a5 |
| b4f5b91a57 |
| d317c0ba76 |
| 539b8299ae |
| 5a89495c61 |
| fb7849c010 |
| 6371009c6f |
| a4aa685a4f |
| 6bf10b093c |
| 3eecc2f590 |
| 3959c07261 |
| e50c01c0b3 |
| 286460d59e |
| 4baa3ae707 |
| 82119076cf |
| f2a350fae6 |
| 197770a603 |
| ab69a2c264 |
| e857efa92f |
| 2ff3f4afa9 |
| 2f6a11ead7 |
| 7de9860dcf |
| 6e884cff3a |
| c74c51090a |
| 8ae0d6b548 |
| ee02906ce9 |
| 284cc6afd7 |
| 9bf6aac82e |
| 460c8b59e1 |
| 8e857bc72a |
| e8d55d27e4 |
| fea7e9ddb9 |
| 55143dcad4 |
| acfb93f1a2 |
.gitignore (vendored, 2 changes)
@@ -1,3 +1,5 @@
 target
 private_repos
+log/
+*.tgz
 .gitignore
Cargo.lock (generated, 957 changes)
File diff suppressed because it is too large.
Cargo.toml (54 changes)
@@ -20,34 +20,40 @@ readme = "README.md"
 license = "GNU AGPL v3"
 
 [workspace.dependencies]
-log = "0.4.22"
-env_logger = "0.11.5"
-derive-new = "0.7.0"
-async-trait = "0.1.82"
-tokio = { version = "1.40.0", features = [
+log = "0.4"
+env_logger = "0.11"
+derive-new = "0.7"
+async-trait = "0.1"
+tokio = { version = "1.40", features = [
     "io-std",
     "fs",
     "macros",
     "rt-multi-thread",
 ] }
 cidr = { features = ["serde"], version = "0.2" }
-russh = "0.45.0"
-russh-keys = "0.45.0"
-rand = "0.8.5"
-url = "2.5.4"
-kube = "0.98.0"
-k8s-openapi = { version = "0.24.0", features = ["v1_30"] }
-serde_yaml = "0.9.34"
-serde-value = "0.7.0"
-http = "1.2.0"
-inquire = "0.7.5"
-convert_case = "0.8.0"
+russh = "0.45"
+russh-keys = "0.45"
+rand = "0.8"
+url = "2.5"
+kube = { version = "1.1.0", features = [
+    "config",
+    "client",
+    "runtime",
+    "rustls-tls",
+    "ws",
+    "jsonpatch",
+] }
+k8s-openapi = { version = "0.25", features = ["v1_30"] }
+serde_yaml = "0.9"
+serde-value = "0.7"
+http = "1.2"
+inquire = "0.7"
+convert_case = "0.8"
+chrono = "0.4"
 
-[workspace.dependencies.uuid]
-version = "1.11.0"
-features = [
-    "v4",                # Lets you generate random UUIDs
-    "fast-rng",          # Use a faster (but still sufficiently random) RNG
-    "macro-diagnostics", # Enable better diagnostics for compile-time UUIDs
-]
+similar = "2"
+uuid = { version = "1.11", features = ["v4", "fast-rng", "macro-diagnostics"] }
+pretty_assertions = "1.4.1"
+bollard = "0.19.1"
+base64 = "0.22.1"
+tar = "0.4.44"
+figment = { version = "0.10.19", features = ["env"] }
adr/013-monitoring-notifications.md (new file, 78 lines)
@@ -0,0 +1,78 @@
+# Architecture Decision Record: Monitoring Notifications
+
+Initial Author: Taha Hawa
+
+Initial Date: 2025-06-26
+
+Last Updated Date: 2025-06-26
+
+## Status
+
+Proposed
+
+## Context
+
+We need to send notifications (typically from AlertManager/Prometheus), and we need to receive them on mobile devices in some form: push messages, SMS, phone calls, email, or all of the above.
+
+## Decision
+
+We should go with https://ntfy.sh, but self-hosted.
+
+`ntfy` is an open-source solution written in Go that has the features we need.
+
+## Rationale
+
+`ntfy` has pretty much everything we need (push notifications, email forwarding, receiving via webhook) and little that we don't. Good fit, lightweight.
+
+## Consequences
+
+Pros:
+
+- Topics, with ACLs
+- Lightweight
+- Reliable
+- Easy to configure
+- Mobile app
+  - The app can listen via WebSocket, poll, or receive via Firebase/GCM on Android (or the equivalent on iOS)
+- Forwarding to email
+- Text-to-speech phone call messages via the Twilio integration
+- Operates on simple HTTP requests/webhooks, so it is easy to drive from AlertManager
+
+Cons:
+
+- No SMS pushes
+- SQLite DB, which makes it harder to run HA or scale out
+
+## Alternatives considered
+
+[AWS SNS](https://aws.amazon.com/sns/):
+
+Pros:
+
+- Highly reliable
+- No hosting needed
+
+Cons:
+
+- No control, not self-hosted
+- Per-usage costs
+
+[Apprise](https://github.com/caronc/apprise):
+
+Pros:
+
+- Many more ways of sending notifications
+- Can use ntfy as one of its backends
+
+Cons:
+
+- Overkill for the features we need
+
+[Gotify](https://github.com/gotify/server):
+
+Pros:
+
+- Simple, lightweight, Go, etc.
+
+Cons:
+
+- Push topics are per-user
+
+## Additional Notes
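The rationale above hinges on ntfy's plain-HTTP publish model. As a quick illustration (not part of the diff), here is a hedged Rust sketch using the `reqwest` client already present in this workspace; the server URL and the `alerts` topic are hypothetical placeholders:

```rust
// Sketch: publish a notification to a self-hosted ntfy server.
// ntfy accepts a plain POST of the message body to `<server>/<topic>`;
// `https://ntfy.example.com` and the `alerts` topic are made-up placeholders.
use reqwest::Client;

#[tokio::main]
async fn main() -> Result<(), reqwest::Error> {
    Client::new()
        .post("https://ntfy.example.com/alerts")
        .header("Title", "AlertManager: disk filling up") // optional ntfy metadata headers
        .header("Priority", "high")
        .body("PVC predicted to be full within two days")
        .send()
        .await?
        .error_for_status()?;
    Ok(())
}
```

The same request shape is what an AlertManager webhook receiver would emit, which is why the ADR calls the integration easy.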
@@ -14,8 +14,8 @@ harmony_macros = { path = "../../harmony_macros" }
 log = { workspace = true }
 env_logger = { workspace = true }
 url = { workspace = true }
-kube = "0.98.0"
-k8s-openapi = { version = "0.24.0", features = [ "v1_30" ] }
+kube = "1.1.0"
+k8s-openapi = { version = "0.25.0", features = ["v1_30"] }
 http = "1.2.0"
 serde_yaml = "0.9.34"
 inquire.workspace = true
@@ -8,5 +8,6 @@ license.workspace = true
 [dependencies]
 harmony = { version = "0.1.0", path = "../../harmony" }
+harmony_cli = { version = "0.1.0", path = "../../harmony_cli" }
 harmony_macros = { version = "0.1.0", path = "../../harmony_macros" }
 tokio.workspace = true
 url.workspace = true
@@ -1,3 +1,5 @@
+use std::collections::HashMap;
+
 use harmony::{
     inventory::Inventory,
     maestro::Maestro,
@@ -5,7 +7,13 @@ use harmony::{
     monitoring::{
         alert_channel::discord_alert_channel::DiscordWebhook,
         alert_rule::prometheus_alert_rule::AlertManagerRuleGroup,
-        kube_prometheus::helm_prometheus_alert_score::HelmPrometheusAlertingScore,
+        kube_prometheus::{
+            helm_prometheus_alert_score::HelmPrometheusAlertingScore,
+            types::{
+                HTTPScheme, MatchExpression, Operator, Selector, ServiceMonitor,
+                ServiceMonitorEndpoint,
+            },
+        },
     },
     prometheus::alerts::{
         infra::dell_server::{
@@ -41,9 +49,30 @@ async fn main() {
         ],
     );
 
+    let service_monitor_endpoint = ServiceMonitorEndpoint {
+        port: Some("80".to_string()),
+        path: "/metrics".to_string(),
+        scheme: HTTPScheme::HTTP,
+        ..Default::default()
+    };
+
+    let service_monitor = ServiceMonitor {
+        name: "test-service-monitor".to_string(),
+        selector: Selector {
+            match_labels: HashMap::new(),
+            match_expressions: vec![MatchExpression {
+                key: "test".to_string(),
+                operator: Operator::In,
+                values: vec!["test-service".to_string()],
+            }],
+        },
+        endpoints: vec![service_monitor_endpoint],
+        ..Default::default()
+    };
     let alerting_score = HelmPrometheusAlertingScore {
         receivers: vec![Box::new(discord_receiver)],
         rules: vec![Box::new(additional_rules), Box::new(additional_rules2)],
+        service_monitors: vec![service_monitor],
     };
     let mut maestro = Maestro::<K8sAnywhereTopology>::initialize(
         Inventory::autoload(),
examples/monitoring_with_tenant/Cargo.toml (new file, 13 lines)
@@ -0,0 +1,13 @@
+[package]
+name = "example-monitoring-with-tenant"
+edition = "2024"
+version.workspace = true
+readme.workspace = true
+license.workspace = true
+
+[dependencies]
+cidr.workspace = true
+harmony = { version = "0.1.0", path = "../../harmony" }
+harmony_cli = { version = "0.1.0", path = "../../harmony_cli" }
+tokio.workspace = true
+url.workspace = true
examples/monitoring_with_tenant/src/main.rs (new file, 90 lines)
@@ -0,0 +1,90 @@
+use std::collections::HashMap;
+
+use harmony::{
+    data::Id,
+    inventory::Inventory,
+    maestro::Maestro,
+    modules::{
+        monitoring::{
+            alert_channel::discord_alert_channel::DiscordWebhook,
+            alert_rule::prometheus_alert_rule::AlertManagerRuleGroup,
+            kube_prometheus::{
+                helm_prometheus_alert_score::HelmPrometheusAlertingScore,
+                types::{
+                    HTTPScheme, MatchExpression, Operator, Selector, ServiceMonitor,
+                    ServiceMonitorEndpoint,
+                },
+            },
+        },
+        prometheus::alerts::k8s::pvc::high_pvc_fill_rate_over_two_days,
+        tenant::TenantScore,
+    },
+    topology::{
+        K8sAnywhereTopology, Url,
+        tenant::{ResourceLimits, TenantConfig, TenantNetworkPolicy},
+    },
+};
+
+#[tokio::main]
+async fn main() {
+    let tenant = TenantScore {
+        config: TenantConfig {
+            id: Id::from_string("1234".to_string()),
+            name: "test-tenant".to_string(),
+            resource_limits: ResourceLimits {
+                cpu_request_cores: 6.0,
+                cpu_limit_cores: 4.0,
+                memory_request_gb: 4.0,
+                memory_limit_gb: 4.0,
+                storage_total_gb: 10.0,
+            },
+            network_policy: TenantNetworkPolicy::default(),
+        },
+    };
+
+    let discord_receiver = DiscordWebhook {
+        name: "test-discord".to_string(),
+        url: Url::Url(url::Url::parse("https://discord.doesnt.exist.com").unwrap()),
+    };
+
+    let high_pvc_fill_rate_over_two_days_alert = high_pvc_fill_rate_over_two_days();
+
+    let additional_rules =
+        AlertManagerRuleGroup::new("pvc-alerts", vec![high_pvc_fill_rate_over_two_days_alert]);
+
+    let service_monitor_endpoint = ServiceMonitorEndpoint {
+        port: Some("80".to_string()),
+        path: "/metrics".to_string(),
+        scheme: HTTPScheme::HTTP,
+        ..Default::default()
+    };
+
+    let service_monitor = ServiceMonitor {
+        name: "test-service-monitor".to_string(),
+        selector: Selector {
+            match_labels: HashMap::new(),
+            match_expressions: vec![MatchExpression {
+                key: "test".to_string(),
+                operator: Operator::In,
+                values: vec!["test-service".to_string()],
+            }],
+        },
+        endpoints: vec![service_monitor_endpoint],
+        ..Default::default()
+    };
+
+    let alerting_score = HelmPrometheusAlertingScore {
+        receivers: vec![Box::new(discord_receiver)],
+        rules: vec![Box::new(additional_rules)],
+        service_monitors: vec![service_monitor],
+    };
+    let mut maestro = Maestro::<K8sAnywhereTopology>::initialize(
+        Inventory::autoload(),
+        K8sAnywhereTopology::from_env(),
+    )
+    .await
+    .unwrap();
+
+    maestro.register_all(vec![Box::new(tenant), Box::new(alerting_score)]);
+    harmony_cli::init(maestro, None).await.unwrap();
+}
@@ -10,7 +10,7 @@ use harmony::{
     inventory::Inventory,
     maestro::Maestro,
     modules::{
-        http::HttpScore,
+        http::StaticFilesHttpScore,
         ipxe::IpxeScore,
         okd::{
             bootstrap_dhcp::OKDBootstrapDhcpScore,
@@ -126,7 +126,7 @@ async fn main() {
         harmony::modules::okd::load_balancer::OKDLoadBalancerScore::new(&topology);
 
     let tftp_score = TftpScore::new(Url::LocalFolder("./data/watchguard/tftpboot".to_string()));
-    let http_score = HttpScore::new(Url::LocalFolder(
+    let http_score = StaticFilesHttpScore::new(Url::LocalFolder(
         "./data/watchguard/pxe-http-files".to_string(),
     ));
     let ipxe_score = IpxeScore::new();
examples/ntfy/Cargo.toml (new file, 12 lines)
@@ -0,0 +1,12 @@
+[package]
+name = "example-ntfy"
+edition = "2024"
+version.workspace = true
+readme.workspace = true
+license.workspace = true
+
+[dependencies]
+harmony = { version = "0.1.0", path = "../../harmony" }
+harmony_cli = { version = "0.1.0", path = "../../harmony_cli" }
+tokio.workspace = true
+url.workspace = true
examples/ntfy/src/main.rs (new file, 20 lines)
@@ -0,0 +1,20 @@
+use harmony::{
+    inventory::Inventory, maestro::Maestro, modules::monitoring::ntfy::ntfy::NtfyScore,
+    topology::K8sAnywhereTopology,
+};
+
+#[tokio::main]
+async fn main() {
+    let mut maestro = Maestro::<K8sAnywhereTopology>::initialize(
+        Inventory::autoload(),
+        K8sAnywhereTopology::from_env(),
+    )
+    .await
+    .unwrap();
+
+    maestro.register_all(vec![Box::new(NtfyScore {
+        namespace: "monitoring".to_string(),
+        host: "localhost".to_string(),
+    })]);
+    harmony_cli::init(maestro, None).await.unwrap();
+}
@@ -16,3 +16,4 @@ harmony_macros = { path = "../../harmony_macros" }
 log = { workspace = true }
 env_logger = { workspace = true }
 url = { workspace = true }
+harmony_cli = { version = "0.1.0", path = "../../harmony_cli" }
@@ -11,9 +11,9 @@ use harmony::{
     maestro::Maestro,
     modules::{
         dummy::{ErrorScore, PanicScore, SuccessScore},
-        http::HttpScore,
+        http::StaticFilesHttpScore,
         okd::{dhcp::OKDDhcpScore, dns::OKDDnsScore, load_balancer::OKDLoadBalancerScore},
-        opnsense::OPNsenseShellCommandScore,
+        opnsense::{OPNSenseLaunchUpgrade, OPNsenseShellCommandScore},
         tftp::TftpScore,
     },
     topology::{LogicalHost, UnmanagedRouter, Url},
@@ -22,8 +22,10 @@ use harmony_macros::{ip, mac_address};
 
 #[tokio::main]
 async fn main() {
+    env_logger::init();
+
     let firewall = harmony::topology::LogicalHost {
-        ip: ip!("192.168.5.229"),
+        ip: ip!("192.168.122.106"),
         name: String::from("opnsense-1"),
     };
 
@@ -81,7 +83,7 @@ async fn main() {
     let load_balancer_score = OKDLoadBalancerScore::new(&topology);
 
     let tftp_score = TftpScore::new(Url::LocalFolder("./data/watchguard/tftpboot".to_string()));
-    let http_score = HttpScore::new(Url::LocalFolder(
+    let http_score = StaticFilesHttpScore::new(Url::LocalFolder(
         "./data/watchguard/pxe-http-files".to_string(),
     ));
     let mut maestro = Maestro::initialize(inventory, topology).await.unwrap();
@@ -95,9 +97,12 @@ async fn main() {
             opnsense: opnsense.get_opnsense_config(),
             command: "touch /tmp/helloharmonytouching".to_string(),
         }),
+        // Box::new(OPNSenseLaunchUpgrade {
+        //     opnsense: opnsense.get_opnsense_config(),
+        // }),
         Box::new(SuccessScore {}),
         Box::new(ErrorScore {}),
         Box::new(PanicScore {}),
     ]);
-    harmony_tui::init(maestro).await.unwrap();
+    harmony_cli::init(maestro, None).await.unwrap();
 }
examples/rust/Cargo.toml (new file, 15 lines)
@@ -0,0 +1,15 @@
+[package]
+name = "example-rust"
+version = "0.1.0"
+edition = "2024"
+
+[dependencies]
+harmony = { path = "../../harmony" }
+harmony_cli = { path = "../../harmony_cli" }
+harmony_types = { path = "../../harmony_types" }
+harmony_macros = { path = "../../harmony_macros" }
+tokio = { workspace = true }
+log = { workspace = true }
+env_logger = { workspace = true }
+url = { workspace = true }
+base64.workspace = true
examples/rust/src/main.rs (new file, 43 lines)
@@ -0,0 +1,43 @@
+use std::{path::PathBuf, sync::Arc};
+
+use harmony::{
+    inventory::Inventory,
+    maestro::Maestro,
+    modules::application::{
+        ApplicationScore, RustWebFramework, RustWebapp,
+        features::{ContinuousDelivery, Monitoring},
+    },
+    topology::{K8sAnywhereTopology, Url},
+};
+
+#[tokio::main]
+async fn main() {
+    env_logger::init();
+
+    let topology = K8sAnywhereTopology::from_env();
+    let mut maestro = Maestro::initialize(Inventory::autoload(), topology)
+        .await
+        .unwrap();
+
+    let application = Arc::new(RustWebapp {
+        name: "harmony-example-rust-webapp".to_string(),
+        domain: Url::Url(url::Url::parse("https://rustapp.harmony.example.com").unwrap()),
+        project_root: PathBuf::from("./examples/rust/webapp"),
+        framework: Some(RustWebFramework::Leptos),
+    });
+
+    let app = ApplicationScore {
+        features: vec![
+            Box::new(ContinuousDelivery {
+                application: application.clone(),
+            }), // TODO add monitoring, backups, multisite ha, etc
+            Box::new(Monitoring {
+                application: application.clone(),
+            }),
+        ],
+        application,
+    };
+
+    maestro.register_all(vec![Box::new(app)]);
+    harmony_cli::init(maestro, None).await.unwrap();
+}
examples/rust/webapp/.gitignore (vendored, new file, 14 lines)
@@ -0,0 +1,14 @@
+# Generated by Cargo
+# will have compiled files and executables
+debug/
+target/
+
+# Remove Cargo.lock from gitignore if creating an executable, leave it for libraries
+# More information here https://doc.rust-lang.org/cargo/guide/cargo-toml-vs-cargo-lock.html
+Cargo.lock
+
+# These are backup files generated by rustfmt
+**/*.rs.bk
+
+# MSVC Windows builds of rustc generate these, which store debugging information
+*.pdb
examples/rust/webapp/Cargo.toml (new file, 93 lines)
@@ -0,0 +1,93 @@
+[package]
+name = "harmony-example-rust-webapp"
+version = "0.1.0"
+edition = "2021"
+
+[lib]
+crate-type = ["cdylib", "rlib"]
+
+[workspace]
+
+[dependencies]
+actix-files = { version = "0.6", optional = true }
+actix-web = { version = "4", optional = true, features = ["macros"] }
+console_error_panic_hook = "0.1"
+http = { version = "1.0.0", optional = true }
+leptos = { version = "0.7.0" }
+leptos_meta = { version = "0.7.0" }
+leptos_actix = { version = "0.7.0", optional = true }
+leptos_router = { version = "0.7.0" }
+wasm-bindgen = "=0.2.100"
+
+[features]
+csr = ["leptos/csr"]
+hydrate = ["leptos/hydrate"]
+ssr = [
+    "dep:actix-files",
+    "dep:actix-web",
+    "dep:leptos_actix",
+    "leptos/ssr",
+    "leptos_meta/ssr",
+    "leptos_router/ssr",
+]
+
+# Defines a size-optimized profile for the WASM bundle in release mode
+[profile.wasm-release]
+inherits = "release"
+opt-level = 'z'
+lto = true
+codegen-units = 1
+panic = "abort"
+
+[package.metadata.leptos]
+# The name used by wasm-bindgen/cargo-leptos for the JS/WASM bundle. Defaults to the crate name
+output-name = "harmony-example-rust-webapp"
+# The site root folder is where cargo-leptos generates all output. WARNING: all content of this folder will be erased on a rebuild. Use it in your server setup.
+site-root = "target/site"
+# The site-root-relative folder where all compiled output (JS, WASM and CSS) is written
+# Defaults to pkg
+site-pkg-dir = "pkg"
+# [Optional] The source CSS file. If it ends with .sass or .scss then it will be compiled by dart-sass into CSS. The CSS is optimized by Lightning CSS before being written to <site-root>/<site-pkg>/app.css
+style-file = "style/main.scss"
+# Assets source dir. All files found here will be copied and synchronized to site-root.
+# The assets-dir cannot have a sub directory with the same name/path as site-pkg-dir.
+#
+# Optional. Env: LEPTOS_ASSETS_DIR.
+assets-dir = "assets"
+# The IP and port (ex: 127.0.0.1:3000) where the server serves the content. Use it in your server setup.
+site-addr = "0.0.0.0:3000"
+# The port to use for automatic reload monitoring
+reload-port = 3001
+# [Optional] Command to use when running end2end tests. It will run in the end2end dir.
+# [Windows] for non-WSL use "npx.cmd playwright test"
+# This binary name can be checked in Powershell with Get-Command npx
+end2end-cmd = "npx playwright test"
+end2end-dir = "end2end"
+# The browserlist query used for optimizing the CSS.
+browserquery = "defaults"
+# The environment Leptos will run in, usually either "DEV" or "PROD"
+env = "DEV"
+# The features to use when compiling the bin target
+#
+# Optional. Can be over-ridden with the command line parameter --bin-features
+bin-features = ["ssr"]
+
+# If the --no-default-features flag should be used when compiling the bin target
+#
+# Optional. Defaults to false.
+bin-default-features = false
+
+# The features to use when compiling the lib target
+#
+# Optional. Can be over-ridden with the command line parameter --lib-features
+lib-features = ["hydrate"]
+
+# If the --no-default-features flag should be used when compiling the lib target
+#
+# Optional. Defaults to false.
+lib-default-features = false
+
+# The profile to use for the lib target when compiling for release
+#
+# Optional. Defaults to "release".
+lib-profile-release = "wasm-release"
examples/rust/webapp/Dockerfile.harmony (new file, 16 lines)
@@ -0,0 +1,16 @@
+FROM rust:bookworm as builder
+RUN apt-get update && apt-get install -y --no-install-recommends clang wget && wget https://github.com/cargo-bins/cargo-binstall/releases/latest/download/cargo-binstall-x86_64-unknown-linux-musl.tgz && tar -xvf cargo-binstall-x86_64-unknown-linux-musl.tgz && cp cargo-binstall /usr/local/cargo/bin && rm cargo-binstall-x86_64-unknown-linux-musl.tgz cargo-binstall && apt-get clean && rm -rf /var/lib/apt/lists/*
+RUN cargo binstall cargo-leptos -y
+RUN rustup target add wasm32-unknown-unknown
+WORKDIR /app
+COPY . .
+RUN cargo leptos build --release -vv
+FROM debian:bookworm-slim
+RUN groupadd -r appgroup && useradd -r -s /bin/false -g appgroup appuser
+ENV LEPTOS_SITE_ADDR=0.0.0.0:3000
+EXPOSE 3000/tcp
+WORKDIR /home/appuser
+COPY --from=builder /app/target/site/pkg /home/appuser/pkg
+COPY --from=builder /app/target/release/harmony-example-rust-webapp /home/appuser/harmony-example-rust-webapp
+USER appuser
+CMD /home/appuser/harmony-example-rust-webapp
examples/rust/webapp/LICENSE (new file, 24 lines)
@@ -0,0 +1,24 @@
+This is free and unencumbered software released into the public domain.
+
+Anyone is free to copy, modify, publish, use, compile, sell, or
+distribute this software, either in source code form or as a compiled
+binary, for any purpose, commercial or non-commercial, and by any
+means.
+
+In jurisdictions that recognize copyright laws, the author or authors
+of this software dedicate any and all copyright interest in the
+software to the public domain. We make this dedication for the benefit
+of the public at large and to the detriment of our heirs and
+successors. We intend this dedication to be an overt act of
+relinquishment in perpetuity of all present and future rights to this
+software under copyright law.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+OTHER DEALINGS IN THE SOFTWARE.
+
+For more information, please refer to <https://unlicense.org>
examples/rust/webapp/README.md (new file, 72 lines)
@@ -0,0 +1,72 @@
+<picture>
+    <source srcset="https://raw.githubusercontent.com/leptos-rs/leptos/main/docs/logos/Leptos_logo_Solid_White.svg" media="(prefers-color-scheme: dark)">
+    <img src="https://raw.githubusercontent.com/leptos-rs/leptos/main/docs/logos/Leptos_logo_RGB.svg" alt="Leptos Logo">
+</picture>
+
+# Leptos Starter Template
+
+This is a template for use with the [Leptos](https://github.com/leptos-rs/leptos) web framework and the [cargo-leptos](https://github.com/akesson/cargo-leptos) tool.
+
+## Creating your template repo
+
+If you don't have `cargo-leptos` installed, you can install it with
+
+`cargo install cargo-leptos --locked`
+
+Then run
+
+`cargo leptos new --git leptos-rs/start-actix`
+
+to generate a new project template (you will be prompted to enter a project name), and
+
+`cd {projectname}`
+
+to go to your newly created project.
+
+Of course, you should explore the project structure, but the best place to start with your application code is in `src/app.rs`.
+
+## Running your project
+
+`cargo leptos watch`
+
+By default, you can access your local project at `http://localhost:3000`.
+
+## Installing Additional Tools
+
+By default, `cargo-leptos` uses `nightly` Rust, `cargo-generate`, and `sass`. If you run into any trouble, you may need to install one or more of these tools.
+
+1. `rustup toolchain install nightly --allow-downgrade` - make sure you have Rust nightly
+2. `rustup target add wasm32-unknown-unknown` - add the ability to compile Rust to WebAssembly
+3. `cargo install cargo-generate` - install the `cargo-generate` binary (should be installed automatically in future)
+4. `npm install -g sass` - install `dart-sass` (should be optional in future)
+
+## Executing a Server on a Remote Machine Without the Toolchain
+
+After running a `cargo leptos build --release`, the minimum files needed are:
+
+1. The server binary, located in `target/server/release`
+2. The `site` directory and all files within it, located in `target/site`
+
+Copy these files to your remote server. The directory structure should be:
+
+```text
+leptos_start
+site/
+```
+
+Set the following environment variables (updating for your project as needed):
+
+```sh
+export LEPTOS_OUTPUT_NAME="leptos_start"
+export LEPTOS_SITE_ROOT="site"
+export LEPTOS_SITE_PKG_DIR="pkg"
+export LEPTOS_SITE_ADDR="127.0.0.1:3000"
+export LEPTOS_RELOAD_PORT="3001"
+```
+
+Finally, run the server binary.
+
+## Notes about CSR and Trunk
+
+Although it is not recommended, you can also run your project without server integration using the `csr` feature and `trunk serve`:
+
+`trunk serve --open --features csr`
+
+This may be useful for integrating external tools which require a static site, e.g. `tauri`.
+
+## Licensing
+
+This template itself is released under the Unlicense. You should replace the LICENSE for your own application with an appropriate license if you plan to release it publicly.
examples/rust/webapp/assets/favicon.ico (binary, new file, 15 KiB)
Binary file not shown.
examples/rust/webapp/end2end/package-lock.json (generated, new file, 112 lines)
@@ -0,0 +1,112 @@
+{
+  "name": "end2end",
+  "version": "1.0.0",
+  "lockfileVersion": 3,
+  "requires": true,
+  "packages": {
+    "": {
+      "name": "end2end",
+      "version": "1.0.0",
+      "license": "ISC",
+      "devDependencies": {
+        "@playwright/test": "^1.44.1",
+        "@types/node": "^20.12.12",
+        "typescript": "^5.4.5"
+      }
+    },
+    "node_modules/@playwright/test": {
+      "version": "1.44.1",
+      "resolved": "https://registry.npmjs.org/@playwright/test/-/test-1.44.1.tgz",
+      "integrity": "sha512-1hZ4TNvD5z9VuhNJ/walIjvMVvYkZKf71axoF/uiAqpntQJXpG64dlXhoDXE3OczPuTuvjf/M5KWFg5VAVUS3Q==",
+      "dev": true,
+      "license": "Apache-2.0",
+      "dependencies": {
+        "playwright": "1.44.1"
+      },
+      "bin": {
+        "playwright": "cli.js"
+      },
+      "engines": {
+        "node": ">=16"
+      }
+    },
+    "node_modules/@types/node": {
+      "version": "20.12.12",
+      "resolved": "https://registry.npmjs.org/@types/node/-/node-20.12.12.tgz",
+      "integrity": "sha512-eWLDGF/FOSPtAvEqeRAQ4C8LSA7M1I7i0ky1I8U7kD1J5ITyW3AsRhQrKVoWf5pFKZ2kILsEGJhsI9r93PYnOw==",
+      "dev": true,
+      "license": "MIT",
+      "dependencies": {
+        "undici-types": "~5.26.4"
+      }
+    },
+    "node_modules/fsevents": {
+      "version": "2.3.2",
+      "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.2.tgz",
+      "integrity": "sha512-xiqMQR4xAeHTuB9uWm+fFRcIOgKBMiOBP+eXiyT7jsgVCq1bkVygt00oASowB7EdtpOHaaPgKt812P9ab+DDKA==",
+      "dev": true,
+      "hasInstallScript": true,
+      "license": "MIT",
+      "optional": true,
+      "os": [
+        "darwin"
+      ],
+      "engines": {
+        "node": "^8.16.0 || ^10.6.0 || >=11.0.0"
+      }
+    },
+    "node_modules/playwright": {
+      "version": "1.44.1",
+      "resolved": "https://registry.npmjs.org/playwright/-/playwright-1.44.1.tgz",
+      "integrity": "sha512-qr/0UJ5CFAtloI3avF95Y0L1xQo6r3LQArLIg/z/PoGJ6xa+EwzrwO5lpNr/09STxdHuUoP2mvuELJS+hLdtgg==",
+      "dev": true,
+      "license": "Apache-2.0",
+      "dependencies": {
+        "playwright-core": "1.44.1"
+      },
+      "bin": {
+        "playwright": "cli.js"
+      },
+      "engines": {
+        "node": ">=16"
+      },
+      "optionalDependencies": {
+        "fsevents": "2.3.2"
+      }
+    },
+    "node_modules/playwright-core": {
+      "version": "1.44.1",
+      "resolved": "https://registry.npmjs.org/playwright-core/-/playwright-core-1.44.1.tgz",
+      "integrity": "sha512-wh0JWtYTrhv1+OSsLPgFzGzt67Y7BE/ZS3jEqgGBlp2ppp1ZDj8c+9IARNW4dwf1poq5MgHreEM2KV/GuR4cFA==",
+      "dev": true,
+      "license": "Apache-2.0",
+      "bin": {
+        "playwright-core": "cli.js"
+      },
+      "engines": {
+        "node": ">=16"
+      }
+    },
+    "node_modules/typescript": {
+      "version": "5.4.5",
+      "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.4.5.tgz",
+      "integrity": "sha512-vcI4UpRgg81oIRUFwR0WSIHKt11nJ7SAVlYNIu+QpqeyXP+gpQJy/Z4+F0aGxSE4MqwjyXvW/TzgkLAx2AGHwQ==",
+      "dev": true,
+      "license": "Apache-2.0",
+      "bin": {
+        "tsc": "bin/tsc",
+        "tsserver": "bin/tsserver"
+      },
+      "engines": {
+        "node": ">=14.17"
+      }
+    },
+    "node_modules/undici-types": {
+      "version": "5.26.5",
+      "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-5.26.5.tgz",
+      "integrity": "sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA==",
+      "dev": true,
+      "license": "MIT"
+    }
+  }
+}
examples/rust/webapp/end2end/package.json (new file, 15 lines)
@@ -0,0 +1,15 @@
+{
+  "name": "end2end",
+  "version": "1.0.0",
+  "description": "",
+  "main": "index.js",
+  "scripts": {},
+  "keywords": [],
+  "author": "",
+  "license": "ISC",
+  "devDependencies": {
+    "@playwright/test": "^1.44.1",
+    "@types/node": "^20.12.12",
+    "typescript": "^5.4.5"
+  }
+}
examples/rust/webapp/end2end/playwright.config.ts (new file, 104 lines)
@@ -0,0 +1,104 @@
+import { devices, defineConfig } from "@playwright/test";
+
+/**
+ * Read environment variables from file.
+ * https://github.com/motdotla/dotenv
+ */
+// require('dotenv').config();
+
+/**
+ * See https://playwright.dev/docs/test-configuration.
+ */
+export default defineConfig({
+  testDir: "./tests",
+  /* Maximum time one test can run for. */
+  timeout: 30 * 1000,
+  expect: {
+    /**
+     * Maximum time expect() should wait for the condition to be met.
+     * For example in `await expect(locator).toHaveText();`
+     */
+    timeout: 5000,
+  },
+  /* Run tests in files in parallel */
+  fullyParallel: true,
+  /* Fail the build on CI if you accidentally left test.only in the source code. */
+  forbidOnly: !!process.env.CI,
+  /* Retry on CI only */
+  retries: process.env.CI ? 2 : 0,
+  /* Opt out of parallel tests on CI. */
+  workers: process.env.CI ? 1 : undefined,
+  /* Reporter to use. See https://playwright.dev/docs/test-reporters */
+  reporter: "html",
+  /* Shared settings for all the projects below. See https://playwright.dev/docs/api/class-testoptions. */
+  use: {
+    /* Maximum time each action such as `click()` can take. Defaults to 0 (no limit). */
+    actionTimeout: 0,
+    /* Base URL to use in actions like `await page.goto('/')`. */
+    // baseURL: 'http://localhost:3000',
+
+    /* Collect trace when retrying the failed test. See https://playwright.dev/docs/trace-viewer */
+    trace: "on-first-retry",
+  },
+
+  /* Configure projects for major browsers */
+  projects: [
+    {
+      name: "chromium",
+      use: {
+        ...devices["Desktop Chrome"],
+      },
+    },
+
+    {
+      name: "firefox",
+      use: {
+        ...devices["Desktop Firefox"],
+      },
+    },
+
+    {
+      name: "webkit",
+      use: {
+        ...devices["Desktop Safari"],
+      },
+    },
+
+    /* Test against mobile viewports. */
+    // {
+    //   name: 'Mobile Chrome',
+    //   use: {
+    //     ...devices['Pixel 5'],
+    //   },
+    // },
+    // {
+    //   name: 'Mobile Safari',
+    //   use: {
+    //     ...devices['iPhone 12'],
+    //   },
+    // },
+
+    /* Test against branded browsers. */
+    // {
+    //   name: 'Microsoft Edge',
+    //   use: {
+    //     channel: 'msedge',
+    //   },
+    // },
+    // {
+    //   name: 'Google Chrome',
+    //   use: {
+    //     channel: 'chrome',
+    //   },
+    // },
+  ],
+
+  /* Folder for test artifacts such as screenshots, videos, traces, etc. */
+  // outputDir: 'test-results/',
+
+  /* Run your local dev server before starting the tests */
+  // webServer: {
+  //   command: 'npm run start',
+  //   port: 3000,
+  // },
+});
examples/rust/webapp/end2end/tests/example.spec.ts (new file, 9 lines)
@@ -0,0 +1,9 @@
+import { test, expect } from "@playwright/test";
+
+test("homepage has title and links to intro page", async ({ page }) => {
+  await page.goto("http://localhost:3000/");
+
+  await expect(page).toHaveTitle("Welcome to Leptos");
+
+  await expect(page.locator("h1")).toHaveText("Welcome to Leptos!");
+});
examples/rust/webapp/end2end/tsconfig.json (new file, 109 lines)
@@ -0,0 +1,109 @@
+{
+  "compilerOptions": {
+    /* Visit https://aka.ms/tsconfig to read more about this file */
+
+    /* Projects */
+    // "incremental": true, /* Save .tsbuildinfo files to allow for incremental compilation of projects. */
+    // "composite": true, /* Enable constraints that allow a TypeScript project to be used with project references. */
+    // "tsBuildInfoFile": "./.tsbuildinfo", /* Specify the path to .tsbuildinfo incremental compilation file. */
+    // "disableSourceOfProjectReferenceRedirect": true, /* Disable preferring source files instead of declaration files when referencing composite projects. */
+    // "disableSolutionSearching": true, /* Opt a project out of multi-project reference checking when editing. */
+    // "disableReferencedProjectLoad": true, /* Reduce the number of projects loaded automatically by TypeScript. */
+
+    /* Language and Environment */
+    "target": "es2016", /* Set the JavaScript language version for emitted JavaScript and include compatible library declarations. */
+    // "lib": [], /* Specify a set of bundled library declaration files that describe the target runtime environment. */
+    // "jsx": "preserve", /* Specify what JSX code is generated. */
+    // "experimentalDecorators": true, /* Enable experimental support for legacy experimental decorators. */
+    // "emitDecoratorMetadata": true, /* Emit design-type metadata for decorated declarations in source files. */
+    // "jsxFactory": "", /* Specify the JSX factory function used when targeting React JSX emit, e.g. 'React.createElement' or 'h'. */
+    // "jsxFragmentFactory": "", /* Specify the JSX Fragment reference used for fragments when targeting React JSX emit e.g. 'React.Fragment' or 'Fragment'. */
+    // "jsxImportSource": "", /* Specify module specifier used to import the JSX factory functions when using 'jsx: react-jsx*'. */
+    // "reactNamespace": "", /* Specify the object invoked for 'createElement'. This only applies when targeting 'react' JSX emit. */
+    // "noLib": true, /* Disable including any library files, including the default lib.d.ts. */
+    // "useDefineForClassFields": true, /* Emit ECMAScript-standard-compliant class fields. */
+    // "moduleDetection": "auto", /* Control what method is used to detect module-format JS files. */
+
+    /* Modules */
+    "module": "commonjs", /* Specify what module code is generated. */
+    // "rootDir": "./", /* Specify the root folder within your source files. */
+    // "moduleResolution": "node10", /* Specify how TypeScript looks up a file from a given module specifier. */
+    // "baseUrl": "./", /* Specify the base directory to resolve non-relative module names. */
+    // "paths": {}, /* Specify a set of entries that re-map imports to additional lookup locations. */
+    // "rootDirs": [], /* Allow multiple folders to be treated as one when resolving modules. */
+    // "typeRoots": [], /* Specify multiple folders that act like './node_modules/@types'. */
+    // "types": [], /* Specify type package names to be included without being referenced in a source file. */
+    // "allowUmdGlobalAccess": true, /* Allow accessing UMD globals from modules. */
+    // "moduleSuffixes": [], /* List of file name suffixes to search when resolving a module. */
+    // "allowImportingTsExtensions": true, /* Allow imports to include TypeScript file extensions. Requires '--moduleResolution bundler' and either '--noEmit' or '--emitDeclarationOnly' to be set. */
+    // "resolvePackageJsonExports": true, /* Use the package.json 'exports' field when resolving package imports. */
+    // "resolvePackageJsonImports": true, /* Use the package.json 'imports' field when resolving imports. */
+    // "customConditions": [], /* Conditions to set in addition to the resolver-specific defaults when resolving imports. */
+    // "resolveJsonModule": true, /* Enable importing .json files. */
+    // "allowArbitraryExtensions": true, /* Enable importing files with any extension, provided a declaration file is present. */
+    // "noResolve": true, /* Disallow 'import's, 'require's or '<reference>'s from expanding the number of files TypeScript should add to a project. */
+
+    /* JavaScript Support */
+    // "allowJs": true, /* Allow JavaScript files to be a part of your program. Use the 'checkJS' option to get errors from these files. */
+    // "checkJs": true, /* Enable error reporting in type-checked JavaScript files. */
+    // "maxNodeModuleJsDepth": 1, /* Specify the maximum folder depth used for checking JavaScript files from 'node_modules'. Only applicable with 'allowJs'. */
+
+    /* Emit */
+    // "declaration": true, /* Generate .d.ts files from TypeScript and JavaScript files in your project. */
+    // "declarationMap": true, /* Create sourcemaps for d.ts files. */
+    // "emitDeclarationOnly": true, /* Only output d.ts files and not JavaScript files. */
+    // "sourceMap": true, /* Create source map files for emitted JavaScript files. */
+    // "inlineSourceMap": true, /* Include sourcemap files inside the emitted JavaScript. */
+    // "outFile": "./", /* Specify a file that bundles all outputs into one JavaScript file. If 'declaration' is true, also designates a file that bundles all .d.ts output. */
+    // "outDir": "./", /* Specify an output folder for all emitted files. */
+    // "removeComments": true, /* Disable emitting comments. */
+    // "noEmit": true, /* Disable emitting files from a compilation. */
+    // "importHelpers": true, /* Allow importing helper functions from tslib once per project, instead of including them per-file. */
+    // "importsNotUsedAsValues": "remove", /* Specify emit/checking behavior for imports that are only used for types. */
+    // "downlevelIteration": true, /* Emit more compliant, but verbose and less performant JavaScript for iteration. */
+    // "sourceRoot": "", /* Specify the root path for debuggers to find the reference source code. */
+    // "mapRoot": "", /* Specify the location where debugger should locate map files instead of generated locations. */
+    // "inlineSources": true, /* Include source code in the sourcemaps inside the emitted JavaScript. */
+    // "emitBOM": true, /* Emit a UTF-8 Byte Order Mark (BOM) in the beginning of output files. */
+    // "newLine": "crlf", /* Set the newline character for emitting files. */
+    // "stripInternal": true, /* Disable emitting declarations that have '@internal' in their JSDoc comments. */
+    // "noEmitHelpers": true, /* Disable generating custom helper functions like '__extends' in compiled output. */
+    // "noEmitOnError": true, /* Disable emitting files if any type checking errors are reported. */
+    // "preserveConstEnums": true, /* Disable erasing 'const enum' declarations in generated code. */
+    // "declarationDir": "./", /* Specify the output directory for generated declaration files. */
+    // "preserveValueImports": true, /* Preserve unused imported values in the JavaScript output that would otherwise be removed. */
+
+    /* Interop Constraints */
+    // "isolatedModules": true, /* Ensure that each file can be safely transpiled without relying on other imports. */
+    // "verbatimModuleSyntax": true, /* Do not transform or elide any imports or exports not marked as type-only, ensuring they are written in the output file's format based on the 'module' setting. */
+    // "allowSyntheticDefaultImports": true, /* Allow 'import x from y' when a module doesn't have a default export. */
+    "esModuleInterop": true, /* Emit additional JavaScript to ease support for importing CommonJS modules. This enables 'allowSyntheticDefaultImports' for type compatibility. */
+    // "preserveSymlinks": true, /* Disable resolving symlinks to their realpath. This correlates to the same flag in node. */
+    "forceConsistentCasingInFileNames": true, /* Ensure that casing is correct in imports. */
+
+    /* Type Checking */
+    "strict": true, /* Enable all strict type-checking options. */
+    // "noImplicitAny": true, /* Enable error reporting for expressions and declarations with an implied 'any' type. */
+    // "strictNullChecks": true, /* When type checking, take into account 'null' and 'undefined'. */
+    // "strictFunctionTypes": true, /* When assigning functions, check to ensure parameters and the return values are subtype-compatible. */
+    // "strictBindCallApply": true, /* Check that the arguments for 'bind', 'call', and 'apply' methods match the original function. */
+    // "strictPropertyInitialization": true, /* Check for class properties that are declared but not set in the constructor. */
+    // "noImplicitThis": true, /* Enable error reporting when 'this' is given the type 'any'. */
+    // "useUnknownInCatchVariables": true, /* Default catch clause variables as 'unknown' instead of 'any'. */
+    // "alwaysStrict": true, /* Ensure 'use strict' is always emitted. */
+    // "noUnusedLocals": true, /* Enable error reporting when local variables aren't read. */
+    // "noUnusedParameters": true, /* Raise an error when a function parameter isn't read. */
+    // "exactOptionalPropertyTypes": true, /* Interpret optional property types as written, rather than adding 'undefined'. */
+    // "noImplicitReturns": true, /* Enable error reporting for codepaths that do not explicitly return in a function. */
+    // "noFallthroughCasesInSwitch": true, /* Enable error reporting for fallthrough cases in switch statements. */
+    // "noUncheckedIndexedAccess": true, /* Add 'undefined' to a type when accessed using an index. */
+    // "noImplicitOverride": true, /* Ensure overriding members in derived classes are marked with an override modifier. */
+    // "noPropertyAccessFromIndexSignature": true, /* Enforces using indexed accessors for keys declared using an indexed type. */
+    // "allowUnusedLabels": true, /* Disable error reporting for unused labels. */
+    // "allowUnreachableCode": true, /* Disable error reporting for unreachable code. */
+
+    /* Completeness */
+    // "skipDefaultLibCheck": true, /* Skip type checking .d.ts files that are included with TypeScript. */
+    "skipLibCheck": true /* Skip type checking all .d.ts files. */
+  }
+}
examples/rust/webapp/src/app.rs (new file, 66 lines)
@@ -0,0 +1,66 @@
+use leptos::prelude::*;
+use leptos_meta::{provide_meta_context, Stylesheet, Title};
+use leptos_router::{
+    components::{Route, Router, Routes},
+    StaticSegment, WildcardSegment,
+};
+
+#[component]
+pub fn App() -> impl IntoView {
+    // Provides context that manages stylesheets, titles, meta tags, etc.
+    provide_meta_context();
+
+    view! {
+        // injects a stylesheet into the document <head>
+        // id=leptos means cargo-leptos will hot-reload this stylesheet
+        <Stylesheet id="leptos" href="/pkg/harmony-example-rust-webapp.css"/>
+
+        // sets the document title
+        <Title text="Welcome to Leptos"/>
+
+        // content for this welcome page
+        <Router>
+            <main>
+                <Routes fallback=move || "Not found.">
+                    <Route path=StaticSegment("") view=HomePage/>
+                    <Route path=WildcardSegment("any") view=NotFound/>
+                </Routes>
+            </main>
+        </Router>
+    }
+}
+
+/// Renders the home page of your application.
+#[component]
+fn HomePage() -> impl IntoView {
+    // Creates a reactive value to update the button
+    let count = RwSignal::new(0);
+    let on_click = move |_| *count.write() += 1;
+
+    view! {
+        <h1>"Welcome to Leptos!"</h1>
+        <button on:click=on_click>"Click Me: " {count}</button>
+    }
+}
+
+/// 404 - Not Found
+#[component]
+fn NotFound() -> impl IntoView {
+    // set an HTTP status code 404
+    // this is feature gated because it can only be done during
+    // initial server-side rendering
+    // if you navigate to the 404 page subsequently, the status
+    // code will not be set because there is not a new HTTP request
+    // to the server
+    #[cfg(feature = "ssr")]
+    {
+        // this can be done inline because it's synchronous
+        // if it were async, we'd use a server function
+        let resp = expect_context::<leptos_actix::ResponseOptions>();
+        resp.set_status(actix_web::http::StatusCode::NOT_FOUND);
+    }
+
+    view! {
+        <h1>"Not Found"</h1>
+    }
+}
examples/rust/webapp/src/lib.rs (new file, 9 lines)
@@ -0,0 +1,9 @@
+pub mod app;
+
+#[cfg(feature = "hydrate")]
+#[wasm_bindgen::prelude::wasm_bindgen]
+pub fn hydrate() {
+    use app::*;
+    console_error_panic_hook::set_once();
+    leptos::mount::hydrate_body(App);
+}
examples/rust/webapp/src/main.rs (new file, 88 lines)
@@ -0,0 +1,88 @@
+#[cfg(feature = "ssr")]
+#[actix_web::main]
+async fn main() -> std::io::Result<()> {
+    use actix_files::Files;
+    use actix_web::*;
+    use leptos::prelude::*;
+    use leptos::config::get_configuration;
+    use leptos_meta::MetaTags;
+    use leptos_actix::{generate_route_list, LeptosRoutes};
+    use harmony_example_rust_webapp::app::*;
+
+    let conf = get_configuration(None).unwrap();
+    let addr = conf.leptos_options.site_addr;
+
+    HttpServer::new(move || {
+        // Generate the list of routes in your Leptos App
+        let routes = generate_route_list(App);
+        let leptos_options = &conf.leptos_options;
+        let site_root = leptos_options.site_root.clone().to_string();
+
+        println!("listening on http://{}", &addr);
+
+        App::new()
+            // serve JS/WASM/CSS from `pkg`
+            .service(Files::new("/pkg", format!("{site_root}/pkg")))
+            // serve other assets from the `assets` directory
+            .service(Files::new("/assets", &site_root))
+            // serve the favicon from /favicon.ico
+            .service(favicon)
+            .leptos_routes(routes, {
+                let leptos_options = leptos_options.clone();
+                move || {
+                    view! {
+                        <!DOCTYPE html>
+                        <html lang="en">
+                            <head>
+                                <meta charset="utf-8"/>
+                                <meta name="viewport" content="width=device-width, initial-scale=1"/>
+                                <AutoReload options=leptos_options.clone() />
+                                <HydrationScripts options=leptos_options.clone()/>
+                                <MetaTags/>
+                            </head>
+                            <body>
+                                <App/>
+                            </body>
+                        </html>
+                    }
+                }
+            })
+            .app_data(web::Data::new(leptos_options.to_owned()))
+        //.wrap(middleware::Compress::default())
+    })
+    .bind(&addr)?
+    .run()
+    .await
+}
+
+#[cfg(feature = "ssr")]
+#[actix_web::get("favicon.ico")]
+async fn favicon(
+    leptos_options: actix_web::web::Data<leptos::config::LeptosOptions>,
+) -> actix_web::Result<actix_files::NamedFile> {
+    let leptos_options = leptos_options.into_inner();
+    let site_root = &leptos_options.site_root;
+    Ok(actix_files::NamedFile::open(format!(
+        "{site_root}/favicon.ico"
+    ))?)
+}
+
+#[cfg(not(any(feature = "ssr", feature = "csr")))]
+pub fn main() {
+    // no client-side main function
+    // unless we want this to work with e.g., Trunk for pure client-side testing
+    // see lib.rs for hydration function instead
+    // see optional feature `csr` instead
+}
+
+#[cfg(all(not(feature = "ssr"), feature = "csr"))]
+pub fn main() {
+    // a client-side main function is required for using `trunk serve`
+    // prefer using `cargo leptos serve` instead
+    // to run: `trunk serve --open --features csr`
+    use harmony_example_rust_webapp::app::*;
+
+    console_error_panic_hook::set_once();
+
+    leptos::mount_to_body(App);
+}
examples/rust/webapp/style/main.scss (new file, 4 lines)
@@ -0,0 +1,4 @@
+body {
+    font-family: sans-serif;
+    text-align: center;
+}
@@ -10,9 +10,9 @@ publish = false
 harmony = { path = "../../harmony" }
 harmony_tui = { path = "../../harmony_tui" }
 harmony_types = { path = "../../harmony_types" }
-harmony_macros = { path = "../../harmony_macros" }
 cidr = { workspace = true }
 tokio = { workspace = true }
+harmony_macros = { path = "../../harmony_macros" }
 log = { workspace = true }
 env_logger = { workspace = true }
 url = { workspace = true }
@@ -13,7 +13,7 @@ reqwest = { version = "0.11", features = ["blocking", "json"] }
 russh = "0.45.0"
 rust-ipmi = "0.1.1"
 semver = "1.0.23"
-serde = { version = "1.0.209", features = ["derive"] }
+serde = { version = "1.0.209", features = ["derive", "rc"] }
 serde_json = "1.0.127"
 tokio.workspace = true
 derive-new.workspace = true
@@ -53,3 +53,16 @@ fqdn = { version = "0.4.6", features = [
 ] }
 temp-dir = "0.1.14"
 dyn-clone = "1.0.19"
+similar.workspace = true
+futures-util = "0.3.31"
+tokio-util = "0.7.15"
+strum = { version = "0.27.1", features = ["derive"] }
+tempfile = "3.20.0"
+serde_with = "3.14.0"
+bollard.workspace = true
+tar.workspace = true
+base64.workspace = true
+figment.workspace = true
+
+[dev-dependencies]
+pretty_assertions.workspace = true
@@ -1,13 +1,66 @@
|
||||
use figment::{
|
||||
Error, Figment, Metadata, Profile, Provider,
|
||||
providers::{Env, Format},
|
||||
value::{Dict, Map},
|
||||
};
|
||||
use lazy_static::lazy_static;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::path::PathBuf;
|
||||
|
||||
lazy_static! {
|
||||
pub static ref HARMONY_CONFIG_DIR: PathBuf = directories::BaseDirs::new()
|
||||
.unwrap()
|
||||
.data_dir()
|
||||
.join("harmony");
|
||||
pub static ref REGISTRY_URL: String =
|
||||
std::env::var("HARMONY_REGISTRY_URL").unwrap_or_else(|_| "hub.nationtech.io".to_string());
|
||||
pub static ref REGISTRY_PROJECT: String =
|
||||
std::env::var("HARMONY_REGISTRY_PROJECT").unwrap_or_else(|_| "harmony".to_string());
|
||||
#[derive(Debug, Deserialize, Serialize)]
|
||||
pub struct Config {
|
||||
pub data_dir: PathBuf,
|
||||
pub registry_url: String,
|
||||
pub registry_project: String,
|
||||
pub dry_run: bool,
|
||||
pub run_upgrades: bool,
|
||||
}
|
||||
|
||||
impl Default for Config {
|
||||
fn default() -> Self {
|
||||
Config {
|
||||
data_dir: directories::BaseDirs::new()
|
||||
.unwrap()
|
||||
.data_dir()
|
||||
.join("harmony"),
|
||||
registry_url: "hub.nationtech.io".to_string(),
|
||||
registry_project: "harmony".to_string(),
|
||||
dry_run: true,
|
||||
run_upgrades: false,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Config {
|
||||
pub fn load() -> Result<Self, figment::Error> {
|
||||
Figment::from(Config::default())
|
||||
.merge(Env::prefixed("HARMONY_"))
|
||||
.extract()
|
||||
}
|
||||
|
||||
fn from<T: Provider>(provider: T) -> Result<Config, Error> {
|
||||
Figment::from(provider).extract()
|
||||
}
|
||||
|
||||
fn figment() -> Figment {
|
||||
use figment::providers::Env;
|
||||
|
||||
// In reality, whatever the library desires.
|
||||
Figment::from(Config::default()).merge(Env::prefixed("HARMONY_"))
|
||||
}
|
||||
}
|
||||
|
||||
impl Provider for Config {
|
||||
fn metadata(&self) -> Metadata {
|
||||
Metadata::named("Harmony Config")
|
||||
}
|
||||
|
||||
fn data(&self) -> Result<Map<Profile, Dict>, Error> {
|
||||
figment::providers::Serialized::defaults(Config::default()).data()
|
||||
}
|
||||
|
||||
fn profile(&self) -> Option<Profile> {
|
||||
// Optionally, a profile that's selected by default.
|
||||
Some(Profile::Default)
|
||||
}
|
||||
}
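// A minimal usage sketch (illustrative, not part of the diff): with the Figment setup
// above, environment variables prefixed with HARMONY_ override the serialized defaults,
// so e.g. HARMONY_DRY_RUN=false or HARMONY_REGISTRY_URL=registry.example.com (a made-up
// value) changes the corresponding fields.
fn print_effective_config() {
    let config = Config::load().expect("couldn't load config");
    println!(
        "dry_run={} registry={}/{}",
        config.dry_run, config.registry_url, config.registry_project
    );
}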
|
||||
|
||||
@@ -21,6 +21,8 @@ pub enum InterpretName {
|
||||
OPNSense,
|
||||
K3dInstallation,
|
||||
TenantInterpret,
|
||||
Application,
|
||||
ArgoCD,
|
||||
}
|
||||
|
||||
impl std::fmt::Display for InterpretName {
|
||||
@@ -37,6 +39,8 @@ impl std::fmt::Display for InterpretName {
|
||||
InterpretName::OPNSense => f.write_str("OPNSense"),
|
||||
InterpretName::K3dInstallation => f.write_str("K3dInstallation"),
|
||||
InterpretName::TenantInterpret => f.write_str("Tenant"),
|
||||
InterpretName::Application => f.write_str("Application"),
|
||||
InterpretName::ArgoCD => f.write_str("ArgoCD"),
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -124,3 +128,11 @@ impl From<kube::Error> for InterpretError {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl From<String> for InterpretError {
|
||||
fn from(value: String) -> Self {
|
||||
Self {
|
||||
msg: format!("InterpretError : {value}"),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -34,6 +34,17 @@ pub struct Inventory {
|
||||
}
|
||||
|
||||
impl Inventory {
|
||||
pub fn empty() -> Self {
|
||||
Self {
|
||||
location: Location::new("Empty".to_string(), "location".to_string()),
|
||||
switch: vec![],
|
||||
firewall: vec![],
|
||||
worker_host: vec![],
|
||||
storage_host: vec![],
|
||||
control_plane_host: vec![],
|
||||
}
|
||||
}
|
||||
|
||||
pub fn autoload() -> Self {
|
||||
Self {
|
||||
location: Location::test_building(),
|
||||
|
||||
@@ -19,7 +19,10 @@ pub struct Maestro<T: Topology> {
|
||||
}
|
||||
|
||||
impl<T: Topology> Maestro<T> {
|
||||
pub fn new(inventory: Inventory, topology: T) -> Self {
|
||||
/// Creates a bare maestro without initialization.
|
||||
///
|
||||
/// This should rarely be used. Most of the time Maestro::initialize should be used instead.
|
||||
pub fn new_without_initialization(inventory: Inventory, topology: T) -> Self {
|
||||
Self {
|
||||
inventory,
|
||||
topology,
|
||||
@@ -29,7 +32,7 @@ impl<T: Topology> Maestro<T> {
|
||||
}
|
||||
|
||||
pub async fn initialize(inventory: Inventory, topology: T) -> Result<Self, InterpretError> {
|
||||
let instance = Self::new(inventory, topology);
|
||||
let instance = Self::new_without_initialization(inventory, topology);
|
||||
instance.prepare_topology().await?;
|
||||
Ok(instance)
|
||||
}
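    // Call-site sketch (illustrative, not part of the diff): prefer the initializing
    // constructor, which prepares the topology before returning.
    //
    //     let maestro = Maestro::initialize(inventory, topology).await?;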
|
||||
|
||||
59
harmony/src/domain/score_with_dep.rs
Normal file
@@ -0,0 +1,59 @@
|
||||
////////////////////
/// Working idea
///
trait ScoreWithDep<T> {
    fn create_interpret(&self) -> Box<dyn Interpret<T>>;
    fn name(&self) -> String;
    // Force T to impl Installer<TypeId> or something like that
    fn get_dependencies(&self) -> Vec<TypeId>;
}

struct PrometheusAlertScore;

impl<T> ScoreWithDep<T> for PrometheusAlertScore {
    fn create_interpret(&self) -> Box<dyn Interpret<T>> {
        todo!()
    }

    fn name(&self) -> String {
        todo!()
    }

    fn get_dependencies(&self) -> Vec<TypeId> {
        // We have to find a way to constrain this so that at compile time we are only
        // allowed to return TypeIds for types which can be installed by T.
        //
        // This means, for example, that T must implement HelmCommand if the
        // impl <T: HelmCommand> Installable<T> for KubePrometheus calls for HelmCommand.
        vec![TypeId::of::<KubePrometheus>()]
    }
}

trait Installable {}

struct KubePrometheus;

impl Installable for KubePrometheus {}

struct Maestro<T> {
    topology: T,
}

impl<T> Maestro<T> {
    fn execute_score(&self, score: &dyn ScoreWithDep<T>) {
        score.get_dependencies().iter().for_each(|dep| {
            self.topology.ensure_dependency_ready(dep);
        });
    }
}

struct TopologyWithDep {}

impl TopologyWithDep {
    fn ensure_dependency_ready(&self, type_id: TypeId) -> Result<(), String> {
        self.installer
    }
}
|
||||
@@ -1,11 +1,15 @@
|
||||
use async_trait::async_trait;
|
||||
use harmony_macros::ip;
|
||||
use harmony_types::net::MacAddress;
|
||||
use log::error;
|
||||
use log::info;
|
||||
|
||||
use crate::config::Config;
|
||||
use crate::executors::ExecutorError;
|
||||
use crate::interpret::InterpretError;
|
||||
use crate::interpret::Outcome;
|
||||
use crate::inventory::Inventory;
|
||||
use crate::topology::upgradeable::Upgradeable;
|
||||
|
||||
use super::DHCPStaticEntry;
|
||||
use super::DhcpServer;
|
||||
@@ -25,9 +29,12 @@ use super::TftpServer;
|
||||
use super::Topology;
|
||||
use super::Url;
|
||||
use super::k8s::K8sClient;
|
||||
use std::fmt::Debug;
|
||||
use std::net::IpAddr;
|
||||
use std::str::FromStr;
|
||||
use std::sync::Arc;
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct HAClusterTopology {
|
||||
pub domain_name: String,
|
||||
pub router: Arc<dyn Router>,
|
||||
@@ -46,12 +53,18 @@ pub struct HAClusterTopology {
|
||||
#[async_trait]
|
||||
impl Topology for HAClusterTopology {
|
||||
fn name(&self) -> &str {
|
||||
todo!()
|
||||
"HAClusterTopology"
|
||||
}
|
||||
async fn ensure_ready(&self) -> Result<Outcome, InterpretError> {
|
||||
        todo!("ensure_ready, not entirely sure what it should do here, probably something like verify that the hosts are reachable and all services are up and ready.")
        error!(
            "ensure_ready, not entirely sure what it should do here, probably something like verify that the hosts are reachable and all services are up and ready."
        );
|
||||
let config = Config::load().expect("couldn't load config");
|
||||
|
||||
if config.run_upgrades {
|
||||
self.upgrade(&Inventory::empty(), self).await?;
|
||||
}
|
||||
Ok(Outcome::success("for now do nothing".to_string()))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -251,6 +264,13 @@ impl Topology for DummyInfra {
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl<T: Topology> Upgradeable<T> for DummyInfra {
|
||||
async fn upgrade(&self, _inventory: &Inventory, _topology: &T) -> Result<(), InterpretError> {
|
||||
unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA)
|
||||
}
|
||||
}
|
||||
|
||||
const UNIMPLEMENTED_DUMMY_INFRA: &str = "This is a dummy infrastructure, no operation is supported";
|
||||
|
||||
impl Router for DummyInfra {
|
||||
@@ -417,3 +437,12 @@ impl DnsServer for DummyInfra {
|
||||
unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA)
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl<T: Topology> Upgradeable<T> for HAClusterTopology {
|
||||
async fn upgrade(&self, inventory: &Inventory, topology: &T) -> Result<(), InterpretError> {
|
||||
error!("TODO implement upgrades for all parts of the cluster");
|
||||
self.firewall.upgrade(inventory, topology).await?;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
@@ -4,6 +4,8 @@ use crate::{interpret::InterpretError, inventory::Inventory};
|
||||
|
||||
#[async_trait]
|
||||
pub trait Installable<T>: Send + Sync {
|
||||
async fn configure(&self, inventory: &Inventory, topology: &T) -> Result<(), InterpretError>;
|
||||
|
||||
async fn ensure_installed(
|
||||
&self,
|
||||
inventory: &Inventory,
|
||||
|
||||
@@ -1,18 +1,43 @@
|
||||
use derive_new::new;
|
||||
use k8s_openapi::{ClusterResourceScope, NamespaceResourceScope};
|
||||
use futures_util::StreamExt;
|
||||
use k8s_openapi::{
|
||||
ClusterResourceScope, NamespaceResourceScope,
|
||||
api::{apps::v1::Deployment, core::v1::Pod},
|
||||
};
|
||||
use kube::{
|
||||
Api, Client, Config, Error, Resource,
|
||||
api::{Patch, PatchParams},
|
||||
Client, Config, Error, Resource,
|
||||
api::{Api, AttachParams, ListParams, Patch, PatchParams, ResourceExt},
|
||||
config::{KubeConfigOptions, Kubeconfig},
|
||||
core::ErrorResponse,
|
||||
runtime::reflector::Lookup,
|
||||
};
|
||||
use kube::{api::DynamicObject, runtime::conditions};
|
||||
use kube::{
|
||||
api::{ApiResource, GroupVersionKind},
|
||||
runtime::wait::await_condition,
|
||||
};
|
||||
use log::{debug, error, trace};
|
||||
use serde::de::DeserializeOwned;
|
||||
use similar::{DiffableStr, TextDiff};
|
||||
|
||||
#[derive(new)]
|
||||
use crate::config::Config as HarmonyConfig;
|
||||
|
||||
#[derive(new, Clone)]
|
||||
pub struct K8sClient {
|
||||
client: Client,
|
||||
}
|
||||
|
||||
impl std::fmt::Debug for K8sClient {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
// This is a poor man's debug implementation for now as kube::Client does not provide much
|
||||
// useful information
|
||||
f.write_fmt(format_args!(
|
||||
"K8sClient {{ kube client using default namespace {} }}",
|
||||
self.client.default_namespace()
|
||||
))
|
||||
}
|
||||
}
|
||||
|
||||
impl K8sClient {
|
||||
pub async fn try_default() -> Result<Self, Error> {
|
||||
Ok(Self {
|
||||
@@ -20,6 +45,88 @@ impl K8sClient {
|
||||
})
|
||||
}
|
||||
|
||||
pub async fn wait_until_deployment_ready(
|
||||
&self,
|
||||
name: String,
|
||||
namespace: Option<&str>,
|
||||
timeout: Option<u64>,
|
||||
) -> Result<(), String> {
|
||||
let api: Api<Deployment>;
|
||||
|
||||
if let Some(ns) = namespace {
|
||||
api = Api::namespaced(self.client.clone(), ns);
|
||||
} else {
|
||||
api = Api::default_namespaced(self.client.clone());
|
||||
}
|
||||
|
||||
let establish = await_condition(api, name.as_str(), conditions::is_deployment_completed());
|
||||
        let t = timeout.unwrap_or(300);
        let res = tokio::time::timeout(std::time::Duration::from_secs(t), establish).await;

        if res.is_ok() {
            Ok(())
        } else {
            Err("timed out while waiting for deployment".to_string())
        }
|
||||
}
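    // Call-site sketch (illustrative names, not part of the diff): wait up to 60 seconds
    // for an "nginx" Deployment in the "web" namespace to finish rolling out.
    //
    //     client.wait_until_deployment_ready("nginx".to_string(), Some("web"), Some(60)).await?;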
|
||||
|
||||
/// Will execute a command in the first pod found that matches the label `app.kubernetes.io/name={name}`
|
||||
pub async fn exec_app(
|
||||
&self,
|
||||
name: String,
|
||||
namespace: Option<&str>,
|
||||
command: Vec<&str>,
|
||||
) -> Result<(), String> {
|
||||
let api: Api<Pod>;
|
||||
|
||||
if let Some(ns) = namespace {
|
||||
api = Api::namespaced(self.client.clone(), ns);
|
||||
} else {
|
||||
api = Api::default_namespaced(self.client.clone());
|
||||
}
|
||||
let pod_list = api
|
||||
.list(&ListParams::default().labels(format!("app.kubernetes.io/name={name}").as_str()))
|
||||
.await
|
||||
.expect("couldn't get list of pods");
|
||||
|
||||
let res = api
|
||||
.exec(
|
||||
pod_list
|
||||
.items
|
||||
.first()
|
||||
.expect("couldn't get pod")
|
||||
.name()
|
||||
.expect("couldn't get pod name")
|
||||
.into_owned()
|
||||
.as_str(),
|
||||
command,
|
||||
&AttachParams::default(),
|
||||
)
|
||||
.await;
|
||||
|
||||
match res {
|
||||
Err(e) => return Err(e.to_string()),
|
||||
Ok(mut process) => {
|
||||
let status = process
|
||||
.take_status()
|
||||
.expect("Couldn't get status")
|
||||
.await
|
||||
.expect("Couldn't unwrap status");
|
||||
|
||||
if let Some(s) = status.status {
|
||||
debug!("Status: {}", s);
|
||||
if s == "Success" {
|
||||
return Ok(());
|
||||
} else {
|
||||
return Err(s);
|
||||
}
|
||||
} else {
|
||||
return Err("Couldn't get inner status of pod exec".to_string());
|
||||
}
|
||||
}
|
||||
}
|
||||
}
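    // Call-site sketch (illustrative names, not part of the diff): run a one-off command
    // inside the first pod labeled app.kubernetes.io/name=myapp.
    //
    //     client.exec_app("myapp".to_string(), Some("myapp-ns"), vec!["sh", "-c", "ls /"]).await?;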
|
||||
|
||||
/// Apply a resource in namespace
|
||||
///
|
||||
/// See `kubectl apply` for more information on the expected behavior of this function
|
||||
@@ -48,8 +155,81 @@ impl K8sClient {
|
||||
.name
|
||||
.as_ref()
|
||||
.expect("K8s Resource should have a name");
|
||||
api.patch(name, &patch_params, &Patch::Apply(resource))
|
||||
.await
|
||||
|
||||
let config = HarmonyConfig::load().expect("couldn't load config");
|
||||
|
||||
if config.dry_run {
|
||||
match api.get(name).await {
|
||||
Ok(current) => {
|
||||
trace!("Received current value {current:#?}");
|
||||
// The resource exists, so we calculate and display a diff.
|
||||
println!("\nPerforming dry-run for resource: '{}'", name);
|
||||
let mut current_yaml = serde_yaml::to_value(¤t)
|
||||
.expect(&format!("Could not serialize current value : {current:#?}"));
|
||||
if current_yaml.is_mapping() && current_yaml.get("status").is_some() {
|
||||
let map = current_yaml.as_mapping_mut().unwrap();
|
||||
let removed = map.remove_entry("status");
|
||||
trace!("Removed status {:?}", removed);
|
||||
} else {
|
||||
trace!(
|
||||
"Did not find status entry for current object {}/{}",
|
||||
current.meta().namespace.as_ref().unwrap_or(&"".to_string()),
|
||||
current.meta().name.as_ref().unwrap_or(&"".to_string())
|
||||
);
|
||||
}
|
||||
let current_yaml = serde_yaml::to_string(¤t_yaml)
|
||||
.unwrap_or_else(|_| "Failed to serialize current resource".to_string());
|
||||
let new_yaml = serde_yaml::to_string(resource)
|
||||
.unwrap_or_else(|_| "Failed to serialize new resource".to_string());
|
||||
|
||||
if current_yaml == new_yaml {
|
||||
println!("No changes detected.");
|
||||
// Return the current resource state as there are no changes.
|
||||
return Ok(current);
|
||||
}
|
||||
|
||||
println!("Changes detected:");
|
||||
let diff = TextDiff::from_lines(¤t_yaml, &new_yaml);
|
||||
|
||||
// Iterate over the changes and print them in a git-like diff format.
|
||||
for change in diff.iter_all_changes() {
|
||||
let sign = match change.tag() {
|
||||
similar::ChangeTag::Delete => "-",
|
||||
similar::ChangeTag::Insert => "+",
|
||||
similar::ChangeTag::Equal => " ",
|
||||
};
|
||||
print!("{}{}", sign, change);
|
||||
}
|
||||
// In a dry run, we return the new resource state that would have been applied.
|
||||
Ok(resource.clone())
|
||||
}
|
||||
Err(Error::Api(ErrorResponse { code: 404, .. })) => {
|
||||
// The resource does not exist, so the "diff" is the entire new resource.
|
||||
println!("\nPerforming dry-run for new resource: '{}'", name);
|
||||
println!(
|
||||
"Resource does not exist. It would be created with the following content:"
|
||||
);
|
||||
let new_yaml = serde_yaml::to_string(resource)
|
||||
.unwrap_or_else(|_| "Failed to serialize new resource".to_string());
|
||||
|
||||
// Print each line of the new resource with a '+' prefix.
|
||||
for line in new_yaml.lines() {
|
||||
println!("+{}", line);
|
||||
}
|
||||
// In a dry run, we return the new resource state that would have been created.
|
||||
Ok(resource.clone())
|
||||
}
|
||||
Err(e) => {
|
||||
// Another API error occurred.
|
||||
error!("Failed to get resource '{}': {}", name, e);
|
||||
Err(e)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
return api
|
||||
.patch(name, &patch_params, &Patch::Apply(resource))
|
||||
.await;
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn apply_many<K>(&self, resource: &Vec<K>, ns: Option<&str>) -> Result<Vec<K>, Error>
|
||||
@@ -66,6 +246,70 @@ impl K8sClient {
|
||||
Ok(result)
|
||||
}
|
||||
|
||||
pub async fn apply_yaml_many(
|
||||
&self,
|
||||
yaml: &Vec<serde_yaml::Value>,
|
||||
ns: Option<&str>,
|
||||
) -> Result<(), Error> {
|
||||
for y in yaml.iter() {
|
||||
self.apply_yaml(y, ns).await?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn apply_yaml(
|
||||
&self,
|
||||
yaml: &serde_yaml::Value,
|
||||
ns: Option<&str>,
|
||||
) -> Result<(), Error> {
|
||||
let obj: DynamicObject = serde_yaml::from_value(yaml.clone()).expect("TODO do not unwrap");
|
||||
let name = obj.metadata.name.as_ref().expect("YAML must have a name");
|
||||
|
||||
let api_version = yaml
|
||||
.get("apiVersion")
|
||||
.expect("couldn't get apiVersion from YAML")
|
||||
.as_str()
|
||||
.expect("couldn't get apiVersion as str");
|
||||
let kind = yaml
|
||||
.get("kind")
|
||||
.expect("couldn't get kind from YAML")
|
||||
.as_str()
|
||||
.expect("couldn't get kind as str");
|
||||
|
||||
let split: Vec<&str> = api_version.splitn(2, "/").collect();
|
||||
let g = split[0];
|
||||
let v = split[1];
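            // NOTE: this assumes a group-qualified apiVersion such as "argoproj.io/v1alpha1"
            // (group "argoproj.io", version "v1alpha1"). A core-group value like "v1"
            // contains no "/" and split[1] would panic here as written.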
|
||||
|
||||
let gvk = GroupVersionKind::gvk(g, v, kind);
|
||||
let api_resource = ApiResource::from_gvk(&gvk);
|
||||
|
||||
let namespace = match ns {
|
||||
Some(n) => n,
|
||||
None => obj
|
||||
.metadata
|
||||
.namespace
|
||||
.as_ref()
|
||||
.expect("YAML must have a namespace"),
|
||||
};
|
||||
|
||||
// 5. Create a dynamic API client for this resource type.
|
||||
let api: Api<DynamicObject> =
|
||||
Api::namespaced_with(self.client.clone(), namespace, &api_resource);
|
||||
|
||||
// 6. Apply the object to the cluster using Server-Side Apply.
|
||||
// This will create the resource if it doesn't exist, or update it if it does.
|
||||
println!(
|
||||
            "Applying resource '{}' in namespace '{}'...",
|
||||
name, namespace
|
||||
);
|
||||
let patch_params = PatchParams::apply("harmony"); // Use a unique field manager name
|
||||
let result = api.patch(name, &patch_params, &Patch::Apply(&obj)).await?;
|
||||
|
||||
println!("Successfully applied '{}'.", result.name_any());
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub(crate) async fn from_kubeconfig(path: &str) -> Option<K8sClient> {
|
||||
let k = match Kubeconfig::read_from(path) {
|
||||
Ok(k) => k,
|
||||
|
||||
@@ -1,8 +1,10 @@
|
||||
use std::{process::Command, sync::Arc};
|
||||
|
||||
use async_trait::async_trait;
|
||||
use figment::{Figment, providers::Env};
|
||||
use inquire::Confirm;
|
||||
use log::{debug, info, warn};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use tokio::sync::OnceCell;
|
||||
|
||||
use crate::{
|
||||
@@ -15,27 +17,29 @@ use crate::{
|
||||
};
|
||||
|
||||
use super::{
|
||||
HelmCommand, K8sclient, Topology,
|
||||
DeploymentTarget, HelmCommand, K8sclient, MultiTargetTopology, Topology,
|
||||
k8s::K8sClient,
|
||||
tenant::{TenantConfig, TenantManager, k8s::K8sTenantManager},
|
||||
};
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
struct K8sState {
|
||||
client: Arc<K8sClient>,
|
||||
_source: K8sSource,
|
||||
message: String,
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
#[derive(Debug, Clone)]
|
||||
enum K8sSource {
|
||||
LocalK3d,
|
||||
Kubeconfig,
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct K8sAnywhereTopology {
|
||||
k8s_state: OnceCell<Option<K8sState>>,
|
||||
tenant_manager: OnceCell<K8sTenantManager>,
|
||||
config: K8sAnywhereConfig,
|
||||
k8s_state: Arc<OnceCell<Option<K8sState>>>,
|
||||
tenant_manager: Arc<OnceCell<K8sTenantManager>>,
|
||||
config: Arc<K8sAnywhereConfig>,
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
@@ -55,20 +59,29 @@ impl K8sclient for K8sAnywhereTopology {
|
||||
}
|
||||
}
|
||||
|
||||
impl Serialize for K8sAnywhereTopology {
|
||||
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
|
||||
where
|
||||
S: serde::Serializer,
|
||||
{
|
||||
todo!()
|
||||
}
|
||||
}
|
||||
|
||||
impl K8sAnywhereTopology {
|
||||
pub fn from_env() -> Self {
|
||||
Self {
|
||||
k8s_state: OnceCell::new(),
|
||||
tenant_manager: OnceCell::new(),
|
||||
config: K8sAnywhereConfig::from_env(),
|
||||
k8s_state: Arc::new(OnceCell::new()),
|
||||
tenant_manager: Arc::new(OnceCell::new()),
|
||||
config: Arc::new(K8sAnywhereConfig::from_env()),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn with_config(config: K8sAnywhereConfig) -> Self {
|
||||
Self {
|
||||
k8s_state: OnceCell::new(),
|
||||
tenant_manager: OnceCell::new(),
|
||||
config,
|
||||
k8s_state: Arc::new(OnceCell::new()),
|
||||
tenant_manager: Arc::new(OnceCell::new()),
|
||||
config: Arc::new(config),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -111,39 +124,47 @@ impl K8sAnywhereTopology {
|
||||
async fn try_get_or_install_k8s_client(&self) -> Result<Option<K8sState>, InterpretError> {
|
||||
let k8s_anywhere_config = &self.config;
|
||||
|
||||
if let Some(kubeconfig) = &k8s_anywhere_config.kubeconfig {
|
||||
debug!("Loading kubeconfig {kubeconfig}");
|
||||
match self.try_load_kubeconfig(&kubeconfig).await {
|
||||
Some(client) => {
|
||||
return Ok(Some(K8sState {
|
||||
client: Arc::new(client),
|
||||
_source: K8sSource::Kubeconfig,
|
||||
message: format!("Loaded k8s client from kubeconfig {kubeconfig}"),
|
||||
}));
|
||||
}
|
||||
None => {
|
||||
return Err(InterpretError::new(format!(
|
||||
"Failed to load kubeconfig from {kubeconfig}"
|
||||
)));
|
||||
// TODO this deserves some refactoring, it is becoming a bit hard to figure out
|
||||
// be careful when making modifications here
|
||||
if k8s_anywhere_config.use_local_k3d {
|
||||
info!("Using local k3d cluster because of use_local_k3d set to true");
|
||||
} else {
|
||||
if let Some(kubeconfig) = &k8s_anywhere_config.kubeconfig {
|
||||
debug!("Loading kubeconfig {kubeconfig}");
|
||||
match self.try_load_kubeconfig(&kubeconfig).await {
|
||||
Some(client) => {
|
||||
return Ok(Some(K8sState {
|
||||
client: Arc::new(client),
|
||||
_source: K8sSource::Kubeconfig,
|
||||
message: format!("Loaded k8s client from kubeconfig {kubeconfig}"),
|
||||
}));
|
||||
}
|
||||
None => {
|
||||
return Err(InterpretError::new(format!(
|
||||
"Failed to load kubeconfig from {kubeconfig}"
|
||||
)));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if k8s_anywhere_config.use_system_kubeconfig {
|
||||
debug!("Loading system kubeconfig");
|
||||
match self.try_load_system_kubeconfig().await {
|
||||
Some(_client) => todo!(),
|
||||
None => todo!(),
|
||||
if k8s_anywhere_config.use_system_kubeconfig {
|
||||
debug!("Loading system kubeconfig");
|
||||
match self.try_load_system_kubeconfig().await {
|
||||
Some(_client) => todo!(),
|
||||
None => todo!(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
info!("No kubernetes configuration found");
|
||||
info!("No kubernetes configuration found");
|
||||
}
|
||||
|
||||
if !k8s_anywhere_config.autoinstall {
|
||||
debug!("Autoinstall confirmation prompt");
|
||||
            let confirmation = Confirm::new("Harmony autoinstallation is not activated, do you wish to launch autoinstallation?")
|
||||
.with_default(false)
|
||||
.prompt()
|
||||
.expect("Unexpected prompt error");
|
||||
debug!("Autoinstall confirmation {confirmation}");
|
||||
|
||||
if !confirmation {
|
||||
warn!(
|
||||
@@ -184,8 +205,7 @@ impl K8sAnywhereTopology {
|
||||
let k8s_client = self.k8s_client().await?;
|
||||
Ok(K8sTenantManager::new(k8s_client))
|
||||
})
|
||||
.await
|
||||
.unwrap();
|
||||
.await?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
@@ -200,6 +220,7 @@ impl K8sAnywhereTopology {
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, Deserialize)]
|
||||
pub struct K8sAnywhereConfig {
|
||||
/// The path of the KUBECONFIG file that Harmony should use to interact with the Kubernetes
|
||||
/// cluster
|
||||
@@ -217,19 +238,38 @@ pub struct K8sAnywhereConfig {
|
||||
///
|
||||
/// When enabled, autoinstall will setup a K3D cluster on the localhost. https://k3d.io/stable/
|
||||
///
|
||||
/// Default: true
|
||||
/// Default: false
|
||||
pub autoinstall: bool,
|
||||
|
||||
/// Whether to use local k3d cluster.
|
||||
///
|
||||
/// Takes precedence over other options, useful to avoid messing up a remote cluster by mistake
|
||||
///
|
||||
/// default: true
|
||||
pub use_local_k3d: bool,
|
||||
pub profile: String,
|
||||
}
|
||||
|
||||
impl Default for K8sAnywhereConfig {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
kubeconfig: None,
|
||||
use_system_kubeconfig: false,
|
||||
autoinstall: false,
|
||||
// TODO harmony_profile should be managed at a more core level than this
|
||||
profile: "dev".to_string(),
|
||||
use_local_k3d: true,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl K8sAnywhereConfig {
|
||||
fn from_env() -> Self {
|
||||
Self {
|
||||
kubeconfig: std::env::var("KUBECONFIG").ok().map(|v| v.to_string()),
|
||||
use_system_kubeconfig: std::env::var("HARMONY_USE_SYSTEM_KUBECONFIG")
|
||||
.map_or_else(|_| false, |v| v.parse().ok().unwrap_or(false)),
|
||||
autoinstall: std::env::var("HARMONY_AUTOINSTALL")
|
||||
.map_or_else(|_| false, |v| v.parse().ok().unwrap_or(false)),
|
||||
}
|
||||
Figment::new()
|
||||
.merge(Env::prefixed("HARMONY_"))
|
||||
.merge(Env::raw().only(&["KUBECONFIG"]))
|
||||
.extract()
|
||||
.expect("couldn't load config from env")
|
||||
}
|
||||
}
|
||||
|
||||
@@ -263,6 +303,20 @@ impl Topology for K8sAnywhereTopology {
|
||||
}
|
||||
}
|
||||
|
||||
impl MultiTargetTopology for K8sAnywhereTopology {
|
||||
fn current_target(&self) -> DeploymentTarget {
|
||||
if self.config.use_local_k3d {
|
||||
return DeploymentTarget::LocalDev;
|
||||
}
|
||||
|
||||
match self.config.profile.to_lowercase().as_str() {
|
||||
"staging" => DeploymentTarget::Staging,
|
||||
"production" => DeploymentTarget::Production,
|
||||
_ => todo!("HARMONY_PROFILE must be set when use_local_k3d is not set"),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl HelmCommand for K8sAnywhereTopology {}
|
||||
|
||||
#[async_trait]
|
||||
@@ -272,4 +326,11 @@ impl TenantManager for K8sAnywhereTopology {
|
||||
.provision_tenant(config)
|
||||
.await
|
||||
}
|
||||
|
||||
async fn get_tenant_config(&self) -> Option<TenantConfig> {
|
||||
self.get_k8s_tenant_manager()
|
||||
.ok()?
|
||||
.get_tenant_config()
|
||||
.await
|
||||
}
|
||||
}
|
||||
|
||||
@@ -6,6 +6,7 @@ mod k8s_anywhere;
|
||||
mod localhost;
|
||||
pub mod oberservability;
|
||||
pub mod tenant;
|
||||
pub mod upgradeable;
|
||||
pub use k8s_anywhere::*;
|
||||
pub use localhost::*;
|
||||
pub mod k8s;
|
||||
@@ -62,6 +63,17 @@ pub trait Topology: Send + Sync {
|
||||
async fn ensure_ready(&self) -> Result<Outcome, InterpretError>;
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub enum DeploymentTarget {
|
||||
LocalDev,
|
||||
Staging,
|
||||
Production,
|
||||
}
|
||||
|
||||
pub trait MultiTargetTopology: Topology {
|
||||
fn current_target(&self) -> DeploymentTarget;
|
||||
}
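// Illustrative consumer sketch (not part of the diff): code that must behave differently
// per environment can branch on the topology's declared deployment target.
fn is_production<T: MultiTargetTopology>(topology: &T) -> bool {
    matches!(topology.current_target(), DeploymentTarget::Production)
}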
|
||||
|
||||
pub type IpAddress = IpAddr;
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
|
||||
@@ -2,9 +2,15 @@ use std::{net::Ipv4Addr, str::FromStr, sync::Arc};
|
||||
|
||||
use async_trait::async_trait;
|
||||
use harmony_types::net::MacAddress;
|
||||
use log::debug;
|
||||
use serde::Serialize;
|
||||
|
||||
use crate::executors::ExecutorError;
|
||||
use crate::{
|
||||
executors::ExecutorError,
|
||||
interpret::InterpretError,
|
||||
inventory::Inventory,
|
||||
topology::{Topology, upgradeable::Upgradeable},
|
||||
};
|
||||
|
||||
use super::{IpAddress, LogicalHost, k8s::K8sClient};
|
||||
|
||||
@@ -38,6 +44,15 @@ impl std::fmt::Debug for dyn Firewall {
|
||||
}
|
||||
}
|
||||
|
||||
// #[async_trait]
|
||||
// impl<T: Topology> Upgradeable<T> for dyn Firewall {
|
||||
// async fn upgrade(&self, inventory: &Inventory, topology: &T) -> Result<(), InterpretError> {
|
||||
// debug!("upgrading");
|
||||
// self.upgrade(inventory, topology).await?;
|
||||
// Ok(())
|
||||
// }
|
||||
// }
|
||||
|
||||
pub struct NetworkDomain {
|
||||
pub name: String,
|
||||
}
|
||||
|
||||
@@ -27,6 +27,7 @@ impl<S: AlertSender + Installable<T>, T: Topology> Interpret<T> for AlertingInte
|
||||
inventory: &Inventory,
|
||||
topology: &T,
|
||||
) -> Result<Outcome, InterpretError> {
|
||||
self.sender.configure(inventory, topology).await?;
|
||||
for receiver in self.receivers.iter() {
|
||||
receiver.install(&self.sender).await?;
|
||||
}
|
||||
|
||||
@@ -5,10 +5,9 @@ use crate::{
|
||||
topology::k8s::{ApplyStrategy, K8sClient},
|
||||
};
|
||||
use async_trait::async_trait;
|
||||
use derive_new::new;
|
||||
use k8s_openapi::{
|
||||
api::{
|
||||
core::v1::{Namespace, ResourceQuota},
|
||||
core::v1::{LimitRange, Namespace, ResourceQuota},
|
||||
networking::v1::{
|
||||
NetworkPolicy, NetworkPolicyEgressRule, NetworkPolicyIngressRule, NetworkPolicyPort,
|
||||
},
|
||||
@@ -19,12 +18,23 @@ use kube::Resource;
|
||||
use log::{debug, info, warn};
|
||||
use serde::de::DeserializeOwned;
|
||||
use serde_json::json;
|
||||
use tokio::sync::OnceCell;
|
||||
|
||||
use super::{TenantConfig, TenantManager};
|
||||
|
||||
#[derive(new)]
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct K8sTenantManager {
|
||||
k8s_client: Arc<K8sClient>,
|
||||
k8s_tenant_config: Arc<OnceCell<TenantConfig>>,
|
||||
}
|
||||
|
||||
impl K8sTenantManager {
|
||||
pub fn new(client: Arc<K8sClient>) -> Self {
|
||||
Self {
|
||||
k8s_client: client,
|
||||
k8s_tenant_config: Arc::new(OnceCell::new()),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl K8sTenantManager {
|
||||
@@ -112,8 +122,8 @@ impl K8sTenantManager {
|
||||
"requests.storage": format!("{:.3}Gi", config.resource_limits.storage_total_gb),
|
||||
"pods": "20",
|
||||
"services": "10",
|
||||
"configmaps": "30",
|
||||
"secrets": "30",
|
||||
"configmaps": "60",
|
||||
"secrets": "60",
|
||||
"persistentvolumeclaims": "15",
|
||||
"services.loadbalancers": "2",
|
||||
"services.nodeports": "5",
|
||||
@@ -132,70 +142,142 @@ impl K8sTenantManager {
|
||||
})
|
||||
}
|
||||
|
||||
fn build_limit_range(&self, config: &TenantConfig) -> Result<LimitRange, ExecutorError> {
|
||||
let limit_range = json!({
|
||||
"apiVersion": "v1",
|
||||
"kind": "LimitRange",
|
||||
"metadata": {
|
||||
"name": format!("{}-defaults", config.name),
|
||||
"labels": {
|
||||
"harmony.nationtech.io/tenant.id": config.id.to_string(),
|
||||
"harmony.nationtech.io/tenant.name": config.name,
|
||||
},
|
||||
"namespace": self.get_namespace_name(config),
|
||||
},
|
||||
"spec": {
|
||||
"limits": [
|
||||
{
|
||||
"type": "Container",
|
||||
"default": {
|
||||
"cpu": "500m",
|
||||
"memory": "500Mi"
|
||||
},
|
||||
"defaultRequest": {
|
||||
"cpu": "100m",
|
||||
"memory": "100Mi"
|
||||
},
|
||||
}
|
||||
]
|
||||
}
|
||||
});
|
||||
|
||||
serde_json::from_value(limit_range).map_err(|e| {
|
||||
ExecutorError::ConfigurationError(format!(
|
||||
"Could not build TenantManager LimitRange. {}",
|
||||
e
|
||||
))
|
||||
})
|
||||
}
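    // The LimitRange built above gives every container that omits explicit resources a
    // default limit of 500m CPU / 500Mi memory and a default request of 100m / 100Mi,
    // so tenant workloads are always accounted for by the ResourceQuota.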
|
||||
|
||||
fn build_network_policy(&self, config: &TenantConfig) -> Result<NetworkPolicy, ExecutorError> {
|
||||
let network_policy = json!({
|
||||
"apiVersion": "networking.k8s.io/v1",
|
||||
"kind": "NetworkPolicy",
|
||||
"metadata": {
|
||||
"name": format!("{}-network-policy", config.name),
|
||||
"namespace": self.get_namespace_name(config),
|
||||
},
|
||||
"spec": {
|
||||
"podSelector": {},
|
||||
"egress": [
|
||||
{ "to": [ {"podSelector": {}}]},
|
||||
{ "to":
|
||||
[
|
||||
{
|
||||
"podSelector": {},
|
||||
"namespaceSelector": {
|
||||
"matchLabels": {
|
||||
"kubernetes.io/metadata.name":"openshift-dns"
|
||||
}
|
||||
}
|
||||
},
|
||||
]
|
||||
},
|
||||
{ "to": [
|
||||
{
|
||||
"ipBlock": {
|
||||
|
||||
"cidr": "0.0.0.0/0",
|
||||
// See https://en.wikipedia.org/wiki/Reserved_IP_addresses
|
||||
"except": [
|
||||
"10.0.0.0/8",
|
||||
"172.16.0.0/12",
|
||||
"192.168.0.0/16",
|
||||
"192.0.0.0/24",
|
||||
"192.0.2.0/24",
|
||||
"192.88.99.0/24",
|
||||
"192.18.0.0/15",
|
||||
"198.51.100.0/24",
|
||||
"169.254.0.0/16",
|
||||
"203.0.113.0/24",
|
||||
"127.0.0.0/8",
|
||||
|
||||
// Not sure we should block this one as it is
|
||||
// used for multicast. But better block more than less.
|
||||
"224.0.0.0/4",
|
||||
"240.0.0.0/4",
|
||||
"100.64.0.0/10",
|
||||
"233.252.0.0/24",
|
||||
"0.0.0.0/8",
|
||||
],
|
||||
                        }
                    }
                ] },
                {
                    "to": [
                        { "podSelector": {} }
                    ]
                },
                {
                    "to": [
                        {
                            "podSelector": {},
                            "namespaceSelector": {
                                "matchLabels": {
                                    "kubernetes.io/metadata.name": "kube-system"
                                }
                            }
                        }
                    ]
                },
                {
                    "to": [
                        {
                            "podSelector": {},
                            "namespaceSelector": {
                                "matchLabels": {
                                    "kubernetes.io/metadata.name": "openshift-dns"
                                }
                            }
                        }
                    ]
                },
                {
                    "to": [
                        {
                            "ipBlock": {
                                "cidr": "10.43.0.1/32",
                            }
                        }
                    ]
                },
                {
                    "to": [
                        {
                            "ipBlock": {
                                "cidr": "172.23.0.0/16",
                            }
                        }
                    ]
                },
                {
                    "to": [
                        {
                            "ipBlock": {
                                "cidr": "0.0.0.0/0",
                                "except": [
                                    "10.0.0.0/8",
                                    "172.16.0.0/12",
                                    "192.168.0.0/16",
                                    "192.0.0.0/24",
                                    "192.0.2.0/24",
                                    "192.88.99.0/24",
                                    "192.18.0.0/15",
                                    "198.51.100.0/24",
                                    "169.254.0.0/16",
                                    "203.0.113.0/24",
                                    "127.0.0.0/8",
                                    "224.0.0.0/4",
                                    "240.0.0.0/4",
                                    "100.64.0.0/10",
                                    "233.252.0.0/24",
                                    "0.0.0.0/8"
                                ]
                            }
                        }
                    ]
                }
            ],
            "ingress": [
                { "from": [ {"podSelector": {}}]}
                {
                    "from": [
                        { "podSelector": {} }
                    ]
                }
            ],
            "policyTypes": [
                "Ingress", "Egress",
                "Ingress",
                "Egress"
|
||||
]
|
||||
}
|
||||
});
|
||||
|
||||
let mut network_policy: NetworkPolicy =
|
||||
serde_json::from_value(network_policy).map_err(|e| {
|
||||
ExecutorError::ConfigurationError(format!(
|
||||
@@ -219,8 +301,29 @@ impl K8sTenantManager {
|
||||
})
|
||||
})
|
||||
.collect();
|
||||
let ports: Option<Vec<NetworkPolicyPort>> =
|
||||
c.1.as_ref().map(|spec| match &spec.data {
|
||||
super::PortSpecData::SinglePort(port) => vec![NetworkPolicyPort {
|
||||
port: Some(IntOrString::Int(port.clone().into())),
|
||||
..Default::default()
|
||||
}],
|
||||
super::PortSpecData::PortRange(start, end) => vec![NetworkPolicyPort {
|
||||
port: Some(IntOrString::Int(start.clone().into())),
|
||||
end_port: Some(end.clone().into()),
|
||||
protocol: None, // Not currently supported by Harmony
|
||||
}],
|
||||
|
||||
super::PortSpecData::ListOfPorts(items) => items
|
||||
.iter()
|
||||
.map(|i| NetworkPolicyPort {
|
||||
port: Some(IntOrString::Int(i.clone().into())),
|
||||
..Default::default()
|
||||
})
|
||||
.collect(),
|
||||
});
|
||||
let rule = serde_json::from_value::<NetworkPolicyIngressRule>(json!({
|
||||
"from": cidr_list
|
||||
"from": cidr_list,
|
||||
"ports": ports,
|
||||
}))
|
||||
.map_err(|e| {
|
||||
ExecutorError::ConfigurationError(format!(
|
||||
@@ -298,6 +401,9 @@ impl K8sTenantManager {
|
||||
|
||||
Ok(network_policy)
|
||||
}
|
||||
fn store_config(&self, config: &TenantConfig) {
|
||||
let _ = self.k8s_tenant_config.set(config.clone());
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
@@ -306,6 +412,7 @@ impl TenantManager for K8sTenantManager {
|
||||
let namespace = self.build_namespace(config)?;
|
||||
let resource_quota = self.build_resource_quota(config)?;
|
||||
let network_policy = self.build_network_policy(config)?;
|
||||
let resource_limit_range = self.build_limit_range(config)?;
|
||||
|
||||
self.ensure_constraints(&namespace)?;
|
||||
|
||||
@@ -315,6 +422,9 @@ impl TenantManager for K8sTenantManager {
|
||||
debug!("Creating resource_quota for tenant {}", config.name);
|
||||
self.apply_resource(resource_quota, config).await?;
|
||||
|
||||
debug!("Creating limit_range for tenant {}", config.name);
|
||||
self.apply_resource(resource_limit_range, config).await?;
|
||||
|
||||
debug!("Creating network_policy for tenant {}", config.name);
|
||||
self.apply_resource(network_policy, config).await?;
|
||||
|
||||
@@ -322,6 +432,10 @@ impl TenantManager for K8sTenantManager {
|
||||
            "Successfully provisioned K8s tenant id {} name {}",
|
||||
config.id, config.name
|
||||
);
|
||||
self.store_config(config);
|
||||
Ok(())
|
||||
}
|
||||
async fn get_tenant_config(&self) -> Option<TenantConfig> {
|
||||
self.k8s_tenant_config.get().cloned()
|
||||
}
|
||||
}
|
||||
|
||||
@@ -15,4 +15,6 @@ pub trait TenantManager {
|
||||
/// # Arguments
|
||||
/// * `config`: The desired configuration for the new tenant.
|
||||
async fn provision_tenant(&self, config: &TenantConfig) -> Result<(), ExecutorError>;
|
||||
|
||||
async fn get_tenant_config(&self) -> Option<TenantConfig>;
|
||||
}
|
||||
|
||||
8
harmony/src/domain/topology/upgradeable.rs
Normal file
@@ -0,0 +1,8 @@
|
||||
use async_trait::async_trait;
|
||||
|
||||
use crate::{interpret::InterpretError, inventory::Inventory};
|
||||
|
||||
#[async_trait]
|
||||
pub trait Upgradeable<T>: Send + Sync {
|
||||
async fn upgrade(&self, inventory: &Inventory, topology: &T) -> Result<(), InterpretError>;
|
||||
}
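// Minimal implementor sketch (hypothetical type, not part of the diff): a component
// with nothing to upgrade can still participate in a cluster-wide upgrade pass.
struct NoopUpgrade;

#[async_trait]
impl<T: Send + Sync> Upgradeable<T> for NoopUpgrade {
    async fn upgrade(&self, _inventory: &Inventory, _topology: &T) -> Result<(), InterpretError> {
        // Nothing to upgrade; report success.
        Ok(())
    }
}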
|
||||
@@ -7,13 +7,18 @@ mod management;
|
||||
mod tftp;
|
||||
use std::sync::Arc;
|
||||
|
||||
use async_trait::async_trait;
|
||||
pub use management::*;
|
||||
use opnsense_config_xml::Host;
|
||||
use tokio::sync::RwLock;
|
||||
|
||||
use crate::{
|
||||
executors::ExecutorError,
|
||||
topology::{IpAddress, LogicalHost},
|
||||
interpret::InterpretError,
|
||||
inventory::Inventory,
|
||||
modules::opnsense::OPNSenseLaunchUpgrade,
|
||||
score::Score,
|
||||
topology::{IpAddress, LogicalHost, Topology, upgradeable::Upgradeable},
|
||||
};
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
@@ -49,3 +54,17 @@ impl OPNSenseFirewall {
|
||||
.map_err(|e| ExecutorError::UnexpectedError(e.to_string()))
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl<T: Topology> Upgradeable<T> for OPNSenseFirewall {
|
||||
async fn upgrade(&self, inventory: &Inventory, topology: &T) -> Result<(), InterpretError> {
|
||||
OPNSenseLaunchUpgrade {
|
||||
opnsense: self.get_opnsense_config(),
|
||||
}
|
||||
.create_interpret()
|
||||
.execute(inventory, topology)
|
||||
.await?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
42
harmony/src/modules/application/feature.rs
Normal file
@@ -0,0 +1,42 @@
|
||||
use async_trait::async_trait;
|
||||
use serde::Serialize;
|
||||
|
||||
use crate::topology::Topology;
|
||||
|
||||
/// An ApplicationFeature provided by harmony, such as Backups, Monitoring, MultisiteAvailability,
|
||||
/// ContinuousIntegration, ContinuousDelivery
|
||||
#[async_trait]
|
||||
pub trait ApplicationFeature<T: Topology>:
|
||||
std::fmt::Debug + Send + Sync + ApplicationFeatureClone<T>
|
||||
{
|
||||
async fn ensure_installed(&self, topology: &T) -> Result<(), String>;
|
||||
fn name(&self) -> String;
|
||||
}
|
||||
|
||||
trait ApplicationFeatureClone<T: Topology> {
|
||||
fn clone_box(&self) -> Box<dyn ApplicationFeature<T>>;
|
||||
}
|
||||
|
||||
impl<A, T: Topology> ApplicationFeatureClone<T> for A
|
||||
where
|
||||
A: ApplicationFeature<T> + Clone + 'static,
|
||||
{
|
||||
fn clone_box(&self) -> Box<dyn ApplicationFeature<T>> {
|
||||
Box::new(self.clone())
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: Topology> Serialize for Box<dyn ApplicationFeature<T>> {
|
||||
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
|
||||
where
|
||||
S: serde::Serializer,
|
||||
{
|
||||
todo!()
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: Topology> Clone for Box<dyn ApplicationFeature<T>> {
|
||||
fn clone(&self) -> Self {
|
||||
self.clone_box()
|
||||
}
|
||||
}
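// Why the ApplicationFeatureClone indirection (illustrative note, not part of the diff):
// Clone is not object safe, so Box<dyn ApplicationFeature<T>> cannot require it directly;
// the blanket impl above routes cloning through clone_box, which makes this compile:
fn duplicate<T: Topology>(feature: &Box<dyn ApplicationFeature<T>>) -> Box<dyn ApplicationFeature<T>> {
    feature.clone()
}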
|
||||
354
harmony/src/modules/application/features/argo_types.rs
Normal file
@@ -0,0 +1,354 @@
|
||||
use log::debug;
|
||||
use serde::Serialize;
|
||||
use serde_with::skip_serializing_none;
|
||||
use serde_yaml::Value;
|
||||
|
||||
use crate::modules::application::features::CDApplicationConfig;
|
||||
|
||||
#[skip_serializing_none]
|
||||
#[derive(Clone, Debug, Serialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct Helm {
|
||||
pub pass_credentials: Option<bool>,
|
||||
pub parameters: Vec<Value>,
|
||||
pub file_parameters: Vec<Value>,
|
||||
pub release_name: Option<String>,
|
||||
pub value_files: Vec<String>,
|
||||
pub ignore_missing_value_files: Option<bool>,
|
||||
pub values: Option<String>,
|
||||
pub values_object: Option<Value>,
|
||||
pub skip_crds: Option<bool>,
|
||||
pub skip_schema_validation: Option<bool>,
|
||||
pub version: Option<String>,
|
||||
pub kube_version: Option<String>,
|
||||
pub api_versions: Vec<String>,
|
||||
pub namespace: Option<String>,
|
||||
}
|
||||
|
||||
#[skip_serializing_none]
|
||||
#[derive(Clone, Debug, Serialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct Source {
|
||||
    // Using a String here because the Url type enforces a URL scheme, but Helm, ArgoCD,
    // etc. do not require one, and that can be counterproductive: the only way I've found
    // to get OCI working is to use no scheme at all rather than oci://.
|
||||
#[serde(rename = "repoURL")]
|
||||
pub repo_url: String,
|
||||
pub target_revision: Option<String>,
|
||||
pub chart: String,
|
||||
pub helm: Helm,
|
||||
pub path: String,
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, Serialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct Automated {
|
||||
pub prune: bool,
|
||||
pub self_heal: bool,
|
||||
pub allow_empty: bool,
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, Serialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct Backoff {
|
||||
pub duration: String,
|
||||
pub factor: u32,
|
||||
pub max_duration: String,
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, Serialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct Retry {
|
||||
pub limit: u32,
|
||||
pub backoff: Backoff,
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, Serialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct SyncPolicy {
|
||||
pub automated: Automated,
|
||||
pub sync_options: Vec<String>,
|
||||
pub retry: Retry,
|
||||
}
|
||||
|
||||
#[skip_serializing_none]
|
||||
#[derive(Clone, Debug, Serialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct ArgoApplication {
|
||||
pub name: String,
|
||||
pub namespace: Option<String>,
|
||||
pub project: String,
|
||||
pub source: Source,
|
||||
pub sync_policy: SyncPolicy,
|
||||
pub revision_history_limit: u32,
|
||||
}
|
||||
|
||||
impl Default for ArgoApplication {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
name: Default::default(),
|
||||
namespace: Default::default(),
|
||||
project: Default::default(),
|
||||
source: Source {
|
||||
repo_url: "http://asdf".to_string(),
|
||||
target_revision: None,
|
||||
chart: "".to_string(),
|
||||
helm: Helm {
|
||||
pass_credentials: None,
|
||||
parameters: vec![],
|
||||
file_parameters: vec![],
|
||||
release_name: None,
|
||||
value_files: vec![],
|
||||
ignore_missing_value_files: None,
|
||||
values: None,
|
||||
values_object: None,
|
||||
skip_crds: None,
|
||||
skip_schema_validation: None,
|
||||
version: None,
|
||||
kube_version: None,
|
||||
api_versions: vec![],
|
||||
namespace: None,
|
||||
},
|
||||
path: "".to_string(),
|
||||
},
|
||||
sync_policy: SyncPolicy {
|
||||
automated: Automated {
|
||||
prune: false,
|
||||
self_heal: false,
|
||||
allow_empty: false,
|
||||
},
|
||||
sync_options: vec![],
|
||||
retry: Retry {
|
||||
limit: 5,
|
||||
backoff: Backoff {
|
||||
duration: "5s".to_string(),
|
||||
factor: 2,
|
||||
max_duration: "3m".to_string(),
|
||||
},
|
||||
},
|
||||
},
|
||||
revision_history_limit: 10,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl From<CDApplicationConfig> for ArgoApplication {
|
||||
fn from(value: CDApplicationConfig) -> Self {
|
||||
Self {
|
||||
name: value.name,
|
||||
namespace: Some(value.namespace),
|
||||
project: "default".to_string(),
|
||||
source: Source {
|
||||
repo_url: value.helm_chart_repo_url,
|
||||
target_revision: Some(value.version.to_string()),
|
||||
chart: value.helm_chart_name.clone(),
|
||||
path: value.helm_chart_name,
|
||||
helm: Helm {
|
||||
pass_credentials: None,
|
||||
parameters: vec![],
|
||||
file_parameters: vec![],
|
||||
release_name: None,
|
||||
value_files: vec![],
|
||||
ignore_missing_value_files: None,
|
||||
values: None,
|
||||
values_object: value.values_overrides,
|
||||
skip_crds: None,
|
||||
skip_schema_validation: None,
|
||||
version: None,
|
||||
kube_version: None,
|
||||
api_versions: vec![],
|
||||
namespace: None,
|
||||
},
|
||||
},
|
||||
sync_policy: SyncPolicy {
|
||||
automated: Automated {
|
||||
prune: false,
|
||||
self_heal: false,
|
||||
allow_empty: true,
|
||||
},
|
||||
sync_options: vec![],
|
||||
retry: Retry {
|
||||
limit: 5,
|
||||
backoff: Backoff {
|
||||
duration: "5s".to_string(),
|
||||
factor: 2,
|
||||
max_duration: "3m".to_string(),
|
||||
},
|
||||
},
|
||||
},
|
||||
..Self::default()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl ArgoApplication {
|
||||
pub fn to_yaml(&self) -> serde_yaml::Value {
|
||||
let name = &self.name;
|
||||
let namespace = if let Some(ns) = self.namespace.as_ref() {
|
||||
&ns
|
||||
} else {
|
||||
"argocd"
|
||||
};
|
||||
let project = &self.project;
|
||||
let source = &self.source;
|
||||
|
||||
let yaml_str = format!(
|
||||
r#"
|
||||
apiVersion: argoproj.io/v1alpha1
|
||||
kind: Application
|
||||
metadata:
|
||||
name: {name}
|
||||
# You'll usually want to add your resources to the argocd namespace.
|
||||
namespace: {namespace}
|
||||
spec:
|
||||
# The project the application belongs to.
|
||||
project: {project}
|
||||
|
||||
# Destination cluster and namespace to deploy the application
|
||||
destination:
|
||||
# cluster API URL
|
||||
server: https://kubernetes.default.svc
|
||||
# or cluster name
|
||||
# name: in-cluster
|
||||
# The namespace will only be set for namespace-scoped resources that have not set a value for .metadata.namespace
|
||||
namespace: {namespace}
|
||||
|
||||
"#
|
||||
);
|
||||
|
||||
let mut yaml_value: Value =
|
||||
serde_yaml::from_str(yaml_str.as_str()).expect("couldn't parse string to YAML");
|
||||
|
||||
let spec = yaml_value
|
||||
.get_mut("spec")
|
||||
.expect("couldn't get spec from yaml")
|
||||
.as_mapping_mut()
|
||||
.expect("couldn't unwrap spec as mutable mapping");
|
||||
|
||||
let source =
|
||||
serde_yaml::to_value(&self.source).expect("couldn't serialize source to value");
|
||||
let sync_policy = serde_yaml::to_value(&self.sync_policy)
|
||||
.expect("couldn't serialize sync_policy to value");
|
||||
let revision_history_limit = serde_yaml::to_value(&self.revision_history_limit)
|
||||
.expect("couldn't serialize revision_history_limit to value");
|
||||
|
||||
spec.insert(
|
||||
serde_yaml::to_value("source").expect("string to value failed"),
|
||||
source,
|
||||
);
|
||||
spec.insert(
|
||||
serde_yaml::to_value("syncPolicy").expect("string to value failed"),
|
||||
sync_policy,
|
||||
);
|
||||
spec.insert(
|
||||
serde_yaml::to_value("revisionHistoryLimit")
|
||||
.expect("couldn't convert str to yaml value"),
|
||||
revision_history_limit,
|
||||
);
|
||||
|
||||
debug!("spec: {}", serde_yaml::to_string(spec).unwrap());
|
||||
debug!(
|
||||
"entire yaml_value: {}",
|
||||
serde_yaml::to_string(&yaml_value).unwrap()
|
||||
);
|
||||
|
||||
yaml_value
|
||||
}
|
||||
}
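// Usage sketch (illustrative, not part of the diff): render the generated manifest
// for inspection.
//
//     let manifest = serde_yaml::to_string(&app.to_yaml()).expect("valid YAML");
//     println!("{manifest}");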
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use pretty_assertions::assert_eq;
|
||||
|
||||
use crate::modules::application::features::{
|
||||
ArgoApplication, Automated, Backoff, Helm, Retry, Source, SyncPolicy,
|
||||
};
|
||||
|
||||
#[test]
|
||||
fn test_argo_application_to_yaml_happy_path() {
|
||||
let app = ArgoApplication {
|
||||
name: "test".to_string(),
|
||||
namespace: Some("test-ns".to_string()),
|
||||
project: "test-project".to_string(),
|
||||
source: Source {
|
||||
repo_url: "http://test".to_string(),
|
||||
target_revision: None,
|
||||
chart: "test-chart".to_string(),
|
||||
helm: Helm {
|
||||
pass_credentials: None,
|
||||
parameters: vec![],
|
||||
file_parameters: vec![],
|
||||
                    release_name: Some("test-release-name".to_string()),
|
||||
value_files: vec![],
|
||||
ignore_missing_value_files: None,
|
||||
values: None,
|
||||
values_object: None,
|
||||
skip_crds: None,
|
||||
skip_schema_validation: None,
|
||||
version: None,
|
||||
kube_version: None,
|
||||
api_versions: vec![],
|
||||
namespace: None,
|
||||
},
|
||||
path: "".to_string(),
|
||||
},
|
||||
sync_policy: SyncPolicy {
|
||||
automated: Automated {
|
||||
prune: false,
|
||||
self_heal: false,
|
||||
allow_empty: false,
|
||||
},
|
||||
sync_options: vec![],
|
||||
retry: Retry {
|
||||
limit: 5,
|
||||
backoff: Backoff {
|
||||
duration: "5s".to_string(),
|
||||
factor: 2,
|
||||
max_duration: "3m".to_string(),
|
||||
},
|
||||
},
|
||||
},
|
||||
revision_history_limit: 10,
|
||||
};
|
||||
|
||||
let expected_yaml_output = r#"apiVersion: argoproj.io/v1alpha1
|
||||
kind: Application
|
||||
metadata:
|
||||
name: test
|
||||
namespace: test-ns
|
||||
spec:
|
||||
project: test-project
|
||||
destination:
|
||||
server: https://kubernetes.default.svc
|
||||
namespace: test-ns
|
||||
source:
|
||||
repoURL: http://test
|
||||
chart: test-chart
|
||||
helm:
|
||||
parameters: []
|
||||
fileParameters: []
|
||||
    releaseName: test-release-name
|
||||
valueFiles: []
|
||||
apiVersions: []
|
||||
path: ''
|
||||
syncPolicy:
|
||||
automated:
|
||||
prune: false
|
||||
selfHeal: false
|
||||
allowEmpty: false
|
||||
syncOptions: []
|
||||
retry:
|
||||
limit: 5
|
||||
backoff:
|
||||
duration: 5s
|
||||
factor: 2
|
||||
maxDuration: 3m
|
||||
revisionHistoryLimit: 10"#;
|
||||
|
||||
assert_eq!(
|
||||
expected_yaml_output.trim(),
|
||||
serde_yaml::to_string(&app.clone().to_yaml())
|
||||
.unwrap()
|
||||
.trim()
|
||||
);
|
||||
}
|
||||
}
|
||||
229
harmony/src/modules/application/features/continuous_delivery.rs
Normal file
@@ -0,0 +1,229 @@
|
||||
use std::{io::Write, process::Command, sync::Arc};
|
||||
|
||||
use async_trait::async_trait;
|
||||
use log::{error, info};
|
||||
use serde_yaml::Value;
|
||||
use tempfile::NamedTempFile;
|
||||
|
||||
use crate::{
|
||||
config::Config,
|
||||
data::Version,
|
||||
inventory::Inventory,
|
||||
modules::application::{
|
||||
Application, ApplicationFeature, HelmPackage, OCICompliant,
|
||||
features::{ArgoApplication, ArgoHelmScore},
|
||||
},
|
||||
score::Score,
|
||||
topology::{DeploymentTarget, HelmCommand, K8sclient, MultiTargetTopology, Topology},
|
||||
};
|
||||
|
||||
/// ContinuousDelivery in Harmony provides this functionality :
|
||||
///
|
||||
/// - **Package** the application
|
||||
/// - **Push** to an artifact registry
|
||||
/// - **Deploy** to a testing environment
|
||||
/// - **Deploy** to a production environment
|
||||
///
|
||||
/// It is intended to be used as an application feature passed down to an ApplicationInterpret. For
|
||||
/// example :
|
||||
///
|
||||
/// ```rust,ignore
|
||||
/// let app = RustApplicationScore {
|
||||
/// name: "My Rust App".to_string(),
|
||||
/// features: vec![ContinuousDelivery::default()],
|
||||
/// };
|
||||
/// ```
|
||||
///
|
||||
/// *Note :*
|
||||
///
|
||||
/// By default, the Harmony Opinionated Pipeline is built using these technologies :
|
||||
///
|
||||
/// - Gitea Action (executes pipeline steps)
|
||||
/// - Docker to build an OCI container image
|
||||
/// - Helm chart to package Kubernetes resources
|
||||
/// - Harbor as artifact registry
|
||||
/// - ArgoCD to install/upgrade/rollback/inspect k8s resources
|
||||
/// - Kubernetes for runtime orchestration
|
||||
#[derive(Debug, Default, Clone)]
|
||||
pub struct ContinuousDelivery<A: OCICompliant + HelmPackage> {
|
||||
pub application: Arc<A>,
|
||||
}
|
||||
|
||||
impl<A: OCICompliant + HelmPackage> ContinuousDelivery<A> {
|
||||
async fn deploy_to_local_k3d(
|
||||
&self,
|
||||
app_name: String,
|
||||
chart_url: String,
|
||||
image_name: String,
|
||||
) -> Result<(), String> {
|
||||
let config = Config::load().expect("couldn't load config");
|
||||
|
||||
error!(
|
||||
"FIXME This works only with local k3d installations, which is fine only for current demo purposes. We assume usage of K8sAnywhereTopology"
|
||||
);
|
||||
|
||||
error!("TODO hardcoded k3d bin path is wrong");
|
||||
let k3d_bin_path = config.data_dir.join("k3d").join("k3d");
|
||||
// --- 1. Import the container image into the k3d cluster ---
|
||||
info!(
|
||||
"Importing image '{}' into k3d cluster 'harmony'",
|
||||
image_name
|
||||
);
|
||||
let import_output = Command::new(&k3d_bin_path)
|
||||
.args(["image", "import", &image_name, "--cluster", "harmony"])
|
||||
.output()
|
||||
.map_err(|e| format!("Failed to execute k3d image import: {}", e))?;
|
||||
|
||||
if !import_output.status.success() {
|
||||
return Err(format!(
|
||||
"Failed to import image to k3d: {}",
|
||||
String::from_utf8_lossy(&import_output.stderr)
|
||||
));
|
||||
}
|
||||
|
||||
// --- 2. Get the kubeconfig for the k3d cluster and write it to a temp file ---
|
||||
info!("Retrieving kubeconfig for k3d cluster 'harmony'");
|
||||
let kubeconfig_output = Command::new(&k3d_bin_path)
|
||||
.args(["kubeconfig", "get", "harmony"])
|
||||
.output()
|
||||
.map_err(|e| format!("Failed to execute k3d kubeconfig get: {}", e))?;
|
||||
|
||||
if !kubeconfig_output.status.success() {
|
||||
return Err(format!(
|
||||
"Failed to get kubeconfig from k3d: {}",
|
||||
String::from_utf8_lossy(&kubeconfig_output.stderr)
|
||||
));
|
||||
}
|
||||
|
||||
let mut temp_kubeconfig = NamedTempFile::new()
|
||||
.map_err(|e| format!("Failed to create temp file for kubeconfig: {}", e))?;
|
||||
temp_kubeconfig
|
||||
.write_all(&kubeconfig_output.stdout)
|
||||
.map_err(|e| format!("Failed to write to temp kubeconfig file: {}", e))?;
|
||||
let kubeconfig_path = temp_kubeconfig.path().to_str().unwrap();
|
||||
|
||||
// --- 3. Install or upgrade the Helm chart in the cluster ---
|
||||
info!(
|
||||
"Deploying Helm chart '{}' to namespace '{}'",
|
||||
chart_url, app_name
|
||||
);
|
||||
let release_name = app_name.to_lowercase(); // Helm release names are often lowercase
|
||||
let helm_output = Command::new("helm")
|
||||
.args([
|
||||
"upgrade",
|
||||
"--install",
|
||||
&release_name,
|
||||
&chart_url,
|
||||
"--namespace",
|
||||
&app_name,
|
||||
"--create-namespace",
|
||||
"--wait", // Wait for the deployment to be ready
|
||||
"--kubeconfig",
|
||||
kubeconfig_path,
|
||||
])
|
||||
.spawn()
|
||||
.map_err(|e| format!("Failed to execute helm upgrade: {}", e))?
|
||||
.wait_with_output()
|
||||
.map_err(|e| format!("Failed to execute helm upgrade: {}", e))?;
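        // NOTE: spawn() inherits stdout/stderr from the parent process, so
        // helm_output.stderr below will be empty; capturing the error text would
        // require Command::output() instead.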
|
||||
|
||||
if !helm_output.status.success() {
|
||||
return Err(format!(
|
||||
"Failed to deploy Helm chart: {}",
|
||||
String::from_utf8_lossy(&helm_output.stderr)
|
||||
));
|
||||
}
|
||||
|
||||
info!("Successfully deployed '{}' to local k3d cluster.", app_name);
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl<
|
||||
A: OCICompliant + HelmPackage + Clone + 'static,
|
||||
T: Topology + HelmCommand + MultiTargetTopology + K8sclient + 'static,
|
||||
> ApplicationFeature<T> for ContinuousDelivery<A>
|
||||
{
|
||||
async fn ensure_installed(&self, topology: &T) -> Result<(), String> {
|
||||
let image = self.application.image_name();
|
||||
|
||||
// TODO
|
||||
error!(
|
||||
"TODO reverse helm chart packaging and docker image build. I put helm package first for faster iterations"
|
||||
);
|
||||
|
||||
// TODO Write CI/CD workflow files
|
||||
        // we can autodetect the CI type using the remote url (default to GitHub Actions
        // for a GitHub url, etc.)
|
||||
// Or ask for it when unknown
|
||||
|
||||
let helm_chart = self.application.build_push_helm_package(&image).await?;
|
||||
info!("Pushed new helm chart {helm_chart}");
|
||||
|
||||
error!("TODO Make building image configurable/skippable");
|
||||
let image = self.application.build_push_oci_image().await?;
|
||||
info!("Pushed new docker image {image}");
|
||||
|
||||
info!("Installing ContinuousDelivery feature");
|
||||
// TODO this is a temporary hack for demo purposes, the deployment target should be driven
|
||||
// by the topology only and we should not have to know how to perform tasks like this for
|
||||
// which the topology should be responsible.
|
||||
//
|
||||
// That said, this will require some careful architectural decisions, since the concept of
|
||||
// deployment targets / profiles is probably a layer of complexity that we won't be
|
||||
// completely able to avoid
|
||||
//
|
||||
        // I'll try something for now that must be thought through later: add a deployment_profile
|
||||
// function to the topology trait that returns a profile, then anybody who needs it can
|
||||
// access it. This forces every Topology to understand the concept of targets though... So
|
||||
// instead I'll create a new Capability which is MultiTargetTopology and we'll see how it
|
||||
// goes. It still does not feel right though.
|
||||
match topology.current_target() {
|
||||
DeploymentTarget::LocalDev => {
|
||||
self.deploy_to_local_k3d(self.application.name(), helm_chart, image)
|
||||
.await?;
|
||||
}
|
||||
target => {
|
||||
info!("Deploying to target {target:?}");
|
||||
let score = ArgoHelmScore {
|
||||
namespace: "harmonydemo-staging".to_string(),
|
||||
openshift: false,
|
||||
domain: "argo.harmonydemo.apps.st.mcd".to_string(),
|
||||
argo_apps: vec![ArgoApplication::from(CDApplicationConfig {
|
||||
// helm pull oci://hub.nationtech.io/harmony/harmony-example-rust-webapp-chart --version 0.1.0
|
||||
version: Version::from("0.1.0").unwrap(),
|
||||
helm_chart_repo_url: "hub.nationtech.io/harmony".to_string(),
|
||||
helm_chart_name: "harmony-example-rust-webapp-chart".to_string(),
|
||||
values_overrides: None,
|
||||
name: "harmony-demo-rust-webapp".to_string(),
|
||||
namespace: "harmonydemo-staging".to_string(),
|
||||
})],
|
||||
};
|
||||
score
|
||||
.create_interpret()
|
||||
.execute(&Inventory::empty(), topology)
|
||||
.await
|
||||
.unwrap();
|
||||
}
|
||||
};
|
||||
Ok(())
|
||||
}
|
||||
fn name(&self) -> String {
|
||||
"ContinuousDelivery".to_string()
|
||||
}
|
||||
}
|
||||
|
||||
/// For now this is entirely bound to K8s / ArgoCD, will have to be revisited when we support
|
||||
/// more CD systems
|
||||
pub struct CDApplicationConfig {
|
||||
pub version: Version,
|
||||
pub helm_chart_repo_url: String,
|
||||
pub helm_chart_name: String,
|
||||
pub values_overrides: Option<Value>,
|
||||
pub name: String,
|
||||
pub namespace: String,
|
||||
}
|
||||
|
||||
pub trait ContinuousDeliveryApplication {
|
||||
fn get_config(&self) -> CDApplicationConfig;
|
||||
}
|
||||
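The `MultiTargetTopology` bound used above is the capability discussed in the comments: it lets `ensure_installed` ask the topology which profile it is deploying to. A minimal sketch of what that capability amounts to (illustrative only; the real definitions live in harmony's topology module, and every variant besides `LocalDev` is an assumption here):

    // Illustrative sketch, not the actual harmony definition.
    #[derive(Debug, Clone)]
    pub enum DeploymentTarget {
        LocalDev,
        Staging,    // assumed variant
        Production, // assumed variant
    }

    /// A Topology that knows which deployment profile it is currently driving.
    pub trait MultiTargetTopology: Topology {
        fn current_target(&self) -> DeploymentTarget;
    }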
42 harmony/src/modules/application/features/endpoint.rs Normal file
@@ -0,0 +1,42 @@
use async_trait::async_trait;
use log::info;

use crate::{
    modules::application::ApplicationFeature,
    topology::{K8sclient, Topology},
};

#[derive(Debug, Clone)]
pub struct PublicEndpoint {
    application_port: u16,
}

/// Use port 3000 as the default port. Harmony wants to provide "sane defaults" in general, and in
/// this particular context, using port 80 goes against our philosophy of providing production
/// grade defaults out of the box. Using an unprivileged port is a good security practice and
/// allows unprivileged containers to work with this out of the box.
///
/// Now, why 3000 specifically? Many popular web/network frameworks use it by default; there is no
/// perfect answer for this, but many Rust and Python libraries tend to use 3000.
impl Default for PublicEndpoint {
    fn default() -> Self {
        Self {
            application_port: 3000,
        }
    }
}

/// For now we only support K8s ingress, but we will support more at some point
#[async_trait]
impl<T: Topology + K8sclient + 'static> ApplicationFeature<T> for PublicEndpoint {
    async fn ensure_installed(&self, _topology: &T) -> Result<(), String> {
        info!(
            "Making sure public endpoint is installed for port {}",
            self.application_port
        );
        todo!()
    }

    fn name(&self) -> String {
        "PublicEndpoint".to_string()
    }
}
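Since `Default` is the only constructor shown, wiring this feature up is a one-liner (a minimal sketch; attaching features to an application score is shown in `rust.rs` below):

    // Uses the production-grade default of port 3000 discussed above.
    let endpoint = PublicEndpoint::default();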
1003 harmony/src/modules/application/features/helm_argocd_score.rs Normal file
File diff suppressed because it is too large
14 harmony/src/modules/application/features/mod.rs Normal file
@@ -0,0 +1,14 @@
mod endpoint;
pub use endpoint::*;

mod monitoring;
pub use monitoring::*;

mod continuous_delivery;
pub use continuous_delivery::*;

mod helm_argocd_score;
pub use helm_argocd_score::*;

mod argo_types;
pub use argo_types::*;
111 harmony/src/modules/application/features/monitoring.rs Normal file
@@ -0,0 +1,111 @@
use std::sync::Arc;

use async_trait::async_trait;
use base64::{Engine as _, engine::general_purpose};
use log::{debug, info};

use crate::{
    inventory::Inventory,
    modules::{
        application::{ApplicationFeature, OCICompliant},
        monitoring::{
            alert_channel::webhook_receiver::WebhookReceiver,
            kube_prometheus::{
                helm_prometheus_alert_score::HelmPrometheusAlertingScore,
                types::{NamespaceSelector, ServiceMonitor},
            },
            ntfy::ntfy::NtfyScore,
        },
    },
    score::Score,
    topology::{HelmCommand, K8sclient, Topology, Url, tenant::TenantManager},
};

#[derive(Debug, Clone)]
pub struct Monitoring {
    pub application: Arc<dyn OCICompliant>,
}

#[async_trait]
impl<T: Topology + HelmCommand + K8sclient + 'static + TenantManager> ApplicationFeature<T>
    for Monitoring
{
    async fn ensure_installed(&self, topology: &T) -> Result<(), String> {
        info!("Ensuring monitoring is available for application");

        let ntfy = NtfyScore {
            // namespace: topology
            //     .get_tenant_config()
            //     .await
            //     .expect("couldn't get tenant config")
            //     .name,
            namespace: self.application.name(),
            host: "localhost".to_string(),
        };
        ntfy.create_interpret()
            .execute(&Inventory::empty(), topology)
            .await
            .expect("couldn't create interpret for ntfy");

        let ntfy_default_auth_username = "harmony";
        let ntfy_default_auth_password = "harmony";
        let ntfy_default_auth_header = format!(
            "Basic {}",
            general_purpose::STANDARD.encode(format!(
                "{ntfy_default_auth_username}:{ntfy_default_auth_password}"
            ))
        );

        debug!("ntfy_default_auth_header: {ntfy_default_auth_header}");

        let ntfy_default_auth_param = general_purpose::STANDARD
            .encode(ntfy_default_auth_header)
            .replace("=", "");

        debug!("ntfy_default_auth_param: {ntfy_default_auth_param}");

        let ntfy_receiver = WebhookReceiver {
            name: "ntfy-webhook".to_string(),
            url: Url::Url(
                url::Url::parse(
                    format!(
                        "http://ntfy.{}.svc.cluster.local/rust-web-app?auth={ntfy_default_auth_param}",
                        self.application.name()
                    )
                    .as_str(),
                )
                .unwrap(),
            ),
        };

        let mut service_monitor = ServiceMonitor::default();
        service_monitor.namespace_selector = Some(NamespaceSelector {
            any: true,
            match_names: vec![],
        });

        service_monitor.name = "rust-webapp".to_string();

        // let alerting_score = ApplicationPrometheusMonitoringScore {
        //     receivers: vec![Box::new(ntfy_receiver)],
        //     rules: vec![],
        //     service_monitors: vec![service_monitor],
        // };

        let alerting_score = HelmPrometheusAlertingScore {
            receivers: vec![Box::new(ntfy_receiver)],
            rules: vec![],
            service_monitors: vec![service_monitor],
        };

        alerting_score
            .create_interpret()
            .execute(&Inventory::empty(), topology)
            .await
            .unwrap();
        Ok(())
    }

    fn name(&self) -> String {
        "Monitoring".to_string()
    }
}
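The double base64 pass above matches ntfy's `?auth=` query parameter, which expects the base64 of the full `Basic ...` header value with the `=` padding stripped. A worked example for the default `harmony:harmony` credentials (values computed by hand; worth double-checking):

    // base64("harmony:harmony")        == "aGFybW9ueTpoYXJtb255"
    // header                           == "Basic aGFybW9ueTpoYXJtb255"
    // base64(header), '=' stripped     == "QmFzaWMgYUdGeWJXOXVlVHBvWVhKdGIyNTU"
    // => http://ntfy.<app>.svc.cluster.local/rust-web-app?auth=QmFzaWMgYUdGeWJXOXVlVHBvWVhKdGIyNTU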
80 harmony/src/modules/application/mod.rs Normal file
@@ -0,0 +1,80 @@
mod feature;
pub mod features;
pub mod oci;
mod rust;
use std::sync::Arc;

pub use feature::*;
use log::info;
pub use oci::*;
pub use rust::*;

use async_trait::async_trait;

use crate::{
    data::{Id, Version},
    interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
    inventory::Inventory,
    topology::Topology,
};

pub trait Application: std::fmt::Debug + Send + Sync {
    fn name(&self) -> String;
}

#[derive(Debug)]
pub struct ApplicationInterpret<A: Application, T: Topology + std::fmt::Debug> {
    features: Vec<Box<dyn ApplicationFeature<T>>>,
    application: Arc<A>,
}

#[async_trait]
impl<A: Application, T: Topology + std::fmt::Debug> Interpret<T> for ApplicationInterpret<A, T> {
    async fn execute(
        &self,
        _inventory: &Inventory,
        topology: &T,
    ) -> Result<Outcome, InterpretError> {
        let app_name = self.application.name();
        info!(
            "Preparing {} features [{}] for application {app_name}",
            self.features.len(),
            self.features
                .iter()
                .map(|f| f.name())
                .collect::<Vec<String>>()
                .join(", ")
        );
        for feature in self.features.iter() {
            info!(
                "Installing feature {} for application {app_name}",
                feature.name()
            );
            let _ = match feature.ensure_installed(topology).await {
                Ok(()) => (),
                Err(msg) => {
                    return Err(InterpretError::new(format!(
                        "Application Interpret failed to install feature : {msg}"
                    )));
                }
            };
        }
        Ok(Outcome::success("successfully created app".to_string()))
    }

    fn get_name(&self) -> InterpretName {
        InterpretName::Application
    }

    fn get_version(&self) -> Version {
        Version::from("1.0.0").unwrap()
    }

    fn get_status(&self) -> InterpretStatus {
        todo!()
    }

    fn get_children(&self) -> Vec<Id> {
        todo!()
    }
}
21 harmony/src/modules/application/oci.rs Normal file
@@ -0,0 +1,21 @@
use async_trait::async_trait;

use super::Application;

#[async_trait]
pub trait OCICompliant: Application {
    async fn build_push_oci_image(&self) -> Result<String, String>; // TODO consider using oci-spec and friends crates here

    fn image_name(&self) -> String;

    fn local_image_name(&self) -> String;
}

#[async_trait]
pub trait HelmPackage: Application {
    /// Generates, packages, and pushes a Helm chart for the web application to an OCI registry.
    ///
    /// # Arguments
    /// * `image_url` - The full URL of the OCI container image to be used in the Deployment.
    async fn build_push_helm_package(&self, image_url: &str) -> Result<String, String>;
}
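For orientation, `image_name()` as implemented by `RustWebapp` below composes the registry config with the local name; for example (config values are illustrative, matching the demo registry used elsewhere in this diff):

    // registry_url     = "hub.nationtech.io"  (assumed config value)
    // registry_project = "harmony"            (assumed config value)
    // local_image_name = "my-app"
    // image_name()     = "hub.nationtech.io/harmony/my-app"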
605 harmony/src/modules/application/rust.rs Normal file
@@ -0,0 +1,605 @@
use std::fs;
use std::path::PathBuf;
use std::process;
use std::sync::Arc;

use async_trait::async_trait;
use bollard::query_parameters::PushImageOptionsBuilder;
use bollard::{Docker, body_full};
use dockerfile_builder::Dockerfile;
use dockerfile_builder::instruction::{CMD, COPY, ENV, EXPOSE, FROM, RUN, USER, WORKDIR};
use dockerfile_builder::instruction_builder::CopyBuilder;
use futures_util::StreamExt;
use log::{debug, error, info};
use serde::Serialize;
use tar::Archive;

use crate::config::Config;
use crate::{
    score::Score,
    topology::{Topology, Url},
};

use super::{Application, ApplicationFeature, ApplicationInterpret, HelmPackage, OCICompliant};

#[derive(Debug, Serialize, Clone)]
pub struct ApplicationScore<A: Application + Serialize, T: Topology + Clone + Serialize>
where
    Arc<A>: Serialize + Clone,
{
    pub features: Vec<Box<dyn ApplicationFeature<T>>>,
    pub application: Arc<A>,
}

impl<
    A: Application + Serialize + Clone + 'static,
    T: Topology + std::fmt::Debug + Clone + Serialize + 'static,
> Score<T> for ApplicationScore<A, T>
where
    Arc<A>: Serialize,
{
    fn create_interpret(&self) -> Box<dyn crate::interpret::Interpret<T>> {
        Box::new(ApplicationInterpret {
            features: self.features.clone(),
            application: self.application.clone(),
        })
    }

    fn name(&self) -> String {
        format!("Application: {}", self.application.name())
    }
}

#[derive(Debug, Clone, Serialize)]
pub enum RustWebFramework {
    Leptos,
}

#[derive(Debug, Clone, Serialize)]
pub struct RustWebapp {
    pub name: String,
    pub domain: Url,
    /// The path to the root of the Rust project to be containerized.
    pub project_root: PathBuf,
    pub framework: Option<RustWebFramework>,
}

impl Application for RustWebapp {
    fn name(&self) -> String {
        self.name.clone()
    }
}

#[async_trait]
impl HelmPackage for RustWebapp {
    async fn build_push_helm_package(&self, image_url: &str) -> Result<String, String> {
        info!("Starting Helm chart build and push for '{}'", self.name);

        // 1. Create the Helm chart files on disk.
        let chart_dir = self
            .create_helm_chart_files(image_url)
            .map_err(|e| format!("Failed to create Helm chart files: {}", e))?;
        info!("Successfully created Helm chart files in {:?}", chart_dir);

        // 2. Package the chart into a .tgz archive.
        let packaged_chart_path = self
            .package_helm_chart(&chart_dir)
            .map_err(|e| format!("Failed to package Helm chart: {}", e))?;
        info!(
            "Successfully packaged Helm chart: {}",
            packaged_chart_path.to_string_lossy()
        );

        // 3. Push the packaged chart to the OCI registry.
        let oci_chart_url = self
            .push_helm_chart(&packaged_chart_path)
            .map_err(|e| format!("Failed to push Helm chart: {}", e))?;
        info!("Successfully pushed Helm chart to: {}", oci_chart_url);

        Ok(oci_chart_url)
    }
}

#[async_trait]
impl OCICompliant for RustWebapp {
    /// Builds a Docker image for the Rust web application using a multi-stage build,
    /// pushes it to the configured OCI registry, and returns the full image tag.
    async fn build_push_oci_image(&self) -> Result<String, String> {
        // This function orchestrates the build and push process.
        // It's async to match the trait definition, though the underlying docker commands are blocking.
        info!("Starting OCI image build and push for '{}'", self.name);

        // 1. Build the local image by calling the synchronous helper function.
        let local_image_name = self.local_image_name();
        self.build_docker_image(&local_image_name)
            .await
            .map_err(|e| format!("Failed to build Docker image: {}", e))?;
        info!(
            "Successfully built local Docker image: {}",
            local_image_name
        );

        let remote_image_name = self.image_name();
        // 2. Push the image to the registry.
        self.push_docker_image(&local_image_name, &remote_image_name)
            .await
            .map_err(|e| format!("Failed to push Docker image: {}", e))?;
        info!("Successfully pushed Docker image to: {}", remote_image_name);

        Ok(remote_image_name)
    }

    fn local_image_name(&self) -> String {
        self.name.clone()
    }

    fn image_name(&self) -> String {
        let config = Config::load().expect("couldn't load config");

        format!(
            "{}/{}/{}",
            config.registry_url,
            config.registry_project,
            &self.local_image_name()
        )
    }
}

/// Implementation of helper methods for building and pushing the Docker image.
impl RustWebapp {
    /// Generates a multi-stage Dockerfile for a Rust application.
    fn build_dockerfile(&self) -> Result<PathBuf, Box<dyn std::error::Error>> {
        let mut dockerfile = Dockerfile::new();

        self.build_builder_image(&mut dockerfile);

        // Save the Dockerfile to a uniquely named file in the project root to avoid conflicts.
        let dockerfile_path = self.project_root.join("Dockerfile.harmony");
        fs::write(&dockerfile_path, dockerfile.to_string())?;

        Ok(dockerfile_path)
    }

    /// Builds the Docker image using the generated Dockerfile.
    pub async fn build_docker_image(
        &self,
        image_name: &str,
    ) -> Result<String, Box<dyn std::error::Error>> {
        info!("Generating Dockerfile for '{}'", self.name);
        let _dockerfile_path = self.build_dockerfile()?;

        let docker = Docker::connect_with_socket_defaults().unwrap();

        let build_image_options = bollard::query_parameters::BuildImageOptionsBuilder::default()
            .dockerfile("Dockerfile.harmony")
            .t(image_name)
            .q(false)
            .version(bollard::query_parameters::BuilderVersion::BuilderV1)
            .platform("linux/x86_64");

        let mut temp_tar_builder = tar::Builder::new(Vec::new());
        let _ = temp_tar_builder
            .append_dir_all("", self.project_root.clone())
            .unwrap();
        let archive = temp_tar_builder
            .into_inner()
            .expect("couldn't finish creating tar");
        let archived_files = Archive::new(archive.as_slice())
            .entries()
            .unwrap()
            .map(|entry| entry.unwrap().path().unwrap().into_owned())
            .collect::<Vec<_>>();

        debug!("files in docker tar: {:#?}", archived_files);

        let mut image_build_stream = docker.build_image(
            build_image_options.build(),
            None,
            Some(body_full(archive.into())),
        );

        while let Some(msg) = image_build_stream.next().await {
            println!("Message: {msg:?}");
        }

        Ok(image_name.to_string())
    }

    /// Tags and pushes a Docker image to the configured remote registry.
    async fn push_docker_image(
        &self,
        image_name: &str,
        full_tag: &str,
    ) -> Result<String, Box<dyn std::error::Error>> {
        info!("Pushing docker image {full_tag}");

        let docker = Docker::connect_with_socket_defaults().unwrap();

        // let push_options = PushImageOptionsBuilder::new().tag(tag);

        let mut push_image_stream =
            docker.push_image(full_tag, Some(PushImageOptionsBuilder::new().build()), None);

        while let Some(msg) = push_image_stream.next().await {
            println!("Message: {msg:?}");
        }

        Ok(full_tag.to_string())
    }

    /// Checks the output of a process command for success.
    fn check_output(
        &self,
        output: &process::Output,
        msg: &str,
    ) -> Result<(), Box<dyn std::error::Error>> {
        if !output.status.success() {
            let error_message = format!("{}: {}", msg, String::from_utf8_lossy(&output.stderr));
            return Err(error_message.into());
        }
        Ok(())
    }

    fn build_builder_image(&self, dockerfile: &mut Dockerfile) {
        match self.framework {
            Some(RustWebFramework::Leptos) => {
                // --- Stage 1: Builder for Leptos ---
                dockerfile.push(FROM::from("rust:bookworm as builder"));

                // Install dependencies, cargo-binstall, and clean up in one layer
                dockerfile.push(RUN::from(
                    "apt-get update && \
                    apt-get install -y --no-install-recommends clang wget && \
                    wget https://github.com/cargo-bins/cargo-binstall/releases/latest/download/cargo-binstall-x86_64-unknown-linux-musl.tgz && \
                    tar -xvf cargo-binstall-x86_64-unknown-linux-musl.tgz && \
                    cp cargo-binstall /usr/local/cargo/bin && \
                    rm cargo-binstall-x86_64-unknown-linux-musl.tgz cargo-binstall && \
                    apt-get clean && \
                    rm -rf /var/lib/apt/lists/*",
                ));

                // Install cargo-leptos
                dockerfile.push(RUN::from("cargo binstall cargo-leptos -y"));

                // Add the WASM target
                dockerfile.push(RUN::from("rustup target add wasm32-unknown-unknown"));

                // Set up workdir, copy source, and build
                dockerfile.push(WORKDIR::from("/app"));
                dockerfile.push(COPY::from(". ."));
                dockerfile.push(RUN::from("cargo leptos build --release -vv"));

                // --- Stage 2: Final Image ---
                dockerfile.push(FROM::from("debian:bookworm-slim"));

                // Create a non-root user for security.
                dockerfile.push(RUN::from(
                    "groupadd -r appgroup && useradd -r -s /bin/false -g appgroup appuser",
                ));

                dockerfile.push(ENV::from("LEPTOS_SITE_ADDR=0.0.0.0:3000"));
                dockerfile.push(EXPOSE::from("3000/tcp"));
                dockerfile.push(WORKDIR::from("/home/appuser"));

                // Copy static files
                dockerfile.push(
                    CopyBuilder::builder()
                        .from("builder")
                        .src("/app/target/site/pkg")
                        .dest("/home/appuser/pkg")
                        .build()
                        .unwrap(),
                );

                // Copy the compiled binary from the builder stage.
                error!(
                    "FIXME Should not be using score name here, instead should use name from Cargo.toml"
                );
                let binary_path_in_builder = format!("/app/target/release/{}", self.name);
                let binary_path_in_final = format!("/home/appuser/{}", self.name);
                dockerfile.push(
                    CopyBuilder::builder()
                        .from("builder")
                        .src(binary_path_in_builder)
                        .dest(&binary_path_in_final)
                        .build()
                        .unwrap(),
                );

                // Run as the non-root user.
                dockerfile.push(USER::from("appuser"));

                // Set the command to run the application.
                dockerfile.push(CMD::from(binary_path_in_final));
            }
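            // A sketch of the Dockerfile the Leptos arm above should emit
            // (illustrative; the exact output depends on dockerfile_builder's
            // rendering, and <name> stands for the application name):
            //
            //   FROM rust:bookworm as builder
            //   RUN apt-get update && apt-get install -y --no-install-recommends clang wget && ...
            //   RUN cargo binstall cargo-leptos -y
            //   RUN rustup target add wasm32-unknown-unknown
            //   WORKDIR /app
            //   COPY . .
            //   RUN cargo leptos build --release -vv
            //   FROM debian:bookworm-slim
            //   RUN groupadd -r appgroup && useradd -r -s /bin/false -g appgroup appuser
            //   ENV LEPTOS_SITE_ADDR=0.0.0.0:3000
            //   EXPOSE 3000/tcp
            //   WORKDIR /home/appuser
            //   COPY --from=builder /app/target/site/pkg /home/appuser/pkg
            //   COPY --from=builder /app/target/release/<name> /home/appuser/<name>
            //   USER appuser
            //   CMD /home/appuser/<name>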
            None => {
                // --- Stage 1: Builder for a generic Rust app ---
                dockerfile.push(FROM::from("rust:latest as builder"));

                // Install the wasm32 target as required.
                dockerfile.push(RUN::from("rustup target add wasm32-unknown-unknown"));
                dockerfile.push(WORKDIR::from("/app"));

                // Copy the source code and build the application.
                dockerfile.push(COPY::from(". ."));
                dockerfile.push(RUN::from("cargo build --release --locked"));

                // --- Stage 2: Final Image ---
                dockerfile.push(FROM::from("debian:bookworm-slim"));

                // Create a non-root user for security.
                dockerfile.push(RUN::from(
                    "groupadd -r appgroup && useradd -r -s /bin/false -g appgroup appuser",
                ));

                // Copy only the compiled binary from the builder stage.
                error!(
                    "FIXME Should not be using score name here, instead should use name from Cargo.toml"
                );
                let binary_path_in_builder = format!("/app/target/release/{}", self.name);
                let binary_path_in_final = format!("/usr/local/bin/{}", self.name);
                dockerfile.push(
                    CopyBuilder::builder()
                        .from("builder")
                        .src(binary_path_in_builder)
                        .dest(&binary_path_in_final)
                        .build()
                        .unwrap(),
                );

                // Run as the non-root user.
                dockerfile.push(USER::from("appuser"));

                // Set the command to run the application.
                dockerfile.push(CMD::from(binary_path_in_final));
            }
        }
    }

    /// Creates all necessary files for a basic Helm chart.
    fn create_helm_chart_files(
        &self,
        image_url: &str,
    ) -> Result<PathBuf, Box<dyn std::error::Error>> {
        let chart_name = format!("{}-chart", self.name);
        let chart_dir = self
            .project_root
            .join(".harmony_generated")
            .join("helm")
            .join(&chart_name);
        let templates_dir = chart_dir.join("templates");
        fs::create_dir_all(&templates_dir)?;

        let (image_repo, image_tag) = image_url.rsplit_once(':').unwrap_or((image_url, "latest"));

        // Create Chart.yaml
        let chart_yaml = format!(
            r#"
apiVersion: v2
name: {}
description: A Helm chart for the {} web application.
type: application
version: 0.1.0
appVersion: "{}"
"#,
            chart_name, self.name, image_tag
        );
        fs::write(chart_dir.join("Chart.yaml"), chart_yaml)?;

        // Create values.yaml
        let values_yaml = format!(
            r#"
# Default values for {}.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.

replicaCount: 1

image:
  repository: {}
  pullPolicy: IfNotPresent
  # Overridden by the chart's appVersion
  tag: "{}"

service:
  type: ClusterIP
  port: 3000

ingress:
  enabled: true
  # Annotations for cert-manager to handle SSL.
  annotations:
    cert-manager.io/cluster-issuer: "letsencrypt-prod"
    # Add other annotations like nginx ingress class if needed
    # kubernetes.io/ingress.class: nginx
  hosts:
    - host: chart-example.local
      paths:
        - path: /
          pathType: ImplementationSpecific
  tls:
    - secretName: {}-tls
      hosts:
        - chart-example.local
"#,
            chart_name, image_repo, image_tag, self.name
        );
        fs::write(chart_dir.join("values.yaml"), values_yaml)?;

        // Create templates/_helpers.tpl
        let helpers_tpl = r#"
{{/*
Expand the name of the chart.
*/}}
{{- define "chart.name" -}}
{{- default .Chart.Name $.Values.nameOverride | trunc 63 | trimSuffix "-" }}
{{- end }}

{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
*/}}
{{- define "chart.fullname" -}}
{{- $name := default .Chart.Name $.Values.nameOverride }}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
{{- end }}
"#;
        fs::write(templates_dir.join("_helpers.tpl"), helpers_tpl)?;

        // Create templates/service.yaml
        let service_yaml = r#"
apiVersion: v1
kind: Service
metadata:
  name: {{ include "chart.fullname" . }}
spec:
  type: {{ $.Values.service.type }}
  ports:
    - name: main
      port: {{ $.Values.service.port | default 3000 }}
      targetPort: {{ $.Values.service.port | default 3000 }}
      protocol: TCP
  selector:
    app: {{ include "chart.name" . }}
"#;
        fs::write(templates_dir.join("service.yaml"), service_yaml)?;

        // Create templates/deployment.yaml
        let deployment_yaml = r#"
apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{ include "chart.fullname" . }}
spec:
  replicas: {{ $.Values.replicaCount }}
  selector:
    matchLabels:
      app: {{ include "chart.name" . }}
  template:
    metadata:
      labels:
        app: {{ include "chart.name" . }}
    spec:
      containers:
        - name: {{ .Chart.Name }}
          image: "{{ $.Values.image.repository }}:{{ $.Values.image.tag | default .Chart.AppVersion }}"
          imagePullPolicy: {{ $.Values.image.pullPolicy }}
          ports:
            - name: main
              containerPort: {{ $.Values.service.port | default 3000 }}
              protocol: TCP
"#;
        fs::write(templates_dir.join("deployment.yaml"), deployment_yaml)?;

        // Create templates/ingress.yaml
        let ingress_yaml = r#"
{{- if $.Values.ingress.enabled -}}
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: {{ include "chart.fullname" . }}
  annotations:
    {{- toYaml $.Values.ingress.annotations | nindent 4 }}
spec:
  {{- if $.Values.ingress.tls }}
  tls:
    {{- range $.Values.ingress.tls }}
    - hosts:
        {{- range .hosts }}
        - {{ . | quote }}
        {{- end }}
      secretName: {{ .secretName }}
    {{- end }}
  {{- end }}
  rules:
    {{- range $.Values.ingress.hosts }}
    - host: {{ .host | quote }}
      http:
        paths:
          {{- range .paths }}
          - path: {{ .path }}
            pathType: {{ .pathType }}
            backend:
              service:
                name: {{ include "chart.fullname" $ }}
                port:
                  number: {{ $.Values.service.port | default 3000 }}
          {{- end }}
    {{- end }}
{{- end }}
"#;
        fs::write(templates_dir.join("ingress.yaml"), ingress_yaml)?;

        Ok(chart_dir)
    }

    /// Packages a Helm chart directory into a .tgz file.
    fn package_helm_chart(
        &self,
        chart_dir: &PathBuf,
    ) -> Result<PathBuf, Box<dyn std::error::Error>> {
        let chart_dirname = chart_dir.file_name().expect("Should find a chart dirname");
        info!(
            "Launching `helm package {}` cli with CWD {}",
            chart_dirname.to_string_lossy(),
            &self
                .project_root
                .join(".harmony_generated")
                .join("helm")
                .to_string_lossy()
        );
        let output = process::Command::new("helm")
            .args(["package", chart_dirname.to_str().unwrap()])
            .current_dir(&self.project_root.join(".harmony_generated").join("helm")) // Run package from the parent dir
            .output()?;

        self.check_output(&output, "Failed to package Helm chart")?;

        // Helm prints the path of the created chart to stdout.
        let tgz_name = String::from_utf8(output.stdout)?
            .trim()
            .split_whitespace()
            .last()
            .unwrap_or_default()
            .to_string();
        if tgz_name.is_empty() {
            return Err("Could not determine packaged chart filename.".into());
        }

        // The output from helm is relative, so we join it with the execution directory.
        Ok(self
            .project_root
            .join(".harmony_generated")
            .join("helm")
            .join(tgz_name))
    }

    /// Pushes a packaged Helm chart to an OCI registry.
    fn push_helm_chart(
        &self,
        packaged_chart_path: &PathBuf,
    ) -> Result<String, Box<dyn std::error::Error>> {
        let config = Config::load().expect("couldn't load config");

        // The chart name is the file stem of the .tgz file
        let chart_file_name = packaged_chart_path.file_stem().unwrap().to_str().unwrap();
        let oci_push_url = format!("oci://{}/{}", config.registry_url, config.registry_project);
        let oci_pull_url = format!("{oci_push_url}/{}-chart", self.name);

        info!(
            "Pushing Helm chart {} to {}",
            packaged_chart_path.to_string_lossy(),
            oci_push_url
        );

        let output = process::Command::new("helm")
            .args(["push", packaged_chart_path.to_str().unwrap(), &oci_push_url])
            .output()?;

        self.check_output(&output, "Pushing Helm chart failed")?;

        // The final URL includes the version tag, which is part of the file name
        let version = chart_file_name.rsplit_once('-').unwrap().1;
        debug!("pull url {oci_pull_url}");
        debug!("push url {oci_push_url}");
        Ok(format!("{}:{}", oci_pull_url, version))
    }
}
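A note on the version recovery in `push_helm_chart` above: the chart version is taken from everything after the last `-` in the packaged file stem. A minimal worked example (the chart name is illustrative):

    // e.g. `helm package` produced "harmony-example-rust-webapp-chart-0.1.0.tgz"
    let stem = "harmony-example-rust-webapp-chart-0.1.0";
    let version = stem.rsplit_once('-').unwrap().1;
    assert_eq!(version, "0.1.0");
    // pull URL becomes: oci://<registry>/<project>/harmony-example-rust-webapp-chart:0.1.0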
@@ -10,14 +10,25 @@ use crate::{
    topology::{HttpServer, Topology, Url},
};

/// Configure an HTTP server that is provided by the Topology
///
/// This Score will let you easily specify a file path to be served by the HTTP server
///
/// For example, if you have a folder of assets at `/var/www/assets` simply do:
///
/// ```rust,ignore
/// StaticFilesHttpScore {
///     files_to_serve: url!("file:///var/www/assets"),
/// }
/// ```
#[derive(Debug, new, Clone, Serialize)]
pub struct HttpScore {
pub struct StaticFilesHttpScore {
    files_to_serve: Url,
}

impl<T: Topology + HttpServer> Score<T> for HttpScore {
impl<T: Topology + HttpServer> Score<T> for StaticFilesHttpScore {
    fn create_interpret(&self) -> Box<dyn Interpret<T>> {
        Box::new(HttpInterpret::new(self.clone()))
        Box::new(StaticFilesHttpInterpret::new(self.clone()))
    }

    fn name(&self) -> String {
@@ -26,12 +37,12 @@ impl<T: Topology + HttpServer> Score<T> for HttpScore {
}

#[derive(Debug, new, Clone)]
pub struct HttpInterpret {
    score: HttpScore,
pub struct StaticFilesHttpInterpret {
    score: StaticFilesHttpScore,
}

#[async_trait]
impl<T: Topology + HttpServer> Interpret<T> for HttpInterpret {
impl<T: Topology + HttpServer> Interpret<T> for StaticFilesHttpInterpret {
    async fn execute(
        &self,
        _inventory: &Inventory,

@@ -5,7 +5,7 @@ use log::info;
use serde::Serialize;

use crate::{
    config::HARMONY_CONFIG_DIR,
    config::Config,
    data::{Id, Version},
    interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
    inventory::Inventory,
@@ -21,8 +21,10 @@ pub struct K3DInstallationScore {

impl Default for K3DInstallationScore {
    fn default() -> Self {
        let config = Config::load().expect("couldn't load config");

        Self {
            installation_path: HARMONY_CONFIG_DIR.join("k3d"),
            installation_path: config.data_dir.join("k3d"),
            cluster_name: "harmony".to_string(),
        }
    }

@@ -14,7 +14,7 @@ use async_trait::async_trait;
use log::{debug, info};
use serde::Serialize;

use crate::config::{REGISTRY_PROJECT, REGISTRY_URL};
use crate::config::Config as HarmonyConfig;
use crate::modules::k8s::ingress::K8sIngressScore;
use crate::topology::HelmCommand;
use crate::{
@@ -355,7 +355,12 @@ opcache.fast_shutdown=1
    }

    fn push_docker_image(&self, image_name: &str) -> Result<String, Box<dyn std::error::Error>> {
        let full_tag = format!("{}/{}/{}", *REGISTRY_URL, *REGISTRY_PROJECT, &image_name);
        let config = HarmonyConfig::load().expect("couldn't load config");

        let full_tag = format!(
            "{}/{}/{}",
            config.registry_url, config.registry_project, &image_name
        );
        let output = std::process::Command::new("docker")
            .args(["tag", image_name, &full_tag])
            .output()?;

@@ -1,3 +1,4 @@
pub mod application;
pub mod cert_manager;
pub mod dhcp;
pub mod dns;

@@ -4,9 +4,12 @@ use serde_yaml::{Mapping, Value};

use crate::{
    interpret::{InterpretError, Outcome},
    modules::monitoring::kube_prometheus::{
        prometheus::{Prometheus, PrometheusReceiver},
        types::{AlertChannelConfig, AlertManagerChannelConfig},
    modules::monitoring::{
        kube_prometheus::{
            prometheus::{KubePrometheus, KubePrometheusReceiver},
            types::{AlertChannelConfig, AlertManagerChannelConfig},
        },
        prometheus::prometheus::{Prometheus, PrometheusReceiver},
    },
    topology::{Url, oberservability::monitoring::AlertReceiver},
};
@@ -37,6 +40,26 @@ impl PrometheusReceiver for DiscordWebhook {
    }
}

#[async_trait]
impl AlertReceiver<KubePrometheus> for DiscordWebhook {
    async fn install(&self, sender: &KubePrometheus) -> Result<Outcome, InterpretError> {
        sender.install_receiver(self).await
    }
    fn clone_box(&self) -> Box<dyn AlertReceiver<KubePrometheus>> {
        Box::new(self.clone())
    }
}

#[async_trait]
impl KubePrometheusReceiver for DiscordWebhook {
    fn name(&self) -> String {
        self.name.clone()
    }
    async fn configure_receiver(&self) -> AlertManagerChannelConfig {
        self.get_config().await
    }
}

#[async_trait]
impl AlertChannelConfig for DiscordWebhook {
    async fn get_config(&self) -> AlertManagerChannelConfig {

@@ -4,9 +4,12 @@ use serde_yaml::{Mapping, Value};

use crate::{
    interpret::{InterpretError, Outcome},
    modules::monitoring::kube_prometheus::{
        prometheus::{Prometheus, PrometheusReceiver},
        types::{AlertChannelConfig, AlertManagerChannelConfig},
    modules::monitoring::{
        kube_prometheus::{
            prometheus::{KubePrometheus, KubePrometheusReceiver},
            types::{AlertChannelConfig, AlertManagerChannelConfig},
        },
        prometheus::prometheus::{Prometheus, PrometheusReceiver},
    },
    topology::{Url, oberservability::monitoring::AlertReceiver},
};
@@ -36,6 +39,25 @@ impl PrometheusReceiver for WebhookReceiver {
        self.get_config().await
    }
}
#[async_trait]
impl AlertReceiver<KubePrometheus> for WebhookReceiver {
    async fn install(&self, sender: &KubePrometheus) -> Result<Outcome, InterpretError> {
        sender.install_receiver(self).await
    }
    fn clone_box(&self) -> Box<dyn AlertReceiver<KubePrometheus>> {
        Box::new(self.clone())
    }
}

#[async_trait]
impl KubePrometheusReceiver for WebhookReceiver {
    fn name(&self) -> String {
        self.name.clone()
    }
    async fn configure_receiver(&self) -> AlertManagerChannelConfig {
        self.get_config().await
    }
}

#[async_trait]
impl AlertChannelConfig for WebhookReceiver {

@@ -5,13 +5,26 @@ use serde::Serialize;

use crate::{
    interpret::{InterpretError, Outcome},
    modules::monitoring::kube_prometheus::{
        prometheus::{Prometheus, PrometheusRule},
        types::{AlertGroup, AlertManagerAdditionalPromRules},
    modules::monitoring::{
        kube_prometheus::{
            prometheus::{KubePrometheus, KubePrometheusRule},
            types::{AlertGroup, AlertManagerAdditionalPromRules},
        },
        prometheus::prometheus::{Prometheus, PrometheusRule},
    },
    topology::oberservability::monitoring::AlertRule,
};

#[async_trait]
impl AlertRule<KubePrometheus> for AlertManagerRuleGroup {
    async fn install(&self, sender: &KubePrometheus) -> Result<Outcome, InterpretError> {
        sender.install_rule(&self).await
    }
    fn clone_box(&self) -> Box<dyn AlertRule<KubePrometheus>> {
        Box::new(self.clone())
    }
}

#[async_trait]
impl AlertRule<Prometheus> for AlertManagerRuleGroup {
    async fn install(&self, sender: &Prometheus) -> Result<Outcome, InterpretError> {
@@ -41,6 +54,25 @@ impl PrometheusRule for AlertManagerRuleGroup {
        }
    }
}
#[async_trait]
impl KubePrometheusRule for AlertManagerRuleGroup {
    fn name(&self) -> String {
        self.name.clone()
    }
    async fn configure_rule(&self) -> AlertManagerAdditionalPromRules {
        let mut additional_prom_rules = BTreeMap::new();

        additional_prom_rules.insert(
            self.name.clone(),
            AlertGroup {
                groups: vec![self.clone()],
            },
        );
        AlertManagerAdditionalPromRules {
            rules: additional_prom_rules,
        }
    }
}

impl AlertManagerRuleGroup {
    pub fn new(name: &str, rules: Vec<PrometheusAlertRule>) -> AlertManagerRuleGroup {
@@ -0,0 +1,44 @@
use std::sync::{Arc, Mutex};

use serde::Serialize;

use crate::{
    modules::monitoring::{
        kube_prometheus::types::ServiceMonitor,
        prometheus::{prometheus::Prometheus, prometheus_config::PrometheusConfig},
    },
    score::Score,
    topology::{
        HelmCommand, Topology,
        oberservability::monitoring::{AlertReceiver, AlertRule, AlertingInterpret},
        tenant::TenantManager,
    },
};

#[derive(Clone, Debug, Serialize)]
pub struct ApplicationPrometheusMonitoringScore {
    pub receivers: Vec<Box<dyn AlertReceiver<Prometheus>>>,
    pub rules: Vec<Box<dyn AlertRule<Prometheus>>>,
    pub service_monitors: Vec<ServiceMonitor>,
}

impl<T: Topology + HelmCommand + TenantManager> Score<T> for ApplicationPrometheusMonitoringScore {
    fn create_interpret(&self) -> Box<dyn crate::interpret::Interpret<T>> {
        let mut prom_config = PrometheusConfig::new();
        prom_config.alert_manager = true;

        let config = Arc::new(Mutex::new(prom_config));
        config
            .try_lock()
            .expect("couldn't lock config")
            .additional_service_monitors = self.service_monitors.clone();
        Box::new(AlertingInterpret {
            sender: Prometheus::new(),
            receivers: self.receivers.clone(),
            rules: self.rules.clone(),
        })
    }
    fn name(&self) -> String {
        "ApplicationPrometheusMonitoringScore".to_string()
    }
}
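A minimal usage sketch for this score (illustrative: assumes a topology value satisfying `Topology + HelmCommand + TenantManager` and reuses the `WebhookReceiver` and `ServiceMonitor` types from this diff):

    let score = ApplicationPrometheusMonitoringScore {
        receivers: vec![Box::new(ntfy_receiver)], // e.g. the ntfy WebhookReceiver shown earlier
        rules: vec![],
        service_monitors: vec![ServiceMonitor::default()],
    };
    score
        .create_interpret()
        .execute(&Inventory::empty(), &topology)
        .await?;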
@@ -0,0 +1 @@
pub mod k8s_application_monitoring_score;

28 harmony/src/modules/monitoring/grafana/helm/helm_grafana.rs Normal file
@@ -0,0 +1,28 @@
use non_blank_string_rs::NonBlankString;
use std::str::FromStr;

use crate::modules::helm::chart::HelmChartScore;

pub fn grafana_helm_chart_score(ns: &str) -> HelmChartScore {
    let values = format!(
        r#"
rbac:
  namespaced: true
sidecar:
  dashboards:
    enabled: true
"#
    );

    HelmChartScore {
        namespace: Some(NonBlankString::from_str(ns).unwrap()),
        release_name: NonBlankString::from_str("grafana").unwrap(),
        chart_name: NonBlankString::from_str("oci://ghcr.io/grafana/helm-charts/grafana").unwrap(),
        chart_version: None,
        values_overrides: None,
        values_yaml: Some(values.to_string()),
        create_namespace: true,
        install_only: true,
        repository: None,
    }
}

1 harmony/src/modules/monitoring/grafana/helm/mod.rs Normal file
@@ -0,0 +1 @@
pub mod helm_grafana;

1 harmony/src/modules/monitoring/grafana/mod.rs Normal file
@@ -0,0 +1 @@
pub mod helm;
@@ -1,13 +1,12 @@
use serde::Serialize;

use crate::modules::monitoring::{
    alert_rule::prometheus_alert_rule::AlertManagerRuleGroup,
    kube_prometheus::types::{AlertManagerAdditionalPromRules, AlertManagerChannelConfig},
use crate::modules::monitoring::kube_prometheus::types::{
    AlertManagerAdditionalPromRules, AlertManagerChannelConfig, ServiceMonitor,
};

#[derive(Debug, Clone, Serialize)]
pub struct KubePrometheusConfig {
    pub namespace: String,
    pub namespace: Option<String>,
    pub default_rules: bool,
    pub windows_monitoring: bool,
    pub alert_manager: bool,
@@ -26,11 +25,12 @@ pub struct KubePrometheusConfig {
    pub prometheus_operator: bool,
    pub alert_receiver_configs: Vec<AlertManagerChannelConfig>,
    pub alert_rules: Vec<AlertManagerAdditionalPromRules>,
    pub additional_service_monitors: Vec<ServiceMonitor>,
}
impl KubePrometheusConfig {
    pub fn new() -> Self {
        Self {
            namespace: "monitoring".into(),
            namespace: None,
            default_rules: true,
            windows_monitoring: false,
            alert_manager: true,
@@ -38,17 +38,18 @@ impl KubePrometheusConfig {
            node_exporter: false,
            prometheus: true,
            kubernetes_service_monitors: true,
            kubernetes_api_server: false,
            kubelet: false,
            kube_controller_manager: false,
            kube_etcd: false,
            kube_proxy: false,
            kubernetes_api_server: true,
            kubelet: true,
            kube_controller_manager: true,
            kube_etcd: true,
            kube_proxy: true,
            kube_state_metrics: true,
            prometheus_operator: true,
            core_dns: false,
            kube_scheduler: false,
            core_dns: true,
            kube_scheduler: true,
            alert_receiver_configs: vec![],
            alert_rules: vec![],
            additional_service_monitors: vec![],
        }
    }
}

@@ -12,7 +12,8 @@ use crate::modules::{
    helm::chart::HelmChartScore,
    monitoring::kube_prometheus::types::{
        AlertGroup, AlertManager, AlertManagerAdditionalPromRules, AlertManagerConfig,
        AlertManagerRoute, AlertManagerValues,
        AlertManagerRoute, AlertManagerSpec, AlertManagerValues, ConfigReloader, Limits,
        PrometheusConfig, Requests, Resources,
    },
};

@@ -36,8 +37,50 @@ pub fn kube_prometheus_helm_chart_score(
    let node_exporter = config.node_exporter.to_string();
    let prometheus_operator = config.prometheus_operator.to_string();
    let prometheus = config.prometheus.to_string();
    let resource_limit = Resources {
        limits: Limits {
            memory: "100Mi".to_string(),
            cpu: "100m".to_string(),
        },
        requests: Requests {
            memory: "100Mi".to_string(),
            cpu: "100m".to_string(),
        },
    };

    fn indent_lines(s: &str, spaces: usize) -> String {
        let pad = " ".repeat(spaces);
        s.lines()
            .map(|line| format!("{pad}{line}"))
            .collect::<Vec<_>>()
            .join("\n")
    }

    fn resource_block(resource: &Resources, indent_level: usize) -> String {
        let yaml = serde_yaml::to_string(resource).unwrap();
        format!(
            "{}resources:\n{}",
            " ".repeat(indent_level),
            indent_lines(&yaml, indent_level + 2)
        )
    }
    let resource_section = resource_block(&resource_limit, 2);
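    // Worked example: with resource_limit as above, resource_block(&resource_limit, 2)
    // should render (serde_yaml emits fields in struct order, limits before requests):
    //
    //   resources:
    //     limits:
    //       memory: 100Mi
    //       cpu: 100m
    //     requests:
    //       memory: 100Mi
    //       cpu: 100m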
|
||||
let mut values = format!(
|
||||
r#"
|
||||
global:
|
||||
rbac:
|
||||
create: true
|
||||
prometheus:
|
||||
enabled: {prometheus}
|
||||
prometheusSpec:
|
||||
resources:
|
||||
requests:
|
||||
cpu: 100m
|
||||
memory: 500Mi
|
||||
limits:
|
||||
cpu: 200m
|
||||
memory: 1000Mi
|
||||
defaultRules:
|
||||
create: {default_rules}
|
||||
rules:
|
||||
@@ -77,35 +120,183 @@ defaultRules:
|
||||
windows: true
|
||||
windowsMonitoring:
|
||||
enabled: {windows_monitoring}
|
||||
resources:
|
||||
requests:
|
||||
cpu: 100m
|
||||
memory: 150Mi
|
||||
limits:
|
||||
cpu: 200m
|
||||
memory: 250Mi
|
||||
grafana:
|
||||
enabled: {grafana}
|
||||
resources:
|
||||
requests:
|
||||
cpu: 100m
|
||||
memory: 150Mi
|
||||
limits:
|
||||
cpu: 200m
|
||||
memory: 250Mi
|
||||
initChownData:
|
||||
resources:
|
||||
requests:
|
||||
cpu: 10m
|
||||
memory: 50Mi
|
||||
limits:
|
||||
cpu: 50m
|
||||
memory: 100Mi
|
||||
sidecar:
|
||||
resources:
|
||||
requests:
|
||||
cpu: 10m
|
||||
memory: 50Mi
|
||||
limits:
|
||||
cpu: 50m
|
||||
memory: 100Mi
|
||||
kubernetesServiceMonitors:
|
||||
enabled: {kubernetes_service_monitors}
|
||||
kubeApiServer:
|
||||
enabled: {kubernetes_api_server}
|
||||
resources:
|
||||
requests:
|
||||
cpu: 100m
|
||||
memory: 150Mi
|
||||
limits:
|
||||
cpu: 200m
|
||||
memory: 250Mi
|
||||
kubelet:
|
||||
enabled: {kubelet}
|
||||
resources:
|
||||
requests:
|
||||
cpu: 100m
|
||||
memory: 150Mi
|
||||
limits:
|
||||
cpu: 200m
|
||||
memory: 250Mi
|
||||
kubeControllerManager:
|
||||
enabled: {kube_controller_manager}
|
||||
resources:
|
||||
requests:
|
||||
cpu: 100m
|
||||
memory: 150Mi
|
||||
limits:
|
||||
cpu: 200m
|
||||
memory: 250Mi
|
||||
coreDns:
|
||||
enabled: {core_dns}
|
||||
resources:
|
||||
requests:
|
||||
cpu: 100m
|
||||
memory: 150Mi
|
||||
limits:
|
||||
cpu: 200m
|
||||
memory: 250Mi
|
||||
kubeEtcd:
|
||||
enabled: {kube_etcd}
|
||||
resources:
|
||||
requests:
|
||||
cpu: 100m
|
||||
memory: 150Mi
|
||||
limits:
|
||||
cpu: 200m
|
||||
memory: 250Mi
|
||||
kubeScheduler:
|
||||
enabled: {kube_scheduler}
|
||||
resources:
|
||||
requests:
|
||||
cpu: 100m
|
||||
memory: 150Mi
|
||||
limits:
|
||||
cpu: 200m
|
||||
memory: 250Mi
|
||||
kubeProxy:
|
||||
enabled: {kube_proxy}
|
||||
resources:
|
||||
requests:
|
||||
cpu: 100m
|
||||
memory: 150Mi
|
||||
limits:
|
||||
cpu: 200m
|
||||
memory: 250Mi
|
||||
kubeStateMetrics:
|
||||
enabled: {kube_state_metrics}
|
||||
kube-state-metrics:
|
||||
resources:
|
||||
requests:
|
||||
cpu: 100m
|
||||
memory: 150Mi
|
||||
limits:
|
||||
cpu: 200m
|
||||
memory: 250Mi
|
||||
nodeExporter:
|
||||
enabled: {node_exporter}
|
||||
resources:
|
||||
requests:
|
||||
cpu: 100m
|
||||
memory: 150Mi
|
||||
limits:
|
||||
cpu: 200m
|
||||
memory: 250Mi
|
||||
prometheus-node-exporter:
|
||||
resources:
|
||||
requests:
|
||||
cpu: 100m
|
||||
memory: 150Mi
|
||||
limits:
|
||||
cpu: 200m
|
||||
memory: 250Mi
|
||||
prometheusOperator:
|
||||
enabled: {prometheus_operator}
|
||||
prometheus:
|
||||
enabled: {prometheus}
|
||||
enabled: true
|
||||
resources:
|
||||
requests:
|
||||
cpu: 100m
|
||||
memory: 150Mi
|
||||
limits:
|
||||
cpu: 100m
|
||||
memory: 200Mi
|
||||
prometheusConfigReloader:
|
||||
resources:
|
||||
requests:
|
||||
cpu: 100m
|
||||
memory: 150Mi
|
||||
limits:
|
||||
cpu: 100m
|
||||
memory: 200Mi
|
||||
admissionWebhooks:
|
||||
deployment:
|
||||
resources:
|
||||
limits:
|
||||
cpu: 10m
|
||||
memory: 100Mi
|
||||
requests:
|
||||
cpu: 10m
|
||||
memory: 100Mi
|
||||
patch:
|
||||
resources:
|
||||
limits:
|
||||
cpu: 10m
|
||||
memory: 100Mi
|
||||
requests:
|
||||
cpu: 10m
|
||||
memory: 100Mi
|
||||
"#,
|
||||
);
|
||||
|
||||
let prometheus_config =
|
||||
crate::modules::monitoring::kube_prometheus::types::PrometheusConfigValues {
|
||||
prometheus: PrometheusConfig {
|
||||
prometheus: bool::from_str(prometheus.as_str()).expect("couldn't parse bool"),
|
||||
additional_service_monitors: config.additional_service_monitors.clone(),
|
||||
},
|
||||
};
|
||||
let prometheus_config_yaml =
|
||||
serde_yaml::to_string(&prometheus_config).expect("Failed to serialize YAML");
|
||||
|
||||
debug!(
|
||||
"serialized prometheus config: \n {:#}",
|
||||
prometheus_config_yaml
|
||||
);
|
||||
values.push_str(&prometheus_config_yaml);
|
||||
|
||||
// add required null receiver for prometheus alert manager
|
||||
let mut null_receiver = Mapping::new();
|
||||
null_receiver.insert(
|
||||
@@ -145,6 +336,30 @@ prometheus:
|
||||
alertmanager: AlertManager {
|
||||
enabled: config.alert_manager,
|
||||
config: alert_manager_channel_config,
|
||||
alertmanager_spec: AlertManagerSpec {
|
||||
resources: Resources {
|
||||
limits: Limits {
|
||||
memory: "100Mi".to_string(),
|
||||
cpu: "100m".to_string(),
|
||||
},
|
||||
requests: Requests {
|
||||
memory: "100Mi".to_string(),
|
||||
cpu: "100m".to_string(),
|
||||
},
|
||||
},
|
||||
},
|
||||
init_config_reloader: ConfigReloader {
|
||||
resources: Resources {
|
||||
limits: Limits {
|
||||
memory: "100Mi".to_string(),
|
||||
cpu: "100m".to_string(),
|
||||
},
|
||||
requests: Requests {
|
||||
memory: "100Mi".to_string(),
|
||||
cpu: "100m".to_string(),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
};
|
||||
|
||||
@@ -185,7 +400,7 @@ prometheus:
|
||||
debug!("full values.yaml: \n {:#}", values);
|
||||
|
||||
HelmChartScore {
|
||||
namespace: Some(NonBlankString::from_str(&config.namespace).unwrap()),
|
||||
namespace: Some(NonBlankString::from_str(&config.namespace.clone().unwrap()).unwrap()),
|
||||
release_name: NonBlankString::from_str("kube-prometheus").unwrap(),
|
||||
chart_name: NonBlankString::from_str(
|
||||
"oci://ghcr.io/prometheus-community/charts/kube-prometheus-stack",
|
||||
|
||||
@@ -2,27 +2,33 @@ use std::sync::{Arc, Mutex};
|
||||
|
||||
use serde::Serialize;
|
||||
|
||||
use super::{helm::config::KubePrometheusConfig, prometheus::Prometheus};
|
||||
use super::{helm::config::KubePrometheusConfig, prometheus::KubePrometheus};
|
||||
use crate::{
|
||||
modules::monitoring::kube_prometheus::types::ServiceMonitor,
|
||||
score::Score,
|
||||
topology::{
|
||||
HelmCommand, Topology,
|
||||
oberservability::monitoring::{AlertReceiver, AlertRule, AlertingInterpret},
|
||||
tenant::TenantManager,
|
||||
},
|
||||
};
|
||||
|
||||
#[derive(Clone, Debug, Serialize)]
|
||||
pub struct HelmPrometheusAlertingScore {
|
||||
pub receivers: Vec<Box<dyn AlertReceiver<Prometheus>>>,
|
||||
pub rules: Vec<Box<dyn AlertRule<Prometheus>>>,
|
||||
pub receivers: Vec<Box<dyn AlertReceiver<KubePrometheus>>>,
|
||||
pub rules: Vec<Box<dyn AlertRule<KubePrometheus>>>,
|
||||
pub service_monitors: Vec<ServiceMonitor>,
|
||||
}
|
||||
|
||||
impl<T: Topology + HelmCommand> Score<T> for HelmPrometheusAlertingScore {
|
||||
impl<T: Topology + HelmCommand + TenantManager> Score<T> for HelmPrometheusAlertingScore {
|
||||
fn create_interpret(&self) -> Box<dyn crate::interpret::Interpret<T>> {
|
||||
let config = Arc::new(Mutex::new(KubePrometheusConfig::new()));
|
||||
config
|
||||
.try_lock()
|
||||
.expect("couldn't lock config")
|
||||
.additional_service_monitors = self.service_monitors.clone();
|
||||
Box::new(AlertingInterpret {
|
||||
sender: Prometheus {
|
||||
config: Arc::new(Mutex::new(KubePrometheusConfig::new())),
|
||||
},
|
||||
sender: KubePrometheus { config },
|
||||
receivers: self.receivers.clone(),
|
||||
rules: self.rules.clone(),
|
||||
})
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
use std::sync::{Arc, Mutex};
|
||||
|
||||
use async_trait::async_trait;
|
||||
use log::debug;
|
||||
use log::{debug, error};
|
||||
use serde::Serialize;
|
||||
|
||||
use crate::{
|
||||
@@ -13,6 +13,7 @@ use crate::{
|
||||
HelmCommand, Topology,
|
||||
installable::Installable,
|
||||
oberservability::monitoring::{AlertReceiver, AlertRule, AlertSender},
|
||||
tenant::TenantManager,
|
||||
},
|
||||
};
|
||||
|
||||
@@ -26,14 +27,19 @@ use super::{
|
||||
};
|
||||
|
||||
#[async_trait]
|
||||
impl AlertSender for Prometheus {
|
||||
impl AlertSender for KubePrometheus {
|
||||
fn name(&self) -> String {
|
||||
"HelmKubePrometheus".to_string()
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl<T: Topology + HelmCommand> Installable<T> for Prometheus {
|
||||
impl<T: Topology + HelmCommand + TenantManager> Installable<T> for KubePrometheus {
|
||||
async fn configure(&self, _inventory: &Inventory, topology: &T) -> Result<(), InterpretError> {
|
||||
self.configure_with_topology(topology).await;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn ensure_installed(
|
||||
&self,
|
||||
inventory: &Inventory,
|
||||
@@ -45,14 +51,31 @@ impl<T: Topology + HelmCommand> Installable<T> for Prometheus {
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct Prometheus {
|
||||
pub struct KubePrometheus {
|
||||
pub config: Arc<Mutex<KubePrometheusConfig>>,
|
||||
}
|
||||
|
||||
impl Prometheus {
|
||||
impl KubePrometheus {
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
config: Arc::new(Mutex::new(KubePrometheusConfig::new())),
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn configure_with_topology<T: TenantManager>(&self, topology: &T) {
|
||||
let ns = topology
|
||||
.get_tenant_config()
|
||||
.await
|
||||
.map(|cfg| cfg.name.clone())
|
||||
.unwrap_or_else(|| "monitoring".to_string());
|
||||
error!("This must be refactored, see comments in pr #74");
|
||||
debug!("NS: {}", ns);
|
||||
self.config.lock().unwrap().namespace = Some(ns);
|
||||
}
|
||||
|
||||
pub async fn install_receiver(
|
||||
&self,
|
||||
prometheus_receiver: &dyn PrometheusReceiver,
|
||||
prometheus_receiver: &dyn KubePrometheusReceiver,
|
||||
) -> Result<Outcome, InterpretError> {
|
||||
let prom_receiver = prometheus_receiver.configure_receiver().await;
|
||||
debug!(
|
||||
@@ -97,12 +120,12 @@ impl Prometheus {
}

#[async_trait]
-pub trait PrometheusReceiver: Send + Sync + std::fmt::Debug {
+pub trait KubePrometheusReceiver: Send + Sync + std::fmt::Debug {
    fn name(&self) -> String;
    async fn configure_receiver(&self) -> AlertManagerChannelConfig;
}

-impl Serialize for Box<dyn AlertReceiver<Prometheus>> {
+impl Serialize for Box<dyn AlertReceiver<KubePrometheus>> {
    fn serialize<S>(&self, _serializer: S) -> Result<S::Ok, S::Error>
    where
        S: serde::Serializer,
@@ -111,19 +134,19 @@ impl Serialize for Box<dyn AlertReceiver<Prometheus>> {
    }
}

-impl Clone for Box<dyn AlertReceiver<Prometheus>> {
+impl Clone for Box<dyn AlertReceiver<KubePrometheus>> {
    fn clone(&self) -> Self {
        self.clone_box()
    }
}

#[async_trait]
-pub trait PrometheusRule: Send + Sync + std::fmt::Debug {
+pub trait KubePrometheusRule: Send + Sync + std::fmt::Debug {
    fn name(&self) -> String;
    async fn configure_rule(&self) -> AlertManagerAdditionalPromRules;
}

-impl Serialize for Box<dyn AlertRule<Prometheus>> {
+impl Serialize for Box<dyn AlertRule<KubePrometheus>> {
    fn serialize<S>(&self, _serializer: S) -> Result<S::Ok, S::Error>
    where
        S: serde::Serializer,
@@ -132,7 +155,7 @@ impl Serialize for Box<dyn AlertRule<Prometheus>> {
    }
}

-impl Clone for Box<dyn AlertRule<Prometheus>> {
+impl Clone for Box<dyn AlertRule<KubePrometheus>> {
    fn clone(&self) -> Self {
        self.clone_box()
    }
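The `Serialize` and `Clone` impls for boxed trait objects above are the usual clone_box workaround for `Clone` not being object-safe. A minimal standalone sketch of the pattern (hypothetical `Alert` trait, not part of this PR):

```rust
trait Alert {
    fn clone_box(&self) -> Box<dyn Alert>;
}

#[derive(Clone)]
struct Email;

impl Alert for Email {
    // Each concrete type knows how to clone itself behind the trait object.
    fn clone_box(&self) -> Box<dyn Alert> {
        Box::new(self.clone())
    }
}

// With that hook in place, the boxed trait object itself becomes Clone.
impl Clone for Box<dyn Alert> {
    fn clone(&self) -> Self {
        self.clone_box()
    }
}
```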
@@ -1,4 +1,4 @@
-use std::collections::BTreeMap;
+use std::collections::{BTreeMap, HashMap};

use async_trait::async_trait;
use serde::Serialize;
@@ -16,9 +16,17 @@ pub struct AlertManagerValues {
    pub alertmanager: AlertManager,
}
#[derive(Debug, Clone, Serialize)]
+#[serde(rename_all = "camelCase")]
pub struct AlertManager {
    pub enabled: bool,
    pub config: AlertManagerConfig,
+    pub alertmanager_spec: AlertManagerSpec,
+    pub init_config_reloader: ConfigReloader,
}

+#[derive(Debug, Clone, Serialize)]
+pub struct ConfigReloader {
+    pub resources: Resources,
+}
+
#[derive(Debug, Clone, Serialize)]
@@ -43,6 +51,30 @@ pub struct AlertManagerChannelConfig {
    pub channel_receiver: Value,
}
#[derive(Debug, Clone, Serialize)]
#[serde(rename_all = "camelCase")]
pub struct AlertManagerSpec {
    pub(crate) resources: Resources,
}

#[derive(Debug, Clone, Serialize)]
pub struct Resources {
    pub limits: Limits,
    pub requests: Requests,
}

#[derive(Debug, Clone, Serialize)]
pub struct Limits {
    pub memory: String,
    pub cpu: String,
}

#[derive(Debug, Clone, Serialize)]
pub struct Requests {
    pub memory: String,
    pub cpu: String,
}

#[derive(Debug, Clone, Serialize)]
pub struct AlertManagerAdditionalPromRules {
    #[serde(flatten)]
@@ -53,3 +85,209 @@ pub struct AlertManagerAdditionalPromRules {
pub struct AlertGroup {
    pub groups: Vec<AlertManagerRuleGroup>,
}
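These value structs exist to be rendered into Helm values YAML, so `rename_all = "camelCase"` maps snake_case Rust fields onto the chart's keys. A rough illustration with a simplified struct (not the crate's actual type):

```rust
use serde::Serialize;

#[derive(Serialize)]
#[serde(rename_all = "camelCase")]
struct Spec {
    init_config_reloader: bool,
}

fn main() {
    // snake_case fields come out as the camelCase keys the chart expects.
    let yaml = serde_yaml::to_string(&Spec { init_config_reloader: true }).unwrap();
    assert_eq!(yaml.trim(), "initConfigReloader: true");
}
```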

#[derive(Debug, Clone, Serialize)]
pub enum HTTPScheme {
    #[serde(rename = "http")]
    HTTP,
    #[serde(rename = "https")]
    HTTPS,
}

#[derive(Debug, Clone, Serialize)]
pub enum Operator {
    In,
    NotIn,
    Exists,
    DoesNotExist,
}

#[derive(Debug, Clone, Serialize)]
#[serde(rename_all = "camelCase")]
pub struct PrometheusConfigValues {
    pub prometheus: PrometheusConfig,
}

#[derive(Debug, Clone, Serialize)]
#[serde(rename_all = "camelCase")]
pub struct PrometheusConfig {
    pub prometheus: bool,
    pub additional_service_monitors: Vec<ServiceMonitor>,
}

#[derive(Debug, Clone, Serialize)]
#[serde(rename_all = "camelCase")]
pub struct ServiceMonitorTLSConfig {
    // ## Path to the CA file
    pub ca_file: Option<String>,

    // ## Path to client certificate file
    pub cert_file: Option<String>,

    // ## Skip certificate verification
    pub insecure_skip_verify: Option<bool>,

    // ## Path to client key file
    pub key_file: Option<String>,

    // ## Server name used to verify host name
    pub server_name: Option<String>,
}

#[derive(Debug, Clone, Serialize)]
#[serde(rename_all = "camelCase")]
pub struct ServiceMonitorEndpoint {
    // ## Name of the endpoint's service port
    // ## Mutually exclusive with targetPort
    pub port: Option<String>,

    // ## Name or number of the endpoint's target port
    // ## Mutually exclusive with port
    pub target_port: Option<String>,

    // ## File containing bearer token to be used when scraping targets
    pub bearer_token_file: Option<String>,

    // ## Interval at which metrics should be scraped
    pub interval: Option<String>,

    // ## HTTP path to scrape for metrics
    pub path: String,

    // ## HTTP scheme to use for scraping
    pub scheme: HTTPScheme,

    // ## TLS configuration to use when scraping the endpoint
    pub tls_config: Option<ServiceMonitorTLSConfig>,

    // ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion.
    // ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#relabelconfig
    // # - action: keep
    // #   regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
    // #   sourceLabels: [__name__]
    pub metric_relabelings: Vec<Mapping>,

    // ## RelabelConfigs to apply to samples before scraping
    // ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#relabelconfig
    // # - sourceLabels: [__meta_kubernetes_pod_node_name]
    // #   separator: ;
    // #   regex: ^(.*)$
    // #   targetLabel: nodename
    // #   replacement: $1
    // #   action: replace
    pub relabelings: Vec<Mapping>,
}

#[derive(Debug, Clone, Serialize)]
#[serde(rename_all = "camelCase")]
pub struct MatchExpression {
    pub key: String,
    pub operator: Operator,
    pub values: Vec<String>,
}

#[derive(Debug, Clone, Serialize)]
#[serde(rename_all = "camelCase")]
pub struct Selector {
    // # label selector for services
    pub match_labels: HashMap<String, String>,
    pub match_expressions: Vec<MatchExpression>,
}

#[derive(Debug, Clone, Serialize)]
#[serde(rename_all = "camelCase")]
pub struct ServiceMonitor {
    pub name: String,

    // # Additional labels to set used for the ServiceMonitorSelector. Together with standard labels from the chart
    pub additional_labels: Option<HashMap<String, String>>,

    // # Service label for use in assembling a job name of the form <label value>-<port>
    // # If no label is specified, the service name is used.
    pub job_label: Option<String>,

    // # labels to transfer from the kubernetes service to the target
    pub target_labels: Vec<String>,

    // # labels to transfer from the kubernetes pods to the target
    pub pod_target_labels: Vec<String>,

    // # Label selector for services to which this ServiceMonitor applies
    // # Example which selects all services to be monitored
    // # with label "monitoredby" with values any of "example-service-1" or "example-service-2"
    //   matchExpressions:
    //     - key: "monitoredby"
    //       operator: In
    //       values:
    //         - example-service-1
    //         - example-service-2
    pub selector: Selector,

    // # Namespaces from which services are selected
    // # Match any namespace
    //   any: bool,
    // # Explicit list of namespace names to select
    //   matchNames: Vec,
    pub namespace_selector: Option<NamespaceSelector>,

    // # Endpoints of the selected service to be monitored
    pub endpoints: Vec<ServiceMonitorEndpoint>,

    // # Fallback scrape protocol used by Prometheus for scraping metrics
    // # ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#monitoring.coreos.com/v1.ScrapeProtocol
    pub fallback_scrape_protocol: Option<String>,
}

#[derive(Debug, Serialize, Clone)]
#[serde(rename_all = "camelCase")]
pub struct NamespaceSelector {
    pub any: bool,
    pub match_names: Vec<String>,
}

impl Default for ServiceMonitor {
    fn default() -> Self {
        Self {
            name: Default::default(),
            additional_labels: Default::default(),
            job_label: Default::default(),
            target_labels: Default::default(),
            pod_target_labels: Default::default(),
            selector: Selector {
                match_labels: HashMap::new(),
                match_expressions: vec![],
            },
            namespace_selector: Default::default(),
            endpoints: Default::default(),
            fallback_scrape_protocol: Default::default(),
        }
    }
}

impl Default for ServiceMonitorEndpoint {
    fn default() -> Self {
        Self {
            port: Some("80".to_string()),
            target_port: Default::default(),
            bearer_token_file: Default::default(),
            interval: Default::default(),
            path: "/metrics".to_string(),
            scheme: HTTPScheme::HTTP,
            tls_config: Default::default(),
            metric_relabelings: Default::default(),
            relabelings: Default::default(),
        }
    }
}
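Given the `Default` impls above, a caller only has to fill in the name and selector. A hypothetical monitor for a service labeled `app: my-api` might look like this (sketch, assuming the types above are in scope):

```rust
let monitor = ServiceMonitor {
    name: "my-api".to_string(),
    selector: Selector {
        match_labels: HashMap::from([("app".to_string(), "my-api".to_string())]),
        match_expressions: vec![],
    },
    // Default endpoint: port "80", path "/metrics", plain HTTP.
    endpoints: vec![ServiceMonitorEndpoint::default()],
    ..Default::default()
};
```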
@@ -1,3 +1,7 @@
pub mod alert_channel;
pub mod alert_rule;
pub mod application_monitoring;
pub mod grafana;
pub mod kube_prometheus;
pub mod ntfy;
pub mod prometheus;

harmony/src/modules/monitoring/ntfy/helm/mod.rs (new file, 1 line)
@@ -0,0 +1 @@
pub mod ntfy_helm_chart;

harmony/src/modules/monitoring/ntfy/helm/ntfy_helm_chart.rs (new file, 83 lines)
@@ -0,0 +1,83 @@
use non_blank_string_rs::NonBlankString;
use std::str::FromStr;

use crate::modules::helm::chart::{HelmChartScore, HelmRepository};

pub fn ntfy_helm_chart_score(namespace: String, host: String) -> HelmChartScore {
    let values = format!(
        r#"
replicaCount: 1

image:
  repository: binwiederhier/ntfy
  pullPolicy: IfNotPresent
  # Overrides the image tag whose default is the chart appVersion.
  tag: "v2.12.0"

serviceAccount:
  # Specifies whether a service account should be created
  create: true
  # Annotations to add to the service account
  # annotations:
  # The name of the service account to use.
  # If not set and create is true, a name is generated using the fullname template
  # name: ""

service:
  type: ClusterIP
  port: 80

ingress:
  enabled: true
  # annotations:
  #   kubernetes.io/ingress.class: nginx
  #   kubernetes.io/tls-acme: "true"
  hosts:
    - host: {host}
      paths:
        - path: /
          pathType: ImplementationSpecific
  tls: []
  #  - secretName: chart-example-tls
  #    hosts:
  #      - chart-example.local

autoscaling:
  enabled: false

config:
  enabled: true
  data:
    # base-url: "https://ntfy.something.com"
    auth-file: "/var/cache/ntfy/user.db"
    auth-default-access: "deny-all"
    cache-file: "/var/cache/ntfy/cache.db"
    attachment-cache-dir: "/var/cache/ntfy/attachments"
    behind-proxy: true
    # web-root: "disable"
    enable-signup: false
    enable-login: "true"

persistence:
  enabled: true
  size: 200Mi
"#,
    );

    HelmChartScore {
        namespace: Some(NonBlankString::from_str(&namespace).unwrap()),
        release_name: NonBlankString::from_str("ntfy").unwrap(),
        chart_name: NonBlankString::from_str("sarab97/ntfy").unwrap(),
        chart_version: Some(NonBlankString::from_str("0.1.7").unwrap()),
        values_overrides: None,
        values_yaml: Some(values.to_string()),
        create_namespace: true,
        install_only: false,
        repository: Some(HelmRepository::new(
            "sarab97".to_string(),
            url::Url::parse("https://charts.sarabsingh.com").unwrap(),
            true,
        )),
    }
}
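A hypothetical call site, just to show the shape of the API: the function only interpolates the ingress host into the values template and pins the chart and repository.

```rust
let score = ntfy_helm_chart_score(
    "monitoring".to_string(),       // namespace the release is installed into
    "ntfy.example.com".to_string(), // ingress host interpolated into the values
);
assert!(score.create_namespace);
```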

harmony/src/modules/monitoring/ntfy/mod.rs (new file, 2 lines)
@@ -0,0 +1,2 @@
pub mod helm;
pub mod ntfy;

harmony/src/modules/monitoring/ntfy/ntfy.rs (new file, 170 lines)
@@ -0,0 +1,170 @@
use std::sync::Arc;

use async_trait::async_trait;
use log::debug;
use serde::Serialize;
use strum::{Display, EnumString};

use crate::{
    data::{Id, Version},
    interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
    inventory::Inventory,
    modules::monitoring::ntfy::helm::ntfy_helm_chart::ntfy_helm_chart_score,
    score::Score,
    topology::{HelmCommand, K8sclient, Topology, k8s::K8sClient},
};

#[derive(Debug, Clone, Serialize)]
pub struct NtfyScore {
    pub namespace: String,
    pub host: String,
}

impl<T: Topology + HelmCommand + K8sclient> Score<T> for NtfyScore {
    fn create_interpret(&self) -> Box<dyn Interpret<T>> {
        Box::new(NtfyInterpret {
            score: self.clone(),
        })
    }

    fn name(&self) -> String {
        format!("Ntfy")
    }
}

#[derive(Debug, Serialize)]
pub struct NtfyInterpret {
    pub score: NtfyScore,
}

#[derive(Debug, EnumString, Display)]
enum NtfyAccessMode {
    #[strum(serialize = "read-write", serialize = "rw", to_string = "read-write")]
    ReadWrite,
    #[strum(
        serialize = "read-only",
        serialize = "ro",
        serialize = "read",
        to_string = "read-only"
    )]
    ReadOnly,
    #[strum(
        serialize = "write-only",
        serialize = "wo",
        serialize = "write",
        to_string = "write-only"
    )]
    WriteOnly,
    #[strum(serialize = "none", to_string = "deny")]
    Deny,
}

#[derive(Debug, EnumString, Display)]
enum NtfyRole {
    #[strum(serialize = "user", to_string = "user")]
    User,
    #[strum(serialize = "admin", to_string = "admin")]
    Admin,
}
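The strum attributes give each variant several accepted spellings on parse and a single canonical rendering. A self-contained round-trip check (standalone enum, same attribute shape as above):

```rust
use std::str::FromStr;
use strum::{Display, EnumString};

#[derive(Debug, PartialEq, EnumString, Display)]
enum AccessMode {
    #[strum(serialize = "read-write", serialize = "rw", to_string = "read-write")]
    ReadWrite,
}

fn main() {
    // Every `serialize` spelling parses to the same variant...
    assert_eq!(AccessMode::from_str("rw").unwrap(), AccessMode::ReadWrite);
    // ...while `to_string` fixes the canonical form passed to the ntfy CLI.
    assert_eq!(AccessMode::ReadWrite.to_string(), "read-write");
}
```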

impl NtfyInterpret {
    async fn add_user(
        &self,
        k8s_client: Arc<K8sClient>,
        username: &str,
        password: &str,
        role: Option<NtfyRole>,
    ) -> Result<(), String> {
        let role = match role {
            Some(r) => r,
            None => NtfyRole::User,
        };

        k8s_client
            .exec_app(
                "ntfy".to_string(),
                Some(&self.score.namespace),
                vec![
                    "sh",
                    "-c",
                    format!("NTFY_PASSWORD={password} ntfy user add --role={role} {username}")
                        .as_str(),
                ],
            )
            .await?;

        Ok(())
    }

    async fn set_access(
        &self,
        k8s_client: Arc<K8sClient>,
        username: &str,
        topic: &str,
        mode: NtfyAccessMode,
    ) -> Result<(), String> {
        k8s_client
            .exec_app(
                "ntfy".to_string(),
                Some(&self.score.namespace),
                vec![
                    "sh",
                    "-c",
                    format!("ntfy access {username} {topic} {mode}").as_str(),
                ],
            )
            .await?;

        Ok(())
    }
}
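Both helpers just run a shell one-liner inside the ntfy pod, and the strum `Display` impls above supply the CLI-facing spellings. For instance, the string `set_access` would build:

```rust
let (username, topic, mode) = ("harmony", "alerts", NtfyAccessMode::ReadWrite);
// `{mode}` goes through Display, i.e. the `to_string` attribute: "read-write".
assert_eq!(
    format!("ntfy access {username} {topic} {mode}"),
    "ntfy access harmony alerts read-write"
);
```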

/// We need a ntfy interpret to wrap the HelmChartScore in order to run the score, and then bootstrap the config inside ntfy
#[async_trait]
impl<T: Topology + HelmCommand + K8sclient> Interpret<T> for NtfyInterpret {
    async fn execute(
        &self,
        inventory: &Inventory,
        topology: &T,
    ) -> Result<Outcome, InterpretError> {
        ntfy_helm_chart_score(self.score.namespace.clone(), self.score.host.clone())
            .create_interpret()
            .execute(inventory, topology)
            .await?;

        debug!("installed ntfy helm chart");
        let client = topology
            .k8s_client()
            .await
            .expect("couldn't get k8s client");

        client
            .wait_until_deployment_ready(
                "ntfy".to_string(),
                Some(&self.score.namespace.as_str()),
                None,
            )
            .await?;
        debug!("created k8s client");

        self.add_user(client, "harmony", "harmony", Some(NtfyRole::Admin))
            .await?;

        debug!("exec into pod done");

        Ok(Outcome::success("installed ntfy".to_string()))
    }

    fn get_name(&self) -> InterpretName {
        todo!()
    }
    fn get_version(&self) -> Version {
        todo!()
    }
    fn get_status(&self) -> InterpretStatus {
        todo!()
    }
    fn get_children(&self) -> Vec<Id> {
        todo!()
    }
}
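Hypothetical wiring, mirroring the Maestro usage shown in the doc-comment change further down this diff: the score carries only a namespace and ingress host, and the interpret does the rest.

```rust
let ntfy = NtfyScore {
    namespace: "monitoring".to_string(),
    host: "ntfy.example.com".to_string(),
};
// Registered like any other score; executing it installs the chart, waits
// for the deployment, then bootstraps the admin user via exec.
maestro.register_all(vec![Box::new(ntfy)]);
```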

harmony/src/modules/monitoring/prometheus/helm/mod.rs (new file, 1 line)
@@ -0,0 +1 @@
pub mod prometheus_helm;

harmony/src/modules/monitoring/prometheus/helm/prometheus_helm.rs (new file, 47 lines)
@@ -0,0 +1,47 @@
use std::str::FromStr;
use std::sync::{Arc, Mutex};

use non_blank_string_rs::NonBlankString;

use crate::modules::{
    helm::chart::HelmChartScore, monitoring::prometheus::prometheus_config::PrometheusConfig,
};

pub fn prometheus_helm_chart_score(config: Arc<Mutex<PrometheusConfig>>) -> HelmChartScore {
    let config = config.lock().unwrap();
    let ns = config.namespace.clone().unwrap();
    let values = format!(
        r#"
rbac:
  create: true
kube-state-metrics:
  enabled: false
nodeExporter:
  enabled: false
alertmanager:
  enabled: false
pushgateway:
  enabled: false
server:
  serviceAccount:
    create: false
  rbac:
    create: true
fullnameOverride: prometheus-{ns}
"#
    );
    HelmChartScore {
        namespace: Some(NonBlankString::from_str(&config.namespace.clone().unwrap()).unwrap()),
        release_name: NonBlankString::from_str("prometheus").unwrap(),
        chart_name: NonBlankString::from_str(
            "oci://ghcr.io/prometheus-community/charts/prometheus",
        )
        .unwrap(),
        chart_version: None,
        values_overrides: None,
        values_yaml: Some(values.to_string()),
        create_namespace: true,
        install_only: true,
        repository: None,
    }
}
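Note the unwraps on `config.namespace`: the namespace must be set (for instance by `configure_with_topology`, defined in prometheus.rs below) before this score is rendered. A sketch of a valid call:

```rust
use std::sync::{Arc, Mutex};

let config = Arc::new(Mutex::new(PrometheusConfig::new()));
// Without this line, prometheus_helm_chart_score would panic on unwrap().
config.lock().unwrap().namespace = Some("tenant-a".to_string());
let score = prometheus_helm_chart_score(config.clone());
```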

harmony/src/modules/monitoring/prometheus/mod.rs (new file, 3 lines)
@@ -0,0 +1,3 @@
pub mod helm;
pub mod prometheus;
pub mod prometheus_config;

harmony/src/modules/monitoring/prometheus/prometheus.rs (new file, 190 lines)
@@ -0,0 +1,190 @@
use std::sync::{Arc, Mutex};

use async_trait::async_trait;
use log::{debug, error};
use serde::Serialize;

use crate::{
    interpret::{InterpretError, Outcome},
    inventory::Inventory,
    modules::monitoring::{
        alert_rule::prometheus_alert_rule::AlertManagerRuleGroup,
        grafana::helm::helm_grafana::grafana_helm_chart_score,
        kube_prometheus::types::{AlertManagerAdditionalPromRules, AlertManagerChannelConfig},
    },
    score::Score,
    topology::{
        HelmCommand, Topology,
        installable::Installable,
        oberservability::monitoring::{AlertReceiver, AlertRule, AlertSender},
        tenant::TenantManager,
    },
};

use super::{
    helm::prometheus_helm::prometheus_helm_chart_score, prometheus_config::PrometheusConfig,
};

#[derive(Debug)]
pub struct Prometheus {
    pub config: Arc<Mutex<PrometheusConfig>>,
}

#[async_trait]
impl AlertSender for Prometheus {
    fn name(&self) -> String {
        "Prometheus".to_string()
    }
}

impl Prometheus {
    pub fn new() -> Self {
        Self {
            config: Arc::new(Mutex::new(PrometheusConfig::new())),
        }
    }

    pub async fn configure_with_topology<T: TenantManager>(&self, topology: &T) {
        let ns = topology
            .get_tenant_config()
            .await
            .map(|cfg| cfg.name.clone())
            .unwrap_or_else(|| "monitoring".to_string());
        error!("This must be refactored, see comments in pr #74");
        debug!("NS: {}", ns);
        self.config.lock().unwrap().namespace = Some(ns);
    }

    pub async fn install_receiver(
        &self,
        prometheus_receiver: &dyn PrometheusReceiver,
    ) -> Result<Outcome, InterpretError> {
        let prom_receiver = prometheus_receiver.configure_receiver().await;
        debug!(
            "adding alert receiver to prometheus config: {:#?}",
            &prom_receiver
        );
        let mut config = self.config.lock().unwrap();

        config.alert_receiver_configs.push(prom_receiver);
        let prom_receiver_name = prometheus_receiver.name();
        debug!("installed alert receiver {}", &prom_receiver_name);
        Ok(Outcome::success(format!(
            "Sucessfully installed receiver {}",
            prom_receiver_name
        )))
    }

    pub async fn install_rule(
        &self,
        prometheus_rule: &AlertManagerRuleGroup,
    ) -> Result<Outcome, InterpretError> {
        let prometheus_rule = prometheus_rule.configure_rule().await;
        let mut config = self.config.lock().unwrap();

        config.alert_rules.push(prometheus_rule.clone());
        Ok(Outcome::success(format!(
            "Successfully installed alert rule: {:#?},",
            prometheus_rule
        )))
    }

    pub async fn install_prometheus<T: Topology + HelmCommand + Send + Sync>(
        &self,
        inventory: &Inventory,
        topology: &T,
    ) -> Result<Outcome, InterpretError> {
        prometheus_helm_chart_score(self.config.clone())
            .create_interpret()
            .execute(inventory, topology)
            .await
    }
    pub async fn install_grafana<T: Topology + HelmCommand + Send + Sync>(
        &self,
        inventory: &Inventory,
        topology: &T,
    ) -> Result<Outcome, InterpretError> {
        let namespace = {
            let config = self.config.lock().unwrap();
            config.namespace.clone()
        };

        if let Some(ns) = namespace.as_deref() {
            grafana_helm_chart_score(ns)
                .create_interpret()
                .execute(inventory, topology)
                .await
        } else {
            Err(InterpretError::new(format!(
                "could not install grafana, missing namespace",
            )))
        }
    }
}

#[async_trait]
impl<T: Topology + HelmCommand + TenantManager> Installable<T> for Prometheus {
    async fn configure(&self, _inventory: &Inventory, topology: &T) -> Result<(), InterpretError> {
        self.configure_with_topology(topology).await;
        Ok(())
    }

    async fn ensure_installed(
        &self,
        inventory: &Inventory,
        topology: &T,
    ) -> Result<(), InterpretError> {
        self.install_prometheus(inventory, topology).await?;

        let install_grafana = {
            let config = self.config.lock().unwrap();
            config.grafana
        };

        if install_grafana {
            self.install_grafana(inventory, topology).await?;
        }

        Ok(())
    }
}

#[async_trait]
pub trait PrometheusReceiver: Send + Sync + std::fmt::Debug {
    fn name(&self) -> String;
    async fn configure_receiver(&self) -> AlertManagerChannelConfig;
}

impl Serialize for Box<dyn AlertReceiver<Prometheus>> {
    fn serialize<S>(&self, _serializer: S) -> Result<S::Ok, S::Error>
    where
        S: serde::Serializer,
    {
        todo!()
    }
}

impl Clone for Box<dyn AlertReceiver<Prometheus>> {
    fn clone(&self) -> Self {
        self.clone_box()
    }
}

#[async_trait]
pub trait PrometheusRule: Send + Sync + std::fmt::Debug {
    fn name(&self) -> String;
    async fn configure_rule(&self) -> AlertManagerAdditionalPromRules;
}

impl Serialize for Box<dyn AlertRule<Prometheus>> {
    fn serialize<S>(&self, _serializer: S) -> Result<S::Ok, S::Error>
    where
        S: serde::Serializer,
    {
        todo!()
    }
}

impl Clone for Box<dyn AlertRule<Prometheus>> {
    fn clone(&self) -> Self {
        self.clone_box()
    }
}

harmony/src/modules/monitoring/prometheus/prometheus_config.rs (new file, 34 lines)
@@ -0,0 +1,34 @@
use crate::modules::monitoring::kube_prometheus::types::{
    AlertManagerAdditionalPromRules, AlertManagerChannelConfig, ServiceMonitor,
};

#[derive(Debug)]
pub struct PrometheusConfig {
    pub namespace: Option<String>,
    pub default_rules: bool,
    pub alert_manager: bool,
    pub node_exporter: bool,
    pub kube_state_metrics: bool,
    pub grafana: bool,
    pub prometheus_pushgateway: bool,
    pub alert_receiver_configs: Vec<AlertManagerChannelConfig>,
    pub alert_rules: Vec<AlertManagerAdditionalPromRules>,
    pub additional_service_monitors: Vec<ServiceMonitor>,
}

impl PrometheusConfig {
    pub fn new() -> Self {
        Self {
            namespace: None,
            default_rules: true,
            alert_manager: true,
            node_exporter: false,
            kube_state_metrics: false,
            grafana: true,
            prometheus_pushgateway: false,
            alert_receiver_configs: vec![],
            alert_rules: vec![],
            additional_service_monitors: vec![],
        }
    }
}
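Putting the pieces together, the intended flow through the `Installable` impl above would be roughly this (sketch; `inventory` and `topology` assumed in scope):

```rust
let prometheus = Prometheus::new();
// Resolves the namespace from the tenant config (falls back to "monitoring").
prometheus.configure(&inventory, &topology).await?;
// Installs the Prometheus chart, then Grafana, since PrometheusConfig::new()
// defaults `grafana` to true.
prometheus.ensure_installed(&inventory, &topology).await?;
```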

@@ -12,15 +12,15 @@ use harmony_tui;
pub struct Args {
    /// Run score(s) without prompt
    #[arg(short, long, default_value_t = false, conflicts_with = "interactive")]
-    yes: bool,
+    pub yes: bool,

    /// Filter query
    #[arg(short, long, conflicts_with = "interactive")]
-    filter: Option<String>,
+    pub filter: Option<String>,

    /// Run interactive TUI or not
    #[arg(short, long, default_value_t = false)]
-    interactive: bool,
+    pub interactive: bool,

    /// Run all or nth, defaults to all
    #[arg(
@@ -31,15 +31,15 @@ pub struct Args {
        conflicts_with = "number",
        conflicts_with = "interactive"
    )]
-    all: bool,
+    pub all: bool,

    /// Run nth matching, zero indexed
    #[arg(short, long, default_value_t = 0, conflicts_with = "interactive")]
-    number: usize,
+    pub number: usize,

    /// list scores, will also be affected by run filter
    #[arg(short, long, default_value_t = false, conflicts_with = "interactive")]
-    list: bool,
+    pub list: bool,
}
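Making the fields `pub` lets a wrapper binary (such as harmony_composer below) construct `Args` directly instead of going through clap's parser, e.g.:

```rust
// Hypothetical call site; equivalent to `-y -a --filter ntfy` on the CLI.
let args = Args {
    yes: true,
    filter: Some("ntfy".to_string()),
    interactive: false,
    all: true,
    number: 0,
    list: false,
};
```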

fn maestro_scores_filter<T: Topology>(

@@ -153,7 +153,7 @@ mod test {
    fn init_test_maestro() -> Maestro<HAClusterTopology> {
        let inventory = Inventory::autoload();
        let topology = HAClusterTopology::autoload();
-        let mut maestro = Maestro::new(inventory, topology);
+        let mut maestro = Maestro::new_without_initialization(inventory, topology);

        maestro.register_all(vec![
            Box::new(SuccessScore {}),
@@ -9,12 +9,20 @@ It's designed to simplify the build process by either compiling a Harmony project
You can download and run the latest snapshot build with a single command. This will place the binary in ~/.local/bin, which should be in your PATH on most modern Linux distributions.

```bash
-curl -Ls https://git.nationtech.io/NationTech/harmony/releases/download/snapshot-latest/harmony_composer \
-  -o ~/.local/bin/harmony_composer && \
-  chmod +x ~/.local/bin/harmony_composer
+mkdir -p ~/.local/bin && \
+curl -L https://git.nationtech.io/NationTech/harmony/releases/download/snapshot-latest/harmony_composer \
+  -o ~/.local/bin/harmony_composer && \
+chmod +x ~/.local/bin/harmony_composer && \
+alias hc=~/.local/bin/harmony_composer && \
+echo "\n\nharmony_composer installed successfully\!\n\nUse \`hc\` to run it.\n\nNote: this hc alias only works for the current shell session. Add 'alias hc=~/.local/bin/harmony_composer' to your '~/.bashrc' or '~/.zshrc' file to make it permanently available to your user."
```

Then you can start using it with either:

- `harmony_composer`, if `~/.local/bin` is in your `$PATH`
- the `hc` alias set up in your current shell session.
- If you want to make the `hc` command always available, add `alias hc=~/.local/bin/harmony_composer` to your shell profile: usually `~/.bashrc` for bash, `~/.zshrc` for zsh.

> ⚠️ Warning: Unstable Builds
> The snapshot-latest tag points to the latest build from the master branch. It is unstable, unsupported, and intended only for early testing of new features. Please do not use it in production environments.
@@ -73,10 +73,9 @@ async fn main() {
        .try_exists()
        .expect("couldn't check if path exists");

-    let harmony_bin_path: PathBuf;
-    match harmony_path {
+    let harmony_bin_path: PathBuf = match harmony_path {
        true => {
-            harmony_bin_path = compile_harmony(
+            compile_harmony(
                cli_args.compile_method,
                cli_args.compile_platform,
                cli_args.harmony_path.clone(),
@@ -84,7 +83,7 @@ async fn main() {
            .await
        }
        false => todo!("implement autodetect code"),
-    }
+    };

    match cli_args.command {
        Some(command) => match command {
@@ -103,8 +102,10 @@ async fn main() {
        };

        let check_output = Command::new(check_script)
-            .output()
-            .expect("failed to run check script");
+            .spawn()
+            .expect("failed to run check script")
+            .wait_with_output()
+            .unwrap();
        info!(
            "check stdout: {}, check stderr: {}",
            String::from_utf8(check_output.stdout).expect("couldn't parse from utf8"),
@@ -112,18 +113,16 @@ async fn main() {
        );
    }
    Commands::Deploy(args) => {
-        if args.staging {
-            todo!("implement staging deployment");
-        }
-
-        if args.prod {
-            todo!("implement prod deployment");
-        }
-        let deploy_output = Command::new(harmony_bin_path)
-            .arg("-y")
-            .arg("-a")
-            .output()
-            .expect("failed to run harmony deploy");
+        let deploy = if args.staging {
+            todo!("implement staging deployment")
+        } else if args.prod {
+            todo!("implement prod deployment")
+        } else {
+            Command::new(harmony_bin_path).arg("-y").arg("-a").spawn()
+        }
+        .expect("failed to run harmony deploy");
+
+        let deploy_output = deploy.wait_with_output().unwrap();
        println!(
            "deploy output: {}",
            String::from_utf8(deploy_output.stdout).expect("couldn't parse from utf8")
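The switch from `.output()` to `.spawn()` + `wait_with_output()` in both hunks changes where the child's output goes: `.output()` pipes and captures everything silently, while `.spawn()` inherits the parent's stdio by default, so the subprocess output streams live to the terminal (and the captured buffers stay empty unless a stream is explicitly piped). A standalone illustration of that std behavior:

```rust
use std::process::Command;

fn main() {
    // .output() pipes stdout/stderr and captures them; nothing is printed
    // while the child runs.
    let captured = Command::new("echo").arg("captured").output().unwrap();
    assert_eq!(String::from_utf8_lossy(&captured.stdout).trim(), "captured");

    // .spawn() inherits the parent's stdio, so "streamed" appears on the
    // terminal immediately; wait_with_output() then sees an empty buffer
    // because nothing was piped.
    let streamed = Command::new("echo")
        .arg("streamed")
        .spawn()
        .unwrap()
        .wait_with_output()
        .unwrap();
    assert!(streamed.stdout.is_empty());
}
```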

@@ -41,7 +41,7 @@ pub mod tui {
    /// async fn main() {
    ///     let inventory = Inventory::autoload();
    ///     let topology = HAClusterTopology::autoload();
-   ///     let mut maestro = Maestro::new(inventory, topology);
+   ///     let mut maestro = Maestro::new_without_initialization(inventory, topology);
    ///
    ///     maestro.register_all(vec![
    ///         Box::new(SuccessScore {}),
@@ -83,6 +83,7 @@ pub struct Interface {
    pub adv_dhcp_config_advanced: Option<MaybeString>,
    pub adv_dhcp_config_file_override: Option<MaybeString>,
    pub adv_dhcp_config_file_override_path: Option<MaybeString>,
+    pub mtu: Option<u32>,
}

#[cfg(test)]
@@ -1,6 +1,6 @@
use crate::HAProxy;
use crate::{data::dhcpd::DhcpInterface, xml_utils::to_xml_str};
-use log::error;
+use log::{debug, error};
use uuid::Uuid;
use yaserde::{MaybeString, NamedList, RawXml};
use yaserde_derive::{YaDeserialize, YaSerialize};
@@ -17,12 +17,12 @@ pub struct OPNsense {
    pub dhcpd: NamedList<DhcpInterface>,
    pub snmpd: Snmpd,
    pub syslog: Syslog,
-    pub nat: Nat,
+    pub nat: Option<Nat>,
    pub filter: Filters,
    pub load_balancer: Option<LoadBalancer>,
    pub rrd: Option<RawXml>,
    pub ntpd: Ntpd,
-    pub widgets: Widgets,
+    pub widgets: Option<Widgets>,
    pub revision: Revision,
    #[yaserde(rename = "OPNsense")]
    pub opnsense: OPNsenseXmlSection,
@@ -46,10 +46,12 @@ pub struct OPNsense {
    pub pischem: Option<Pischem>,
    pub ifgroups: Ifgroups,
+    pub dnsmasq: Option<RawXml>,
+    pub wizardtemp: Option<RawXml>,
}

impl From<String> for OPNsense {
    fn from(content: String) -> Self {
+        debug!("XML content: {content}");
        yaserde::de::from_str(&content)
            .map_err(|e| println!("{}", e.to_string()))
            .expect("OPNSense received invalid string, should be full XML")
@@ -242,6 +244,7 @@ pub struct Ssh {
    pub passwordauth: u8,
    pub keysig: MaybeString,
    pub permitrootlogin: u8,
+    pub rekeylimit: MaybeString,
}

#[derive(Default, PartialEq, Debug, YaSerialize, YaDeserialize)]
@@ -271,6 +274,7 @@ pub struct Group {
    pub member: Vec<u32>,
    #[yaserde(rename = "priv")]
    pub priv_field: String,
+    pub source_networks: Vec<MaybeString>,
}

#[derive(Default, PartialEq, Debug, YaSerialize, YaDeserialize)]
@@ -1506,7 +1510,7 @@ pub struct Vlans {

#[derive(Default, PartialEq, Debug, YaSerialize, YaDeserialize)]
pub struct Bridges {
-    pub bridged: MaybeString,
+    pub bridged: Option<MaybeString>,
}

#[derive(Default, PartialEq, Debug, YaSerialize, YaDeserialize)]
@@ -22,4 +22,4 @@ tokio-util = { version = "0.7.13", features = [ "codec" ] }
tokio-stream = "0.1.17"

[dev-dependencies]
-pretty_assertions = "1.4.1"
+pretty_assertions.workspace = true