Compare commits
22 Commits
feat/cd/lo
...
opnsense_u
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
670b701f6a | ||
|
|
1eaae2016a | ||
|
|
c4f4a58dcf | ||
|
|
537da5800f | ||
| 3be2fa246c | |||
| 9452cf5616 | |||
| 9b7456e148 | |||
| 98f3f82ad5 | |||
| 3eca409f8d | |||
| c11a31c7a9 | |||
| 1a6d72dc17 | |||
| df9e21807e | |||
| b1bf4fd4d5 | |||
| f702ecd8c9 | |||
| a19b52e690 | |||
| b73f2e76d0 | |||
| b4534c6ee0 | |||
| 6149249a6c | |||
| d9935e20cb | |||
| 7b0f3b79b1 | |||
| e6612245a5 | |||
| b4f5b91a57 |
1
.gitignore
vendored
1
.gitignore
vendored
@@ -2,3 +2,4 @@ target
|
|||||||
private_repos
|
private_repos
|
||||||
log/
|
log/
|
||||||
*.tgz
|
*.tgz
|
||||||
|
.gitignore
|
||||||
|
|||||||
153
Cargo.lock
generated
153
Cargo.lock
generated
@@ -219,6 +219,15 @@ dependencies = [
|
|||||||
"syn",
|
"syn",
|
||||||
]
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "atomic"
|
||||||
|
version = "0.6.1"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "a89cbf775b137e9b968e67227ef7f775587cde3fd31b0d8599dbd0f598a48340"
|
||||||
|
dependencies = [
|
||||||
|
"bytemuck",
|
||||||
|
]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "atomic-waker"
|
name = "atomic-waker"
|
||||||
version = "1.1.2"
|
version = "1.1.2"
|
||||||
@@ -409,6 +418,12 @@ version = "3.19.0"
|
|||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "46c5e41b57b8bba42a04676d81cb89e9ee8e859a1a66f80a5a72e1cb76b34d43"
|
checksum = "46c5e41b57b8bba42a04676d81cb89e9ee8e859a1a66f80a5a72e1cb76b34d43"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "bytemuck"
|
||||||
|
version = "1.23.1"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "5c76a5792e44e4abe34d3abf15636779261d45a7450612059293d1d2cfc63422"
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "byteorder"
|
name = "byteorder"
|
||||||
version = "1.5.0"
|
version = "1.5.0"
|
||||||
@@ -1343,6 +1358,7 @@ dependencies = [
|
|||||||
"cidr",
|
"cidr",
|
||||||
"env_logger",
|
"env_logger",
|
||||||
"harmony",
|
"harmony",
|
||||||
|
"harmony_cli",
|
||||||
"harmony_macros",
|
"harmony_macros",
|
||||||
"harmony_tui",
|
"harmony_tui",
|
||||||
"harmony_types",
|
"harmony_types",
|
||||||
@@ -1355,6 +1371,7 @@ dependencies = [
|
|||||||
name = "example-rust"
|
name = "example-rust"
|
||||||
version = "0.1.0"
|
version = "0.1.0"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
|
"base64 0.22.1",
|
||||||
"env_logger",
|
"env_logger",
|
||||||
"harmony",
|
"harmony",
|
||||||
"harmony_cli",
|
"harmony_cli",
|
||||||
@@ -1427,6 +1444,31 @@ version = "0.2.9"
|
|||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "28dea519a9695b9977216879a3ebfddf92f1c08c05d984f8996aecd6ecdc811d"
|
checksum = "28dea519a9695b9977216879a3ebfddf92f1c08c05d984f8996aecd6ecdc811d"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "figment"
|
||||||
|
version = "0.10.19"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "8cb01cd46b0cf372153850f4c6c272d9cbea2da513e07538405148f95bd789f3"
|
||||||
|
dependencies = [
|
||||||
|
"atomic",
|
||||||
|
"pear",
|
||||||
|
"serde",
|
||||||
|
"uncased",
|
||||||
|
"version_check",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "filetime"
|
||||||
|
version = "0.2.25"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "35c0522e981e68cbfa8c3f978441a5f34b30b96e146b33cd3359176b50fe8586"
|
||||||
|
dependencies = [
|
||||||
|
"cfg-if",
|
||||||
|
"libc",
|
||||||
|
"libredox",
|
||||||
|
"windows-sys 0.59.0",
|
||||||
|
]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "flate2"
|
name = "flate2"
|
||||||
version = "1.1.2"
|
version = "1.1.2"
|
||||||
@@ -1726,6 +1768,8 @@ name = "harmony"
|
|||||||
version = "0.1.0"
|
version = "0.1.0"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"async-trait",
|
"async-trait",
|
||||||
|
"base64 0.22.1",
|
||||||
|
"bollard",
|
||||||
"chrono",
|
"chrono",
|
||||||
"cidr",
|
"cidr",
|
||||||
"convert_case",
|
"convert_case",
|
||||||
@@ -1735,6 +1779,7 @@ dependencies = [
|
|||||||
"dyn-clone",
|
"dyn-clone",
|
||||||
"email_address",
|
"email_address",
|
||||||
"env_logger",
|
"env_logger",
|
||||||
|
"figment",
|
||||||
"fqdn",
|
"fqdn",
|
||||||
"futures-util",
|
"futures-util",
|
||||||
"harmony_macros",
|
"harmony_macros",
|
||||||
@@ -1752,6 +1797,7 @@ dependencies = [
|
|||||||
"non-blank-string-rs",
|
"non-blank-string-rs",
|
||||||
"opnsense-config",
|
"opnsense-config",
|
||||||
"opnsense-config-xml",
|
"opnsense-config-xml",
|
||||||
|
"pretty_assertions",
|
||||||
"rand 0.9.1",
|
"rand 0.9.1",
|
||||||
"reqwest 0.11.27",
|
"reqwest 0.11.27",
|
||||||
"russh",
|
"russh",
|
||||||
@@ -1760,9 +1806,11 @@ dependencies = [
|
|||||||
"serde",
|
"serde",
|
||||||
"serde-value",
|
"serde-value",
|
||||||
"serde_json",
|
"serde_json",
|
||||||
|
"serde_with",
|
||||||
"serde_yaml",
|
"serde_yaml",
|
||||||
"similar",
|
"similar",
|
||||||
"strum 0.27.1",
|
"strum 0.27.1",
|
||||||
|
"tar",
|
||||||
"temp-dir",
|
"temp-dir",
|
||||||
"temp-file",
|
"temp-file",
|
||||||
"tempfile",
|
"tempfile",
|
||||||
@@ -2397,6 +2445,12 @@ version = "2.0.6"
|
|||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "f4c7245a08504955605670dbf141fceab975f15ca21570696aebe9d2e71576bd"
|
checksum = "f4c7245a08504955605670dbf141fceab975f15ca21570696aebe9d2e71576bd"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "inlinable_string"
|
||||||
|
version = "0.1.15"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "c8fae54786f62fb2918dcfae3d568594e50eb9b5c25bf04371af6fe7516452fb"
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "inout"
|
name = "inout"
|
||||||
version = "0.1.4"
|
version = "0.1.4"
|
||||||
@@ -2726,6 +2780,7 @@ checksum = "1580801010e535496706ba011c15f8532df6b42297d2e471fec38ceadd8c0638"
|
|||||||
dependencies = [
|
dependencies = [
|
||||||
"bitflags 2.9.1",
|
"bitflags 2.9.1",
|
||||||
"libc",
|
"libc",
|
||||||
|
"redox_syscall",
|
||||||
]
|
]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
@@ -3217,6 +3272,29 @@ dependencies = [
|
|||||||
"hmac",
|
"hmac",
|
||||||
]
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "pear"
|
||||||
|
version = "0.2.9"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "bdeeaa00ce488657faba8ebf44ab9361f9365a97bd39ffb8a60663f57ff4b467"
|
||||||
|
dependencies = [
|
||||||
|
"inlinable_string",
|
||||||
|
"pear_codegen",
|
||||||
|
"yansi",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "pear_codegen"
|
||||||
|
version = "0.2.9"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "4bab5b985dc082b345f812b7df84e1bef27e7207b39e448439ba8bd69c93f147"
|
||||||
|
dependencies = [
|
||||||
|
"proc-macro2",
|
||||||
|
"proc-macro2-diagnostics",
|
||||||
|
"quote",
|
||||||
|
"syn",
|
||||||
|
]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "pem"
|
name = "pem"
|
||||||
version = "3.0.5"
|
version = "3.0.5"
|
||||||
@@ -3479,6 +3557,19 @@ dependencies = [
|
|||||||
"unicode-ident",
|
"unicode-ident",
|
||||||
]
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "proc-macro2-diagnostics"
|
||||||
|
version = "0.10.1"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "af066a9c399a26e020ada66a034357a868728e72cd426f3adcd35f80d88d88c8"
|
||||||
|
dependencies = [
|
||||||
|
"proc-macro2",
|
||||||
|
"quote",
|
||||||
|
"syn",
|
||||||
|
"version_check",
|
||||||
|
"yansi",
|
||||||
|
]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "punycode"
|
name = "punycode"
|
||||||
version = "0.4.1"
|
version = "0.4.1"
|
||||||
@@ -4080,6 +4171,18 @@ dependencies = [
|
|||||||
"serde_json",
|
"serde_json",
|
||||||
]
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "schemars"
|
||||||
|
version = "1.0.3"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "1375ba8ef45a6f15d83fa8748f1079428295d403d6ea991d09ab100155fbc06d"
|
||||||
|
dependencies = [
|
||||||
|
"dyn-clone",
|
||||||
|
"ref-cast",
|
||||||
|
"serde",
|
||||||
|
"serde_json",
|
||||||
|
]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "scopeguard"
|
name = "scopeguard"
|
||||||
version = "1.2.0"
|
version = "1.2.0"
|
||||||
@@ -4280,22 +4383,36 @@ dependencies = [
|
|||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "serde_with"
|
name = "serde_with"
|
||||||
version = "3.13.0"
|
version = "3.14.0"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "bf65a400f8f66fb7b0552869ad70157166676db75ed8181f8104ea91cf9d0b42"
|
checksum = "f2c45cd61fefa9db6f254525d46e392b852e0e61d9a1fd36e5bd183450a556d5"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"base64 0.22.1",
|
"base64 0.22.1",
|
||||||
"chrono",
|
"chrono",
|
||||||
"hex",
|
"hex",
|
||||||
"indexmap 1.9.3",
|
"indexmap 1.9.3",
|
||||||
"indexmap 2.10.0",
|
"indexmap 2.10.0",
|
||||||
"schemars",
|
"schemars 0.9.0",
|
||||||
|
"schemars 1.0.3",
|
||||||
"serde",
|
"serde",
|
||||||
"serde_derive",
|
"serde_derive",
|
||||||
"serde_json",
|
"serde_json",
|
||||||
|
"serde_with_macros",
|
||||||
"time",
|
"time",
|
||||||
]
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "serde_with_macros"
|
||||||
|
version = "3.14.0"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "de90945e6565ce0d9a25098082ed4ee4002e047cb59892c318d66821e14bb30f"
|
||||||
|
dependencies = [
|
||||||
|
"darling",
|
||||||
|
"proc-macro2",
|
||||||
|
"quote",
|
||||||
|
"syn",
|
||||||
|
]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "serde_yaml"
|
name = "serde_yaml"
|
||||||
version = "0.9.34+deprecated"
|
version = "0.9.34+deprecated"
|
||||||
@@ -4668,6 +4785,17 @@ version = "1.0.1"
|
|||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369"
|
checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "tar"
|
||||||
|
version = "0.4.44"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "1d863878d212c87a19c1a610eb53bb01fe12951c0501cf5a0d65f724914a667a"
|
||||||
|
dependencies = [
|
||||||
|
"filetime",
|
||||||
|
"libc",
|
||||||
|
"xattr",
|
||||||
|
]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "temp-dir"
|
name = "temp-dir"
|
||||||
version = "0.1.16"
|
version = "0.1.16"
|
||||||
@@ -5084,6 +5212,15 @@ version = "0.1.7"
|
|||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "2896d95c02a80c6d6a5d6e953d479f5ddf2dfdb6a244441010e373ac0fb88971"
|
checksum = "2896d95c02a80c6d6a5d6e953d479f5ddf2dfdb6a244441010e373ac0fb88971"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "uncased"
|
||||||
|
version = "0.9.10"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "e1b88fcfe09e89d3866a5c11019378088af2d24c3fbd4f0543f96b479ec90697"
|
||||||
|
dependencies = [
|
||||||
|
"version_check",
|
||||||
|
]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "unicode-ident"
|
name = "unicode-ident"
|
||||||
version = "1.0.18"
|
version = "1.0.18"
|
||||||
@@ -5713,6 +5850,16 @@ dependencies = [
|
|||||||
"tap",
|
"tap",
|
||||||
]
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "xattr"
|
||||||
|
version = "1.5.1"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "af3a19837351dc82ba89f8a125e22a3c475f05aba604acc023d62b2739ae2909"
|
||||||
|
dependencies = [
|
||||||
|
"libc",
|
||||||
|
"rustix 1.0.7",
|
||||||
|
]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "xml-rs"
|
name = "xml-rs"
|
||||||
version = "0.8.26"
|
version = "0.8.26"
|
||||||
|
|||||||
@@ -52,3 +52,8 @@ convert_case = "0.8"
|
|||||||
chrono = "0.4"
|
chrono = "0.4"
|
||||||
similar = "2"
|
similar = "2"
|
||||||
uuid = { version = "1.11", features = ["v4", "fast-rng", "macro-diagnostics"] }
|
uuid = { version = "1.11", features = ["v4", "fast-rng", "macro-diagnostics"] }
|
||||||
|
pretty_assertions = "1.4.1"
|
||||||
|
bollard = "0.19.1"
|
||||||
|
base64 = "0.22.1"
|
||||||
|
tar = "0.4.44"
|
||||||
|
figment = { version = "0.10.19", features = ["env"] }
|
||||||
|
|||||||
@@ -10,7 +10,7 @@ use harmony::{
|
|||||||
inventory::Inventory,
|
inventory::Inventory,
|
||||||
maestro::Maestro,
|
maestro::Maestro,
|
||||||
modules::{
|
modules::{
|
||||||
http::HttpScore,
|
http::StaticFilesHttpScore,
|
||||||
ipxe::IpxeScore,
|
ipxe::IpxeScore,
|
||||||
okd::{
|
okd::{
|
||||||
bootstrap_dhcp::OKDBootstrapDhcpScore,
|
bootstrap_dhcp::OKDBootstrapDhcpScore,
|
||||||
@@ -126,7 +126,7 @@ async fn main() {
|
|||||||
harmony::modules::okd::load_balancer::OKDLoadBalancerScore::new(&topology);
|
harmony::modules::okd::load_balancer::OKDLoadBalancerScore::new(&topology);
|
||||||
|
|
||||||
let tftp_score = TftpScore::new(Url::LocalFolder("./data/watchguard/tftpboot".to_string()));
|
let tftp_score = TftpScore::new(Url::LocalFolder("./data/watchguard/tftpboot".to_string()));
|
||||||
let http_score = HttpScore::new(Url::LocalFolder(
|
let http_score = StaticFilesHttpScore::new(Url::LocalFolder(
|
||||||
"./data/watchguard/pxe-http-files".to_string(),
|
"./data/watchguard/pxe-http-files".to_string(),
|
||||||
));
|
));
|
||||||
let ipxe_score = IpxeScore::new();
|
let ipxe_score = IpxeScore::new();
|
||||||
|
|||||||
@@ -14,6 +14,7 @@ async fn main() {
|
|||||||
|
|
||||||
maestro.register_all(vec![Box::new(NtfyScore {
|
maestro.register_all(vec![Box::new(NtfyScore {
|
||||||
namespace: "monitoring".to_string(),
|
namespace: "monitoring".to_string(),
|
||||||
|
host: "localhost".to_string(),
|
||||||
})]);
|
})]);
|
||||||
harmony_cli::init(maestro, None).await.unwrap();
|
harmony_cli::init(maestro, None).await.unwrap();
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -16,3 +16,4 @@ harmony_macros = { path = "../../harmony_macros" }
|
|||||||
log = { workspace = true }
|
log = { workspace = true }
|
||||||
env_logger = { workspace = true }
|
env_logger = { workspace = true }
|
||||||
url = { workspace = true }
|
url = { workspace = true }
|
||||||
|
harmony_cli = { version = "0.1.0", path = "../../harmony_cli" }
|
||||||
|
|||||||
@@ -11,9 +11,9 @@ use harmony::{
|
|||||||
maestro::Maestro,
|
maestro::Maestro,
|
||||||
modules::{
|
modules::{
|
||||||
dummy::{ErrorScore, PanicScore, SuccessScore},
|
dummy::{ErrorScore, PanicScore, SuccessScore},
|
||||||
http::HttpScore,
|
http::StaticFilesHttpScore,
|
||||||
okd::{dhcp::OKDDhcpScore, dns::OKDDnsScore, load_balancer::OKDLoadBalancerScore},
|
okd::{dhcp::OKDDhcpScore, dns::OKDDnsScore, load_balancer::OKDLoadBalancerScore},
|
||||||
opnsense::OPNsenseShellCommandScore,
|
opnsense::{OPNSenseLaunchUpgrade, OPNsenseShellCommandScore},
|
||||||
tftp::TftpScore,
|
tftp::TftpScore,
|
||||||
},
|
},
|
||||||
topology::{LogicalHost, UnmanagedRouter, Url},
|
topology::{LogicalHost, UnmanagedRouter, Url},
|
||||||
@@ -22,8 +22,10 @@ use harmony_macros::{ip, mac_address};
|
|||||||
|
|
||||||
#[tokio::main]
|
#[tokio::main]
|
||||||
async fn main() {
|
async fn main() {
|
||||||
|
env_logger::init();
|
||||||
|
|
||||||
let firewall = harmony::topology::LogicalHost {
|
let firewall = harmony::topology::LogicalHost {
|
||||||
ip: ip!("192.168.5.229"),
|
ip: ip!("192.168.122.106"),
|
||||||
name: String::from("opnsense-1"),
|
name: String::from("opnsense-1"),
|
||||||
};
|
};
|
||||||
|
|
||||||
@@ -81,7 +83,7 @@ async fn main() {
|
|||||||
let load_balancer_score = OKDLoadBalancerScore::new(&topology);
|
let load_balancer_score = OKDLoadBalancerScore::new(&topology);
|
||||||
|
|
||||||
let tftp_score = TftpScore::new(Url::LocalFolder("./data/watchguard/tftpboot".to_string()));
|
let tftp_score = TftpScore::new(Url::LocalFolder("./data/watchguard/tftpboot".to_string()));
|
||||||
let http_score = HttpScore::new(Url::LocalFolder(
|
let http_score = StaticFilesHttpScore::new(Url::LocalFolder(
|
||||||
"./data/watchguard/pxe-http-files".to_string(),
|
"./data/watchguard/pxe-http-files".to_string(),
|
||||||
));
|
));
|
||||||
let mut maestro = Maestro::initialize(inventory, topology).await.unwrap();
|
let mut maestro = Maestro::initialize(inventory, topology).await.unwrap();
|
||||||
@@ -95,9 +97,12 @@ async fn main() {
|
|||||||
opnsense: opnsense.get_opnsense_config(),
|
opnsense: opnsense.get_opnsense_config(),
|
||||||
command: "touch /tmp/helloharmonytouching".to_string(),
|
command: "touch /tmp/helloharmonytouching".to_string(),
|
||||||
}),
|
}),
|
||||||
|
// Box::new(OPNSenseLaunchUpgrade {
|
||||||
|
// opnsense: opnsense.get_opnsense_config(),
|
||||||
|
// }),
|
||||||
Box::new(SuccessScore {}),
|
Box::new(SuccessScore {}),
|
||||||
Box::new(ErrorScore {}),
|
Box::new(ErrorScore {}),
|
||||||
Box::new(PanicScore {}),
|
Box::new(PanicScore {}),
|
||||||
]);
|
]);
|
||||||
harmony_tui::init(maestro).await.unwrap();
|
harmony_cli::init(maestro, None).await.unwrap();
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -12,3 +12,4 @@ tokio = { workspace = true }
|
|||||||
log = { workspace = true }
|
log = { workspace = true }
|
||||||
env_logger = { workspace = true }
|
env_logger = { workspace = true }
|
||||||
url = { workspace = true }
|
url = { workspace = true }
|
||||||
|
base64.workspace = true
|
||||||
|
|||||||
@@ -4,7 +4,8 @@ use harmony::{
|
|||||||
inventory::Inventory,
|
inventory::Inventory,
|
||||||
maestro::Maestro,
|
maestro::Maestro,
|
||||||
modules::application::{
|
modules::application::{
|
||||||
RustWebFramework, RustWebapp, RustWebappScore, features::ContinuousDelivery,
|
ApplicationScore, RustWebFramework, RustWebapp,
|
||||||
|
features::{ContinuousDelivery, Monitoring},
|
||||||
},
|
},
|
||||||
topology::{K8sAnywhereTopology, Url},
|
topology::{K8sAnywhereTopology, Url},
|
||||||
};
|
};
|
||||||
@@ -12,25 +13,31 @@ use harmony::{
|
|||||||
#[tokio::main]
|
#[tokio::main]
|
||||||
async fn main() {
|
async fn main() {
|
||||||
env_logger::init();
|
env_logger::init();
|
||||||
let application = RustWebapp {
|
|
||||||
name: "harmony-example-rust-webapp".to_string(),
|
|
||||||
project_root: PathBuf::from("./examples/rust/webapp"),
|
|
||||||
framework: Some(RustWebFramework::Leptos),
|
|
||||||
};
|
|
||||||
// TODO RustWebappScore should simply take a RustWebApp as config
|
|
||||||
let app = RustWebappScore {
|
|
||||||
name: "Example Rust Webapp".to_string(),
|
|
||||||
domain: Url::Url(url::Url::parse("https://rustapp.harmony.example.com").unwrap()),
|
|
||||||
features: vec![Box::new(ContinuousDelivery {
|
|
||||||
application: Arc::new(application.clone()),
|
|
||||||
})],
|
|
||||||
application,
|
|
||||||
};
|
|
||||||
|
|
||||||
let topology = K8sAnywhereTopology::from_env();
|
let topology = K8sAnywhereTopology::from_env();
|
||||||
let mut maestro = Maestro::initialize(Inventory::autoload(), topology)
|
let mut maestro = Maestro::initialize(Inventory::autoload(), topology)
|
||||||
.await
|
.await
|
||||||
.unwrap();
|
.unwrap();
|
||||||
|
|
||||||
|
let application = Arc::new(RustWebapp {
|
||||||
|
name: "harmony-example-rust-webapp".to_string(),
|
||||||
|
domain: Url::Url(url::Url::parse("https://rustapp.harmony.example.com").unwrap()),
|
||||||
|
project_root: PathBuf::from("./examples/rust/webapp"),
|
||||||
|
framework: Some(RustWebFramework::Leptos),
|
||||||
|
});
|
||||||
|
|
||||||
|
let app = ApplicationScore {
|
||||||
|
features: vec![
|
||||||
|
Box::new(ContinuousDelivery {
|
||||||
|
application: application.clone(),
|
||||||
|
}), // TODO add monitoring, backups, multisite ha, etc
|
||||||
|
Box::new(Monitoring {
|
||||||
|
application: application.clone(),
|
||||||
|
}),
|
||||||
|
],
|
||||||
|
application,
|
||||||
|
};
|
||||||
|
|
||||||
maestro.register_all(vec![Box::new(app)]);
|
maestro.register_all(vec![Box::new(app)]);
|
||||||
harmony_cli::init(maestro, None).await.unwrap();
|
harmony_cli::init(maestro, None).await.unwrap();
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -13,7 +13,7 @@ reqwest = { version = "0.11", features = ["blocking", "json"] }
|
|||||||
russh = "0.45.0"
|
russh = "0.45.0"
|
||||||
rust-ipmi = "0.1.1"
|
rust-ipmi = "0.1.1"
|
||||||
semver = "1.0.23"
|
semver = "1.0.23"
|
||||||
serde = { version = "1.0.209", features = ["derive"] }
|
serde = { version = "1.0.209", features = ["derive", "rc"] }
|
||||||
serde_json = "1.0.127"
|
serde_json = "1.0.127"
|
||||||
tokio.workspace = true
|
tokio.workspace = true
|
||||||
derive-new.workspace = true
|
derive-new.workspace = true
|
||||||
@@ -58,3 +58,11 @@ futures-util = "0.3.31"
|
|||||||
tokio-util = "0.7.15"
|
tokio-util = "0.7.15"
|
||||||
strum = { version = "0.27.1", features = ["derive"] }
|
strum = { version = "0.27.1", features = ["derive"] }
|
||||||
tempfile = "3.20.0"
|
tempfile = "3.20.0"
|
||||||
|
serde_with = "3.14.0"
|
||||||
|
bollard.workspace = true
|
||||||
|
tar.workspace = true
|
||||||
|
base64.workspace = true
|
||||||
|
figment.workspace = true
|
||||||
|
|
||||||
|
[dev-dependencies]
|
||||||
|
pretty_assertions.workspace = true
|
||||||
|
|||||||
@@ -1,15 +1,66 @@
|
|||||||
|
use figment::{
|
||||||
|
Error, Figment, Metadata, Profile, Provider,
|
||||||
|
providers::{Env, Format},
|
||||||
|
value::{Dict, Map},
|
||||||
|
};
|
||||||
use lazy_static::lazy_static;
|
use lazy_static::lazy_static;
|
||||||
|
use serde::{Deserialize, Serialize};
|
||||||
use std::path::PathBuf;
|
use std::path::PathBuf;
|
||||||
|
|
||||||
lazy_static! {
|
#[derive(Debug, Deserialize, Serialize)]
|
||||||
pub static ref HARMONY_DATA_DIR: PathBuf = directories::BaseDirs::new()
|
pub struct Config {
|
||||||
.unwrap()
|
pub data_dir: PathBuf,
|
||||||
.data_dir()
|
pub registry_url: String,
|
||||||
.join("harmony");
|
pub registry_project: String,
|
||||||
pub static ref REGISTRY_URL: String =
|
pub dry_run: bool,
|
||||||
std::env::var("HARMONY_REGISTRY_URL").unwrap_or_else(|_| "hub.nationtech.io".to_string());
|
pub run_upgrades: bool,
|
||||||
pub static ref REGISTRY_PROJECT: String =
|
}
|
||||||
std::env::var("HARMONY_REGISTRY_PROJECT").unwrap_or_else(|_| "harmony".to_string());
|
|
||||||
pub static ref DRY_RUN: bool =
|
impl Default for Config {
|
||||||
std::env::var("HARMONY_DRY_RUN").map_or(true, |value| value.parse().unwrap_or(true));
|
fn default() -> Self {
|
||||||
|
Config {
|
||||||
|
data_dir: directories::BaseDirs::new()
|
||||||
|
.unwrap()
|
||||||
|
.data_dir()
|
||||||
|
.join("harmony"),
|
||||||
|
registry_url: "hub.nationtech.io".to_string(),
|
||||||
|
registry_project: "harmony".to_string(),
|
||||||
|
dry_run: true,
|
||||||
|
run_upgrades: false,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Config {
|
||||||
|
pub fn load() -> Result<Self, figment::Error> {
|
||||||
|
Figment::from(Config::default())
|
||||||
|
.merge(Env::prefixed("HARMONY_"))
|
||||||
|
.extract()
|
||||||
|
}
|
||||||
|
|
||||||
|
fn from<T: Provider>(provider: T) -> Result<Config, Error> {
|
||||||
|
Figment::from(provider).extract()
|
||||||
|
}
|
||||||
|
|
||||||
|
fn figment() -> Figment {
|
||||||
|
use figment::providers::Env;
|
||||||
|
|
||||||
|
// In reality, whatever the library desires.
|
||||||
|
Figment::from(Config::default()).merge(Env::prefixed("HARMONY_"))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Provider for Config {
|
||||||
|
fn metadata(&self) -> Metadata {
|
||||||
|
Metadata::named("Harmony Config")
|
||||||
|
}
|
||||||
|
|
||||||
|
fn data(&self) -> Result<Map<Profile, Dict>, Error> {
|
||||||
|
figment::providers::Serialized::defaults(Config::default()).data()
|
||||||
|
}
|
||||||
|
|
||||||
|
fn profile(&self) -> Option<Profile> {
|
||||||
|
// Optionally, a profile that's selected by default.
|
||||||
|
Some(Profile::Default)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -22,6 +22,7 @@ pub enum InterpretName {
|
|||||||
K3dInstallation,
|
K3dInstallation,
|
||||||
TenantInterpret,
|
TenantInterpret,
|
||||||
Application,
|
Application,
|
||||||
|
ArgoCD,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl std::fmt::Display for InterpretName {
|
impl std::fmt::Display for InterpretName {
|
||||||
@@ -39,6 +40,7 @@ impl std::fmt::Display for InterpretName {
|
|||||||
InterpretName::K3dInstallation => f.write_str("K3dInstallation"),
|
InterpretName::K3dInstallation => f.write_str("K3dInstallation"),
|
||||||
InterpretName::TenantInterpret => f.write_str("Tenant"),
|
InterpretName::TenantInterpret => f.write_str("Tenant"),
|
||||||
InterpretName::Application => f.write_str("Application"),
|
InterpretName::Application => f.write_str("Application"),
|
||||||
|
InterpretName::ArgoCD => f.write_str("ArgoCD"),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
59
harmony/src/domain/score_with_dep.rs
Normal file
59
harmony/src/domain/score_with_dep.rs
Normal file
@@ -0,0 +1,59 @@
|
|||||||
|
////////////////////
|
||||||
|
/// Working idea
|
||||||
|
///
|
||||||
|
///
|
||||||
|
trait ScoreWithDep<T> {
|
||||||
|
fn create_interpret(&self) -> Box<dyn Interpret<T>>;
|
||||||
|
fn name(&self) -> String;
|
||||||
|
fn get_dependencies(&self) -> Vec<TypeId>; // Force T to impl Installer<TypeId> or something
|
||||||
|
// like that
|
||||||
|
}
|
||||||
|
|
||||||
|
struct PrometheusAlertScore;
|
||||||
|
|
||||||
|
impl <T> ScoreWithDep<T> for PrometheusAlertScore {
|
||||||
|
fn create_interpret(&self) -> Box<dyn Interpret<T>> {
|
||||||
|
todo!()
|
||||||
|
}
|
||||||
|
|
||||||
|
fn name(&self) -> String {
|
||||||
|
todo!()
|
||||||
|
}
|
||||||
|
|
||||||
|
fn get_dependencies(&self) -> Vec<TypeId> {
|
||||||
|
// We have to find a way to constrait here so at compile time we are only allowed to return
|
||||||
|
// TypeId for types which can be installed by T
|
||||||
|
//
|
||||||
|
// This means, for example that T must implement HelmCommand if the impl <T: HelmCommand> Installable<T> for
|
||||||
|
// KubePrometheus calls for HelmCommand.
|
||||||
|
vec![TypeId::of::<KubePrometheus>()]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
trait Installable{}
|
||||||
|
|
||||||
|
struct KubePrometheus;
|
||||||
|
|
||||||
|
impl Installable for KubePrometheus;
|
||||||
|
|
||||||
|
|
||||||
|
struct Maestro<T> {
|
||||||
|
topology: T
|
||||||
|
}
|
||||||
|
|
||||||
|
impl <T>Maestro<T> {
|
||||||
|
fn execute_store(&self, score: ScoreWithDep<T>) {
|
||||||
|
score.get_dependencies().iter().for_each(|dep| {
|
||||||
|
self.topology.ensure_dependency_ready(dep);
|
||||||
|
});
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
struct TopologyWithDep {
|
||||||
|
}
|
||||||
|
|
||||||
|
impl TopologyWithDep {
|
||||||
|
fn ensure_dependency_ready(&self, type_id: TypeId) -> Result<(), String> {
|
||||||
|
self.installer
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -1,11 +1,15 @@
|
|||||||
use async_trait::async_trait;
|
use async_trait::async_trait;
|
||||||
use harmony_macros::ip;
|
use harmony_macros::ip;
|
||||||
use harmony_types::net::MacAddress;
|
use harmony_types::net::MacAddress;
|
||||||
|
use log::error;
|
||||||
use log::info;
|
use log::info;
|
||||||
|
|
||||||
|
use crate::config::Config;
|
||||||
use crate::executors::ExecutorError;
|
use crate::executors::ExecutorError;
|
||||||
use crate::interpret::InterpretError;
|
use crate::interpret::InterpretError;
|
||||||
use crate::interpret::Outcome;
|
use crate::interpret::Outcome;
|
||||||
|
use crate::inventory::Inventory;
|
||||||
|
use crate::topology::upgradeable::Upgradeable;
|
||||||
|
|
||||||
use super::DHCPStaticEntry;
|
use super::DHCPStaticEntry;
|
||||||
use super::DhcpServer;
|
use super::DhcpServer;
|
||||||
@@ -25,9 +29,12 @@ use super::TftpServer;
|
|||||||
use super::Topology;
|
use super::Topology;
|
||||||
use super::Url;
|
use super::Url;
|
||||||
use super::k8s::K8sClient;
|
use super::k8s::K8sClient;
|
||||||
|
use std::fmt::Debug;
|
||||||
|
use std::net::IpAddr;
|
||||||
|
use std::str::FromStr;
|
||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
|
|
||||||
#[derive(Debug, Clone)]
|
#[derive(Clone, Debug)]
|
||||||
pub struct HAClusterTopology {
|
pub struct HAClusterTopology {
|
||||||
pub domain_name: String,
|
pub domain_name: String,
|
||||||
pub router: Arc<dyn Router>,
|
pub router: Arc<dyn Router>,
|
||||||
@@ -49,9 +56,15 @@ impl Topology for HAClusterTopology {
|
|||||||
"HAClusterTopology"
|
"HAClusterTopology"
|
||||||
}
|
}
|
||||||
async fn ensure_ready(&self) -> Result<Outcome, InterpretError> {
|
async fn ensure_ready(&self) -> Result<Outcome, InterpretError> {
|
||||||
todo!(
|
error!(
|
||||||
"ensure_ready, not entirely sure what it should do here, probably something like verify that the hosts are reachable and all services are up and ready."
|
"ensure_ready, not entirely sure what it should do here, probably something like verify that the hosts are reachable and all services are up and ready."
|
||||||
)
|
);
|
||||||
|
let config = Config::load().expect("couldn't load config");
|
||||||
|
|
||||||
|
if config.run_upgrades {
|
||||||
|
self.upgrade(&Inventory::empty(), self).await?;
|
||||||
|
}
|
||||||
|
Ok(Outcome::success("for now do nothing".to_string()))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -251,6 +264,13 @@ impl Topology for DummyInfra {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[async_trait]
|
||||||
|
impl<T: Topology> Upgradeable<T> for DummyInfra {
|
||||||
|
async fn upgrade(&self, _inventory: &Inventory, _topology: &T) -> Result<(), InterpretError> {
|
||||||
|
unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
const UNIMPLEMENTED_DUMMY_INFRA: &str = "This is a dummy infrastructure, no operation is supported";
|
const UNIMPLEMENTED_DUMMY_INFRA: &str = "This is a dummy infrastructure, no operation is supported";
|
||||||
|
|
||||||
impl Router for DummyInfra {
|
impl Router for DummyInfra {
|
||||||
@@ -417,3 +437,12 @@ impl DnsServer for DummyInfra {
|
|||||||
unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA)
|
unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[async_trait]
|
||||||
|
impl<T: Topology> Upgradeable<T> for HAClusterTopology {
|
||||||
|
async fn upgrade(&self, inventory: &Inventory, topology: &T) -> Result<(), InterpretError> {
|
||||||
|
error!("TODO implement upgrades for all parts of the cluster");
|
||||||
|
self.firewall.upgrade(inventory, topology).await?;
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|||||||
@@ -4,8 +4,6 @@ use k8s_openapi::{
|
|||||||
ClusterResourceScope, NamespaceResourceScope,
|
ClusterResourceScope, NamespaceResourceScope,
|
||||||
api::{apps::v1::Deployment, core::v1::Pod},
|
api::{apps::v1::Deployment, core::v1::Pod},
|
||||||
};
|
};
|
||||||
use kube::runtime::conditions;
|
|
||||||
use kube::runtime::wait::await_condition;
|
|
||||||
use kube::{
|
use kube::{
|
||||||
Client, Config, Error, Resource,
|
Client, Config, Error, Resource,
|
||||||
api::{Api, AttachParams, ListParams, Patch, PatchParams, ResourceExt},
|
api::{Api, AttachParams, ListParams, Patch, PatchParams, ResourceExt},
|
||||||
@@ -13,10 +11,17 @@ use kube::{
|
|||||||
core::ErrorResponse,
|
core::ErrorResponse,
|
||||||
runtime::reflector::Lookup,
|
runtime::reflector::Lookup,
|
||||||
};
|
};
|
||||||
|
use kube::{api::DynamicObject, runtime::conditions};
|
||||||
|
use kube::{
|
||||||
|
api::{ApiResource, GroupVersionKind},
|
||||||
|
runtime::wait::await_condition,
|
||||||
|
};
|
||||||
use log::{debug, error, trace};
|
use log::{debug, error, trace};
|
||||||
use serde::de::DeserializeOwned;
|
use serde::de::DeserializeOwned;
|
||||||
use similar::{DiffableStr, TextDiff};
|
use similar::{DiffableStr, TextDiff};
|
||||||
|
|
||||||
|
use crate::config::Config as HarmonyConfig;
|
||||||
|
|
||||||
#[derive(new, Clone)]
|
#[derive(new, Clone)]
|
||||||
pub struct K8sClient {
|
pub struct K8sClient {
|
||||||
client: Client,
|
client: Client,
|
||||||
@@ -151,7 +156,9 @@ impl K8sClient {
|
|||||||
.as_ref()
|
.as_ref()
|
||||||
.expect("K8s Resource should have a name");
|
.expect("K8s Resource should have a name");
|
||||||
|
|
||||||
if *crate::config::DRY_RUN {
|
let config = HarmonyConfig::load().expect("couldn't load config");
|
||||||
|
|
||||||
|
if config.dry_run {
|
||||||
match api.get(name).await {
|
match api.get(name).await {
|
||||||
Ok(current) => {
|
Ok(current) => {
|
||||||
trace!("Received current value {current:#?}");
|
trace!("Received current value {current:#?}");
|
||||||
@@ -239,6 +246,70 @@ impl K8sClient {
|
|||||||
Ok(result)
|
Ok(result)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
pub async fn apply_yaml_many(
|
||||||
|
&self,
|
||||||
|
yaml: &Vec<serde_yaml::Value>,
|
||||||
|
ns: Option<&str>,
|
||||||
|
) -> Result<(), Error> {
|
||||||
|
for y in yaml.iter() {
|
||||||
|
self.apply_yaml(y, ns).await?;
|
||||||
|
}
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
pub async fn apply_yaml(
|
||||||
|
&self,
|
||||||
|
yaml: &serde_yaml::Value,
|
||||||
|
ns: Option<&str>,
|
||||||
|
) -> Result<(), Error> {
|
||||||
|
let obj: DynamicObject = serde_yaml::from_value(yaml.clone()).expect("TODO do not unwrap");
|
||||||
|
let name = obj.metadata.name.as_ref().expect("YAML must have a name");
|
||||||
|
|
||||||
|
let api_version = yaml
|
||||||
|
.get("apiVersion")
|
||||||
|
.expect("couldn't get apiVersion from YAML")
|
||||||
|
.as_str()
|
||||||
|
.expect("couldn't get apiVersion as str");
|
||||||
|
let kind = yaml
|
||||||
|
.get("kind")
|
||||||
|
.expect("couldn't get kind from YAML")
|
||||||
|
.as_str()
|
||||||
|
.expect("couldn't get kind as str");
|
||||||
|
|
||||||
|
let split: Vec<&str> = api_version.splitn(2, "/").collect();
|
||||||
|
let g = split[0];
|
||||||
|
let v = split[1];
|
||||||
|
|
||||||
|
let gvk = GroupVersionKind::gvk(g, v, kind);
|
||||||
|
let api_resource = ApiResource::from_gvk(&gvk);
|
||||||
|
|
||||||
|
let namespace = match ns {
|
||||||
|
Some(n) => n,
|
||||||
|
None => obj
|
||||||
|
.metadata
|
||||||
|
.namespace
|
||||||
|
.as_ref()
|
||||||
|
.expect("YAML must have a namespace"),
|
||||||
|
};
|
||||||
|
|
||||||
|
// 5. Create a dynamic API client for this resource type.
|
||||||
|
let api: Api<DynamicObject> =
|
||||||
|
Api::namespaced_with(self.client.clone(), namespace, &api_resource);
|
||||||
|
|
||||||
|
// 6. Apply the object to the cluster using Server-Side Apply.
|
||||||
|
// This will create the resource if it doesn't exist, or update it if it does.
|
||||||
|
println!(
|
||||||
|
"Applying Argo Application '{}' in namespace '{}'...",
|
||||||
|
name, namespace
|
||||||
|
);
|
||||||
|
let patch_params = PatchParams::apply("harmony"); // Use a unique field manager name
|
||||||
|
let result = api.patch(name, &patch_params, &Patch::Apply(&obj)).await?;
|
||||||
|
|
||||||
|
println!("Successfully applied '{}'.", result.name_any());
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
pub(crate) async fn from_kubeconfig(path: &str) -> Option<K8sClient> {
|
pub(crate) async fn from_kubeconfig(path: &str) -> Option<K8sClient> {
|
||||||
let k = match Kubeconfig::read_from(path) {
|
let k = match Kubeconfig::read_from(path) {
|
||||||
Ok(k) => k,
|
Ok(k) => k,
|
||||||
|
|||||||
@@ -1,9 +1,10 @@
|
|||||||
use std::{process::Command, sync::Arc};
|
use std::{process::Command, sync::Arc};
|
||||||
|
|
||||||
use async_trait::async_trait;
|
use async_trait::async_trait;
|
||||||
|
use figment::{Figment, providers::Env};
|
||||||
use inquire::Confirm;
|
use inquire::Confirm;
|
||||||
use log::{debug, info, warn};
|
use log::{debug, info, warn};
|
||||||
use serde::Serialize;
|
use serde::{Deserialize, Serialize};
|
||||||
use tokio::sync::OnceCell;
|
use tokio::sync::OnceCell;
|
||||||
|
|
||||||
use crate::{
|
use crate::{
|
||||||
@@ -219,7 +220,7 @@ impl K8sAnywhereTopology {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Clone, Debug)]
|
#[derive(Clone, Debug, Deserialize)]
|
||||||
pub struct K8sAnywhereConfig {
|
pub struct K8sAnywhereConfig {
|
||||||
/// The path of the KUBECONFIG file that Harmony should use to interact with the Kubernetes
|
/// The path of the KUBECONFIG file that Harmony should use to interact with the Kubernetes
|
||||||
/// cluster
|
/// cluster
|
||||||
@@ -246,25 +247,29 @@ pub struct K8sAnywhereConfig {
|
|||||||
///
|
///
|
||||||
/// default: true
|
/// default: true
|
||||||
pub use_local_k3d: bool,
|
pub use_local_k3d: bool,
|
||||||
harmony_profile: String,
|
pub profile: String,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Default for K8sAnywhereConfig {
|
||||||
|
fn default() -> Self {
|
||||||
|
Self {
|
||||||
|
kubeconfig: None,
|
||||||
|
use_system_kubeconfig: false,
|
||||||
|
autoinstall: false,
|
||||||
|
// TODO harmony_profile should be managed at a more core level than this
|
||||||
|
profile: "dev".to_string(),
|
||||||
|
use_local_k3d: true,
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl K8sAnywhereConfig {
|
impl K8sAnywhereConfig {
|
||||||
fn from_env() -> Self {
|
fn from_env() -> Self {
|
||||||
Self {
|
Figment::new()
|
||||||
kubeconfig: std::env::var("KUBECONFIG").ok().map(|v| v.to_string()),
|
.merge(Env::prefixed("HARMONY_"))
|
||||||
use_system_kubeconfig: std::env::var("HARMONY_USE_SYSTEM_KUBECONFIG")
|
.merge(Env::raw().only(&["KUBECONFIG"]))
|
||||||
.map_or_else(|_| false, |v| v.parse().ok().unwrap_or(false)),
|
.extract()
|
||||||
autoinstall: std::env::var("HARMONY_AUTOINSTALL")
|
.expect("couldn't load config from env")
|
||||||
.map_or_else(|_| false, |v| v.parse().ok().unwrap_or(false)),
|
|
||||||
// TODO harmony_profile should be managed at a more core level than this
|
|
||||||
harmony_profile: std::env::var("HARMONY_PROFILE").map_or_else(
|
|
||||||
|_| "dev".to_string(),
|
|
||||||
|v| v.parse().ok().unwrap_or("dev".to_string()),
|
|
||||||
),
|
|
||||||
use_local_k3d: std::env::var("HARMONY_USE_LOCAL_K3D")
|
|
||||||
.map_or_else(|_| true, |v| v.parse().ok().unwrap_or(true)),
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -304,7 +309,7 @@ impl MultiTargetTopology for K8sAnywhereTopology {
|
|||||||
return DeploymentTarget::LocalDev;
|
return DeploymentTarget::LocalDev;
|
||||||
}
|
}
|
||||||
|
|
||||||
match self.config.harmony_profile.to_lowercase().as_str() {
|
match self.config.profile.to_lowercase().as_str() {
|
||||||
"staging" => DeploymentTarget::Staging,
|
"staging" => DeploymentTarget::Staging,
|
||||||
"production" => DeploymentTarget::Production,
|
"production" => DeploymentTarget::Production,
|
||||||
_ => todo!("HARMONY_PROFILE must be set when use_local_k3d is not set"),
|
_ => todo!("HARMONY_PROFILE must be set when use_local_k3d is not set"),
|
||||||
|
|||||||
@@ -6,6 +6,7 @@ mod k8s_anywhere;
|
|||||||
mod localhost;
|
mod localhost;
|
||||||
pub mod oberservability;
|
pub mod oberservability;
|
||||||
pub mod tenant;
|
pub mod tenant;
|
||||||
|
pub mod upgradeable;
|
||||||
pub use k8s_anywhere::*;
|
pub use k8s_anywhere::*;
|
||||||
pub use localhost::*;
|
pub use localhost::*;
|
||||||
pub mod k8s;
|
pub mod k8s;
|
||||||
|
|||||||
@@ -2,9 +2,15 @@ use std::{net::Ipv4Addr, str::FromStr, sync::Arc};
|
|||||||
|
|
||||||
use async_trait::async_trait;
|
use async_trait::async_trait;
|
||||||
use harmony_types::net::MacAddress;
|
use harmony_types::net::MacAddress;
|
||||||
|
use log::debug;
|
||||||
use serde::Serialize;
|
use serde::Serialize;
|
||||||
|
|
||||||
use crate::executors::ExecutorError;
|
use crate::{
|
||||||
|
executors::ExecutorError,
|
||||||
|
interpret::InterpretError,
|
||||||
|
inventory::Inventory,
|
||||||
|
topology::{Topology, upgradeable::Upgradeable},
|
||||||
|
};
|
||||||
|
|
||||||
use super::{IpAddress, LogicalHost, k8s::K8sClient};
|
use super::{IpAddress, LogicalHost, k8s::K8sClient};
|
||||||
|
|
||||||
@@ -38,6 +44,15 @@ impl std::fmt::Debug for dyn Firewall {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// #[async_trait]
|
||||||
|
// impl<T: Topology> Upgradeable<T> for dyn Firewall {
|
||||||
|
// async fn upgrade(&self, inventory: &Inventory, topology: &T) -> Result<(), InterpretError> {
|
||||||
|
// debug!("upgrading");
|
||||||
|
// self.upgrade(inventory, topology).await?;
|
||||||
|
// Ok(())
|
||||||
|
// }
|
||||||
|
// }
|
||||||
|
|
||||||
pub struct NetworkDomain {
|
pub struct NetworkDomain {
|
||||||
pub name: String,
|
pub name: String,
|
||||||
}
|
}
|
||||||
|
|||||||
8
harmony/src/domain/topology/upgradeable.rs
Normal file
8
harmony/src/domain/topology/upgradeable.rs
Normal file
@@ -0,0 +1,8 @@
|
|||||||
|
use async_trait::async_trait;
|
||||||
|
|
||||||
|
use crate::{interpret::InterpretError, inventory::Inventory};
|
||||||
|
|
||||||
|
#[async_trait]
|
||||||
|
pub trait Upgradeable<T>: Send + Sync {
|
||||||
|
async fn upgrade(&self, inventory: &Inventory, topology: &T) -> Result<(), InterpretError>;
|
||||||
|
}
|
||||||
@@ -7,13 +7,18 @@ mod management;
|
|||||||
mod tftp;
|
mod tftp;
|
||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
|
|
||||||
|
use async_trait::async_trait;
|
||||||
pub use management::*;
|
pub use management::*;
|
||||||
use opnsense_config_xml::Host;
|
use opnsense_config_xml::Host;
|
||||||
use tokio::sync::RwLock;
|
use tokio::sync::RwLock;
|
||||||
|
|
||||||
use crate::{
|
use crate::{
|
||||||
executors::ExecutorError,
|
executors::ExecutorError,
|
||||||
topology::{IpAddress, LogicalHost},
|
interpret::InterpretError,
|
||||||
|
inventory::Inventory,
|
||||||
|
modules::opnsense::OPNSenseLaunchUpgrade,
|
||||||
|
score::Score,
|
||||||
|
topology::{IpAddress, LogicalHost, Topology, upgradeable::Upgradeable},
|
||||||
};
|
};
|
||||||
|
|
||||||
#[derive(Debug, Clone)]
|
#[derive(Debug, Clone)]
|
||||||
@@ -49,3 +54,17 @@ impl OPNSenseFirewall {
|
|||||||
.map_err(|e| ExecutorError::UnexpectedError(e.to_string()))
|
.map_err(|e| ExecutorError::UnexpectedError(e.to_string()))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[async_trait]
|
||||||
|
impl<T: Topology> Upgradeable<T> for OPNSenseFirewall {
|
||||||
|
async fn upgrade(&self, inventory: &Inventory, topology: &T) -> Result<(), InterpretError> {
|
||||||
|
OPNSenseLaunchUpgrade {
|
||||||
|
opnsense: self.get_opnsense_config(),
|
||||||
|
}
|
||||||
|
.create_interpret()
|
||||||
|
.execute(inventory, topology)
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|||||||
@@ -3,7 +3,6 @@ use serde::Serialize;
|
|||||||
|
|
||||||
use crate::topology::Topology;
|
use crate::topology::Topology;
|
||||||
|
|
||||||
use super::Application;
|
|
||||||
/// An ApplicationFeature provided by harmony, such as Backups, Monitoring, MultisiteAvailability,
|
/// An ApplicationFeature provided by harmony, such as Backups, Monitoring, MultisiteAvailability,
|
||||||
/// ContinuousIntegration, ContinuousDelivery
|
/// ContinuousIntegration, ContinuousDelivery
|
||||||
#[async_trait]
|
#[async_trait]
|
||||||
|
|||||||
354
harmony/src/modules/application/features/argo_types.rs
Normal file
354
harmony/src/modules/application/features/argo_types.rs
Normal file
@@ -0,0 +1,354 @@
|
|||||||
|
use log::debug;
|
||||||
|
use serde::Serialize;
|
||||||
|
use serde_with::skip_serializing_none;
|
||||||
|
use serde_yaml::Value;
|
||||||
|
|
||||||
|
use crate::modules::application::features::CDApplicationConfig;
|
||||||
|
|
||||||
|
#[skip_serializing_none]
|
||||||
|
#[derive(Clone, Debug, Serialize)]
|
||||||
|
#[serde(rename_all = "camelCase")]
|
||||||
|
pub struct Helm {
|
||||||
|
pub pass_credentials: Option<bool>,
|
||||||
|
pub parameters: Vec<Value>,
|
||||||
|
pub file_parameters: Vec<Value>,
|
||||||
|
pub release_name: Option<String>,
|
||||||
|
pub value_files: Vec<String>,
|
||||||
|
pub ignore_missing_value_files: Option<bool>,
|
||||||
|
pub values: Option<String>,
|
||||||
|
pub values_object: Option<Value>,
|
||||||
|
pub skip_crds: Option<bool>,
|
||||||
|
pub skip_schema_validation: Option<bool>,
|
||||||
|
pub version: Option<String>,
|
||||||
|
pub kube_version: Option<String>,
|
||||||
|
pub api_versions: Vec<String>,
|
||||||
|
pub namespace: Option<String>,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[skip_serializing_none]
|
||||||
|
#[derive(Clone, Debug, Serialize)]
|
||||||
|
#[serde(rename_all = "camelCase")]
|
||||||
|
pub struct Source {
|
||||||
|
// Using string for this because URL enforces a URL scheme at the beginning but Helm, ArgoCD, etc do not, and it can be counterproductive,
|
||||||
|
// as the only way I've found to get OCI working isn't by using oci:// but rather no scheme at all
|
||||||
|
#[serde(rename = "repoURL")]
|
||||||
|
pub repo_url: String,
|
||||||
|
pub target_revision: Option<String>,
|
||||||
|
pub chart: String,
|
||||||
|
pub helm: Helm,
|
||||||
|
pub path: String,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Clone, Debug, Serialize)]
|
||||||
|
#[serde(rename_all = "camelCase")]
|
||||||
|
pub struct Automated {
|
||||||
|
pub prune: bool,
|
||||||
|
pub self_heal: bool,
|
||||||
|
pub allow_empty: bool,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Clone, Debug, Serialize)]
|
||||||
|
#[serde(rename_all = "camelCase")]
|
||||||
|
pub struct Backoff {
|
||||||
|
pub duration: String,
|
||||||
|
pub factor: u32,
|
||||||
|
pub max_duration: String,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Clone, Debug, Serialize)]
|
||||||
|
#[serde(rename_all = "camelCase")]
|
||||||
|
pub struct Retry {
|
||||||
|
pub limit: u32,
|
||||||
|
pub backoff: Backoff,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Clone, Debug, Serialize)]
|
||||||
|
#[serde(rename_all = "camelCase")]
|
||||||
|
pub struct SyncPolicy {
|
||||||
|
pub automated: Automated,
|
||||||
|
pub sync_options: Vec<String>,
|
||||||
|
pub retry: Retry,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[skip_serializing_none]
|
||||||
|
#[derive(Clone, Debug, Serialize)]
|
||||||
|
#[serde(rename_all = "camelCase")]
|
||||||
|
pub struct ArgoApplication {
|
||||||
|
pub name: String,
|
||||||
|
pub namespace: Option<String>,
|
||||||
|
pub project: String,
|
||||||
|
pub source: Source,
|
||||||
|
pub sync_policy: SyncPolicy,
|
||||||
|
pub revision_history_limit: u32,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Default for ArgoApplication {
|
||||||
|
fn default() -> Self {
|
||||||
|
Self {
|
||||||
|
name: Default::default(),
|
||||||
|
namespace: Default::default(),
|
||||||
|
project: Default::default(),
|
||||||
|
source: Source {
|
||||||
|
repo_url: "http://asdf".to_string(),
|
||||||
|
target_revision: None,
|
||||||
|
chart: "".to_string(),
|
||||||
|
helm: Helm {
|
||||||
|
pass_credentials: None,
|
||||||
|
parameters: vec![],
|
||||||
|
file_parameters: vec![],
|
||||||
|
release_name: None,
|
||||||
|
value_files: vec![],
|
||||||
|
ignore_missing_value_files: None,
|
||||||
|
values: None,
|
||||||
|
values_object: None,
|
||||||
|
skip_crds: None,
|
||||||
|
skip_schema_validation: None,
|
||||||
|
version: None,
|
||||||
|
kube_version: None,
|
||||||
|
api_versions: vec![],
|
||||||
|
namespace: None,
|
||||||
|
},
|
||||||
|
path: "".to_string(),
|
||||||
|
},
|
||||||
|
sync_policy: SyncPolicy {
|
||||||
|
automated: Automated {
|
||||||
|
prune: false,
|
||||||
|
self_heal: false,
|
||||||
|
allow_empty: false,
|
||||||
|
},
|
||||||
|
sync_options: vec![],
|
||||||
|
retry: Retry {
|
||||||
|
limit: 5,
|
||||||
|
backoff: Backoff {
|
||||||
|
duration: "5s".to_string(),
|
||||||
|
factor: 2,
|
||||||
|
max_duration: "3m".to_string(),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
revision_history_limit: 10,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl From<CDApplicationConfig> for ArgoApplication {
|
||||||
|
fn from(value: CDApplicationConfig) -> Self {
|
||||||
|
Self {
|
||||||
|
name: value.name,
|
||||||
|
namespace: Some(value.namespace),
|
||||||
|
project: "default".to_string(),
|
||||||
|
source: Source {
|
||||||
|
repo_url: value.helm_chart_repo_url,
|
||||||
|
target_revision: Some(value.version.to_string()),
|
||||||
|
chart: value.helm_chart_name.clone(),
|
||||||
|
path: value.helm_chart_name,
|
||||||
|
helm: Helm {
|
||||||
|
pass_credentials: None,
|
||||||
|
parameters: vec![],
|
||||||
|
file_parameters: vec![],
|
||||||
|
release_name: None,
|
||||||
|
value_files: vec![],
|
||||||
|
ignore_missing_value_files: None,
|
||||||
|
values: None,
|
||||||
|
values_object: value.values_overrides,
|
||||||
|
skip_crds: None,
|
||||||
|
skip_schema_validation: None,
|
||||||
|
version: None,
|
||||||
|
kube_version: None,
|
||||||
|
api_versions: vec![],
|
||||||
|
namespace: None,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
sync_policy: SyncPolicy {
|
||||||
|
automated: Automated {
|
||||||
|
prune: false,
|
||||||
|
self_heal: false,
|
||||||
|
allow_empty: true,
|
||||||
|
},
|
||||||
|
sync_options: vec![],
|
||||||
|
retry: Retry {
|
||||||
|
limit: 5,
|
||||||
|
backoff: Backoff {
|
||||||
|
duration: "5s".to_string(),
|
||||||
|
factor: 2,
|
||||||
|
max_duration: "3m".to_string(),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
..Self::default()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl ArgoApplication {
|
||||||
|
pub fn to_yaml(&self) -> serde_yaml::Value {
|
||||||
|
let name = &self.name;
|
||||||
|
let namespace = if let Some(ns) = self.namespace.as_ref() {
|
||||||
|
&ns
|
||||||
|
} else {
|
||||||
|
"argocd"
|
||||||
|
};
|
||||||
|
let project = &self.project;
|
||||||
|
let source = &self.source;
|
||||||
|
|
||||||
|
let yaml_str = format!(
|
||||||
|
r#"
|
||||||
|
apiVersion: argoproj.io/v1alpha1
|
||||||
|
kind: Application
|
||||||
|
metadata:
|
||||||
|
name: {name}
|
||||||
|
# You'll usually want to add your resources to the argocd namespace.
|
||||||
|
namespace: {namespace}
|
||||||
|
spec:
|
||||||
|
# The project the application belongs to.
|
||||||
|
project: {project}
|
||||||
|
|
||||||
|
# Destination cluster and namespace to deploy the application
|
||||||
|
destination:
|
||||||
|
# cluster API URL
|
||||||
|
server: https://kubernetes.default.svc
|
||||||
|
# or cluster name
|
||||||
|
# name: in-cluster
|
||||||
|
# The namespace will only be set for namespace-scoped resources that have not set a value for .metadata.namespace
|
||||||
|
namespace: {namespace}
|
||||||
|
|
||||||
|
"#
|
||||||
|
);
|
||||||
|
|
||||||
|
let mut yaml_value: Value =
|
||||||
|
serde_yaml::from_str(yaml_str.as_str()).expect("couldn't parse string to YAML");
|
||||||
|
|
||||||
|
let spec = yaml_value
|
||||||
|
.get_mut("spec")
|
||||||
|
.expect("couldn't get spec from yaml")
|
||||||
|
.as_mapping_mut()
|
||||||
|
.expect("couldn't unwrap spec as mutable mapping");
|
||||||
|
|
||||||
|
let source =
|
||||||
|
serde_yaml::to_value(&self.source).expect("couldn't serialize source to value");
|
||||||
|
let sync_policy = serde_yaml::to_value(&self.sync_policy)
|
||||||
|
.expect("couldn't serialize sync_policy to value");
|
||||||
|
let revision_history_limit = serde_yaml::to_value(&self.revision_history_limit)
|
||||||
|
.expect("couldn't serialize revision_history_limit to value");
|
||||||
|
|
||||||
|
spec.insert(
|
||||||
|
serde_yaml::to_value("source").expect("string to value failed"),
|
||||||
|
source,
|
||||||
|
);
|
||||||
|
spec.insert(
|
||||||
|
serde_yaml::to_value("syncPolicy").expect("string to value failed"),
|
||||||
|
sync_policy,
|
||||||
|
);
|
||||||
|
spec.insert(
|
||||||
|
serde_yaml::to_value("revisionHistoryLimit")
|
||||||
|
.expect("couldn't convert str to yaml value"),
|
||||||
|
revision_history_limit,
|
||||||
|
);
|
||||||
|
|
||||||
|
debug!("spec: {}", serde_yaml::to_string(spec).unwrap());
|
||||||
|
debug!(
|
||||||
|
"entire yaml_value: {}",
|
||||||
|
serde_yaml::to_string(&yaml_value).unwrap()
|
||||||
|
);
|
||||||
|
|
||||||
|
yaml_value
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(test)]
|
||||||
|
mod tests {
|
||||||
|
use pretty_assertions::assert_eq;
|
||||||
|
|
||||||
|
use crate::modules::application::features::{
|
||||||
|
ArgoApplication, Automated, Backoff, Helm, Retry, Source, SyncPolicy,
|
||||||
|
};
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_argo_application_to_yaml_happy_path() {
|
||||||
|
let app = ArgoApplication {
|
||||||
|
name: "test".to_string(),
|
||||||
|
namespace: Some("test-ns".to_string()),
|
||||||
|
project: "test-project".to_string(),
|
||||||
|
source: Source {
|
||||||
|
repo_url: "http://test".to_string(),
|
||||||
|
target_revision: None,
|
||||||
|
chart: "test-chart".to_string(),
|
||||||
|
helm: Helm {
|
||||||
|
pass_credentials: None,
|
||||||
|
parameters: vec![],
|
||||||
|
file_parameters: vec![],
|
||||||
|
release_name: Some("test-release-neame".to_string()),
|
||||||
|
value_files: vec![],
|
||||||
|
ignore_missing_value_files: None,
|
||||||
|
values: None,
|
||||||
|
values_object: None,
|
||||||
|
skip_crds: None,
|
||||||
|
skip_schema_validation: None,
|
||||||
|
version: None,
|
||||||
|
kube_version: None,
|
||||||
|
api_versions: vec![],
|
||||||
|
namespace: None,
|
||||||
|
},
|
||||||
|
path: "".to_string(),
|
||||||
|
},
|
||||||
|
sync_policy: SyncPolicy {
|
||||||
|
automated: Automated {
|
||||||
|
prune: false,
|
||||||
|
self_heal: false,
|
||||||
|
allow_empty: false,
|
||||||
|
},
|
||||||
|
sync_options: vec![],
|
||||||
|
retry: Retry {
|
||||||
|
limit: 5,
|
||||||
|
backoff: Backoff {
|
||||||
|
duration: "5s".to_string(),
|
||||||
|
factor: 2,
|
||||||
|
max_duration: "3m".to_string(),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
revision_history_limit: 10,
|
||||||
|
};
|
||||||
|
|
||||||
|
let expected_yaml_output = r#"apiVersion: argoproj.io/v1alpha1
|
||||||
|
kind: Application
|
||||||
|
metadata:
|
||||||
|
name: test
|
||||||
|
namespace: test-ns
|
||||||
|
spec:
|
||||||
|
project: test-project
|
||||||
|
destination:
|
||||||
|
server: https://kubernetes.default.svc
|
||||||
|
namespace: test-ns
|
||||||
|
source:
|
||||||
|
repoURL: http://test
|
||||||
|
chart: test-chart
|
||||||
|
helm:
|
||||||
|
parameters: []
|
||||||
|
fileParameters: []
|
||||||
|
releaseName: test-release-neame
|
||||||
|
valueFiles: []
|
||||||
|
apiVersions: []
|
||||||
|
path: ''
|
||||||
|
syncPolicy:
|
||||||
|
automated:
|
||||||
|
prune: false
|
||||||
|
selfHeal: false
|
||||||
|
allowEmpty: false
|
||||||
|
syncOptions: []
|
||||||
|
retry:
|
||||||
|
limit: 5
|
||||||
|
backoff:
|
||||||
|
duration: 5s
|
||||||
|
factor: 2
|
||||||
|
maxDuration: 3m
|
||||||
|
revisionHistoryLimit: 10"#;
|
||||||
|
|
||||||
|
assert_eq!(
|
||||||
|
expected_yaml_output.trim(),
|
||||||
|
serde_yaml::to_string(&app.clone().to_yaml())
|
||||||
|
.unwrap()
|
||||||
|
.trim()
|
||||||
|
);
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -2,19 +2,19 @@ use std::{io::Write, process::Command, sync::Arc};
|
|||||||
|
|
||||||
use async_trait::async_trait;
|
use async_trait::async_trait;
|
||||||
use log::{error, info};
|
use log::{error, info};
|
||||||
use serde_json::Value;
|
use serde_yaml::Value;
|
||||||
use tempfile::NamedTempFile;
|
use tempfile::NamedTempFile;
|
||||||
|
|
||||||
use crate::{
|
use crate::{
|
||||||
config::HARMONY_DATA_DIR,
|
config::Config,
|
||||||
data::Version,
|
data::Version,
|
||||||
inventory::Inventory,
|
inventory::Inventory,
|
||||||
modules::{
|
modules::application::{
|
||||||
application::{Application, ApplicationFeature, HelmPackage, OCICompliant},
|
Application, ApplicationFeature, HelmPackage, OCICompliant,
|
||||||
helm::chart::HelmChartScore,
|
features::{ArgoApplication, ArgoHelmScore},
|
||||||
},
|
},
|
||||||
score::Score,
|
score::Score,
|
||||||
topology::{DeploymentTarget, HelmCommand, MultiTargetTopology, Topology, Url},
|
topology::{DeploymentTarget, HelmCommand, K8sclient, MultiTargetTopology, Topology},
|
||||||
};
|
};
|
||||||
|
|
||||||
/// ContinuousDelivery in Harmony provides this functionality :
|
/// ContinuousDelivery in Harmony provides this functionality :
|
||||||
@@ -56,12 +56,14 @@ impl<A: OCICompliant + HelmPackage> ContinuousDelivery<A> {
|
|||||||
chart_url: String,
|
chart_url: String,
|
||||||
image_name: String,
|
image_name: String,
|
||||||
) -> Result<(), String> {
|
) -> Result<(), String> {
|
||||||
|
let config = Config::load().expect("couldn't load config");
|
||||||
|
|
||||||
error!(
|
error!(
|
||||||
"FIXME This works only with local k3d installations, which is fine only for current demo purposes. We assume usage of K8sAnywhereTopology"
|
"FIXME This works only with local k3d installations, which is fine only for current demo purposes. We assume usage of K8sAnywhereTopology"
|
||||||
);
|
);
|
||||||
|
|
||||||
error!("TODO hardcoded k3d bin path is wrong");
|
error!("TODO hardcoded k3d bin path is wrong");
|
||||||
let k3d_bin_path = (*HARMONY_DATA_DIR).join("k3d").join("k3d");
|
let k3d_bin_path = config.data_dir.join("k3d").join("k3d");
|
||||||
// --- 1. Import the container image into the k3d cluster ---
|
// --- 1. Import the container image into the k3d cluster ---
|
||||||
info!(
|
info!(
|
||||||
"Importing image '{}' into k3d cluster 'harmony'",
|
"Importing image '{}' into k3d cluster 'harmony'",
|
||||||
@@ -139,7 +141,7 @@ impl<A: OCICompliant + HelmPackage> ContinuousDelivery<A> {
|
|||||||
#[async_trait]
|
#[async_trait]
|
||||||
impl<
|
impl<
|
||||||
A: OCICompliant + HelmPackage + Clone + 'static,
|
A: OCICompliant + HelmPackage + Clone + 'static,
|
||||||
T: Topology + HelmCommand + MultiTargetTopology + 'static,
|
T: Topology + HelmCommand + MultiTargetTopology + K8sclient + 'static,
|
||||||
> ApplicationFeature<T> for ContinuousDelivery<A>
|
> ApplicationFeature<T> for ContinuousDelivery<A>
|
||||||
{
|
{
|
||||||
async fn ensure_installed(&self, topology: &T) -> Result<(), String> {
|
async fn ensure_installed(&self, topology: &T) -> Result<(), String> {
|
||||||
@@ -150,12 +152,17 @@ impl<
|
|||||||
"TODO reverse helm chart packaging and docker image build. I put helm package first for faster iterations"
|
"TODO reverse helm chart packaging and docker image build. I put helm package first for faster iterations"
|
||||||
);
|
);
|
||||||
|
|
||||||
|
// TODO Write CI/CD workflow files
|
||||||
|
// we can autotedect the CI type using the remote url (default to github action for github
|
||||||
|
// url, etc..)
|
||||||
|
// Or ask for it when unknown
|
||||||
|
|
||||||
let helm_chart = self.application.build_push_helm_package(&image).await?;
|
let helm_chart = self.application.build_push_helm_package(&image).await?;
|
||||||
info!("Pushed new helm chart {helm_chart}");
|
info!("Pushed new helm chart {helm_chart}");
|
||||||
|
|
||||||
// let image = self.application.build_push_oci_image().await?;
|
error!("TODO Make building image configurable/skippable");
|
||||||
// info!("Pushed new docker image {image}");
|
let image = self.application.build_push_oci_image().await?;
|
||||||
error!("uncomment above");
|
info!("Pushed new docker image {image}");
|
||||||
|
|
||||||
info!("Installing ContinuousDelivery feature");
|
info!("Installing ContinuousDelivery feature");
|
||||||
// TODO this is a temporary hack for demo purposes, the deployment target should be driven
|
// TODO this is a temporary hack for demo purposes, the deployment target should be driven
|
||||||
@@ -178,31 +185,28 @@ impl<
|
|||||||
}
|
}
|
||||||
target => {
|
target => {
|
||||||
info!("Deploying to target {target:?}");
|
info!("Deploying to target {target:?}");
|
||||||
let cd_server = HelmChartScore {
|
let score = ArgoHelmScore {
|
||||||
namespace: todo!(
|
namespace: "harmonydemo-staging".to_string(),
|
||||||
"ArgoCD Helm chart with proper understanding of Tenant, see how Will did it for Monitoring for now"
|
openshift: false,
|
||||||
),
|
domain: "argo.harmonydemo.apps.st.mcd".to_string(),
|
||||||
release_name: todo!("argocd helm chart whatever"),
|
argo_apps: vec![ArgoApplication::from(CDApplicationConfig {
|
||||||
chart_name: todo!(),
|
// helm pull oci://hub.nationtech.io/harmony/harmony-example-rust-webapp-chart --version 0.1.0
|
||||||
chart_version: todo!(),
|
version: Version::from("0.1.0").unwrap(),
|
||||||
values_overrides: todo!(),
|
helm_chart_repo_url: "hub.nationtech.io/harmony".to_string(),
|
||||||
values_yaml: todo!(),
|
helm_chart_name: "harmony-example-rust-webapp-chart".to_string(),
|
||||||
create_namespace: todo!(),
|
values_overrides: None,
|
||||||
install_only: todo!(),
|
name: "harmony-demo-rust-webapp".to_string(),
|
||||||
repository: todo!(),
|
namespace: "harmonydemo-staging".to_string(),
|
||||||
|
})],
|
||||||
};
|
};
|
||||||
let interpret = cd_server.create_interpret();
|
score
|
||||||
interpret.execute(&Inventory::empty(), topology);
|
.create_interpret()
|
||||||
|
.execute(&Inventory::empty(), topology)
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
Ok(())
|
||||||
todo!("1. Create ArgoCD score that installs argo using helm chart, see if Taha's already done it
|
|
||||||
- [X] Package app (docker image, helm chart)
|
|
||||||
- [X] Push to registry
|
|
||||||
- [ ] Push only if staging or prod
|
|
||||||
- [ ] Deploy to local k3d when target is local
|
|
||||||
- [ ] Poke Argo
|
|
||||||
- [ ] Ensure app is up")
|
|
||||||
}
|
}
|
||||||
fn name(&self) -> String {
|
fn name(&self) -> String {
|
||||||
"ContinuousDelivery".to_string()
|
"ContinuousDelivery".to_string()
|
||||||
@@ -212,9 +216,12 @@ impl<
|
|||||||
/// For now this is entirely bound to K8s / ArgoCD, will have to be revisited when we support
|
/// For now this is entirely bound to K8s / ArgoCD, will have to be revisited when we support
|
||||||
/// more CD systems
|
/// more CD systems
|
||||||
pub struct CDApplicationConfig {
|
pub struct CDApplicationConfig {
|
||||||
version: Version,
|
pub version: Version,
|
||||||
helm_chart_url: Url,
|
pub helm_chart_repo_url: String,
|
||||||
values_overrides: Value,
|
pub helm_chart_name: String,
|
||||||
|
pub values_overrides: Option<Value>,
|
||||||
|
pub name: String,
|
||||||
|
pub namespace: String,
|
||||||
}
|
}
|
||||||
|
|
||||||
pub trait ContinuousDeliveryApplication {
|
pub trait ContinuousDeliveryApplication {
|
||||||
|
|||||||
@@ -2,7 +2,7 @@ use async_trait::async_trait;
|
|||||||
use log::info;
|
use log::info;
|
||||||
|
|
||||||
use crate::{
|
use crate::{
|
||||||
modules::application::{Application, ApplicationFeature},
|
modules::application::ApplicationFeature,
|
||||||
topology::{K8sclient, Topology},
|
topology::{K8sclient, Topology},
|
||||||
};
|
};
|
||||||
|
|
||||||
|
|||||||
1003
harmony/src/modules/application/features/helm_argocd_score.rs
Normal file
1003
harmony/src/modules/application/features/helm_argocd_score.rs
Normal file
File diff suppressed because it is too large
Load Diff
@@ -6,3 +6,9 @@ pub use monitoring::*;
|
|||||||
|
|
||||||
mod continuous_delivery;
|
mod continuous_delivery;
|
||||||
pub use continuous_delivery::*;
|
pub use continuous_delivery::*;
|
||||||
|
|
||||||
|
mod helm_argocd_score;
|
||||||
|
pub use helm_argocd_score::*;
|
||||||
|
|
||||||
|
mod argo_types;
|
||||||
|
pub use argo_types::*;
|
||||||
|
|||||||
@@ -1,19 +1,109 @@
|
|||||||
|
use std::sync::Arc;
|
||||||
|
|
||||||
use async_trait::async_trait;
|
use async_trait::async_trait;
|
||||||
use log::info;
|
use base64::{Engine as _, engine::general_purpose};
|
||||||
|
use log::{debug, info};
|
||||||
|
|
||||||
use crate::{
|
use crate::{
|
||||||
modules::application::{Application, ApplicationFeature},
|
inventory::Inventory,
|
||||||
topology::{HelmCommand, Topology},
|
modules::{
|
||||||
|
application::{ApplicationFeature, OCICompliant},
|
||||||
|
monitoring::{
|
||||||
|
alert_channel::webhook_receiver::WebhookReceiver,
|
||||||
|
kube_prometheus::{
|
||||||
|
helm_prometheus_alert_score::HelmPrometheusAlertingScore,
|
||||||
|
types::{NamespaceSelector, ServiceMonitor},
|
||||||
|
},
|
||||||
|
ntfy::ntfy::NtfyScore,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
score::Score,
|
||||||
|
topology::{HelmCommand, K8sclient, Topology, Url, tenant::TenantManager},
|
||||||
};
|
};
|
||||||
|
|
||||||
#[derive(Debug, Default, Clone)]
|
#[derive(Debug, Clone)]
|
||||||
pub struct Monitoring {}
|
pub struct Monitoring {
|
||||||
|
pub application: Arc<dyn OCICompliant>,
|
||||||
|
}
|
||||||
|
|
||||||
#[async_trait]
|
#[async_trait]
|
||||||
impl<T: Topology + HelmCommand + 'static> ApplicationFeature<T> for Monitoring {
|
impl<T: Topology + HelmCommand + K8sclient + 'static + TenantManager> ApplicationFeature<T>
|
||||||
async fn ensure_installed(&self, _topology: &T) -> Result<(), String> {
|
for Monitoring
|
||||||
|
{
|
||||||
|
async fn ensure_installed(&self, topology: &T) -> Result<(), String> {
|
||||||
info!("Ensuring monitoring is available for application");
|
info!("Ensuring monitoring is available for application");
|
||||||
todo!("create and execute k8s prometheus score, depends on Will's work")
|
|
||||||
|
let ntfy = NtfyScore {
|
||||||
|
// namespace: topology
|
||||||
|
// .get_tenant_config()
|
||||||
|
// .await
|
||||||
|
// .expect("couldn't get tenant config")
|
||||||
|
// .name,
|
||||||
|
namespace: self.application.name(),
|
||||||
|
host: "localhost".to_string(),
|
||||||
|
};
|
||||||
|
ntfy.create_interpret()
|
||||||
|
.execute(&Inventory::empty(), topology)
|
||||||
|
.await
|
||||||
|
.expect("couldn't create interpret for ntfy");
|
||||||
|
|
||||||
|
let ntfy_default_auth_username = "harmony";
|
||||||
|
let ntfy_default_auth_password = "harmony";
|
||||||
|
let ntfy_default_auth_header = format!(
|
||||||
|
"Basic {}",
|
||||||
|
general_purpose::STANDARD.encode(format!(
|
||||||
|
"{ntfy_default_auth_username}:{ntfy_default_auth_password}"
|
||||||
|
))
|
||||||
|
);
|
||||||
|
|
||||||
|
debug!("ntfy_default_auth_header: {ntfy_default_auth_header}");
|
||||||
|
|
||||||
|
let ntfy_default_auth_param = general_purpose::STANDARD
|
||||||
|
.encode(ntfy_default_auth_header)
|
||||||
|
.replace("=", "");
|
||||||
|
|
||||||
|
debug!("ntfy_default_auth_param: {ntfy_default_auth_param}");
|
||||||
|
|
||||||
|
let ntfy_receiver = WebhookReceiver {
|
||||||
|
name: "ntfy-webhook".to_string(),
|
||||||
|
url: Url::Url(
|
||||||
|
url::Url::parse(
|
||||||
|
format!(
|
||||||
|
"http://ntfy.{}.svc.cluster.local/rust-web-app?auth={ntfy_default_auth_param}",
|
||||||
|
self.application.name()
|
||||||
|
)
|
||||||
|
.as_str(),
|
||||||
|
)
|
||||||
|
.unwrap(),
|
||||||
|
),
|
||||||
|
};
|
||||||
|
|
||||||
|
let mut service_monitor = ServiceMonitor::default();
|
||||||
|
service_monitor.namespace_selector = Some(NamespaceSelector {
|
||||||
|
any: true,
|
||||||
|
match_names: vec![],
|
||||||
|
});
|
||||||
|
|
||||||
|
service_monitor.name = "rust-webapp".to_string();
|
||||||
|
|
||||||
|
// let alerting_score = ApplicationPrometheusMonitoringScore {
|
||||||
|
// receivers: vec![Box::new(ntfy_receiver)],
|
||||||
|
// rules: vec![],
|
||||||
|
// service_monitors: vec![service_monitor],
|
||||||
|
// };
|
||||||
|
|
||||||
|
let alerting_score = HelmPrometheusAlertingScore {
|
||||||
|
receivers: vec![Box::new(ntfy_receiver)],
|
||||||
|
rules: vec![],
|
||||||
|
service_monitors: vec![service_monitor],
|
||||||
|
};
|
||||||
|
|
||||||
|
alerting_score
|
||||||
|
.create_interpret()
|
||||||
|
.execute(&Inventory::empty(), topology)
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
Ok(())
|
||||||
}
|
}
|
||||||
fn name(&self) -> String {
|
fn name(&self) -> String {
|
||||||
"Monitoring".to_string()
|
"Monitoring".to_string()
|
||||||
|
|||||||
@@ -23,13 +23,13 @@ pub trait Application: std::fmt::Debug + Send + Sync {
|
|||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Debug)]
|
#[derive(Debug)]
|
||||||
pub struct ApplicationInterpret<T: Topology + std::fmt::Debug> {
|
pub struct ApplicationInterpret<A: Application, T: Topology + std::fmt::Debug> {
|
||||||
features: Vec<Box<dyn ApplicationFeature<T>>>,
|
features: Vec<Box<dyn ApplicationFeature<T>>>,
|
||||||
application: Arc<Box<dyn Application>>,
|
application: Arc<A>,
|
||||||
}
|
}
|
||||||
|
|
||||||
#[async_trait]
|
#[async_trait]
|
||||||
impl<T: Topology + std::fmt::Debug> Interpret<T> for ApplicationInterpret<T> {
|
impl<A: Application, T: Topology + std::fmt::Debug> Interpret<T> for ApplicationInterpret<A, T> {
|
||||||
async fn execute(
|
async fn execute(
|
||||||
&self,
|
&self,
|
||||||
_inventory: &Inventory,
|
_inventory: &Inventory,
|
||||||
@@ -59,9 +59,7 @@ impl<T: Topology + std::fmt::Debug> Interpret<T> for ApplicationInterpret<T> {
|
|||||||
}
|
}
|
||||||
};
|
};
|
||||||
}
|
}
|
||||||
todo!(
|
Ok(Outcome::success("successfully created app".to_string()))
|
||||||
"Do I need to do anything more than this here?? I feel like the Application trait itself should expose something like ensure_ready but its becoming redundant. We'll see as this evolves."
|
|
||||||
)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
fn get_name(&self) -> InterpretName {
|
fn get_name(&self) -> InterpretName {
|
||||||
|
|||||||
@@ -4,13 +4,17 @@ use std::process;
|
|||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
|
|
||||||
use async_trait::async_trait;
|
use async_trait::async_trait;
|
||||||
|
use bollard::query_parameters::PushImageOptionsBuilder;
|
||||||
|
use bollard::{Docker, body_full};
|
||||||
use dockerfile_builder::Dockerfile;
|
use dockerfile_builder::Dockerfile;
|
||||||
use dockerfile_builder::instruction::{CMD, COPY, ENV, EXPOSE, FROM, RUN, USER, WORKDIR};
|
use dockerfile_builder::instruction::{CMD, COPY, ENV, EXPOSE, FROM, RUN, USER, WORKDIR};
|
||||||
use dockerfile_builder::instruction_builder::CopyBuilder;
|
use dockerfile_builder::instruction_builder::CopyBuilder;
|
||||||
|
use futures_util::StreamExt;
|
||||||
use log::{debug, error, info};
|
use log::{debug, error, info};
|
||||||
use serde::Serialize;
|
use serde::Serialize;
|
||||||
|
use tar::Archive;
|
||||||
|
|
||||||
use crate::config::{REGISTRY_PROJECT, REGISTRY_URL};
|
use crate::config::Config;
|
||||||
use crate::{
|
use crate::{
|
||||||
score::Score,
|
score::Score,
|
||||||
topology::{Topology, Url},
|
topology::{Topology, Url},
|
||||||
@@ -19,23 +23,30 @@ use crate::{
|
|||||||
use super::{Application, ApplicationFeature, ApplicationInterpret, HelmPackage, OCICompliant};
|
use super::{Application, ApplicationFeature, ApplicationInterpret, HelmPackage, OCICompliant};
|
||||||
|
|
||||||
#[derive(Debug, Serialize, Clone)]
|
#[derive(Debug, Serialize, Clone)]
|
||||||
pub struct RustWebappScore<T: Topology + Clone + Serialize> {
|
pub struct ApplicationScore<A: Application + Serialize, T: Topology + Clone + Serialize>
|
||||||
pub name: String,
|
where
|
||||||
pub domain: Url,
|
Arc<A>: Serialize + Clone,
|
||||||
|
{
|
||||||
pub features: Vec<Box<dyn ApplicationFeature<T>>>,
|
pub features: Vec<Box<dyn ApplicationFeature<T>>>,
|
||||||
pub application: RustWebapp,
|
pub application: Arc<A>,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<T: Topology + std::fmt::Debug + Clone + Serialize + 'static> Score<T> for RustWebappScore<T> {
|
impl<
|
||||||
|
A: Application + Serialize + Clone + 'static,
|
||||||
|
T: Topology + std::fmt::Debug + Clone + Serialize + 'static,
|
||||||
|
> Score<T> for ApplicationScore<A, T>
|
||||||
|
where
|
||||||
|
Arc<A>: Serialize,
|
||||||
|
{
|
||||||
fn create_interpret(&self) -> Box<dyn crate::interpret::Interpret<T>> {
|
fn create_interpret(&self) -> Box<dyn crate::interpret::Interpret<T>> {
|
||||||
Box::new(ApplicationInterpret {
|
Box::new(ApplicationInterpret {
|
||||||
features: self.features.clone(),
|
features: self.features.clone(),
|
||||||
application: Arc::new(Box::new(self.application.clone())),
|
application: self.application.clone(),
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
fn name(&self) -> String {
|
fn name(&self) -> String {
|
||||||
format!("{}-RustWebapp", self.name)
|
format!("Application: {}", self.application.name())
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -47,6 +58,7 @@ pub enum RustWebFramework {
|
|||||||
#[derive(Debug, Clone, Serialize)]
|
#[derive(Debug, Clone, Serialize)]
|
||||||
pub struct RustWebapp {
|
pub struct RustWebapp {
|
||||||
pub name: String,
|
pub name: String,
|
||||||
|
pub domain: Url,
|
||||||
/// The path to the root of the Rust project to be containerized.
|
/// The path to the root of the Rust project to be containerized.
|
||||||
pub project_root: PathBuf,
|
pub project_root: PathBuf,
|
||||||
pub framework: Option<RustWebFramework>,
|
pub framework: Option<RustWebFramework>,
|
||||||
@@ -100,6 +112,7 @@ impl OCICompliant for RustWebapp {
|
|||||||
// 1. Build the local image by calling the synchronous helper function.
|
// 1. Build the local image by calling the synchronous helper function.
|
||||||
let local_image_name = self.local_image_name();
|
let local_image_name = self.local_image_name();
|
||||||
self.build_docker_image(&local_image_name)
|
self.build_docker_image(&local_image_name)
|
||||||
|
.await
|
||||||
.map_err(|e| format!("Failed to build Docker image: {}", e))?;
|
.map_err(|e| format!("Failed to build Docker image: {}", e))?;
|
||||||
info!(
|
info!(
|
||||||
"Successfully built local Docker image: {}",
|
"Successfully built local Docker image: {}",
|
||||||
@@ -109,6 +122,7 @@ impl OCICompliant for RustWebapp {
|
|||||||
let remote_image_name = self.image_name();
|
let remote_image_name = self.image_name();
|
||||||
// 2. Push the image to the registry.
|
// 2. Push the image to the registry.
|
||||||
self.push_docker_image(&local_image_name, &remote_image_name)
|
self.push_docker_image(&local_image_name, &remote_image_name)
|
||||||
|
.await
|
||||||
.map_err(|e| format!("Failed to push Docker image: {}", e))?;
|
.map_err(|e| format!("Failed to push Docker image: {}", e))?;
|
||||||
info!("Successfully pushed Docker image to: {}", remote_image_name);
|
info!("Successfully pushed Docker image to: {}", remote_image_name);
|
||||||
|
|
||||||
@@ -120,10 +134,12 @@ impl OCICompliant for RustWebapp {
|
|||||||
}
|
}
|
||||||
|
|
||||||
fn image_name(&self) -> String {
|
fn image_name(&self) -> String {
|
||||||
|
let config = Config::load().expect("couldn't load config");
|
||||||
|
|
||||||
format!(
|
format!(
|
||||||
"{}/{}/{}",
|
"{}/{}/{}",
|
||||||
*REGISTRY_URL,
|
config.registry_url,
|
||||||
*REGISTRY_PROJECT,
|
config.registry_project,
|
||||||
&self.local_image_name()
|
&self.local_image_name()
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
@@ -145,66 +161,68 @@ impl RustWebapp {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/// Builds the Docker image using the generated Dockerfile.
|
/// Builds the Docker image using the generated Dockerfile.
|
||||||
pub fn build_docker_image(
|
pub async fn build_docker_image(
|
||||||
&self,
|
&self,
|
||||||
image_name: &str,
|
image_name: &str,
|
||||||
) -> Result<String, Box<dyn std::error::Error>> {
|
) -> Result<String, Box<dyn std::error::Error>> {
|
||||||
info!("Generating Dockerfile for '{}'", self.name);
|
info!("Generating Dockerfile for '{}'", self.name);
|
||||||
let dockerfile_path = self.build_dockerfile()?;
|
let _dockerfile_path = self.build_dockerfile()?;
|
||||||
|
|
||||||
info!(
|
let docker = Docker::connect_with_socket_defaults().unwrap();
|
||||||
"Building Docker image with file {} from root {}",
|
|
||||||
dockerfile_path.to_string_lossy(),
|
let build_image_options = bollard::query_parameters::BuildImageOptionsBuilder::default()
|
||||||
self.project_root.to_string_lossy()
|
.dockerfile("Dockerfile.harmony")
|
||||||
|
.t(image_name)
|
||||||
|
.q(false)
|
||||||
|
.version(bollard::query_parameters::BuilderVersion::BuilderV1)
|
||||||
|
.platform("linux/x86_64");
|
||||||
|
|
||||||
|
let mut temp_tar_builder = tar::Builder::new(Vec::new());
|
||||||
|
let _ = temp_tar_builder
|
||||||
|
.append_dir_all("", self.project_root.clone())
|
||||||
|
.unwrap();
|
||||||
|
let archive = temp_tar_builder
|
||||||
|
.into_inner()
|
||||||
|
.expect("couldn't finish creating tar");
|
||||||
|
let archived_files = Archive::new(archive.as_slice())
|
||||||
|
.entries()
|
||||||
|
.unwrap()
|
||||||
|
.map(|entry| entry.unwrap().path().unwrap().into_owned())
|
||||||
|
.collect::<Vec<_>>();
|
||||||
|
|
||||||
|
debug!("files in docker tar: {:#?}", archived_files);
|
||||||
|
|
||||||
|
let mut image_build_stream = docker.build_image(
|
||||||
|
build_image_options.build(),
|
||||||
|
None,
|
||||||
|
Some(body_full(archive.into())),
|
||||||
);
|
);
|
||||||
let output = process::Command::new("docker")
|
|
||||||
.args([
|
|
||||||
"build",
|
|
||||||
"--file",
|
|
||||||
dockerfile_path.to_str().unwrap(),
|
|
||||||
"-t",
|
|
||||||
&image_name,
|
|
||||||
self.project_root.to_str().unwrap(),
|
|
||||||
])
|
|
||||||
.spawn()?
|
|
||||||
.wait_with_output()?;
|
|
||||||
|
|
||||||
self.check_output(&output, "Failed to build Docker image")?;
|
while let Some(msg) = image_build_stream.next().await {
|
||||||
|
println!("Message: {msg:?}");
|
||||||
|
}
|
||||||
|
|
||||||
Ok(image_name.to_string())
|
Ok(image_name.to_string())
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Tags and pushes a Docker image to the configured remote registry.
|
/// Tags and pushes a Docker image to the configured remote registry.
|
||||||
fn push_docker_image(
|
async fn push_docker_image(
|
||||||
&self,
|
&self,
|
||||||
image_name: &str,
|
image_name: &str,
|
||||||
full_tag: &str,
|
full_tag: &str,
|
||||||
) -> Result<String, Box<dyn std::error::Error>> {
|
) -> Result<String, Box<dyn std::error::Error>> {
|
||||||
info!("Pushing docker image {full_tag}");
|
info!("Pushing docker image {full_tag}");
|
||||||
|
|
||||||
// Tag the image for the remote registry.
|
let docker = Docker::connect_with_socket_defaults().unwrap();
|
||||||
let output = process::Command::new("docker")
|
|
||||||
.args(["tag", image_name, &full_tag])
|
|
||||||
.spawn()?
|
|
||||||
.wait_with_output()?;
|
|
||||||
self.check_output(&output, "Tagging docker image failed")?;
|
|
||||||
debug!(
|
|
||||||
"docker tag output: stdout: {}, stderr: {}",
|
|
||||||
String::from_utf8_lossy(&output.stdout),
|
|
||||||
String::from_utf8_lossy(&output.stderr)
|
|
||||||
);
|
|
||||||
|
|
||||||
// Push the image.
|
// let push_options = PushImageOptionsBuilder::new().tag(tag);
|
||||||
let output = process::Command::new("docker")
|
|
||||||
.args(["push", &full_tag])
|
let mut push_image_stream =
|
||||||
.spawn()?
|
docker.push_image(full_tag, Some(PushImageOptionsBuilder::new().build()), None);
|
||||||
.wait_with_output()?;
|
|
||||||
self.check_output(&output, "Pushing docker image failed")?;
|
while let Some(msg) = push_image_stream.next().await {
|
||||||
debug!(
|
println!("Message: {msg:?}");
|
||||||
"docker push output: stdout: {}, stderr: {}",
|
}
|
||||||
String::from_utf8_lossy(&output.stdout),
|
|
||||||
String::from_utf8_lossy(&output.stderr)
|
|
||||||
);
|
|
||||||
|
|
||||||
Ok(full_tag.to_string())
|
Ok(full_tag.to_string())
|
||||||
}
|
}
|
||||||
@@ -341,7 +359,11 @@ impl RustWebapp {
|
|||||||
image_url: &str,
|
image_url: &str,
|
||||||
) -> Result<PathBuf, Box<dyn std::error::Error>> {
|
) -> Result<PathBuf, Box<dyn std::error::Error>> {
|
||||||
let chart_name = format!("{}-chart", self.name);
|
let chart_name = format!("{}-chart", self.name);
|
||||||
let chart_dir = self.project_root.join("helm").join(&chart_name);
|
let chart_dir = self
|
||||||
|
.project_root
|
||||||
|
.join(".harmony_generated")
|
||||||
|
.join("helm")
|
||||||
|
.join(&chart_name);
|
||||||
let templates_dir = chart_dir.join("templates");
|
let templates_dir = chart_dir.join("templates");
|
||||||
fs::create_dir_all(&templates_dir)?;
|
fs::create_dir_all(&templates_dir)?;
|
||||||
|
|
||||||
@@ -408,7 +430,7 @@ ingress:
|
|||||||
Expand the name of the chart.
|
Expand the name of the chart.
|
||||||
*/}}
|
*/}}
|
||||||
{{- define "chart.name" -}}
|
{{- define "chart.name" -}}
|
||||||
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
|
{{- default .Chart.Name $.Values.nameOverride | trunc 63 | trimSuffix "-" }}
|
||||||
{{- end }}
|
{{- end }}
|
||||||
|
|
||||||
{{/*
|
{{/*
|
||||||
@@ -416,7 +438,7 @@ Create a default fully qualified app name.
|
|||||||
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
|
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
|
||||||
*/}}
|
*/}}
|
||||||
{{- define "chart.fullname" -}}
|
{{- define "chart.fullname" -}}
|
||||||
{{- $name := default .Chart.Name .Values.nameOverride }}
|
{{- $name := default .Chart.Name $.Values.nameOverride }}
|
||||||
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
|
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
|
||||||
{{- end }}
|
{{- end }}
|
||||||
"#;
|
"#;
|
||||||
@@ -429,12 +451,12 @@ kind: Service
|
|||||||
metadata:
|
metadata:
|
||||||
name: {{ include "chart.fullname" . }}
|
name: {{ include "chart.fullname" . }}
|
||||||
spec:
|
spec:
|
||||||
type: {{ .Values.service.type }}
|
type: {{ $.Values.service.type }}
|
||||||
ports:
|
ports:
|
||||||
- port: {{ .Values.service.port }}
|
- name: main
|
||||||
targetPort: 3000
|
port: {{ $.Values.service.port | default 3000 }}
|
||||||
|
targetPort: {{ $.Values.service.port | default 3000 }}
|
||||||
protocol: TCP
|
protocol: TCP
|
||||||
name: http
|
|
||||||
selector:
|
selector:
|
||||||
app: {{ include "chart.name" . }}
|
app: {{ include "chart.name" . }}
|
||||||
"#;
|
"#;
|
||||||
@@ -447,7 +469,7 @@ kind: Deployment
|
|||||||
metadata:
|
metadata:
|
||||||
name: {{ include "chart.fullname" . }}
|
name: {{ include "chart.fullname" . }}
|
||||||
spec:
|
spec:
|
||||||
replicas: {{ .Values.replicaCount }}
|
replicas: {{ $.Values.replicaCount }}
|
||||||
selector:
|
selector:
|
||||||
matchLabels:
|
matchLabels:
|
||||||
app: {{ include "chart.name" . }}
|
app: {{ include "chart.name" . }}
|
||||||
@@ -458,28 +480,28 @@ spec:
|
|||||||
spec:
|
spec:
|
||||||
containers:
|
containers:
|
||||||
- name: {{ .Chart.Name }}
|
- name: {{ .Chart.Name }}
|
||||||
image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
|
image: "{{ $.Values.image.repository }}:{{ $.Values.image.tag | default .Chart.AppVersion }}"
|
||||||
imagePullPolicy: {{ .Values.image.pullPolicy }}
|
imagePullPolicy: {{ $.Values.image.pullPolicy }}
|
||||||
ports:
|
ports:
|
||||||
- name: http
|
- name: main
|
||||||
containerPort: 3000
|
containerPort: {{ $.Values.service.port | default 3000 }}
|
||||||
protocol: TCP
|
protocol: TCP
|
||||||
"#;
|
"#;
|
||||||
fs::write(templates_dir.join("deployment.yaml"), deployment_yaml)?;
|
fs::write(templates_dir.join("deployment.yaml"), deployment_yaml)?;
|
||||||
|
|
||||||
// Create templates/ingress.yaml
|
// Create templates/ingress.yaml
|
||||||
let ingress_yaml = r#"
|
let ingress_yaml = r#"
|
||||||
{{- if .Values.ingress.enabled -}}
|
{{- if $.Values.ingress.enabled -}}
|
||||||
apiVersion: networking.k8s.io/v1
|
apiVersion: networking.k8s.io/v1
|
||||||
kind: Ingress
|
kind: Ingress
|
||||||
metadata:
|
metadata:
|
||||||
name: {{ include "chart.fullname" . }}
|
name: {{ include "chart.fullname" . }}
|
||||||
annotations:
|
annotations:
|
||||||
{{- toYaml .Values.ingress.annotations | nindent 4 }}
|
{{- toYaml $.Values.ingress.annotations | nindent 4 }}
|
||||||
spec:
|
spec:
|
||||||
{{- if .Values.ingress.tls }}
|
{{- if $.Values.ingress.tls }}
|
||||||
tls:
|
tls:
|
||||||
{{- range .Values.ingress.tls }}
|
{{- range $.Values.ingress.tls }}
|
||||||
- hosts:
|
- hosts:
|
||||||
{{- range .hosts }}
|
{{- range .hosts }}
|
||||||
- {{ . | quote }}
|
- {{ . | quote }}
|
||||||
@@ -488,7 +510,7 @@ spec:
|
|||||||
{{- end }}
|
{{- end }}
|
||||||
{{- end }}
|
{{- end }}
|
||||||
rules:
|
rules:
|
||||||
{{- range .Values.ingress.hosts }}
|
{{- range $.Values.ingress.hosts }}
|
||||||
- host: {{ .host | quote }}
|
- host: {{ .host | quote }}
|
||||||
http:
|
http:
|
||||||
paths:
|
paths:
|
||||||
@@ -499,7 +521,7 @@ spec:
|
|||||||
service:
|
service:
|
||||||
name: {{ include "chart.fullname" $ }}
|
name: {{ include "chart.fullname" $ }}
|
||||||
port:
|
port:
|
||||||
number: 3000
|
number: {{ $.Values.service.port | default 3000 }}
|
||||||
{{- end }}
|
{{- end }}
|
||||||
{{- end }}
|
{{- end }}
|
||||||
{{- end }}
|
{{- end }}
|
||||||
@@ -518,11 +540,15 @@ spec:
|
|||||||
info!(
|
info!(
|
||||||
"Launching `helm package {}` cli with CWD {}",
|
"Launching `helm package {}` cli with CWD {}",
|
||||||
chart_dirname.to_string_lossy(),
|
chart_dirname.to_string_lossy(),
|
||||||
&self.project_root.join("helm").to_string_lossy()
|
&self
|
||||||
|
.project_root
|
||||||
|
.join(".harmony_generated")
|
||||||
|
.join("helm")
|
||||||
|
.to_string_lossy()
|
||||||
);
|
);
|
||||||
let output = process::Command::new("helm")
|
let output = process::Command::new("helm")
|
||||||
.args(["package", chart_dirname.to_str().unwrap()])
|
.args(["package", chart_dirname.to_str().unwrap()])
|
||||||
.current_dir(&self.project_root.join("helm")) // Run package from the parent dir
|
.current_dir(&self.project_root.join(".harmony_generated").join("helm")) // Run package from the parent dir
|
||||||
.output()?;
|
.output()?;
|
||||||
|
|
||||||
self.check_output(&output, "Failed to package Helm chart")?;
|
self.check_output(&output, "Failed to package Helm chart")?;
|
||||||
@@ -539,7 +565,11 @@ spec:
|
|||||||
}
|
}
|
||||||
|
|
||||||
// The output from helm is relative, so we join it with the execution directory.
|
// The output from helm is relative, so we join it with the execution directory.
|
||||||
Ok(self.project_root.join("helm").join(tgz_name))
|
Ok(self
|
||||||
|
.project_root
|
||||||
|
.join(".harmony_generated")
|
||||||
|
.join("helm")
|
||||||
|
.join(tgz_name))
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Pushes a packaged Helm chart to an OCI registry.
|
/// Pushes a packaged Helm chart to an OCI registry.
|
||||||
@@ -547,9 +577,11 @@ spec:
|
|||||||
&self,
|
&self,
|
||||||
packaged_chart_path: &PathBuf,
|
packaged_chart_path: &PathBuf,
|
||||||
) -> Result<String, Box<dyn std::error::Error>> {
|
) -> Result<String, Box<dyn std::error::Error>> {
|
||||||
|
let config = Config::load().expect("couldn't load config");
|
||||||
|
|
||||||
// The chart name is the file stem of the .tgz file
|
// The chart name is the file stem of the .tgz file
|
||||||
let chart_file_name = packaged_chart_path.file_stem().unwrap().to_str().unwrap();
|
let chart_file_name = packaged_chart_path.file_stem().unwrap().to_str().unwrap();
|
||||||
let oci_push_url = format!("oci://{}/{}", *REGISTRY_URL, *REGISTRY_PROJECT);
|
let oci_push_url = format!("oci://{}/{}", config.registry_url, config.registry_project);
|
||||||
let oci_pull_url = format!("{oci_push_url}/{}-chart", self.name);
|
let oci_pull_url = format!("{oci_push_url}/{}-chart", self.name);
|
||||||
|
|
||||||
info!(
|
info!(
|
||||||
|
|||||||
@@ -10,14 +10,25 @@ use crate::{
|
|||||||
topology::{HttpServer, Topology, Url},
|
topology::{HttpServer, Topology, Url},
|
||||||
};
|
};
|
||||||
|
|
||||||
|
/// Configure an HTTP server that is provided by the Topology
|
||||||
|
///
|
||||||
|
/// This Score will let you easily specify a file path to be served by the HTTP server
|
||||||
|
///
|
||||||
|
/// For example, if you have a folder of assets at `/var/www/assets` simply do :
|
||||||
|
///
|
||||||
|
/// ```rust,ignore
|
||||||
|
/// StaticFilesHttpScore {
|
||||||
|
/// files_to_serve: url!("file:///var/www/assets"),
|
||||||
|
/// }
|
||||||
|
/// ```
|
||||||
#[derive(Debug, new, Clone, Serialize)]
|
#[derive(Debug, new, Clone, Serialize)]
|
||||||
pub struct HttpScore {
|
pub struct StaticFilesHttpScore {
|
||||||
files_to_serve: Url,
|
files_to_serve: Url,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<T: Topology + HttpServer> Score<T> for HttpScore {
|
impl<T: Topology + HttpServer> Score<T> for StaticFilesHttpScore {
|
||||||
fn create_interpret(&self) -> Box<dyn Interpret<T>> {
|
fn create_interpret(&self) -> Box<dyn Interpret<T>> {
|
||||||
Box::new(HttpInterpret::new(self.clone()))
|
Box::new(StaticFilesHttpInterpret::new(self.clone()))
|
||||||
}
|
}
|
||||||
|
|
||||||
fn name(&self) -> String {
|
fn name(&self) -> String {
|
||||||
@@ -26,12 +37,12 @@ impl<T: Topology + HttpServer> Score<T> for HttpScore {
|
|||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Debug, new, Clone)]
|
#[derive(Debug, new, Clone)]
|
||||||
pub struct HttpInterpret {
|
pub struct StaticFilesHttpInterpret {
|
||||||
score: HttpScore,
|
score: StaticFilesHttpScore,
|
||||||
}
|
}
|
||||||
|
|
||||||
#[async_trait]
|
#[async_trait]
|
||||||
impl<T: Topology + HttpServer> Interpret<T> for HttpInterpret {
|
impl<T: Topology + HttpServer> Interpret<T> for StaticFilesHttpInterpret {
|
||||||
async fn execute(
|
async fn execute(
|
||||||
&self,
|
&self,
|
||||||
_inventory: &Inventory,
|
_inventory: &Inventory,
|
||||||
|
|||||||
@@ -5,7 +5,7 @@ use log::info;
|
|||||||
use serde::Serialize;
|
use serde::Serialize;
|
||||||
|
|
||||||
use crate::{
|
use crate::{
|
||||||
config::HARMONY_DATA_DIR,
|
config::Config,
|
||||||
data::{Id, Version},
|
data::{Id, Version},
|
||||||
interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
|
interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
|
||||||
inventory::Inventory,
|
inventory::Inventory,
|
||||||
@@ -21,8 +21,10 @@ pub struct K3DInstallationScore {
|
|||||||
|
|
||||||
impl Default for K3DInstallationScore {
|
impl Default for K3DInstallationScore {
|
||||||
fn default() -> Self {
|
fn default() -> Self {
|
||||||
|
let config = Config::load().expect("couldn't load config");
|
||||||
|
|
||||||
Self {
|
Self {
|
||||||
installation_path: HARMONY_DATA_DIR.join("k3d"),
|
installation_path: config.data_dir.join("k3d"),
|
||||||
cluster_name: "harmony".to_string(),
|
cluster_name: "harmony".to_string(),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -14,7 +14,7 @@ use async_trait::async_trait;
|
|||||||
use log::{debug, info};
|
use log::{debug, info};
|
||||||
use serde::Serialize;
|
use serde::Serialize;
|
||||||
|
|
||||||
use crate::config::{REGISTRY_PROJECT, REGISTRY_URL};
|
use crate::config::Config as HarmonyConfig;
|
||||||
use crate::modules::k8s::ingress::K8sIngressScore;
|
use crate::modules::k8s::ingress::K8sIngressScore;
|
||||||
use crate::topology::HelmCommand;
|
use crate::topology::HelmCommand;
|
||||||
use crate::{
|
use crate::{
|
||||||
@@ -355,7 +355,12 @@ opcache.fast_shutdown=1
|
|||||||
}
|
}
|
||||||
|
|
||||||
fn push_docker_image(&self, image_name: &str) -> Result<String, Box<dyn std::error::Error>> {
|
fn push_docker_image(&self, image_name: &str) -> Result<String, Box<dyn std::error::Error>> {
|
||||||
let full_tag = format!("{}/{}/{}", *REGISTRY_URL, *REGISTRY_PROJECT, &image_name);
|
let config = HarmonyConfig::load().expect("couldn't load config");
|
||||||
|
|
||||||
|
let full_tag = format!(
|
||||||
|
"{}/{}/{}",
|
||||||
|
config.registry_url, config.registry_project, &image_name
|
||||||
|
);
|
||||||
let output = std::process::Command::new("docker")
|
let output = std::process::Command::new("docker")
|
||||||
.args(["tag", image_name, &full_tag])
|
.args(["tag", image_name, &full_tag])
|
||||||
.output()?;
|
.output()?;
|
||||||
|
|||||||
@@ -4,9 +4,12 @@ use serde_yaml::{Mapping, Value};
|
|||||||
|
|
||||||
use crate::{
|
use crate::{
|
||||||
interpret::{InterpretError, Outcome},
|
interpret::{InterpretError, Outcome},
|
||||||
modules::monitoring::kube_prometheus::{
|
modules::monitoring::{
|
||||||
prometheus::{Prometheus, PrometheusReceiver},
|
kube_prometheus::{
|
||||||
types::{AlertChannelConfig, AlertManagerChannelConfig},
|
prometheus::{KubePrometheus, KubePrometheusReceiver},
|
||||||
|
types::{AlertChannelConfig, AlertManagerChannelConfig},
|
||||||
|
},
|
||||||
|
prometheus::prometheus::{Prometheus, PrometheusReceiver},
|
||||||
},
|
},
|
||||||
topology::{Url, oberservability::monitoring::AlertReceiver},
|
topology::{Url, oberservability::monitoring::AlertReceiver},
|
||||||
};
|
};
|
||||||
@@ -37,6 +40,26 @@ impl PrometheusReceiver for DiscordWebhook {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[async_trait]
|
||||||
|
impl AlertReceiver<KubePrometheus> for DiscordWebhook {
|
||||||
|
async fn install(&self, sender: &KubePrometheus) -> Result<Outcome, InterpretError> {
|
||||||
|
sender.install_receiver(self).await
|
||||||
|
}
|
||||||
|
fn clone_box(&self) -> Box<dyn AlertReceiver<KubePrometheus>> {
|
||||||
|
Box::new(self.clone())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[async_trait]
|
||||||
|
impl KubePrometheusReceiver for DiscordWebhook {
|
||||||
|
fn name(&self) -> String {
|
||||||
|
self.name.clone()
|
||||||
|
}
|
||||||
|
async fn configure_receiver(&self) -> AlertManagerChannelConfig {
|
||||||
|
self.get_config().await
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
#[async_trait]
|
#[async_trait]
|
||||||
impl AlertChannelConfig for DiscordWebhook {
|
impl AlertChannelConfig for DiscordWebhook {
|
||||||
async fn get_config(&self) -> AlertManagerChannelConfig {
|
async fn get_config(&self) -> AlertManagerChannelConfig {
|
||||||
|
|||||||
@@ -4,9 +4,12 @@ use serde_yaml::{Mapping, Value};
|
|||||||
|
|
||||||
use crate::{
|
use crate::{
|
||||||
interpret::{InterpretError, Outcome},
|
interpret::{InterpretError, Outcome},
|
||||||
modules::monitoring::kube_prometheus::{
|
modules::monitoring::{
|
||||||
prometheus::{Prometheus, PrometheusReceiver},
|
kube_prometheus::{
|
||||||
types::{AlertChannelConfig, AlertManagerChannelConfig},
|
prometheus::{KubePrometheus, KubePrometheusReceiver},
|
||||||
|
types::{AlertChannelConfig, AlertManagerChannelConfig},
|
||||||
|
},
|
||||||
|
prometheus::prometheus::{Prometheus, PrometheusReceiver},
|
||||||
},
|
},
|
||||||
topology::{Url, oberservability::monitoring::AlertReceiver},
|
topology::{Url, oberservability::monitoring::AlertReceiver},
|
||||||
};
|
};
|
||||||
@@ -36,6 +39,25 @@ impl PrometheusReceiver for WebhookReceiver {
|
|||||||
self.get_config().await
|
self.get_config().await
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
#[async_trait]
|
||||||
|
impl AlertReceiver<KubePrometheus> for WebhookReceiver {
|
||||||
|
async fn install(&self, sender: &KubePrometheus) -> Result<Outcome, InterpretError> {
|
||||||
|
sender.install_receiver(self).await
|
||||||
|
}
|
||||||
|
fn clone_box(&self) -> Box<dyn AlertReceiver<KubePrometheus>> {
|
||||||
|
Box::new(self.clone())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[async_trait]
|
||||||
|
impl KubePrometheusReceiver for WebhookReceiver {
|
||||||
|
fn name(&self) -> String {
|
||||||
|
self.name.clone()
|
||||||
|
}
|
||||||
|
async fn configure_receiver(&self) -> AlertManagerChannelConfig {
|
||||||
|
self.get_config().await
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
#[async_trait]
|
#[async_trait]
|
||||||
impl AlertChannelConfig for WebhookReceiver {
|
impl AlertChannelConfig for WebhookReceiver {
|
||||||
|
|||||||
@@ -5,13 +5,26 @@ use serde::Serialize;
|
|||||||
|
|
||||||
use crate::{
|
use crate::{
|
||||||
interpret::{InterpretError, Outcome},
|
interpret::{InterpretError, Outcome},
|
||||||
modules::monitoring::kube_prometheus::{
|
modules::monitoring::{
|
||||||
prometheus::{Prometheus, PrometheusRule},
|
kube_prometheus::{
|
||||||
types::{AlertGroup, AlertManagerAdditionalPromRules},
|
prometheus::{KubePrometheus, KubePrometheusRule},
|
||||||
|
types::{AlertGroup, AlertManagerAdditionalPromRules},
|
||||||
|
},
|
||||||
|
prometheus::prometheus::{Prometheus, PrometheusRule},
|
||||||
},
|
},
|
||||||
topology::oberservability::monitoring::AlertRule,
|
topology::oberservability::monitoring::AlertRule,
|
||||||
};
|
};
|
||||||
|
|
||||||
|
#[async_trait]
|
||||||
|
impl AlertRule<KubePrometheus> for AlertManagerRuleGroup {
|
||||||
|
async fn install(&self, sender: &KubePrometheus) -> Result<Outcome, InterpretError> {
|
||||||
|
sender.install_rule(&self).await
|
||||||
|
}
|
||||||
|
fn clone_box(&self) -> Box<dyn AlertRule<KubePrometheus>> {
|
||||||
|
Box::new(self.clone())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
#[async_trait]
|
#[async_trait]
|
||||||
impl AlertRule<Prometheus> for AlertManagerRuleGroup {
|
impl AlertRule<Prometheus> for AlertManagerRuleGroup {
|
||||||
async fn install(&self, sender: &Prometheus) -> Result<Outcome, InterpretError> {
|
async fn install(&self, sender: &Prometheus) -> Result<Outcome, InterpretError> {
|
||||||
@@ -41,6 +54,25 @@ impl PrometheusRule for AlertManagerRuleGroup {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
#[async_trait]
|
||||||
|
impl KubePrometheusRule for AlertManagerRuleGroup {
|
||||||
|
fn name(&self) -> String {
|
||||||
|
self.name.clone()
|
||||||
|
}
|
||||||
|
async fn configure_rule(&self) -> AlertManagerAdditionalPromRules {
|
||||||
|
let mut additional_prom_rules = BTreeMap::new();
|
||||||
|
|
||||||
|
additional_prom_rules.insert(
|
||||||
|
self.name.clone(),
|
||||||
|
AlertGroup {
|
||||||
|
groups: vec![self.clone()],
|
||||||
|
},
|
||||||
|
);
|
||||||
|
AlertManagerAdditionalPromRules {
|
||||||
|
rules: additional_prom_rules,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
impl AlertManagerRuleGroup {
|
impl AlertManagerRuleGroup {
|
||||||
pub fn new(name: &str, rules: Vec<PrometheusAlertRule>) -> AlertManagerRuleGroup {
|
pub fn new(name: &str, rules: Vec<PrometheusAlertRule>) -> AlertManagerRuleGroup {
|
||||||
|
|||||||
@@ -0,0 +1,44 @@
|
|||||||
|
use std::sync::{Arc, Mutex};
|
||||||
|
|
||||||
|
use serde::Serialize;
|
||||||
|
|
||||||
|
use crate::{
|
||||||
|
modules::monitoring::{
|
||||||
|
kube_prometheus::types::ServiceMonitor,
|
||||||
|
prometheus::{prometheus::Prometheus, prometheus_config::PrometheusConfig},
|
||||||
|
},
|
||||||
|
score::Score,
|
||||||
|
topology::{
|
||||||
|
HelmCommand, Topology,
|
||||||
|
oberservability::monitoring::{AlertReceiver, AlertRule, AlertingInterpret},
|
||||||
|
tenant::TenantManager,
|
||||||
|
},
|
||||||
|
};
|
||||||
|
|
||||||
|
#[derive(Clone, Debug, Serialize)]
|
||||||
|
pub struct ApplicationPrometheusMonitoringScore {
|
||||||
|
pub receivers: Vec<Box<dyn AlertReceiver<Prometheus>>>,
|
||||||
|
pub rules: Vec<Box<dyn AlertRule<Prometheus>>>,
|
||||||
|
pub service_monitors: Vec<ServiceMonitor>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<T: Topology + HelmCommand + TenantManager> Score<T> for ApplicationPrometheusMonitoringScore {
|
||||||
|
fn create_interpret(&self) -> Box<dyn crate::interpret::Interpret<T>> {
|
||||||
|
let mut prom_config = PrometheusConfig::new();
|
||||||
|
prom_config.alert_manager = true;
|
||||||
|
|
||||||
|
let config = Arc::new(Mutex::new(prom_config));
|
||||||
|
config
|
||||||
|
.try_lock()
|
||||||
|
.expect("couldn't lock config")
|
||||||
|
.additional_service_monitors = self.service_monitors.clone();
|
||||||
|
Box::new(AlertingInterpret {
|
||||||
|
sender: Prometheus::new(),
|
||||||
|
receivers: self.receivers.clone(),
|
||||||
|
rules: self.rules.clone(),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
fn name(&self) -> String {
|
||||||
|
"ApplicationPrometheusMonitoringScore".to_string()
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -0,0 +1 @@
|
|||||||
|
pub mod k8s_application_monitoring_score;
|
||||||
28
harmony/src/modules/monitoring/grafana/helm/helm_grafana.rs
Normal file
28
harmony/src/modules/monitoring/grafana/helm/helm_grafana.rs
Normal file
@@ -0,0 +1,28 @@
|
|||||||
|
use non_blank_string_rs::NonBlankString;
|
||||||
|
use std::str::FromStr;
|
||||||
|
|
||||||
|
use crate::modules::helm::chart::HelmChartScore;
|
||||||
|
|
||||||
|
pub fn grafana_helm_chart_score(ns: &str) -> HelmChartScore {
|
||||||
|
let values = format!(
|
||||||
|
r#"
|
||||||
|
rbac:
|
||||||
|
namespaced: true
|
||||||
|
sidecar:
|
||||||
|
dashboards:
|
||||||
|
enabled: true
|
||||||
|
"#
|
||||||
|
);
|
||||||
|
|
||||||
|
HelmChartScore {
|
||||||
|
namespace: Some(NonBlankString::from_str(ns).unwrap()),
|
||||||
|
release_name: NonBlankString::from_str("grafana").unwrap(),
|
||||||
|
chart_name: NonBlankString::from_str("oci://ghcr.io/grafana/helm-charts/grafana").unwrap(),
|
||||||
|
chart_version: None,
|
||||||
|
values_overrides: None,
|
||||||
|
values_yaml: Some(values.to_string()),
|
||||||
|
create_namespace: true,
|
||||||
|
install_only: true,
|
||||||
|
repository: None,
|
||||||
|
}
|
||||||
|
}
|
||||||
1
harmony/src/modules/monitoring/grafana/helm/mod.rs
Normal file
1
harmony/src/modules/monitoring/grafana/helm/mod.rs
Normal file
@@ -0,0 +1 @@
|
|||||||
|
pub mod helm_grafana;
|
||||||
1
harmony/src/modules/monitoring/grafana/mod.rs
Normal file
1
harmony/src/modules/monitoring/grafana/mod.rs
Normal file
@@ -0,0 +1 @@
|
|||||||
|
pub mod helm;
|
||||||
@@ -38,15 +38,15 @@ impl KubePrometheusConfig {
|
|||||||
node_exporter: false,
|
node_exporter: false,
|
||||||
prometheus: true,
|
prometheus: true,
|
||||||
kubernetes_service_monitors: true,
|
kubernetes_service_monitors: true,
|
||||||
kubernetes_api_server: false,
|
kubernetes_api_server: true,
|
||||||
kubelet: true,
|
kubelet: true,
|
||||||
kube_controller_manager: false,
|
kube_controller_manager: true,
|
||||||
kube_etcd: false,
|
kube_etcd: true,
|
||||||
kube_proxy: false,
|
kube_proxy: true,
|
||||||
kube_state_metrics: true,
|
kube_state_metrics: true,
|
||||||
prometheus_operator: true,
|
prometheus_operator: true,
|
||||||
core_dns: false,
|
core_dns: true,
|
||||||
kube_scheduler: false,
|
kube_scheduler: true,
|
||||||
alert_receiver_configs: vec![],
|
alert_receiver_configs: vec![],
|
||||||
alert_rules: vec![],
|
alert_rules: vec![],
|
||||||
additional_service_monitors: vec![],
|
additional_service_monitors: vec![],
|
||||||
|
|||||||
@@ -68,11 +68,14 @@ pub fn kube_prometheus_helm_chart_score(
|
|||||||
|
|
||||||
let mut values = format!(
|
let mut values = format!(
|
||||||
r#"
|
r#"
|
||||||
|
global:
|
||||||
|
rbac:
|
||||||
|
create: true
|
||||||
prometheus:
|
prometheus:
|
||||||
enabled: {prometheus}
|
enabled: {prometheus}
|
||||||
prometheusSpec:
|
prometheusSpec:
|
||||||
resources:
|
resources:
|
||||||
requests:
|
requests:
|
||||||
cpu: 100m
|
cpu: 100m
|
||||||
memory: 500Mi
|
memory: 500Mi
|
||||||
limits:
|
limits:
|
||||||
@@ -118,7 +121,7 @@ defaultRules:
|
|||||||
windowsMonitoring:
|
windowsMonitoring:
|
||||||
enabled: {windows_monitoring}
|
enabled: {windows_monitoring}
|
||||||
resources:
|
resources:
|
||||||
requests:
|
requests:
|
||||||
cpu: 100m
|
cpu: 100m
|
||||||
memory: 150Mi
|
memory: 150Mi
|
||||||
limits:
|
limits:
|
||||||
@@ -127,13 +130,13 @@ windowsMonitoring:
|
|||||||
grafana:
|
grafana:
|
||||||
enabled: {grafana}
|
enabled: {grafana}
|
||||||
resources:
|
resources:
|
||||||
requests:
|
requests:
|
||||||
cpu: 100m
|
cpu: 100m
|
||||||
memory: 150Mi
|
memory: 150Mi
|
||||||
limits:
|
limits:
|
||||||
cpu: 200m
|
cpu: 200m
|
||||||
memory: 250Mi
|
memory: 250Mi
|
||||||
initChownData:
|
initChownData:
|
||||||
resources:
|
resources:
|
||||||
requests:
|
requests:
|
||||||
cpu: 10m
|
cpu: 10m
|
||||||
@@ -154,7 +157,7 @@ kubernetesServiceMonitors:
|
|||||||
kubeApiServer:
|
kubeApiServer:
|
||||||
enabled: {kubernetes_api_server}
|
enabled: {kubernetes_api_server}
|
||||||
resources:
|
resources:
|
||||||
requests:
|
requests:
|
||||||
cpu: 100m
|
cpu: 100m
|
||||||
memory: 150Mi
|
memory: 150Mi
|
||||||
limits:
|
limits:
|
||||||
@@ -163,7 +166,7 @@ kubeApiServer:
|
|||||||
kubelet:
|
kubelet:
|
||||||
enabled: {kubelet}
|
enabled: {kubelet}
|
||||||
resources:
|
resources:
|
||||||
requests:
|
requests:
|
||||||
cpu: 100m
|
cpu: 100m
|
||||||
memory: 150Mi
|
memory: 150Mi
|
||||||
limits:
|
limits:
|
||||||
@@ -172,7 +175,7 @@ kubelet:
|
|||||||
kubeControllerManager:
|
kubeControllerManager:
|
||||||
enabled: {kube_controller_manager}
|
enabled: {kube_controller_manager}
|
||||||
resources:
|
resources:
|
||||||
requests:
|
requests:
|
||||||
cpu: 100m
|
cpu: 100m
|
||||||
memory: 150Mi
|
memory: 150Mi
|
||||||
limits:
|
limits:
|
||||||
@@ -181,7 +184,7 @@ kubeControllerManager:
|
|||||||
coreDns:
|
coreDns:
|
||||||
enabled: {core_dns}
|
enabled: {core_dns}
|
||||||
resources:
|
resources:
|
||||||
requests:
|
requests:
|
||||||
cpu: 100m
|
cpu: 100m
|
||||||
memory: 150Mi
|
memory: 150Mi
|
||||||
limits:
|
limits:
|
||||||
@@ -190,7 +193,7 @@ coreDns:
|
|||||||
kubeEtcd:
|
kubeEtcd:
|
||||||
enabled: {kube_etcd}
|
enabled: {kube_etcd}
|
||||||
resources:
|
resources:
|
||||||
requests:
|
requests:
|
||||||
cpu: 100m
|
cpu: 100m
|
||||||
memory: 150Mi
|
memory: 150Mi
|
||||||
limits:
|
limits:
|
||||||
@@ -199,7 +202,7 @@ kubeEtcd:
|
|||||||
kubeScheduler:
|
kubeScheduler:
|
||||||
enabled: {kube_scheduler}
|
enabled: {kube_scheduler}
|
||||||
resources:
|
resources:
|
||||||
requests:
|
requests:
|
||||||
cpu: 100m
|
cpu: 100m
|
||||||
memory: 150Mi
|
memory: 150Mi
|
||||||
limits:
|
limits:
|
||||||
@@ -208,7 +211,7 @@ kubeScheduler:
|
|||||||
kubeProxy:
|
kubeProxy:
|
||||||
enabled: {kube_proxy}
|
enabled: {kube_proxy}
|
||||||
resources:
|
resources:
|
||||||
requests:
|
requests:
|
||||||
cpu: 100m
|
cpu: 100m
|
||||||
memory: 150Mi
|
memory: 150Mi
|
||||||
limits:
|
limits:
|
||||||
@@ -218,7 +221,7 @@ kubeStateMetrics:
|
|||||||
enabled: {kube_state_metrics}
|
enabled: {kube_state_metrics}
|
||||||
kube-state-metrics:
|
kube-state-metrics:
|
||||||
resources:
|
resources:
|
||||||
requests:
|
requests:
|
||||||
cpu: 100m
|
cpu: 100m
|
||||||
memory: 150Mi
|
memory: 150Mi
|
||||||
limits:
|
limits:
|
||||||
@@ -227,7 +230,7 @@ kube-state-metrics:
|
|||||||
nodeExporter:
|
nodeExporter:
|
||||||
enabled: {node_exporter}
|
enabled: {node_exporter}
|
||||||
resources:
|
resources:
|
||||||
requests:
|
requests:
|
||||||
cpu: 100m
|
cpu: 100m
|
||||||
memory: 150Mi
|
memory: 150Mi
|
||||||
limits:
|
limits:
|
||||||
@@ -235,16 +238,16 @@ nodeExporter:
|
|||||||
memory: 250Mi
|
memory: 250Mi
|
||||||
prometheus-node-exporter:
|
prometheus-node-exporter:
|
||||||
resources:
|
resources:
|
||||||
requests:
|
requests:
|
||||||
cpu: 100m
|
cpu: 100m
|
||||||
memory: 150Mi
|
memory: 150Mi
|
||||||
limits:
|
limits:
|
||||||
cpu: 200m
|
cpu: 200m
|
||||||
memory: 250Mi
|
memory: 250Mi
|
||||||
prometheusOperator:
|
prometheusOperator:
|
||||||
enabled: {prometheus_operator}
|
enabled: true
|
||||||
resources:
|
resources:
|
||||||
requests:
|
requests:
|
||||||
cpu: 100m
|
cpu: 100m
|
||||||
memory: 150Mi
|
memory: 150Mi
|
||||||
limits:
|
limits:
|
||||||
@@ -252,7 +255,7 @@ prometheusOperator:
|
|||||||
memory: 200Mi
|
memory: 200Mi
|
||||||
prometheusConfigReloader:
|
prometheusConfigReloader:
|
||||||
resources:
|
resources:
|
||||||
requests:
|
requests:
|
||||||
cpu: 100m
|
cpu: 100m
|
||||||
memory: 150Mi
|
memory: 150Mi
|
||||||
limits:
|
limits:
|
||||||
@@ -264,7 +267,7 @@ prometheusOperator:
|
|||||||
limits:
|
limits:
|
||||||
cpu: 10m
|
cpu: 10m
|
||||||
memory: 100Mi
|
memory: 100Mi
|
||||||
requests:
|
requests:
|
||||||
cpu: 10m
|
cpu: 10m
|
||||||
memory: 100Mi
|
memory: 100Mi
|
||||||
patch:
|
patch:
|
||||||
@@ -272,7 +275,7 @@ prometheusOperator:
|
|||||||
limits:
|
limits:
|
||||||
cpu: 10m
|
cpu: 10m
|
||||||
memory: 100Mi
|
memory: 100Mi
|
||||||
requests:
|
requests:
|
||||||
cpu: 10m
|
cpu: 10m
|
||||||
memory: 100Mi
|
memory: 100Mi
|
||||||
"#,
|
"#,
|
||||||
|
|||||||
@@ -2,7 +2,7 @@ use std::sync::{Arc, Mutex};
|
|||||||
|
|
||||||
use serde::Serialize;
|
use serde::Serialize;
|
||||||
|
|
||||||
use super::{helm::config::KubePrometheusConfig, prometheus::Prometheus};
|
use super::{helm::config::KubePrometheusConfig, prometheus::KubePrometheus};
|
||||||
use crate::{
|
use crate::{
|
||||||
modules::monitoring::kube_prometheus::types::ServiceMonitor,
|
modules::monitoring::kube_prometheus::types::ServiceMonitor,
|
||||||
score::Score,
|
score::Score,
|
||||||
@@ -15,8 +15,8 @@ use crate::{
|
|||||||
|
|
||||||
#[derive(Clone, Debug, Serialize)]
|
#[derive(Clone, Debug, Serialize)]
|
||||||
pub struct HelmPrometheusAlertingScore {
|
pub struct HelmPrometheusAlertingScore {
|
||||||
pub receivers: Vec<Box<dyn AlertReceiver<Prometheus>>>,
|
pub receivers: Vec<Box<dyn AlertReceiver<KubePrometheus>>>,
|
||||||
pub rules: Vec<Box<dyn AlertRule<Prometheus>>>,
|
pub rules: Vec<Box<dyn AlertRule<KubePrometheus>>>,
|
||||||
pub service_monitors: Vec<ServiceMonitor>,
|
pub service_monitors: Vec<ServiceMonitor>,
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -28,7 +28,7 @@ impl<T: Topology + HelmCommand + TenantManager> Score<T> for HelmPrometheusAlert
|
|||||||
.expect("couldn't lock config")
|
.expect("couldn't lock config")
|
||||||
.additional_service_monitors = self.service_monitors.clone();
|
.additional_service_monitors = self.service_monitors.clone();
|
||||||
Box::new(AlertingInterpret {
|
Box::new(AlertingInterpret {
|
||||||
sender: Prometheus::new(),
|
sender: KubePrometheus { config },
|
||||||
receivers: self.receivers.clone(),
|
receivers: self.receivers.clone(),
|
||||||
rules: self.rules.clone(),
|
rules: self.rules.clone(),
|
||||||
})
|
})
|
||||||
|
|||||||
@@ -10,7 +10,7 @@ use crate::{
|
|||||||
modules::monitoring::alert_rule::prometheus_alert_rule::AlertManagerRuleGroup,
|
modules::monitoring::alert_rule::prometheus_alert_rule::AlertManagerRuleGroup,
|
||||||
score,
|
score,
|
||||||
topology::{
|
topology::{
|
||||||
HelmCommand, K8sAnywhereTopology, Topology,
|
HelmCommand, Topology,
|
||||||
installable::Installable,
|
installable::Installable,
|
||||||
oberservability::monitoring::{AlertReceiver, AlertRule, AlertSender},
|
oberservability::monitoring::{AlertReceiver, AlertRule, AlertSender},
|
||||||
tenant::TenantManager,
|
tenant::TenantManager,
|
||||||
@@ -27,14 +27,14 @@ use super::{
|
|||||||
};
|
};
|
||||||
|
|
||||||
#[async_trait]
|
#[async_trait]
|
||||||
impl AlertSender for Prometheus {
|
impl AlertSender for KubePrometheus {
|
||||||
fn name(&self) -> String {
|
fn name(&self) -> String {
|
||||||
"HelmKubePrometheus".to_string()
|
"HelmKubePrometheus".to_string()
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
#[async_trait]
|
#[async_trait]
|
||||||
impl<T: Topology + HelmCommand + TenantManager> Installable<T> for Prometheus {
|
impl<T: Topology + HelmCommand + TenantManager> Installable<T> for KubePrometheus {
|
||||||
async fn configure(&self, _inventory: &Inventory, topology: &T) -> Result<(), InterpretError> {
|
async fn configure(&self, _inventory: &Inventory, topology: &T) -> Result<(), InterpretError> {
|
||||||
self.configure_with_topology(topology).await;
|
self.configure_with_topology(topology).await;
|
||||||
Ok(())
|
Ok(())
|
||||||
@@ -51,11 +51,11 @@ impl<T: Topology + HelmCommand + TenantManager> Installable<T> for Prometheus {
|
|||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Debug)]
|
#[derive(Debug)]
|
||||||
pub struct Prometheus {
|
pub struct KubePrometheus {
|
||||||
pub config: Arc<Mutex<KubePrometheusConfig>>,
|
pub config: Arc<Mutex<KubePrometheusConfig>>,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl Prometheus {
|
impl KubePrometheus {
|
||||||
pub fn new() -> Self {
|
pub fn new() -> Self {
|
||||||
Self {
|
Self {
|
||||||
config: Arc::new(Mutex::new(KubePrometheusConfig::new())),
|
config: Arc::new(Mutex::new(KubePrometheusConfig::new())),
|
||||||
@@ -75,7 +75,7 @@ impl Prometheus {
|
|||||||
|
|
||||||
pub async fn install_receiver(
|
pub async fn install_receiver(
|
||||||
&self,
|
&self,
|
||||||
prometheus_receiver: &dyn PrometheusReceiver,
|
prometheus_receiver: &dyn KubePrometheusReceiver,
|
||||||
) -> Result<Outcome, InterpretError> {
|
) -> Result<Outcome, InterpretError> {
|
||||||
let prom_receiver = prometheus_receiver.configure_receiver().await;
|
let prom_receiver = prometheus_receiver.configure_receiver().await;
|
||||||
debug!(
|
debug!(
|
||||||
@@ -120,12 +120,12 @@ impl Prometheus {
|
|||||||
}
|
}
|
||||||
|
|
||||||
#[async_trait]
|
#[async_trait]
|
||||||
pub trait PrometheusReceiver: Send + Sync + std::fmt::Debug {
|
pub trait KubePrometheusReceiver: Send + Sync + std::fmt::Debug {
|
||||||
fn name(&self) -> String;
|
fn name(&self) -> String;
|
||||||
async fn configure_receiver(&self) -> AlertManagerChannelConfig;
|
async fn configure_receiver(&self) -> AlertManagerChannelConfig;
|
||||||
}
|
}
|
||||||
|
|
||||||
impl Serialize for Box<dyn AlertReceiver<Prometheus>> {
|
impl Serialize for Box<dyn AlertReceiver<KubePrometheus>> {
|
||||||
fn serialize<S>(&self, _serializer: S) -> Result<S::Ok, S::Error>
|
fn serialize<S>(&self, _serializer: S) -> Result<S::Ok, S::Error>
|
||||||
where
|
where
|
||||||
S: serde::Serializer,
|
S: serde::Serializer,
|
||||||
@@ -134,19 +134,19 @@ impl Serialize for Box<dyn AlertReceiver<Prometheus>> {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl Clone for Box<dyn AlertReceiver<Prometheus>> {
|
impl Clone for Box<dyn AlertReceiver<KubePrometheus>> {
|
||||||
fn clone(&self) -> Self {
|
fn clone(&self) -> Self {
|
||||||
self.clone_box()
|
self.clone_box()
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
#[async_trait]
|
#[async_trait]
|
||||||
pub trait PrometheusRule: Send + Sync + std::fmt::Debug {
|
pub trait KubePrometheusRule: Send + Sync + std::fmt::Debug {
|
||||||
fn name(&self) -> String;
|
fn name(&self) -> String;
|
||||||
async fn configure_rule(&self) -> AlertManagerAdditionalPromRules;
|
async fn configure_rule(&self) -> AlertManagerAdditionalPromRules;
|
||||||
}
|
}
|
||||||
|
|
||||||
impl Serialize for Box<dyn AlertRule<Prometheus>> {
|
impl Serialize for Box<dyn AlertRule<KubePrometheus>> {
|
||||||
fn serialize<S>(&self, _serializer: S) -> Result<S::Ok, S::Error>
|
fn serialize<S>(&self, _serializer: S) -> Result<S::Ok, S::Error>
|
||||||
where
|
where
|
||||||
S: serde::Serializer,
|
S: serde::Serializer,
|
||||||
@@ -155,7 +155,7 @@ impl Serialize for Box<dyn AlertRule<Prometheus>> {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl Clone for Box<dyn AlertRule<Prometheus>> {
|
impl Clone for Box<dyn AlertRule<KubePrometheus>> {
|
||||||
fn clone(&self) -> Self {
|
fn clone(&self) -> Self {
|
||||||
self.clone_box()
|
self.clone_box()
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -212,7 +212,7 @@ pub struct ServiceMonitor {
|
|||||||
pub name: String,
|
pub name: String,
|
||||||
|
|
||||||
// # Additional labels to set used for the ServiceMonitorSelector. Together with standard labels from the chart
|
// # Additional labels to set used for the ServiceMonitorSelector. Together with standard labels from the chart
|
||||||
pub additional_labels: Option<Mapping>,
|
pub additional_labels: Option<HashMap<String, String>>,
|
||||||
|
|
||||||
// # Service label for use in assembling a job name of the form <label value>-<port>
|
// # Service label for use in assembling a job name of the form <label value>-<port>
|
||||||
// # If no label is specified, the service name is used.
|
// # If no label is specified, the service name is used.
|
||||||
@@ -240,7 +240,7 @@ pub struct ServiceMonitor {
|
|||||||
// any: bool,
|
// any: bool,
|
||||||
// # Explicit list of namespace names to select
|
// # Explicit list of namespace names to select
|
||||||
// matchNames: Vec,
|
// matchNames: Vec,
|
||||||
pub namespace_selector: Option<Mapping>,
|
pub namespace_selector: Option<NamespaceSelector>,
|
||||||
|
|
||||||
// # Endpoints of the selected service to be monitored
|
// # Endpoints of the selected service to be monitored
|
||||||
pub endpoints: Vec<ServiceMonitorEndpoint>,
|
pub endpoints: Vec<ServiceMonitorEndpoint>,
|
||||||
@@ -250,6 +250,13 @@ pub struct ServiceMonitor {
|
|||||||
pub fallback_scrape_protocol: Option<String>,
|
pub fallback_scrape_protocol: Option<String>,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[derive(Debug, Serialize, Clone)]
|
||||||
|
#[serde(rename_all = "camelCase")]
|
||||||
|
pub struct NamespaceSelector {
|
||||||
|
pub any: bool,
|
||||||
|
pub match_names: Vec<String>,
|
||||||
|
}
|
||||||
|
|
||||||
impl Default for ServiceMonitor {
|
impl Default for ServiceMonitor {
|
||||||
fn default() -> Self {
|
fn default() -> Self {
|
||||||
Self {
|
Self {
|
||||||
|
|||||||
@@ -1,4 +1,7 @@
|
|||||||
pub mod alert_channel;
|
pub mod alert_channel;
|
||||||
pub mod alert_rule;
|
pub mod alert_rule;
|
||||||
|
pub mod application_monitoring;
|
||||||
|
pub mod grafana;
|
||||||
pub mod kube_prometheus;
|
pub mod kube_prometheus;
|
||||||
pub mod ntfy;
|
pub mod ntfy;
|
||||||
|
pub mod prometheus;
|
||||||
|
|||||||
@@ -3,7 +3,7 @@ use std::str::FromStr;
|
|||||||
|
|
||||||
use crate::modules::helm::chart::{HelmChartScore, HelmRepository};
|
use crate::modules::helm::chart::{HelmChartScore, HelmRepository};
|
||||||
|
|
||||||
pub fn ntfy_helm_chart_score(namespace: String) -> HelmChartScore {
|
pub fn ntfy_helm_chart_score(namespace: String, host: String) -> HelmChartScore {
|
||||||
let values = format!(
|
let values = format!(
|
||||||
r#"
|
r#"
|
||||||
replicaCount: 1
|
replicaCount: 1
|
||||||
@@ -28,12 +28,12 @@ service:
|
|||||||
port: 80
|
port: 80
|
||||||
|
|
||||||
ingress:
|
ingress:
|
||||||
enabled: false
|
enabled: true
|
||||||
# annotations:
|
# annotations:
|
||||||
# kubernetes.io/ingress.class: nginx
|
# kubernetes.io/ingress.class: nginx
|
||||||
# kubernetes.io/tls-acme: "true"
|
# kubernetes.io/tls-acme: "true"
|
||||||
hosts:
|
hosts:
|
||||||
- host: ntfy.host.com
|
- host: {host}
|
||||||
paths:
|
paths:
|
||||||
- path: /
|
- path: /
|
||||||
pathType: ImplementationSpecific
|
pathType: ImplementationSpecific
|
||||||
|
|||||||
@@ -17,6 +17,7 @@ use crate::{
|
|||||||
#[derive(Debug, Clone, Serialize)]
|
#[derive(Debug, Clone, Serialize)]
|
||||||
pub struct NtfyScore {
|
pub struct NtfyScore {
|
||||||
pub namespace: String,
|
pub namespace: String,
|
||||||
|
pub host: String,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<T: Topology + HelmCommand + K8sclient> Score<T> for NtfyScore {
|
impl<T: Topology + HelmCommand + K8sclient> Score<T> for NtfyScore {
|
||||||
@@ -126,7 +127,7 @@ impl<T: Topology + HelmCommand + K8sclient> Interpret<T> for NtfyInterpret {
|
|||||||
inventory: &Inventory,
|
inventory: &Inventory,
|
||||||
topology: &T,
|
topology: &T,
|
||||||
) -> Result<Outcome, InterpretError> {
|
) -> Result<Outcome, InterpretError> {
|
||||||
ntfy_helm_chart_score(self.score.namespace.clone())
|
ntfy_helm_chart_score(self.score.namespace.clone(), self.score.host.clone())
|
||||||
.create_interpret()
|
.create_interpret()
|
||||||
.execute(inventory, topology)
|
.execute(inventory, topology)
|
||||||
.await?;
|
.await?;
|
||||||
|
|||||||
1
harmony/src/modules/monitoring/prometheus/helm/mod.rs
Normal file
1
harmony/src/modules/monitoring/prometheus/helm/mod.rs
Normal file
@@ -0,0 +1 @@
|
|||||||
|
pub mod prometheus_helm;
|
||||||
@@ -0,0 +1,47 @@
|
|||||||
|
use std::str::FromStr;
|
||||||
|
use std::sync::{Arc, Mutex};
|
||||||
|
|
||||||
|
use non_blank_string_rs::NonBlankString;
|
||||||
|
|
||||||
|
use crate::modules::{
|
||||||
|
helm::chart::HelmChartScore, monitoring::prometheus::prometheus_config::PrometheusConfig,
|
||||||
|
};
|
||||||
|
|
||||||
|
pub fn prometheus_helm_chart_score(config: Arc<Mutex<PrometheusConfig>>) -> HelmChartScore {
|
||||||
|
let config = config.lock().unwrap();
|
||||||
|
let ns = config.namespace.clone().unwrap();
|
||||||
|
let values = format!(
|
||||||
|
r#"
|
||||||
|
rbac:
|
||||||
|
create: true
|
||||||
|
kube-state-metrics:
|
||||||
|
enabled: false
|
||||||
|
nodeExporter:
|
||||||
|
enabled: false
|
||||||
|
alertmanager:
|
||||||
|
enabled: false
|
||||||
|
pushgateway:
|
||||||
|
enabled: false
|
||||||
|
server:
|
||||||
|
serviceAccount:
|
||||||
|
create: false
|
||||||
|
rbac:
|
||||||
|
create: true
|
||||||
|
fullnameOverride: prometheus-{ns}
|
||||||
|
"#
|
||||||
|
);
|
||||||
|
HelmChartScore {
|
||||||
|
namespace: Some(NonBlankString::from_str(&config.namespace.clone().unwrap()).unwrap()),
|
||||||
|
release_name: NonBlankString::from_str("prometheus").unwrap(),
|
||||||
|
chart_name: NonBlankString::from_str(
|
||||||
|
"oci://ghcr.io/prometheus-community/charts/prometheus",
|
||||||
|
)
|
||||||
|
.unwrap(),
|
||||||
|
chart_version: None,
|
||||||
|
values_overrides: None,
|
||||||
|
values_yaml: Some(values.to_string()),
|
||||||
|
create_namespace: true,
|
||||||
|
install_only: true,
|
||||||
|
repository: None,
|
||||||
|
}
|
||||||
|
}
|
||||||
3
harmony/src/modules/monitoring/prometheus/mod.rs
Normal file
3
harmony/src/modules/monitoring/prometheus/mod.rs
Normal file
@@ -0,0 +1,3 @@
|
|||||||
|
pub mod helm;
|
||||||
|
pub mod prometheus;
|
||||||
|
pub mod prometheus_config;
|
||||||
190
harmony/src/modules/monitoring/prometheus/prometheus.rs
Normal file
190
harmony/src/modules/monitoring/prometheus/prometheus.rs
Normal file
@@ -0,0 +1,190 @@
|
|||||||
|
use std::sync::{Arc, Mutex};
|
||||||
|
|
||||||
|
use async_trait::async_trait;
|
||||||
|
use log::{debug, error};
|
||||||
|
use serde::Serialize;
|
||||||
|
|
||||||
|
use crate::{
|
||||||
|
interpret::{InterpretError, Outcome},
|
||||||
|
inventory::Inventory,
|
||||||
|
modules::monitoring::{
|
||||||
|
alert_rule::prometheus_alert_rule::AlertManagerRuleGroup,
|
||||||
|
grafana::helm::helm_grafana::grafana_helm_chart_score,
|
||||||
|
kube_prometheus::types::{AlertManagerAdditionalPromRules, AlertManagerChannelConfig},
|
||||||
|
},
|
||||||
|
score::Score,
|
||||||
|
topology::{
|
||||||
|
HelmCommand, Topology,
|
||||||
|
installable::Installable,
|
||||||
|
oberservability::monitoring::{AlertReceiver, AlertRule, AlertSender},
|
||||||
|
tenant::TenantManager,
|
||||||
|
},
|
||||||
|
};
|
||||||
|
|
||||||
|
use super::{
|
||||||
|
helm::prometheus_helm::prometheus_helm_chart_score, prometheus_config::PrometheusConfig,
|
||||||
|
};
|
||||||
|
|
||||||
|
#[derive(Debug)]
|
||||||
|
pub struct Prometheus {
|
||||||
|
pub config: Arc<Mutex<PrometheusConfig>>,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[async_trait]
|
||||||
|
impl AlertSender for Prometheus {
|
||||||
|
fn name(&self) -> String {
|
||||||
|
"Prometheus".to_string()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Prometheus {
|
||||||
|
pub fn new() -> Self {
|
||||||
|
Self {
|
||||||
|
config: Arc::new(Mutex::new(PrometheusConfig::new())),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
pub async fn configure_with_topology<T: TenantManager>(&self, topology: &T) {
|
||||||
|
let ns = topology
|
||||||
|
.get_tenant_config()
|
||||||
|
.await
|
||||||
|
.map(|cfg| cfg.name.clone())
|
||||||
|
.unwrap_or_else(|| "monitoring".to_string());
|
||||||
|
error!("This must be refactored, see comments in pr #74");
|
||||||
|
debug!("NS: {}", ns);
|
||||||
|
self.config.lock().unwrap().namespace = Some(ns);
|
||||||
|
}
|
||||||
|
|
||||||
|
pub async fn install_receiver(
|
||||||
|
&self,
|
||||||
|
prometheus_receiver: &dyn PrometheusReceiver,
|
||||||
|
) -> Result<Outcome, InterpretError> {
|
||||||
|
let prom_receiver = prometheus_receiver.configure_receiver().await;
|
||||||
|
debug!(
|
||||||
|
"adding alert receiver to prometheus config: {:#?}",
|
||||||
|
&prom_receiver
|
||||||
|
);
|
||||||
|
let mut config = self.config.lock().unwrap();
|
||||||
|
|
||||||
|
config.alert_receiver_configs.push(prom_receiver);
|
||||||
|
let prom_receiver_name = prometheus_receiver.name();
|
||||||
|
debug!("installed alert receiver {}", &prom_receiver_name);
|
||||||
|
Ok(Outcome::success(format!(
|
||||||
|
"Sucessfully installed receiver {}",
|
||||||
|
prom_receiver_name
|
||||||
|
)))
|
||||||
|
}
|
||||||
|
|
||||||
|
pub async fn install_rule(
|
||||||
|
&self,
|
||||||
|
prometheus_rule: &AlertManagerRuleGroup,
|
||||||
|
) -> Result<Outcome, InterpretError> {
|
||||||
|
let prometheus_rule = prometheus_rule.configure_rule().await;
|
||||||
|
let mut config = self.config.lock().unwrap();
|
||||||
|
|
||||||
|
config.alert_rules.push(prometheus_rule.clone());
|
||||||
|
Ok(Outcome::success(format!(
|
||||||
|
"Successfully installed alert rule: {:#?},",
|
||||||
|
prometheus_rule
|
||||||
|
)))
|
||||||
|
}
|
||||||
|
|
||||||
|
pub async fn install_prometheus<T: Topology + HelmCommand + Send + Sync>(
|
||||||
|
&self,
|
||||||
|
inventory: &Inventory,
|
||||||
|
topology: &T,
|
||||||
|
) -> Result<Outcome, InterpretError> {
|
||||||
|
prometheus_helm_chart_score(self.config.clone())
|
||||||
|
.create_interpret()
|
||||||
|
.execute(inventory, topology)
|
||||||
|
.await
|
||||||
|
}
|
||||||
|
pub async fn install_grafana<T: Topology + HelmCommand + Send + Sync>(
|
||||||
|
&self,
|
||||||
|
inventory: &Inventory,
|
||||||
|
topology: &T,
|
||||||
|
) -> Result<Outcome, InterpretError> {
|
||||||
|
let namespace = {
|
||||||
|
let config = self.config.lock().unwrap();
|
||||||
|
config.namespace.clone()
|
||||||
|
};
|
||||||
|
|
||||||
|
if let Some(ns) = namespace.as_deref() {
|
||||||
|
grafana_helm_chart_score(ns)
|
||||||
|
.create_interpret()
|
||||||
|
.execute(inventory, topology)
|
||||||
|
.await
|
||||||
|
} else {
|
||||||
|
Err(InterpretError::new(format!(
|
||||||
|
"could not install grafana, missing namespace",
|
||||||
|
)))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
#[async_trait]
|
||||||
|
impl<T: Topology + HelmCommand + TenantManager> Installable<T> for Prometheus {
|
||||||
|
async fn configure(&self, _inventory: &Inventory, topology: &T) -> Result<(), InterpretError> {
|
||||||
|
self.configure_with_topology(topology).await;
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn ensure_installed(
|
||||||
|
&self,
|
||||||
|
inventory: &Inventory,
|
||||||
|
topology: &T,
|
||||||
|
) -> Result<(), InterpretError> {
|
||||||
|
self.install_prometheus(inventory, topology).await?;
|
||||||
|
|
||||||
|
let install_grafana = {
|
||||||
|
let config = self.config.lock().unwrap();
|
||||||
|
config.grafana
|
||||||
|
};
|
||||||
|
|
||||||
|
if install_grafana {
|
||||||
|
self.install_grafana(inventory, topology).await?;
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[async_trait]
|
||||||
|
pub trait PrometheusReceiver: Send + Sync + std::fmt::Debug {
|
||||||
|
fn name(&self) -> String;
|
||||||
|
async fn configure_receiver(&self) -> AlertManagerChannelConfig;
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Serialize for Box<dyn AlertReceiver<Prometheus>> {
|
||||||
|
fn serialize<S>(&self, _serializer: S) -> Result<S::Ok, S::Error>
|
||||||
|
where
|
||||||
|
S: serde::Serializer,
|
||||||
|
{
|
||||||
|
todo!()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Clone for Box<dyn AlertReceiver<Prometheus>> {
|
||||||
|
fn clone(&self) -> Self {
|
||||||
|
self.clone_box()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[async_trait]
|
||||||
|
pub trait PrometheusRule: Send + Sync + std::fmt::Debug {
|
||||||
|
fn name(&self) -> String;
|
||||||
|
async fn configure_rule(&self) -> AlertManagerAdditionalPromRules;
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Serialize for Box<dyn AlertRule<Prometheus>> {
|
||||||
|
fn serialize<S>(&self, _serializer: S) -> Result<S::Ok, S::Error>
|
||||||
|
where
|
||||||
|
S: serde::Serializer,
|
||||||
|
{
|
||||||
|
todo!()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Clone for Box<dyn AlertRule<Prometheus>> {
    /// Duplicate the boxed rule by delegating to the trait's
    /// `clone_box` method (`Clone` itself is not object safe).
    fn clone(&self) -> Box<dyn AlertRule<Prometheus>> {
        self.clone_box()
    }
}
|
||||||
@@ -0,0 +1,34 @@
|
|||||||
|
use crate::modules::monitoring::kube_prometheus::types::{
|
||||||
|
AlertManagerAdditionalPromRules, AlertManagerChannelConfig, ServiceMonitor,
|
||||||
|
};
|
||||||
|
|
||||||
|
/// Feature toggles and add-on definitions for a Prometheus stack deployment.
#[derive(Debug)]
pub struct PrometheusConfig {
    /// Target namespace; `None` lets the installer pick its default.
    pub namespace: Option<String>,
    /// Toggle for the stack's built-in default rules.
    pub default_rules: bool,
    /// Toggle for the Alertmanager component.
    pub alert_manager: bool,
    /// Toggle for the node-exporter component.
    pub node_exporter: bool,
    /// Toggle for the kube-state-metrics component.
    pub kube_state_metrics: bool,
    /// Toggle for Grafana — consulted by `ensure_installed` before
    /// installing Grafana alongside Prometheus.
    pub grafana: bool,
    /// Toggle for the Prometheus push gateway component.
    pub prometheus_pushgateway: bool,
    /// Extra Alertmanager notification channels to configure.
    pub alert_receiver_configs: Vec<AlertManagerChannelConfig>,
    /// Extra Prometheus alerting rules to install.
    pub alert_rules: Vec<AlertManagerAdditionalPromRules>,
    /// Extra ServiceMonitor resources to create.
    pub additional_service_monitors: Vec<ServiceMonitor>,
}
|
||||||
|
|
||||||
|
impl PrometheusConfig {
    /// Create a config with the stack's opinionated defaults:
    /// default rules, Alertmanager, and Grafana enabled; the optional
    /// exporters disabled; and no extra receivers, rules, or monitors.
    pub fn new() -> Self {
        Self {
            namespace: None,
            default_rules: true,
            alert_manager: true,
            node_exporter: false,
            kube_state_metrics: false,
            grafana: true,
            prometheus_pushgateway: false,
            alert_receiver_configs: vec![],
            alert_rules: vec![],
            additional_service_monitors: vec![],
        }
    }
}

/// `Default` mirrors `new()` so the type works with struct-update syntax
/// (`..Default::default()`) and `Default`-bounded APIs.
/// Fixes clippy's `new_without_default` lint without changing `new()`.
impl Default for PrometheusConfig {
    fn default() -> Self {
        Self::new()
    }
}
|
||||||
@@ -83,6 +83,7 @@ pub struct Interface {
|
|||||||
pub adv_dhcp_config_advanced: Option<MaybeString>,
|
pub adv_dhcp_config_advanced: Option<MaybeString>,
|
||||||
pub adv_dhcp_config_file_override: Option<MaybeString>,
|
pub adv_dhcp_config_file_override: Option<MaybeString>,
|
||||||
pub adv_dhcp_config_file_override_path: Option<MaybeString>,
|
pub adv_dhcp_config_file_override_path: Option<MaybeString>,
|
||||||
|
pub mtu: Option<u32>,
|
||||||
}
|
}
|
||||||
|
|
||||||
#[cfg(test)]
|
#[cfg(test)]
|
||||||
|
|||||||
@@ -1,6 +1,6 @@
|
|||||||
use crate::HAProxy;
|
use crate::HAProxy;
|
||||||
use crate::{data::dhcpd::DhcpInterface, xml_utils::to_xml_str};
|
use crate::{data::dhcpd::DhcpInterface, xml_utils::to_xml_str};
|
||||||
use log::error;
|
use log::{debug, error};
|
||||||
use uuid::Uuid;
|
use uuid::Uuid;
|
||||||
use yaserde::{MaybeString, NamedList, RawXml};
|
use yaserde::{MaybeString, NamedList, RawXml};
|
||||||
use yaserde_derive::{YaDeserialize, YaSerialize};
|
use yaserde_derive::{YaDeserialize, YaSerialize};
|
||||||
@@ -17,12 +17,12 @@ pub struct OPNsense {
|
|||||||
pub dhcpd: NamedList<DhcpInterface>,
|
pub dhcpd: NamedList<DhcpInterface>,
|
||||||
pub snmpd: Snmpd,
|
pub snmpd: Snmpd,
|
||||||
pub syslog: Syslog,
|
pub syslog: Syslog,
|
||||||
pub nat: Nat,
|
pub nat: Option<Nat>,
|
||||||
pub filter: Filters,
|
pub filter: Filters,
|
||||||
pub load_balancer: Option<LoadBalancer>,
|
pub load_balancer: Option<LoadBalancer>,
|
||||||
pub rrd: Option<RawXml>,
|
pub rrd: Option<RawXml>,
|
||||||
pub ntpd: Ntpd,
|
pub ntpd: Ntpd,
|
||||||
pub widgets: Widgets,
|
pub widgets: Option<Widgets>,
|
||||||
pub revision: Revision,
|
pub revision: Revision,
|
||||||
#[yaserde(rename = "OPNsense")]
|
#[yaserde(rename = "OPNsense")]
|
||||||
pub opnsense: OPNsenseXmlSection,
|
pub opnsense: OPNsenseXmlSection,
|
||||||
@@ -46,10 +46,12 @@ pub struct OPNsense {
|
|||||||
pub pischem: Option<Pischem>,
|
pub pischem: Option<Pischem>,
|
||||||
pub ifgroups: Ifgroups,
|
pub ifgroups: Ifgroups,
|
||||||
pub dnsmasq: Option<RawXml>,
|
pub dnsmasq: Option<RawXml>,
|
||||||
|
pub wizardtemp: Option<RawXml>,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl From<String> for OPNsense {
|
impl From<String> for OPNsense {
|
||||||
fn from(content: String) -> Self {
|
fn from(content: String) -> Self {
|
||||||
|
debug!("XML content: {content}");
|
||||||
yaserde::de::from_str(&content)
|
yaserde::de::from_str(&content)
|
||||||
.map_err(|e| println!("{}", e.to_string()))
|
.map_err(|e| println!("{}", e.to_string()))
|
||||||
.expect("OPNSense received invalid string, should be full XML")
|
.expect("OPNSense received invalid string, should be full XML")
|
||||||
@@ -242,6 +244,7 @@ pub struct Ssh {
|
|||||||
pub passwordauth: u8,
|
pub passwordauth: u8,
|
||||||
pub keysig: MaybeString,
|
pub keysig: MaybeString,
|
||||||
pub permitrootlogin: u8,
|
pub permitrootlogin: u8,
|
||||||
|
pub rekeylimit: MaybeString,
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Default, PartialEq, Debug, YaSerialize, YaDeserialize)]
|
#[derive(Default, PartialEq, Debug, YaSerialize, YaDeserialize)]
|
||||||
@@ -271,6 +274,7 @@ pub struct Group {
|
|||||||
pub member: Vec<u32>,
|
pub member: Vec<u32>,
|
||||||
#[yaserde(rename = "priv")]
|
#[yaserde(rename = "priv")]
|
||||||
pub priv_field: String,
|
pub priv_field: String,
|
||||||
|
pub source_networks: Vec<MaybeString>,
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Default, PartialEq, Debug, YaSerialize, YaDeserialize)]
|
#[derive(Default, PartialEq, Debug, YaSerialize, YaDeserialize)]
|
||||||
@@ -1506,7 +1510,7 @@ pub struct Vlans {
|
|||||||
|
|
||||||
#[derive(Default, PartialEq, Debug, YaSerialize, YaDeserialize)]
|
#[derive(Default, PartialEq, Debug, YaSerialize, YaDeserialize)]
|
||||||
pub struct Bridges {
|
pub struct Bridges {
|
||||||
pub bridged: MaybeString,
|
pub bridged: Option<MaybeString>,
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Default, PartialEq, Debug, YaSerialize, YaDeserialize)]
|
#[derive(Default, PartialEq, Debug, YaSerialize, YaDeserialize)]
|
||||||
|
|||||||
@@ -22,4 +22,4 @@ tokio-util = { version = "0.7.13", features = [ "codec" ] }
|
|||||||
tokio-stream = "0.1.17"
|
tokio-stream = "0.1.17"
|
||||||
|
|
||||||
[dev-dependencies]
|
[dev-dependencies]
|
||||||
pretty_assertions = "1.4.1"
|
pretty_assertions.workspace = true
|
||||||
|
|||||||
Reference in New Issue
Block a user