Compare commits

..

14 Commits

Author SHA1 Message Date
c631b3aef9 fix more opnsense stuff, remove installation notes
All checks were successful
Run Check Script / check (pull_request) Successful in 1m19s
2026-01-22 15:54:19 -05:00
3e2d94cff0 adding Cargo.lock
All checks were successful
Run Check Script / check (pull_request) Successful in 1m19s
2026-01-22 14:54:39 -05:00
c9e39d11ad fix: fix opnsense stuff for opnsense 25.1 test file
Some checks failed
Run Check Script / check (pull_request) Failing after 12s
2026-01-22 14:52:29 -05:00
5ed14b75ed chore: fix formatting 2026-01-22 08:47:56 -05:00
25a45096f8 doc: adding installation notes file 2026-01-21 16:22:59 -05:00
74252ded5c chore: remove useless brocade stuff
Some checks failed
Run Check Script / check (pull_request) Failing after 24s
2026-01-21 15:12:23 -05:00
0ecadbfb97 chore: remove unused import
Some checks failed
Run Check Script / check (pull_request) Failing after 23s
2026-01-21 15:07:35 -05:00
eb492f3ca9 fix: remove double definition of RUST_LOG in env.sh
Some checks failed
Run Check Script / check (pull_request) Failing after 19m39s
2026-01-21 14:02:58 -05:00
de3c8e9a41 adding data symlink
Some checks failed
Run Check Script / check (pull_request) Failing after 31s
2026-01-21 13:59:22 -05:00
2ef2d9f064 Fix HostRole (ControlPlane -> Worker) in workers score, fix main, add topology.rs 2026-01-21 13:56:28 -05:00
d2d18205e9 fix deps in Cargo.toml, create env.sh file 2026-01-21 13:09:26 -05:00
0b55a6fb53 fix: add new xml fields after updating opnsense 2026-01-20 14:27:28 -05:00
001dd5269c add (now commented) line to init env_logger 2026-01-18 10:07:28 -05:00
9978acf16d feat: change staticroutes->route to Option<RawXml> instead of MaybeString 2026-01-18 10:06:15 -05:00
32 changed files with 2711 additions and 635 deletions

100
Cargo.lock generated
View File

@@ -1754,6 +1754,24 @@ dependencies = [
"url",
]
[[package]]
name = "example-ha-cluster"
version = "0.1.0"
dependencies = [
"brocade",
"cidr",
"env_logger",
"harmony",
"harmony_macros",
"harmony_secret",
"harmony_tui",
"harmony_types",
"log",
"serde",
"tokio",
"url",
]
[[package]]
name = "example-kube-rs"
version = "0.1.0"
@@ -1942,9 +1960,28 @@ dependencies = [
"cidr",
"env_logger",
"harmony",
"harmony_cli",
"harmony_macros",
"harmony_secret",
"harmony_tui",
"harmony_types",
"log",
"serde",
"tokio",
"url",
]
[[package]]
name = "example-opnsense-node-exporter"
version = "0.1.0"
dependencies = [
"async-trait",
"cidr",
"env_logger",
"harmony",
"harmony_cli",
"harmony_macros",
"harmony_secret",
"harmony_secret_derive",
"harmony_types",
"log",
"serde",
@@ -1982,25 +2019,6 @@ dependencies = [
"url",
]
[[package]]
name = "example-opnsense-node-exporter"
version = "0.1.0"
dependencies = [
"async-trait",
"cidr",
"env_logger",
"harmony",
"harmony_cli",
"harmony_macros",
"harmony_secret",
"harmony_secret_derive",
"harmony_types",
"log",
"serde",
"tokio",
"url",
]
[[package]]
name = "example-pxe"
version = "0.1.0"
@@ -3464,6 +3482,25 @@ dependencies = [
"thiserror 1.0.69",
]
[[package]]
name = "json-prompt"
version = "0.1.0"
dependencies = [
"brocade",
"cidr",
"env_logger",
"harmony",
"harmony_cli",
"harmony_macros",
"harmony_secret",
"harmony_secret_derive",
"harmony_types",
"log",
"serde",
"tokio",
"url",
]
[[package]]
name = "jsonpath-rust"
version = "0.7.5"
@@ -6062,6 +6099,25 @@ dependencies = [
"syn 2.0.106",
]
[[package]]
name = "sttest"
version = "0.1.0"
dependencies = [
"brocade",
"cidr",
"env_logger",
"harmony",
"harmony_cli",
"harmony_macros",
"harmony_secret",
"harmony_secret_derive",
"harmony_types",
"log",
"serde",
"tokio",
"url",
]
[[package]]
name = "subtle"
version = "2.6.1"
@@ -7357,7 +7413,7 @@ checksum = "cfe53a6657fd280eaa890a3bc59152892ffa3e30101319d168b781ed6529b049"
[[package]]
name = "yaserde"
version = "0.12.0"
source = "git+https://github.com/jggc/yaserde.git#adfdb1c5f4d054f114e5bd0ea7bda9c07a369def"
source = "git+https://github.com/jggc/yaserde.git#2eacb304113beee7270a10b81046d40ed3a99550"
dependencies = [
"log",
"xml-rs",
@@ -7366,7 +7422,7 @@ dependencies = [
[[package]]
name = "yaserde_derive"
version = "0.12.0"
source = "git+https://github.com/jggc/yaserde.git#adfdb1c5f4d054f114e5bd0ea7bda9c07a369def"
source = "git+https://github.com/jggc/yaserde.git#2eacb304113beee7270a10b81046d40ed3a99550"
dependencies = [
"heck",
"log",

View File

@@ -10,7 +10,7 @@ members = [
"opnsense-config",
"opnsense-config-xml",
"harmony_cli",
"harmony_tools",
"k3d",
"harmony_composer",
"harmony_inventory_agent",
"harmony_secret_derive",

View File

@@ -0,0 +1,22 @@
[package]
name = "sttest"
edition = "2024"
version.workspace = true
readme.workspace = true
license.workspace = true
publish = false
[dependencies]
harmony = { path = "../../harmony" }
harmony_cli = { path = "../../harmony_cli" }
harmony_types = { path = "../../harmony_types" }
cidr = { workspace = true }
tokio = { workspace = true }
harmony_macros = { path = "../../harmony_macros" }
harmony_secret = { path = "../../harmony_secret" }
harmony_secret_derive = { path = "../../harmony_secret_derive" }
log = { workspace = true }
env_logger = { workspace = true }
url = { workspace = true }
serde = { workspace = true }
brocade = { path = "../../brocade" }

1
examples/sttest/data Symbolic link
View File

@@ -0,0 +1 @@
../../data/

4
examples/sttest/env.sh Normal file
View File

@@ -0,0 +1,4 @@
# Local environment for the sttest example.
# Namespace under which secrets for this example are stored.
export HARMONY_SECRET_NAMESPACE=sttest0
# Secret store backend: file (secrets kept on local disk).
export HARMONY_SECRET_STORE=file
# Local SQLite database used by harmony for this example.
export HARMONY_DATABASE_URL=sqlite://harmony_sttest0.sqlite
# Default log verbosity; override per-run if more detail is needed.
export RUST_LOG=info

View File

@@ -0,0 +1,41 @@
mod topology;
use crate::topology::{get_inventory, get_topology};
use harmony::{
config::secret::SshKeyPair,
data::{FileContent, FilePath},
modules::{
inventory::HarmonyDiscoveryStrategy,
okd::{installation::OKDInstallationPipeline, ipxe::OKDIpxeScore},
},
score::Score,
topology::HAClusterTopology,
};
use harmony_secret::SecretManager;
/// Entry point for the `sttest` example.
///
/// Builds the lab inventory and topology from the sibling `topology`
/// module, sets up an OKD iPXE boot score, appends the full OKD
/// installation pipeline (host discovery via mDNS), and hands everything
/// to the harmony CLI runner.
#[tokio::main]
async fn main() {
    // env_logger::init();
    let inventory = get_inventory();
    let topology = get_topology().await;
    // Load (or interactively prompt for) the cluster SSH key pair.
    // NOTE(review): unwrap aborts on secret-store failure — acceptable
    // for an example binary.
    let ssh_key = SecretManager::get_or_prompt::<SshKeyPair>().await.unwrap();
    let mut scores: Vec<Box<dyn Score<HAClusterTopology>>> = vec![Box::new(OKDIpxeScore {
        kickstart_filename: "inventory.kickstart".to_string(),
        harmony_inventory_agent: "harmony_inventory_agent".to_string(),
        // Public half of the key pair is written out for the cluster nodes.
        cluster_pubkey: FileContent {
            path: FilePath::Relative("cluster_ssh_key.pub".to_string()),
            content: ssh_key.public,
        },
    })];
    // let mut scores: Vec<Box<dyn Score<HAClusterTopology>>> = vec![];
    // Append every score of the OKD installation pipeline, discovering
    // hosts over mDNS.
    scores
        .append(&mut OKDInstallationPipeline::get_all_scores(HarmonyDiscoveryStrategy::MDNS).await);
    harmony_cli::run(inventory, topology, scores, None)
        .await
        .unwrap();
}

View File

@@ -0,0 +1,99 @@
use cidr::Ipv4Cidr;
use harmony::{
hardware::{Location, SwitchGroup},
infra::{brocade::UnmanagedSwitch, opnsense::OPNSenseManagementInterface},
inventory::Inventory,
topology::{HAClusterTopology, LogicalHost, UnmanagedRouter},
};
use harmony_macros::{ip, ipv4};
use harmony_secret::{Secret, SecretManager};
use serde::{Deserialize, Serialize};
use std::{
net::IpAddr,
sync::{Arc, OnceLock},
};
/// Credentials for the OPNsense firewall, persisted and retrieved through
/// the harmony secret store (see `SecretManager::get_or_prompt` in
/// `get_topology`).
#[derive(Secret, Serialize, Deserialize, Debug, PartialEq)]
struct OPNSenseFirewallConfig {
    // Username used to authenticate against the OPNsense instance.
    username: String,
    // Matching password; kept in the secret store, never hard-coded.
    password: String,
}
/// Builds the HA cluster topology for the sttest lab network
/// (192.168.40.0/24, gateway 192.168.40.1).
///
/// A single OPNsense firewall instance is wired in as every network
/// service of the topology: load balancer, firewall, TFTP, HTTP, DHCP,
/// DNS and node exporter.
///
/// # Panics
/// Panics if the switch connection fails, the firewall credentials cannot
/// be obtained, or CIDR construction fails — acceptable for an example
/// binary.
pub async fn get_topology() -> HAClusterTopology {
    let firewall = harmony::topology::LogicalHost {
        ip: ip!("192.168.40.1"),
        name: String::from("fw0"),
    };
    let switch_client = UnmanagedSwitch::init()
        .await
        .expect("Failed to connect to switch");
    let switch_client = Arc::new(switch_client);
    // Credentials come from the secret store, prompting interactively on
    // first run (see OPNSenseFirewallConfig above).
    let config = SecretManager::get_or_prompt::<OPNSenseFirewallConfig>().await;
    let config = config.unwrap();
    let opnsense = Arc::new(
        harmony::infra::opnsense::OPNSenseFirewall::new(
            firewall,
            None,
            &config.username,
            &config.password,
        )
        .await,
    );
    // Lab subnet and gateway; the firewall host doubles as the gateway.
    let lan_subnet = ipv4!("192.168.40.0");
    let gateway_ipv4 = ipv4!("192.168.40.1");
    let gateway_ip = IpAddr::V4(gateway_ipv4);
    harmony::topology::HAClusterTopology {
        kubeconfig: None,
        domain_name: "sttest0.harmony.mcd".to_string(),
        router: Arc::new(UnmanagedRouter::new(
            gateway_ip,
            Ipv4Cidr::new(lan_subnet, 24).unwrap(),
        )),
        // The single OPNsense box provides all of these services.
        load_balancer: opnsense.clone(),
        firewall: opnsense.clone(),
        tftp_server: opnsense.clone(),
        http_server: opnsense.clone(),
        dhcp_server: opnsense.clone(),
        dns_server: opnsense.clone(),
        // Three control-plane nodes at 192.168.40.20–.22.
        control_plane: vec![
            LogicalHost {
                ip: ip!("192.168.40.20"),
                name: "cp0".to_string(),
            },
            LogicalHost {
                ip: ip!("192.168.40.21"),
                name: "cp1".to_string(),
            },
            LogicalHost {
                ip: ip!("192.168.40.22"),
                name: "cp2".to_string(),
            },
        ],
        bootstrap_host: LogicalHost {
            ip: ip!("192.168.40.10"),
            name: "bootstrap".to_string(),
        },
        // Single worker node in this lab.
        workers: vec![LogicalHost {
            ip: ip!("192.168.40.30"),
            name: "wk0".to_string(),
        }],
        node_exporter: opnsense.clone(),
        switch_client: switch_client.clone(),
        // Left empty here; presumably initialized lazily by consumers of
        // the topology — TODO confirm against HAClusterTopology usage.
        network_manager: OnceLock::new(),
    }
}
/// Returns the static hardware inventory for the sttest lab.
///
/// All host lists start empty; hosts are presumably filled in at runtime
/// via discovery (the mDNS strategy used in `main`) — TODO confirm.
pub fn get_inventory() -> Inventory {
    Inventory {
        location: Location::new("Sylvain's basement".to_string(), "Charlesbourg".to_string()),
        // No managed switches declared for this lab.
        switch: SwitchGroup::from([]),
        firewall_mgmt: Box::new(OPNSenseManagementInterface::new()),
        storage_host: vec![],
        worker_host: vec![],
        control_plane_host: vec![],
    }
}

View File

@@ -9,14 +9,6 @@ license.workspace = true
testing = []
[dependencies]
opnsense-config = { path = "../opnsense-config" }
opnsense-config-xml = { path = "../opnsense-config-xml" }
harmony_macros = { path = "../harmony_macros" }
harmony_types = { path = "../harmony_types" }
harmony_inventory_agent = { path = "../harmony_inventory_agent" }
harmony_secret_derive = { path = "../harmony_secret_derive" }
harmony_secret = { path = "../harmony_secret" }
harmony_tools = { path = "../harmony_tools" }
hex = "0.4"
reqwest = { version = "0.11", features = [
"blocking",
@@ -34,6 +26,10 @@ log.workspace = true
env_logger.workspace = true
async-trait.workspace = true
cidr.workspace = true
opnsense-config = { path = "../opnsense-config" }
opnsense-config-xml = { path = "../opnsense-config-xml" }
harmony_macros = { path = "../harmony_macros" }
harmony_types = { path = "../harmony_types" }
uuid.workspace = true
url.workspace = true
kube = { workspace = true, features = ["derive"] }
@@ -43,6 +39,7 @@ http.workspace = true
serde-value.workspace = true
helm-wrapper-rs = "0.4.0"
non-blank-string-rs = "1.0.4"
k3d-rs = { path = "../k3d" }
directories.workspace = true
lazy_static.workspace = true
dockerfile_builder = "0.1.5"
@@ -74,6 +71,9 @@ base64.workspace = true
thiserror.workspace = true
once_cell = "1.21.3"
walkdir = "2.5.0"
harmony_inventory_agent = { path = "../harmony_inventory_agent" }
harmony_secret_derive = { path = "../harmony_secret_derive" }
harmony_secret = { path = "../harmony_secret" }
askama.workspace = true
sqlx.workspace = true
inquire.workspace = true

View File

@@ -1,11 +0,0 @@
use async_trait::async_trait;
use std::collections::HashMap;
/// Docker Capability
#[async_trait]
pub trait Docker {
async fn ensure_installed(&self) -> Result<(), String>;
fn get_docker_env(&self) -> HashMap<String, String>;
fn docker_command(&self) -> std::process::Command;
}

View File

@@ -16,7 +16,7 @@ use kube::{
Api, AttachParams, DeleteParams, ListParams, ObjectList, Patch, PatchParams, ResourceExt,
},
config::{KubeConfigOptions, Kubeconfig},
core::ErrorResponse,
core::{DynamicResourceScope, ErrorResponse},
discovery::{ApiCapabilities, Scope},
error::DiscoveryError,
runtime::reflector::Lookup,

View File

@@ -1,13 +1,7 @@
use std::{
collections::{BTreeMap, HashMap},
process::Command,
sync::Arc,
time::Duration,
};
use std::{collections::BTreeMap, process::Command, sync::Arc, time::Duration};
use async_trait::async_trait;
use base64::{Engine, engine::general_purpose};
use harmony_tools::K3d;
use harmony_types::rfc1123::Rfc1123Name;
use k8s_openapi::api::{
core::v1::Secret,
@@ -19,12 +13,10 @@ use serde::Serialize;
use tokio::sync::OnceCell;
use crate::{
config::HARMONY_DATA_DIR,
executors::ExecutorError,
interpret::InterpretStatus,
inventory::Inventory,
modules::{
docker::DockerInstallationScore,
k3d::K3DInstallationScore,
k8s::ingress::{K8sIngressScore, PathType},
monitoring::{
@@ -50,7 +42,7 @@ use crate::{
},
},
score::Score,
topology::{Docker, TlsRoute, TlsRouter, ingress::Ingress},
topology::{TlsRoute, TlsRouter, ingress::Ingress},
};
use super::super::{
@@ -358,24 +350,6 @@ impl PrometheusMonitoring<RHOBObservability> for K8sAnywhereTopology {
}
}
#[async_trait]
impl Docker for K8sAnywhereTopology {
async fn ensure_installed(&self) -> Result<(), String> {
DockerInstallationScore::default()
.interpret(&Inventory::empty(), self)
.await
.map_err(|e| format!("Could not ensure docker is installed : {e}"))?;
Ok(())
}
fn get_docker_env(&self) -> HashMap<String, String> {
harmony_tools::Docker::new(HARMONY_DATA_DIR.join("docker")).get_docker_env()
}
fn docker_command(&self) -> std::process::Command {
harmony_tools::Docker::new(HARMONY_DATA_DIR.join("docker")).command()
}
}
impl Serialize for K8sAnywhereTopology {
fn serialize<S>(&self, _serializer: S) -> Result<S::Ok, S::Error>
where
@@ -763,7 +737,7 @@ impl K8sAnywhereTopology {
// K3DInstallationScore should expose a method to get_client ? Not too sure what would be a
// good implementation due to the stateful nature of the k3d thing. Which is why I went
// with this solution for now
let k3d = K3d::new(k3d_score.installation_path, Some(k3d_score.cluster_name));
let k3d = k3d_rs::K3d::new(k3d_score.installation_path, Some(k3d_score.cluster_name));
let state = match k3d.get_client().await {
Ok(client) => K8sState {
client: Arc::new(K8sClient::new(client)),

View File

@@ -1,10 +1,8 @@
mod docker;
mod failover;
mod ha_cluster;
pub mod ingress;
pub mod node_exporter;
pub mod opnsense;
pub use docker::*;
pub use failover::*;
use harmony_types::net::IpAddress;
mod host_binding;

View File

@@ -1,79 +0,0 @@
use std::path::PathBuf;
use async_trait::async_trait;
use log::debug;
use serde::Serialize;
use crate::{
config::HARMONY_DATA_DIR,
data::Version,
interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
inventory::Inventory,
score::Score,
topology::{Docker, Topology},
};
use harmony_types::id::Id;
#[derive(Debug, Clone, Serialize)]
pub struct DockerInstallationScore {
pub installation_path: PathBuf,
}
impl Default for DockerInstallationScore {
fn default() -> Self {
Self {
installation_path: HARMONY_DATA_DIR.join("docker"),
}
}
}
impl<T: Topology + Docker> Score<T> for DockerInstallationScore {
fn create_interpret(&self) -> Box<dyn Interpret<T>> {
Box::new(DockerInstallationInterpret {
score: self.clone(),
})
}
fn name(&self) -> String {
"DockerInstallationScore".into()
}
}
#[derive(Debug)]
pub struct DockerInstallationInterpret {
score: DockerInstallationScore,
}
#[async_trait]
impl<T: Topology + Docker> Interpret<T> for DockerInstallationInterpret {
async fn execute(
&self,
_inventory: &Inventory,
_topology: &T,
) -> Result<Outcome, InterpretError> {
let docker = harmony_tools::Docker::new(self.score.installation_path.clone());
match docker.ensure_installed().await {
Ok(_) => {
let msg = "Docker is installed and ready".to_string();
debug!("{msg}");
Ok(Outcome::success(msg))
}
Err(msg) => Err(InterpretError::new(format!(
"failed to ensure docker is installed : {msg}"
))),
}
}
fn get_name(&self) -> InterpretName {
InterpretName::Custom("DockerInstallation")
}
fn get_version(&self) -> Version {
todo!()
}
fn get_status(&self) -> InterpretStatus {
todo!()
}
fn get_children(&self) -> Vec<Id> {
todo!()
}
}

View File

@@ -1,7 +1,6 @@
use std::path::PathBuf;
use async_trait::async_trait;
use harmony_tools::K3d;
use log::debug;
use serde::Serialize;
@@ -11,7 +10,7 @@ use crate::{
interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
inventory::Inventory,
score::Score,
topology::{Docker, Topology},
topology::Topology,
};
use harmony_types::id::Id;
@@ -30,7 +29,7 @@ impl Default for K3DInstallationScore {
}
}
impl<T: Topology + Docker> Score<T> for K3DInstallationScore {
impl<T: Topology> Score<T> for K3DInstallationScore {
fn create_interpret(&self) -> Box<dyn Interpret<T>> {
Box::new(K3dInstallationInterpret {
score: self.clone(),
@@ -48,25 +47,19 @@ pub struct K3dInstallationInterpret {
}
#[async_trait]
impl<T: Topology + Docker> Interpret<T> for K3dInstallationInterpret {
impl<T: Topology> Interpret<T> for K3dInstallationInterpret {
async fn execute(
&self,
_inventory: &Inventory,
topology: &T,
_topology: &T,
) -> Result<Outcome, InterpretError> {
let k3d = K3d::new(
let k3d = k3d_rs::K3d::new(
self.score.installation_path.clone(),
Some(self.score.cluster_name.clone()),
);
Docker::ensure_installed(topology)
.await
.map_err(|e| InterpretError::new(format!("Docker requirement for k3d failed: {e}")))?;
match k3d.ensure_installed().await {
Ok(_client) => {
// Ensure Docker is also ready as k3d depends on it
let msg = format!("k3d cluster '{}' installed ", self.score.cluster_name);
debug!("{msg}");
Ok(Outcome::success(msg))

View File

@@ -4,7 +4,6 @@ pub mod brocade;
pub mod cert_manager;
pub mod dhcp;
pub mod dns;
pub mod docker;
pub mod dummy;
pub mod helm;
pub mod http;

View File

@@ -22,7 +22,7 @@ pub struct OKDSetup04WorkersScore {
impl Score<HAClusterTopology> for OKDSetup04WorkersScore {
fn create_interpret(&self) -> Box<dyn Interpret<HAClusterTopology>> {
Box::new(OKDNodeInterpret::new(
HostRole::ControlPlane,
HostRole::Worker,
self.discovery_strategy.clone(),
))
}

View File

@@ -1,326 +0,0 @@
use crate::downloadable_asset::DownloadableAsset;
use inquire::Select;
use log::{debug, error, info, trace, warn};
use std::collections::HashMap;
use std::fmt;
use std::path::PathBuf;
use url::Url;
pub struct Docker {
base_dir: PathBuf,
}
#[derive(Debug, PartialEq)]
pub enum DockerVariant {
Standard,
Rootless,
Manual,
}
impl fmt::Display for DockerVariant {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
DockerVariant::Standard => write!(f, "Standard Docker (requires sudo)"),
DockerVariant::Rootless => write!(f, "Rootless Docker (no sudo required)"),
DockerVariant::Manual => {
write!(f, "Exit and install manually (Docker or podman-docker)")
}
}
}
}
impl Docker {
pub fn new(base_dir: PathBuf) -> Self {
Self { base_dir }
}
/// Provides the DOCKER_HOST and DOCKER_SOCK env vars for local usage.
///
/// If a rootless Docker installation is detected in the user's home directory,
/// it returns the appropriate `DOCKER_HOST` pointing to the user's Docker socket.
/// Otherwise, it returns an empty HashMap, assuming the standard system-wide
/// Docker installation is used.
pub fn get_docker_env(&self) -> HashMap<String, String> {
let mut env = HashMap::new();
if let Ok(home) = std::env::var("HOME") {
let rootless_sock = PathBuf::from(&home).join(".docker/run/docker.sock");
let rootless_bin = PathBuf::from(&home).join("bin/docker");
if rootless_bin.exists() && rootless_sock.exists() {
let docker_host = format!("unix://{}", rootless_sock.display());
debug!(
"Detected rootless Docker, setting DOCKER_HOST={}",
docker_host
);
env.insert("DOCKER_HOST".to_string(), docker_host);
}
}
env
}
/// Gets the path to the docker binary
pub fn get_bin_path(&self) -> PathBuf {
// Check standard PATH first
if let Ok(path) = std::process::Command::new("which")
.arg("docker")
.output()
.map(|o| PathBuf::from(String::from_utf8_lossy(&o.stdout).trim()))
{
if path.exists() {
debug!("Found Docker in PATH: {:?}", path);
return path;
}
}
// Check common rootless location
if let Ok(home) = std::env::var("HOME") {
let rootless_path = PathBuf::from(home).join("bin/docker");
if rootless_path.exists() {
debug!("Found rootless Docker at: {:?}", rootless_path);
return rootless_path;
}
}
debug!("Docker not found in PATH or rootless location, using 'docker' from PATH");
PathBuf::from("docker")
}
/// Checks if Docker is installed and the daemon is responsive.
pub fn is_installed(&self) -> bool {
trace!("Checking if Docker is installed and responsive");
self.command()
.arg("info")
.output()
.map(|output| {
if output.status.success() {
trace!("Docker daemon is responsive");
true
} else {
trace!(
"Docker daemon check failed with status: {:?}",
output.status
);
false
}
})
.map_err(|e| {
trace!("Failed to execute Docker daemon check: {}", e);
e
})
.unwrap_or(false)
}
/// Prompts the user to choose an installation method
fn prompt_for_installation(&self) -> DockerVariant {
let options = vec![
DockerVariant::Standard,
DockerVariant::Rootless,
DockerVariant::Manual,
];
Select::new(
"Docker binary was not found. How would you like to proceed?",
options,
)
.with_help_message("Standard requires sudo. Rootless runs in user space.")
.prompt()
.unwrap_or(DockerVariant::Manual)
}
/// Installs docker using the official shell script
pub async fn install(&self, variant: DockerVariant) -> Result<(), String> {
let (script_url, script_name, use_sudo) = match variant {
DockerVariant::Standard => ("https://get.docker.com", "get-docker.sh", true),
DockerVariant::Rootless => (
"https://get.docker.com/rootless",
"get-docker-rootless.sh",
false,
),
DockerVariant::Manual => return Err("Manual installation selected".to_string()),
};
info!("Installing {}...", variant);
debug!("Downloading installation script from: {}", script_url);
// Download the installation script
let asset = DownloadableAsset {
url: Url::parse(script_url).map_err(|e| {
error!("Failed to parse installation script URL: {}", e);
format!("Failed to parse installation script URL: {}", e)
})?,
file_name: script_name.to_string(),
checksum: None,
};
let downloaded_script = asset
.download_to_path(self.base_dir.join("scripts"))
.await
.map_err(|e| {
error!("Failed to download installation script: {}", e);
format!("Failed to download installation script: {}", e)
})?;
debug!("Installation script downloaded to: {:?}", downloaded_script);
// Execute the installation script
let mut cmd = std::process::Command::new("sh");
if use_sudo {
cmd.arg("sudo").arg("sh");
}
cmd.arg(&downloaded_script);
debug!("Executing installation command: {:?}", cmd);
let status = cmd.status().map_err(|e| {
error!("Failed to execute docker installation script: {}", e);
format!("Failed to execute docker installation script: {}", e)
})?;
if status.success() {
info!("{} installed successfully", variant);
if variant == DockerVariant::Rootless {
info!("Running rootless setup tool to install dependencies and start service...");
let mut setup_cmd = std::process::Command::new("sh");
// Set PATH to include ~/bin where the script was likely installed
if let Ok(home) = std::env::var("HOME") {
let bin_path = format!("{}/bin", home);
if let Ok(current_path) = std::env::var("PATH") {
setup_cmd.env("PATH", format!("{}:{}", bin_path, current_path));
}
setup_cmd.arg(format!("{}/bin/dockerd-rootless-setuptool.sh", home));
} else {
setup_cmd.arg("dockerd-rootless-setuptool.sh");
}
setup_cmd.arg("install");
debug!("Executing rootless setup command: {:?}", setup_cmd);
let setup_status = setup_cmd.status().map_err(|e| {
error!("Failed to execute rootless setup tool: {}", e);
format!("Failed to execute rootless setup tool: {}", e)
})?;
if !setup_status.success() {
warn!("Rootless setup tool finished with non-zero exit code. You may need to install 'uidmap' or start the service manually.");
}
warn!("Please follow the instructions above to finish rootless setup (environment variables).");
}
// Validate the installation by running hello-world
self.validate_installation()?;
Ok(())
} else {
error!(
"{} installation script failed with exit code: {:?} \n\nOutput:\n{:?}",
variant,
status.code(),
cmd.output(),
);
Err(format!("{} installation script failed", variant))
}
}
/// Validates the Docker installation by running a test container.
///
/// This method runs `docker run --rm hello-world` to verify that Docker
/// is properly installed and functional.
fn validate_installation(&self) -> Result<(), String> {
info!("Validating Docker installation by running hello-world container...");
let output = self
.command()
.args(["run", "--rm", "hello-world"])
.output()
.map_err(|e| {
error!("Failed to execute hello-world validation: {}", e);
format!("Failed to execute hello-world validation: {}", e)
})?;
if output.status.success() {
let stdout = String::from_utf8_lossy(&output.stdout);
if stdout.contains("Hello from Docker!") {
info!("Docker installation validated successfully");
trace!("Validation output: {}", stdout);
Ok(())
} else {
warn!("Hello-world container ran but expected output not found");
debug!("Output was: {}", stdout);
Err("Docker validation failed: unexpected output from hello-world".to_string())
}
} else {
let stderr = String::from_utf8_lossy(&output.stderr);
error!(
"Hello-world validation failed with exit code: {:?}",
output.status.code()
);
debug!("Validation stderr: {}", stderr);
if !stderr.is_empty() {
Err(format!("Docker validation failed: {}", stderr.trim()))
} else {
Err(
"Docker validation failed: hello-world container did not run successfully"
.to_string(),
)
}
}
}
/// Ensures docker is installed, prompting if necessary
pub async fn ensure_installed(&self) -> Result<(), String> {
if self.is_installed() {
debug!("Docker is already installed at: {:?}", self.get_bin_path());
return Ok(());
}
debug!("Docker is not installed, prompting for installation method");
match self.prompt_for_installation() {
DockerVariant::Manual => {
info!("User chose manual installation");
Err("Docker installation cancelled by user. Please install docker or podman-docker manually.".to_string())
}
variant => self.install(variant).await,
}
}
/// Creates a pre-configured Command for running Docker commands.
///
/// The returned Command is set up with:
/// - The correct Docker binary path (handles rootless installations)
/// - Appropriate environment variables (e.g., DOCKER_HOST for rootless)
///
/// # Example
///
/// ```no_run
/// # use harmony_tools::Docker;
/// # use std::path::PathBuf;
/// # let docker = Docker::new(PathBuf::from("."));
/// let mut cmd = docker.command();
/// cmd.args(["ps", "-a"]);
/// // Now cmd is ready to be executed
/// ```
pub fn command(&self) -> std::process::Command {
let bin_path = self.get_bin_path();
trace!("Creating Docker command with binary: {:?}", bin_path);
let mut cmd = std::process::Command::new(&bin_path);
// Add Docker-specific environment variables
let env = self.get_docker_env();
if !env.is_empty() {
trace!("Setting Docker environment variables: {:?}", env);
for (key, value) in env {
cmd.env(key, value);
}
} else {
trace!("No Docker-specific environment variables to set");
}
cmd
}
}

View File

@@ -1,6 +0,0 @@
mod docker;
mod downloadable_asset;
mod k3d;
pub use docker::*;
use downloadable_asset::*;
pub use k3d::*;

View File

@@ -1,6 +1,5 @@
[package]
name = "harmony_tools"
description = "Install tools such as k3d, docker and more"
name = "k3d-rs"
edition = "2021"
version.workspace = true
readme.workspace = true
@@ -17,7 +16,6 @@ url.workspace = true
sha2 = "0.10.8"
futures-util = "0.3.31"
kube.workspace = true
inquire.workspace = true
[dev-dependencies]
env_logger = { workspace = true }

View File

@@ -39,20 +39,11 @@ const CHECKSUM_FAILED_MSG: &str = "Downloaded file failed checksum verification"
pub(crate) struct DownloadableAsset {
pub(crate) url: Url,
pub(crate) file_name: String,
pub(crate) checksum: Option<String>,
pub(crate) checksum: String,
}
impl DownloadableAsset {
fn verify_checksum(&self, file: PathBuf) -> bool {
// Skip verification if no checksum is provided
let expected_checksum = match &self.checksum {
Some(checksum) => checksum,
None => {
debug!("No checksum provided, skipping verification");
return file.exists();
}
};
if !file.exists() {
debug!("File does not exist: {:?}", file);
return false;
@@ -85,10 +76,10 @@ impl DownloadableAsset {
let result = hasher.finalize();
let calculated_hash = format!("{:x}", result);
debug!("Expected checksum: {}", expected_checksum);
debug!("Expected checksum: {}", self.checksum);
debug!("Calculated checksum: {}", calculated_hash);
calculated_hash == *expected_checksum
calculated_hash == self.checksum
}
/// Downloads the asset to the specified directory, verifying its checksum.
@@ -160,8 +151,7 @@ impl DownloadableAsset {
file.flush().await.expect("Failed to flush file");
drop(file);
// Only verify checksum if one was provided
if self.checksum.is_some() && !self.verify_checksum(target_file_path.clone()) {
if !self.verify_checksum(target_file_path.clone()) {
return Err(CHECKSUM_FAILED_MSG.to_string());
}
@@ -212,7 +202,7 @@ mod tests {
let asset = DownloadableAsset {
url: Url::parse(&server.url("/test.txt").to_string()).unwrap(),
file_name: "test.txt".to_string(),
checksum: Some(TEST_CONTENT_HASH.to_string()),
checksum: TEST_CONTENT_HASH.to_string(),
};
let result = asset
@@ -236,7 +226,7 @@ mod tests {
let asset = DownloadableAsset {
url: Url::parse(&server.url("/test.txt").to_string()).unwrap(),
file_name: "test.txt".to_string(),
checksum: Some(TEST_CONTENT_HASH.to_string()),
checksum: TEST_CONTENT_HASH.to_string(),
};
let target_file_path = folder.join(&asset.file_name);
@@ -258,7 +248,7 @@ mod tests {
let asset = DownloadableAsset {
url: Url::parse(&server.url("/test.txt").to_string()).unwrap(),
file_name: "test.txt".to_string(),
checksum: Some(TEST_CONTENT_HASH.to_string()),
checksum: TEST_CONTENT_HASH.to_string(),
};
let result = asset.download_to_path(folder.join("error")).await;
@@ -279,7 +269,7 @@ mod tests {
let asset = DownloadableAsset {
url: Url::parse(&server.url("/test.txt").to_string()).unwrap(),
file_name: "test.txt".to_string(),
checksum: Some(TEST_CONTENT_HASH.to_string()),
checksum: TEST_CONTENT_HASH.to_string(),
};
let join_handle =
@@ -303,58 +293,11 @@ mod tests {
let asset = DownloadableAsset {
url: Url::parse(&server.url("/specific/path.txt").to_string()).unwrap(),
file_name: "path.txt".to_string(),
checksum: Some(TEST_CONTENT_HASH.to_string()),
checksum: TEST_CONTENT_HASH.to_string(),
};
let result = asset.download_to_path(folder).await.unwrap();
let downloaded_content = std::fs::read_to_string(result).unwrap();
assert_eq!(downloaded_content, TEST_CONTENT);
}
#[tokio::test]
async fn test_download_without_checksum() {
let (folder, server) = setup_test();
server.expect(
Expectation::matching(matchers::any())
.respond_with(responders::status_code(200).body(TEST_CONTENT)),
);
let asset = DownloadableAsset {
url: Url::parse(&server.url("/test.txt").to_string()).unwrap(),
file_name: "test.txt".to_string(),
checksum: None,
};
let result = asset
.download_to_path(folder.join("no_checksum"))
.await
.unwrap();
let downloaded_content = std::fs::read_to_string(result).unwrap();
assert_eq!(downloaded_content, TEST_CONTENT);
}
#[tokio::test]
async fn test_download_without_checksum_already_exists() {
let (folder, server) = setup_test();
server.expect(
Expectation::matching(matchers::any())
.times(0)
.respond_with(responders::status_code(200).body(TEST_CONTENT)),
);
let asset = DownloadableAsset {
url: Url::parse(&server.url("/test.txt").to_string()).unwrap(),
file_name: "test.txt".to_string(),
checksum: None,
};
let target_file_path = folder.join(&asset.file_name);
std::fs::write(&target_file_path, TEST_CONTENT).unwrap();
let result = asset.download_to_path(folder).await.unwrap();
let content = std::fs::read_to_string(result).unwrap();
assert_eq!(content, TEST_CONTENT);
}
}

View File

@@ -1,9 +1,10 @@
mod downloadable_asset;
use downloadable_asset::*;
use kube::Client;
use log::{debug, info};
use std::{ffi::OsStr, path::PathBuf};
use crate::downloadable_asset::DownloadableAsset;
const K3D_BIN_FILE_NAME: &str = "k3d";
pub struct K3d {
@@ -77,7 +78,6 @@ impl K3d {
debug!("Found binary at {} with checksum {}", binary_url, checksum);
let checksum = Some(checksum);
DownloadableAsset {
url: binary_url,
file_name: K3D_BIN_FILE_NAME.to_string(),
@@ -399,7 +399,7 @@ mod test {
use regex::Regex;
use std::path::PathBuf;
use crate::{k3d::K3D_BIN_FILE_NAME, K3d};
use crate::{K3d, K3D_BIN_FILE_NAME};
#[tokio::test]
async fn k3d_latest_release_should_get_latest() {

View File

@@ -9,6 +9,7 @@ license.workspace = true
serde = { version = "1.0.123", features = [ "derive" ] }
log = { workspace = true }
env_logger = { workspace = true }
#yaserde = { path = "../../yaserde/yaserde" }
yaserde = { git = "https://github.com/jggc/yaserde.git" }
yaserde_derive = { git = "https://github.com/jggc/yaserde.git" }
xml-rs = "0.8"

View File

@@ -8,6 +8,8 @@ pub struct Pischem {
// Caddy plugin configuration section of the OPNsense config XML.
#[derive(Default, PartialEq, Debug, YaSerialize, YaDeserialize)]
pub struct Caddy {
// Serialized as an XML attribute (`version="…"`); `Option` because older
// configs may omit it.
#[yaserde(attribute = true)]
pub version: Option<String>,
pub general: CaddyGeneral,
// NOTE(review): MaybeString presumably tolerates empty/self-closing
// elements here — confirm against the MaybeString definition.
pub reverseproxy: MaybeString,
}

View File

@@ -8,6 +8,8 @@ pub struct DnsMasq {
pub version: String,
#[yaserde(attribute = true)]
pub persisted_at: Option<String>,
#[yaserde(attribute = true)]
pub description: Option<String>,
pub enable: u8,
pub regdhcp: u8,
@@ -23,7 +25,7 @@ pub struct DnsMasq {
pub dnssec: u8,
pub regdhcpdomain: MaybeString,
pub interface: Option<String>,
pub port: Option<u32>,
pub port: Option<MaybeString>,
pub dns_forward_max: MaybeString,
pub cache_size: MaybeString,
pub local_ttl: MaybeString,
@@ -73,6 +75,8 @@ pub struct Dhcp {
pub reply_delay: MaybeString,
pub enable_ra: u8,
pub nosync: u8,
pub log_dhcp: Option<u8>,
pub log_quiet: Option<u8>,
}
// Represents a single <dhcp_ranges> element.

View File

@@ -598,7 +598,7 @@ pub struct HAProxyServer {
pub ssl_client_certificate: MaybeString,
#[yaserde(rename = "maxConnections")]
pub max_connections: MaybeString,
pub weight: Option<u32>,
pub weight: Option<MaybeString>,
#[yaserde(rename = "checkInterval")]
pub check_interval: MaybeString,
#[yaserde(rename = "checkDownInterval")]

View File

@@ -30,6 +30,7 @@ pub struct OPNsense {
pub staticroutes: StaticRoutes,
pub ca: MaybeString,
pub gateways: Option<RawXml>,
pub hostwatch: Option<RawXml>,
pub cert: Vec<Cert>,
pub dhcpdv6: DhcpDv6,
pub virtualip: VirtualIp,
@@ -162,11 +163,15 @@ pub struct Username {
// `<sysctl>` section of the OPNsense config XML: a versioned list of
// kernel-tunable items.
#[derive(Default, PartialEq, Debug, YaSerialize, YaDeserialize)]
pub struct Sysctl {
// Serialized as an XML attribute (`version="…"`); `Option` because older
// configs may omit it.
#[yaserde(attribute = true)]
pub version: Option<String>,
// One entry per `<item>` child element.
pub item: Vec<SysctlItem>,
}
#[derive(Default, PartialEq, Debug, YaSerialize, YaDeserialize)]
pub struct SysctlItem {
#[yaserde(attribute = true)]
pub uuid: Option<String>,
pub descr: Option<MaybeString>,
pub tunable: Option<String>,
pub value: Option<MaybeString>,
@@ -174,6 +179,8 @@ pub struct SysctlItem {
#[derive(Default, PartialEq, Debug, YaSerialize, YaDeserialize)]
pub struct System {
#[yaserde(attribute = true)]
pub uuid: Option<String>,
pub use_mfs_tmp: Option<MaybeString>,
pub use_mfs_var: Option<MaybeString>,
pub serialspeed: u32,
@@ -268,6 +275,8 @@ pub struct Bogons {
#[derive(Default, PartialEq, Debug, YaSerialize, YaDeserialize)]
pub struct Group {
#[yaserde(attribute = true)]
pub uuid: Option<String>,
pub name: String,
pub description: Option<String>,
pub scope: String,
@@ -280,6 +289,8 @@ pub struct Group {
#[derive(Default, PartialEq, Debug, YaSerialize, YaDeserialize)]
pub struct User {
#[yaserde(attribute = true)]
pub uuid: Option<String>,
pub name: String,
pub descr: MaybeString,
pub scope: String,
@@ -463,6 +474,8 @@ pub struct OPNsenseXmlSection {
pub openvpn: ConfigOpenVPN,
#[yaserde(rename = "Gateways")]
pub gateways: RawXml,
#[yaserde(rename = "Hostwatch")]
pub hostwatch: Option<RawXml>,
#[yaserde(rename = "HAProxy")]
pub haproxy: Option<HAProxy>,
}
@@ -1143,9 +1156,9 @@ pub struct UnboundGeneral {
pub dns64: MaybeString,
pub dns64prefix: MaybeString,
pub noarecords: MaybeString,
pub regdhcp: Option<i8>,
pub regdhcp: Option<MaybeString>,
pub regdhcpdomain: MaybeString,
pub regdhcpstatic: Option<i8>,
pub regdhcpstatic: Option<MaybeString>,
pub noreglladdr6: MaybeString,
pub noregrecords: MaybeString,
pub txtsupport: MaybeString,
@@ -1153,27 +1166,27 @@ pub struct UnboundGeneral {
pub local_zone_type: String,
pub outgoing_interface: MaybeString,
pub enable_wpad: MaybeString,
pub safesearch: MaybeString,
pub safesearch: Option<MaybeString>,
}
#[derive(Default, PartialEq, Debug, YaSerialize, YaDeserialize)]
pub struct Advanced {
pub hideidentity: Option<i8>,
pub hideversion: Option<i8>,
pub prefetch: Option<i8>,
pub prefetchkey: Option<i8>,
pub dnssecstripped: Option<i8>,
pub hideidentity: Option<MaybeString>,
pub hideversion: Option<MaybeString>,
pub prefetch: Option<MaybeString>,
pub prefetchkey: Option<MaybeString>,
pub dnssecstripped: Option<MaybeString>,
pub aggressivensec: Option<i8>,
pub serveexpired: Option<i8>,
pub serveexpired: Option<MaybeString>,
pub serveexpiredreplyttl: MaybeString,
pub serveexpiredttl: MaybeString,
pub serveexpiredttlreset: Option<i32>,
pub serveexpiredttlreset: Option<MaybeString>,
pub serveexpiredclienttimeout: MaybeString,
pub qnameminstrict: Option<i32>,
pub extendedstatistics: Option<i32>,
pub logqueries: Option<i32>,
pub logreplies: Option<i32>,
pub logtagqueryreply: Option<i32>,
pub qnameminstrict: Option<MaybeString>,
pub extendedstatistics: Option<MaybeString>,
pub logqueries: Option<MaybeString>,
pub logreplies: Option<MaybeString>,
pub logtagqueryreply: Option<MaybeString>,
pub logservfail: MaybeString,
pub loglocalactions: MaybeString,
pub logverbosity: i32,
@@ -1216,12 +1229,12 @@ pub struct Dnsbl {
pub blocklists: Option<MaybeString>,
pub wildcards: Option<MaybeString>,
pub address: Option<MaybeString>,
pub nxdomain: Option<i32>,
pub nxdomain: Option<MaybeString>,
}
#[derive(Default, PartialEq, Debug, YaSerialize, YaDeserialize)]
pub struct Forwarding {
pub enabled: Option<i32>,
pub enabled: Option<MaybeString>,
}
#[derive(Default, PartialEq, Debug, YaSerialize, YaDeserialize)]
@@ -1243,7 +1256,7 @@ pub struct Host {
pub ttl: Option<MaybeString>,
pub server: String,
pub description: Option<String>,
pub txtdata: MaybeString,
pub txtdata: Option<MaybeString>,
}
impl Host {
@@ -1259,7 +1272,7 @@ impl Host {
ttl: Some(MaybeString::default()),
mx: MaybeString::default(),
description: None,
txtdata: MaybeString::default(),
txtdata: Some(MaybeString::default()),
}
}
}
@@ -1421,7 +1434,7 @@ pub struct StaticRoutes {
#[yaserde(attribute = true)]
pub version: String,
#[yaserde(rename = "route")]
pub route: Option<MaybeString>,
pub route: Option<RawXml>,
}
#[derive(Default, PartialEq, Debug, YaSerialize, YaDeserialize)]

View File

@@ -234,14 +234,15 @@ mod tests {
#[tokio::test]
async fn test_load_config_from_local_file() {
for path in [
// "src/tests/data/config-opnsense-25.1.xml",
// "src/tests/data/config-vm-test.xml",
"src/tests/data/config-opnsense-25.1.xml",
"src/tests/data/config-vm-test.xml",
"src/tests/data/config-structure.xml",
"src/tests/data/config-full-1.xml",
// "src/tests/data/config-full-ncd0.xml",
// "src/tests/data/config-full-25.7.xml",
// "src/tests/data/config-full-25.7-dummy-dnsmasq-options.xml",
"src/tests/data/config-25.7-dnsmasq-static-host.xml",
"src/tests/data/config-full-25.7.11_2.xml",
] {
let mut test_file_path = PathBuf::from(env!("CARGO_MANIFEST_DIR"));
test_file_path.push(path);

View File

@@ -1,4 +1,4 @@
use opnsense_config_xml::{Host, OPNsense};
use opnsense_config_xml::{Host, MaybeString, OPNsense};
pub struct UnboundDnsConfig<'a> {
opnsense: &'a mut OPNsense,
@@ -31,7 +31,8 @@ impl<'a> UnboundDnsConfig<'a> {
None => todo!("Handle case where unboundplus is not used"),
};
unbound.general.regdhcp = Some(register as i8);
unbound.general.regdhcpstatic = Some(register as i8);
unbound.general.regdhcp = Some(MaybeString::from_bool_as_int("regdhcp", register));
unbound.general.regdhcpstatic =
Some(MaybeString::from_bool_as_int("regdhcpstatic", register));
}
}

File diff suppressed because it is too large Load Diff

View File

@@ -271,7 +271,6 @@
</firmware>
<language>en_US</language>
<dnsserver>1.1.1.1</dnsserver>
<dnsserver>8.8.8.8</dnsserver>
<dns1gw>none</dns1gw>
<dns2gw>none</dns2gw>
<dns3gw>none</dns3gw>

View File

@@ -30,28 +30,17 @@
<item uuid="b6b18051-830f-4b27-81ec-f772b14681e2">
<tunable>net.inet.ip.sourceroute</tunable>
<value>default</value>
<descr>
Source routing is another way for an attacker to try to reach non-routable addresses behind your box.
It can also be used to probe for information about your internal networks. These functions come enabled
as part of the standard FreeBSD core system.
</descr>
<descr>Source routing is another way for an attacker to try to reach non-routable addresses behind your box. It can also be used to probe for information about your internal networks. These functions come enabled as part of the standard FreeBSD core system.</descr>
</item>
<item uuid="ea21409c-62d6-4040-aa2b-36bd01af5578">
<tunable>net.inet.ip.accept_sourceroute</tunable>
<value>default</value>
<descr>
Source routing is another way for an attacker to try to reach non-routable addresses behind your box.
It can also be used to probe for information about your internal networks. These functions come enabled
as part of the standard FreeBSD core system.
</descr>
<descr>Source routing is another way for an attacker to try to reach non-routable addresses behind your box. It can also be used to probe for information about your internal networks. These functions come enabled as part of the standard FreeBSD core system.</descr>
</item>
<item uuid="1613256c-ef7e-4b53-a44c-234440046293">
<tunable>net.inet.icmp.log_redirect</tunable>
<value>default</value>
<descr>
This option turns off the logging of redirect packets because there is no limit and this could fill
up your logs consuming your whole hard drive.
</descr>
<descr>This option turns off the logging of redirect packets because there is no limit and this could fill up your logs consuming your whole hard drive.</descr>
</item>
<item uuid="1ba88c72-6e5b-4f19-abba-351c2b76d5dc">
<tunable>net.inet.tcp.drop_synfin</tunable>
@@ -181,9 +170,7 @@
<item uuid="2c42ae2f-a7bc-48cb-b27d-db72e738e80b">
<tunable>net.inet.ip.redirect</tunable>
<value>default</value>
<descr>Enable/disable sending of ICMP redirects in response to IP packets for which a better,
and for the sender directly reachable, route and next hop is known.
</descr>
<descr>Enable/disable sending of ICMP redirects in response to IP packets for which a better, and for the sender directly reachable, route and next hop is known.</descr>
</item>
<item uuid="7d315fb1-c638-4b79-9f6c-240b41e6d643">
<tunable>net.local.dgram.maxdgram</tunable>
@@ -938,4 +925,3 @@
</cert>
<syslog/>
</opnsense>

View File

@@ -28,28 +28,17 @@
<value>default</value>
</item>
<item>
<descr>
Source routing is another way for an attacker to try to reach non-routable addresses behind your box.
It can also be used to probe for information about your internal networks. These functions come enabled
as part of the standard FreeBSD core system.
</descr>
<descr>Source routing is another way for an attacker to try to reach non-routable addresses behind your box. It can also be used to probe for information about your internal networks. These functions come enabled as part of the standard FreeBSD core system.</descr>
<tunable>net.inet.ip.sourceroute</tunable>
<value>default</value>
</item>
<item>
<descr>
Source routing is another way for an attacker to try to reach non-routable addresses behind your box.
It can also be used to probe for information about your internal networks. These functions come enabled
as part of the standard FreeBSD core system.
</descr>
<descr>Source routing is another way for an attacker to try to reach non-routable addresses behind your box. It can also be used to probe for information about your internal networks. These functions come enabled as part of the standard FreeBSD core system.</descr>
<tunable>net.inet.ip.accept_sourceroute</tunable>
<value>default</value>
</item>
<item>
<descr>
This option turns off the logging of redirect packets because there is no limit and this could fill
up your logs consuming your whole hard drive.
</descr>
<descr>This option turns off the logging of redirect packets because there is no limit and this could fill up your logs consuming your whole hard drive.</descr>
<tunable>net.inet.icmp.log_redirect</tunable>
<value>default</value>
</item>
@@ -179,9 +168,7 @@
<value>default</value>
</item>
<item>
<descr>Enable/disable sending of ICMP redirects in response to IP packets for which a better,
and for the sender directly reachable, route and next hop is known.
</descr>
<descr>Enable/disable sending of ICMP redirects in response to IP packets for which a better, and for the sender directly reachable, route and next hop is known.</descr>
<tunable>net.inet.ip.redirect</tunable>
<value>default</value>
</item>